// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc v5.27.2
// source: flow/flow.proto
package flow
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type FlowType int32
const (
FlowType_UNKNOWN_TYPE FlowType = 0
FlowType_L3_L4 FlowType = 1 // not sure about the underscore here, but `L34` also reads strangely
FlowType_L7 FlowType = 2
FlowType_SOCK FlowType = 3
)
// Enum value maps for FlowType.
var (
FlowType_name = map[int32]string{
0: "UNKNOWN_TYPE",
1: "L3_L4",
2: "L7",
3: "SOCK",
}
FlowType_value = map[string]int32{
"UNKNOWN_TYPE": 0,
"L3_L4": 1,
"L7": 2,
"SOCK": 3,
}
)
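// Illustrative sketch (not part of the generated code): the enum value maps
// above can be used to translate between symbolic names and wire numbers. The
// helper name flowTypeFromName is hypothetical.
func flowTypeFromName(name string) (FlowType, bool) {
	// Unknown names map to 0, i.e. FlowType_UNKNOWN_TYPE.
	v, ok := FlowType_value[name]
	return FlowType(v), ok
}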
func (x FlowType) Enum() *FlowType {
p := new(FlowType)
*p = x
return p
}
func (x FlowType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (FlowType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[0].Descriptor()
}
func (FlowType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[0]
}
func (x FlowType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use FlowType.Descriptor instead.
func (FlowType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{0}
}
// These types correspond to definitions in pkg/policy/l4.go.
type AuthType int32
const (
AuthType_DISABLED AuthType = 0
AuthType_SPIRE AuthType = 1
AuthType_TEST_ALWAYS_FAIL AuthType = 2
)
// Enum value maps for AuthType.
var (
AuthType_name = map[int32]string{
0: "DISABLED",
1: "SPIRE",
2: "TEST_ALWAYS_FAIL",
}
AuthType_value = map[string]int32{
"DISABLED": 0,
"SPIRE": 1,
"TEST_ALWAYS_FAIL": 2,
}
)
func (x AuthType) Enum() *AuthType {
p := new(AuthType)
*p = x
return p
}
func (x AuthType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (AuthType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[1].Descriptor()
}
func (AuthType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[1]
}
func (x AuthType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AuthType.Descriptor instead.
func (AuthType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{1}
}
type TraceObservationPoint int32
const (
// Cilium treats 0 as TO_LXC, but it's something we should work to remove.
// This is intentionally set as unknown, so proto API can guarantee the
// observation point is always going to be present on trace events.
TraceObservationPoint_UNKNOWN_POINT TraceObservationPoint = 0
// TO_PROXY indicates network packets are transmitted towards the l7 proxy.
TraceObservationPoint_TO_PROXY TraceObservationPoint = 1
// TO_HOST indicates network packets are transmitted towards the host
// namespace.
TraceObservationPoint_TO_HOST TraceObservationPoint = 2
// TO_STACK indicates network packets are transmitted towards the Linux
// kernel network stack on host machine.
TraceObservationPoint_TO_STACK TraceObservationPoint = 3
// TO_OVERLAY indicates network packets are transmitted towards the tunnel
// device.
TraceObservationPoint_TO_OVERLAY TraceObservationPoint = 4
// TO_ENDPOINT indicates network packets are transmitted towards endpoints
// (containers).
TraceObservationPoint_TO_ENDPOINT TraceObservationPoint = 101
// FROM_ENDPOINT indicates network packets were received from endpoints
// (containers).
TraceObservationPoint_FROM_ENDPOINT TraceObservationPoint = 5
// FROM_PROXY indicates network packets were received from the l7 proxy.
TraceObservationPoint_FROM_PROXY TraceObservationPoint = 6
// FROM_HOST indicates network packets were received from the host
// namespace.
TraceObservationPoint_FROM_HOST TraceObservationPoint = 7
// FROM_STACK indicates network packets were received from the Linux kernel
// network stack on host machine.
TraceObservationPoint_FROM_STACK TraceObservationPoint = 8
// FROM_OVERLAY indicates network packets were received from the tunnel
// device.
TraceObservationPoint_FROM_OVERLAY TraceObservationPoint = 9
// FROM_NETWORK indicates network packets were received from native
// devices.
TraceObservationPoint_FROM_NETWORK TraceObservationPoint = 10
// TO_NETWORK indicates network packets are transmitted towards native
// devices.
TraceObservationPoint_TO_NETWORK TraceObservationPoint = 11
)
// Enum value maps for TraceObservationPoint.
var (
TraceObservationPoint_name = map[int32]string{
0: "UNKNOWN_POINT",
1: "TO_PROXY",
2: "TO_HOST",
3: "TO_STACK",
4: "TO_OVERLAY",
101: "TO_ENDPOINT",
5: "FROM_ENDPOINT",
6: "FROM_PROXY",
7: "FROM_HOST",
8: "FROM_STACK",
9: "FROM_OVERLAY",
10: "FROM_NETWORK",
11: "TO_NETWORK",
}
TraceObservationPoint_value = map[string]int32{
"UNKNOWN_POINT": 0,
"TO_PROXY": 1,
"TO_HOST": 2,
"TO_STACK": 3,
"TO_OVERLAY": 4,
"TO_ENDPOINT": 101,
"FROM_ENDPOINT": 5,
"FROM_PROXY": 6,
"FROM_HOST": 7,
"FROM_STACK": 8,
"FROM_OVERLAY": 9,
"FROM_NETWORK": 10,
"TO_NETWORK": 11,
}
)
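// Illustrative sketch (not part of the generated code): the TO_*/FROM_* naming
// convention of the observation points can be exploited to tell transmit-side
// points from receive-side ones. The helper name isToObservationPoint is
// hypothetical.
func isToObservationPoint(p TraceObservationPoint) bool {
	// Look up the symbolic name and check for the "TO_" prefix.
	name, ok := TraceObservationPoint_name[int32(p)]
	return ok && len(name) >= 3 && name[:3] == "TO_"
}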
func (x TraceObservationPoint) Enum() *TraceObservationPoint {
p := new(TraceObservationPoint)
*p = x
return p
}
func (x TraceObservationPoint) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (TraceObservationPoint) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[2].Descriptor()
}
func (TraceObservationPoint) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[2]
}
func (x TraceObservationPoint) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TraceObservationPoint.Descriptor instead.
func (TraceObservationPoint) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{2}
}
type TraceReason int32
const (
TraceReason_TRACE_REASON_UNKNOWN TraceReason = 0
TraceReason_NEW TraceReason = 1
TraceReason_ESTABLISHED TraceReason = 2
TraceReason_REPLY TraceReason = 3
TraceReason_RELATED TraceReason = 4
// Deprecated: Marked as deprecated in flow/flow.proto.
TraceReason_REOPENED TraceReason = 5
TraceReason_SRV6_ENCAP TraceReason = 6
TraceReason_SRV6_DECAP TraceReason = 7
TraceReason_ENCRYPT_OVERLAY TraceReason = 8
)
// Enum value maps for TraceReason.
var (
TraceReason_name = map[int32]string{
0: "TRACE_REASON_UNKNOWN",
1: "NEW",
2: "ESTABLISHED",
3: "REPLY",
4: "RELATED",
5: "REOPENED",
6: "SRV6_ENCAP",
7: "SRV6_DECAP",
8: "ENCRYPT_OVERLAY",
}
TraceReason_value = map[string]int32{
"TRACE_REASON_UNKNOWN": 0,
"NEW": 1,
"ESTABLISHED": 2,
"REPLY": 3,
"RELATED": 4,
"REOPENED": 5,
"SRV6_ENCAP": 6,
"SRV6_DECAP": 7,
"ENCRYPT_OVERLAY": 8,
}
)
func (x TraceReason) Enum() *TraceReason {
p := new(TraceReason)
*p = x
return p
}
func (x TraceReason) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (TraceReason) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[3].Descriptor()
}
func (TraceReason) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[3]
}
func (x TraceReason) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TraceReason.Descriptor instead.
func (TraceReason) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{3}
}
// This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26):
type L7FlowType int32
const (
L7FlowType_UNKNOWN_L7_TYPE L7FlowType = 0
L7FlowType_REQUEST L7FlowType = 1
L7FlowType_RESPONSE L7FlowType = 2
L7FlowType_SAMPLE L7FlowType = 3
)
// Enum value maps for L7FlowType.
var (
L7FlowType_name = map[int32]string{
0: "UNKNOWN_L7_TYPE",
1: "REQUEST",
2: "RESPONSE",
3: "SAMPLE",
}
L7FlowType_value = map[string]int32{
"UNKNOWN_L7_TYPE": 0,
"REQUEST": 1,
"RESPONSE": 2,
"SAMPLE": 3,
}
)
func (x L7FlowType) Enum() *L7FlowType {
p := new(L7FlowType)
*p = x
return p
}
func (x L7FlowType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (L7FlowType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[4].Descriptor()
}
func (L7FlowType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[4]
}
func (x L7FlowType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use L7FlowType.Descriptor instead.
func (L7FlowType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{4}
}
type IPVersion int32
const (
IPVersion_IP_NOT_USED IPVersion = 0
IPVersion_IPv4 IPVersion = 1
IPVersion_IPv6 IPVersion = 2
)
// Enum value maps for IPVersion.
var (
IPVersion_name = map[int32]string{
0: "IP_NOT_USED",
1: "IPv4",
2: "IPv6",
}
IPVersion_value = map[string]int32{
"IP_NOT_USED": 0,
"IPv4": 1,
"IPv6": 2,
}
)
func (x IPVersion) Enum() *IPVersion {
p := new(IPVersion)
*p = x
return p
}
func (x IPVersion) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (IPVersion) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[5].Descriptor()
}
func (IPVersion) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[5]
}
func (x IPVersion) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use IPVersion.Descriptor instead.
func (IPVersion) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{5}
}
type Verdict int32
const (
// UNKNOWN is used if there is no verdict for this flow event
Verdict_VERDICT_UNKNOWN Verdict = 0
// FORWARDED is used for flow events where the trace point has forwarded
// this packet or connection to the next processing entity.
Verdict_FORWARDED Verdict = 1
// DROPPED is used for flow events where the connection or packet has
// been dropped (e.g. due to a malformed packet or because it was rejected by a
// network policy). The exact drop reason may be found in drop_reason_desc.
Verdict_DROPPED Verdict = 2
// ERROR is used for flow events where an error occurred during processing
Verdict_ERROR Verdict = 3
// AUDIT is used on policy verdict events in policy audit mode, to
// denote flows that would have been dropped by policy if audit mode
// was turned off
Verdict_AUDIT Verdict = 4
// REDIRECTED is used for flow events which have been redirected to the proxy
Verdict_REDIRECTED Verdict = 5
// TRACED is used for flow events which have been observed at a trace point,
// but no particular verdict has been reached yet
Verdict_TRACED Verdict = 6
// TRANSLATED is used for flow events where an address has been translated
Verdict_TRANSLATED Verdict = 7
)
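// Illustrative sketch (not part of the generated code): as the comments above
// note, for Verdict_DROPPED the precise cause is carried in drop_reason_desc.
// A hypothetical helper such as describeDrop can combine the two fields; the
// Flow message and its getters are defined further down in this file.
func describeDrop(f *Flow) (DropReason, bool) {
	if f.GetVerdict() != Verdict_DROPPED {
		// Not a drop; no drop reason applies.
		return DropReason_DROP_REASON_UNKNOWN, false
	}
	return f.GetDropReasonDesc(), true
}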
// Enum value maps for Verdict.
var (
Verdict_name = map[int32]string{
0: "VERDICT_UNKNOWN",
1: "FORWARDED",
2: "DROPPED",
3: "ERROR",
4: "AUDIT",
5: "REDIRECTED",
6: "TRACED",
7: "TRANSLATED",
}
Verdict_value = map[string]int32{
"VERDICT_UNKNOWN": 0,
"FORWARDED": 1,
"DROPPED": 2,
"ERROR": 3,
"AUDIT": 4,
"REDIRECTED": 5,
"TRACED": 6,
"TRANSLATED": 7,
}
)
func (x Verdict) Enum() *Verdict {
p := new(Verdict)
*p = x
return p
}
func (x Verdict) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Verdict) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[6].Descriptor()
}
func (Verdict) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[6]
}
func (x Verdict) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Verdict.Descriptor instead.
func (Verdict) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{6}
}
// These values are shared with pkg/monitor/api/drop.go and bpf/lib/common.h.
// Note that non-drop reasons (i.e. values less than api.DropMin) are not used
// here.
type DropReason int32
const (
// non-drop reasons
DropReason_DROP_REASON_UNKNOWN DropReason = 0
// drop reasons
//
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_INVALID_SOURCE_MAC DropReason = 130
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_INVALID_DESTINATION_MAC DropReason = 131
DropReason_INVALID_SOURCE_IP DropReason = 132
DropReason_POLICY_DENIED DropReason = 133
DropReason_INVALID_PACKET_DROPPED DropReason = 134
DropReason_CT_TRUNCATED_OR_INVALID_HEADER DropReason = 135
DropReason_CT_MISSING_TCP_ACK_FLAG DropReason = 136
DropReason_CT_UNKNOWN_L4_PROTOCOL DropReason = 137
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_CT_CANNOT_CREATE_ENTRY_FROM_PACKET DropReason = 138
DropReason_UNSUPPORTED_L3_PROTOCOL DropReason = 139
DropReason_MISSED_TAIL_CALL DropReason = 140
DropReason_ERROR_WRITING_TO_PACKET DropReason = 141
DropReason_UNKNOWN_L4_PROTOCOL DropReason = 142
DropReason_UNKNOWN_ICMPV4_CODE DropReason = 143
DropReason_UNKNOWN_ICMPV4_TYPE DropReason = 144
DropReason_UNKNOWN_ICMPV6_CODE DropReason = 145
DropReason_UNKNOWN_ICMPV6_TYPE DropReason = 146
DropReason_ERROR_RETRIEVING_TUNNEL_KEY DropReason = 147
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_ERROR_RETRIEVING_TUNNEL_OPTIONS DropReason = 148
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_INVALID_GENEVE_OPTION DropReason = 149
DropReason_UNKNOWN_L3_TARGET_ADDRESS DropReason = 150
DropReason_STALE_OR_UNROUTABLE_IP DropReason = 151
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_NO_MATCHING_LOCAL_CONTAINER_FOUND DropReason = 152
DropReason_ERROR_WHILE_CORRECTING_L3_CHECKSUM DropReason = 153
DropReason_ERROR_WHILE_CORRECTING_L4_CHECKSUM DropReason = 154
DropReason_CT_MAP_INSERTION_FAILED DropReason = 155
DropReason_INVALID_IPV6_EXTENSION_HEADER DropReason = 156
DropReason_IP_FRAGMENTATION_NOT_SUPPORTED DropReason = 157
DropReason_SERVICE_BACKEND_NOT_FOUND DropReason = 158
DropReason_NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT DropReason = 160
DropReason_FAILED_TO_INSERT_INTO_PROXYMAP DropReason = 161
DropReason_REACHED_EDT_RATE_LIMITING_DROP_HORIZON DropReason = 162
DropReason_UNKNOWN_CONNECTION_TRACKING_STATE DropReason = 163
DropReason_LOCAL_HOST_IS_UNREACHABLE DropReason = 164
DropReason_NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION DropReason = 165
DropReason_UNSUPPORTED_L2_PROTOCOL DropReason = 166
DropReason_NO_MAPPING_FOR_NAT_MASQUERADE DropReason = 167
DropReason_UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE DropReason = 168
DropReason_FIB_LOOKUP_FAILED DropReason = 169
DropReason_ENCAPSULATION_TRAFFIC_IS_PROHIBITED DropReason = 170
DropReason_INVALID_IDENTITY DropReason = 171
DropReason_UNKNOWN_SENDER DropReason = 172
DropReason_NAT_NOT_NEEDED DropReason = 173
DropReason_IS_A_CLUSTERIP DropReason = 174
DropReason_FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND DropReason = 175
DropReason_FORBIDDEN_ICMPV6_MESSAGE DropReason = 176
DropReason_DENIED_BY_LB_SRC_RANGE_CHECK DropReason = 177
DropReason_SOCKET_LOOKUP_FAILED DropReason = 178
DropReason_SOCKET_ASSIGN_FAILED DropReason = 179
DropReason_PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL DropReason = 180
DropReason_POLICY_DENY DropReason = 181
DropReason_VLAN_FILTERED DropReason = 182
DropReason_INVALID_VNI DropReason = 183
DropReason_INVALID_TC_BUFFER DropReason = 184
DropReason_NO_SID DropReason = 185
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_MISSING_SRV6_STATE DropReason = 186
DropReason_NAT46 DropReason = 187
DropReason_NAT64 DropReason = 188
DropReason_AUTH_REQUIRED DropReason = 189
DropReason_CT_NO_MAP_FOUND DropReason = 190
DropReason_SNAT_NO_MAP_FOUND DropReason = 191
DropReason_INVALID_CLUSTER_ID DropReason = 192
DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP DropReason = 193
DropReason_NO_EGRESS_GATEWAY DropReason = 194
DropReason_UNENCRYPTED_TRAFFIC DropReason = 195
DropReason_TTL_EXCEEDED DropReason = 196
DropReason_NO_NODE_ID DropReason = 197
DropReason_DROP_RATE_LIMITED DropReason = 198
DropReason_IGMP_HANDLED DropReason = 199
DropReason_IGMP_SUBSCRIBED DropReason = 200
DropReason_MULTICAST_HANDLED DropReason = 201
// A BPF program wants to tail call into bpf_host, but the host datapath
// hasn't been loaded yet.
DropReason_DROP_HOST_NOT_READY DropReason = 202
// A BPF program wants to tail call some endpoint's policy program in the
// POLICY_CALL_MAP, but the program is not available.
DropReason_DROP_EP_NOT_READY DropReason = 203
)
// Enum value maps for DropReason.
var (
DropReason_name = map[int32]string{
0: "DROP_REASON_UNKNOWN",
130: "INVALID_SOURCE_MAC",
131: "INVALID_DESTINATION_MAC",
132: "INVALID_SOURCE_IP",
133: "POLICY_DENIED",
134: "INVALID_PACKET_DROPPED",
135: "CT_TRUNCATED_OR_INVALID_HEADER",
136: "CT_MISSING_TCP_ACK_FLAG",
137: "CT_UNKNOWN_L4_PROTOCOL",
138: "CT_CANNOT_CREATE_ENTRY_FROM_PACKET",
139: "UNSUPPORTED_L3_PROTOCOL",
140: "MISSED_TAIL_CALL",
141: "ERROR_WRITING_TO_PACKET",
142: "UNKNOWN_L4_PROTOCOL",
143: "UNKNOWN_ICMPV4_CODE",
144: "UNKNOWN_ICMPV4_TYPE",
145: "UNKNOWN_ICMPV6_CODE",
146: "UNKNOWN_ICMPV6_TYPE",
147: "ERROR_RETRIEVING_TUNNEL_KEY",
148: "ERROR_RETRIEVING_TUNNEL_OPTIONS",
149: "INVALID_GENEVE_OPTION",
150: "UNKNOWN_L3_TARGET_ADDRESS",
151: "STALE_OR_UNROUTABLE_IP",
152: "NO_MATCHING_LOCAL_CONTAINER_FOUND",
153: "ERROR_WHILE_CORRECTING_L3_CHECKSUM",
154: "ERROR_WHILE_CORRECTING_L4_CHECKSUM",
155: "CT_MAP_INSERTION_FAILED",
156: "INVALID_IPV6_EXTENSION_HEADER",
157: "IP_FRAGMENTATION_NOT_SUPPORTED",
158: "SERVICE_BACKEND_NOT_FOUND",
160: "NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT",
161: "FAILED_TO_INSERT_INTO_PROXYMAP",
162: "REACHED_EDT_RATE_LIMITING_DROP_HORIZON",
163: "UNKNOWN_CONNECTION_TRACKING_STATE",
164: "LOCAL_HOST_IS_UNREACHABLE",
165: "NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION",
166: "UNSUPPORTED_L2_PROTOCOL",
167: "NO_MAPPING_FOR_NAT_MASQUERADE",
168: "UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE",
169: "FIB_LOOKUP_FAILED",
170: "ENCAPSULATION_TRAFFIC_IS_PROHIBITED",
171: "INVALID_IDENTITY",
172: "UNKNOWN_SENDER",
173: "NAT_NOT_NEEDED",
174: "IS_A_CLUSTERIP",
175: "FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND",
176: "FORBIDDEN_ICMPV6_MESSAGE",
177: "DENIED_BY_LB_SRC_RANGE_CHECK",
178: "SOCKET_LOOKUP_FAILED",
179: "SOCKET_ASSIGN_FAILED",
180: "PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL",
181: "POLICY_DENY",
182: "VLAN_FILTERED",
183: "INVALID_VNI",
184: "INVALID_TC_BUFFER",
185: "NO_SID",
186: "MISSING_SRV6_STATE",
187: "NAT46",
188: "NAT64",
189: "AUTH_REQUIRED",
190: "CT_NO_MAP_FOUND",
191: "SNAT_NO_MAP_FOUND",
192: "INVALID_CLUSTER_ID",
193: "UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP",
194: "NO_EGRESS_GATEWAY",
195: "UNENCRYPTED_TRAFFIC",
196: "TTL_EXCEEDED",
197: "NO_NODE_ID",
198: "DROP_RATE_LIMITED",
199: "IGMP_HANDLED",
200: "IGMP_SUBSCRIBED",
201: "MULTICAST_HANDLED",
202: "DROP_HOST_NOT_READY",
203: "DROP_EP_NOT_READY",
}
DropReason_value = map[string]int32{
"DROP_REASON_UNKNOWN": 0,
"INVALID_SOURCE_MAC": 130,
"INVALID_DESTINATION_MAC": 131,
"INVALID_SOURCE_IP": 132,
"POLICY_DENIED": 133,
"INVALID_PACKET_DROPPED": 134,
"CT_TRUNCATED_OR_INVALID_HEADER": 135,
"CT_MISSING_TCP_ACK_FLAG": 136,
"CT_UNKNOWN_L4_PROTOCOL": 137,
"CT_CANNOT_CREATE_ENTRY_FROM_PACKET": 138,
"UNSUPPORTED_L3_PROTOCOL": 139,
"MISSED_TAIL_CALL": 140,
"ERROR_WRITING_TO_PACKET": 141,
"UNKNOWN_L4_PROTOCOL": 142,
"UNKNOWN_ICMPV4_CODE": 143,
"UNKNOWN_ICMPV4_TYPE": 144,
"UNKNOWN_ICMPV6_CODE": 145,
"UNKNOWN_ICMPV6_TYPE": 146,
"ERROR_RETRIEVING_TUNNEL_KEY": 147,
"ERROR_RETRIEVING_TUNNEL_OPTIONS": 148,
"INVALID_GENEVE_OPTION": 149,
"UNKNOWN_L3_TARGET_ADDRESS": 150,
"STALE_OR_UNROUTABLE_IP": 151,
"NO_MATCHING_LOCAL_CONTAINER_FOUND": 152,
"ERROR_WHILE_CORRECTING_L3_CHECKSUM": 153,
"ERROR_WHILE_CORRECTING_L4_CHECKSUM": 154,
"CT_MAP_INSERTION_FAILED": 155,
"INVALID_IPV6_EXTENSION_HEADER": 156,
"IP_FRAGMENTATION_NOT_SUPPORTED": 157,
"SERVICE_BACKEND_NOT_FOUND": 158,
"NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT": 160,
"FAILED_TO_INSERT_INTO_PROXYMAP": 161,
"REACHED_EDT_RATE_LIMITING_DROP_HORIZON": 162,
"UNKNOWN_CONNECTION_TRACKING_STATE": 163,
"LOCAL_HOST_IS_UNREACHABLE": 164,
"NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION": 165,
"UNSUPPORTED_L2_PROTOCOL": 166,
"NO_MAPPING_FOR_NAT_MASQUERADE": 167,
"UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE": 168,
"FIB_LOOKUP_FAILED": 169,
"ENCAPSULATION_TRAFFIC_IS_PROHIBITED": 170,
"INVALID_IDENTITY": 171,
"UNKNOWN_SENDER": 172,
"NAT_NOT_NEEDED": 173,
"IS_A_CLUSTERIP": 174,
"FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND": 175,
"FORBIDDEN_ICMPV6_MESSAGE": 176,
"DENIED_BY_LB_SRC_RANGE_CHECK": 177,
"SOCKET_LOOKUP_FAILED": 178,
"SOCKET_ASSIGN_FAILED": 179,
"PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL": 180,
"POLICY_DENY": 181,
"VLAN_FILTERED": 182,
"INVALID_VNI": 183,
"INVALID_TC_BUFFER": 184,
"NO_SID": 185,
"MISSING_SRV6_STATE": 186,
"NAT46": 187,
"NAT64": 188,
"AUTH_REQUIRED": 189,
"CT_NO_MAP_FOUND": 190,
"SNAT_NO_MAP_FOUND": 191,
"INVALID_CLUSTER_ID": 192,
"UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP": 193,
"NO_EGRESS_GATEWAY": 194,
"UNENCRYPTED_TRAFFIC": 195,
"TTL_EXCEEDED": 196,
"NO_NODE_ID": 197,
"DROP_RATE_LIMITED": 198,
"IGMP_HANDLED": 199,
"IGMP_SUBSCRIBED": 200,
"MULTICAST_HANDLED": 201,
"DROP_HOST_NOT_READY": 202,
"DROP_EP_NOT_READY": 203,
}
)
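// Illustrative sketch (not part of the generated code): the deprecated numeric
// Flow.drop_reason field can still be rendered as a symbolic name via the
// DropReason_name map above. The helper name dropReasonName is hypothetical.
func dropReasonName(code uint32) string {
	if name, ok := DropReason_name[int32(code)]; ok {
		return name
	}
	// Fall back to the zero value's name, "DROP_REASON_UNKNOWN".
	return DropReason_name[0]
}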
func (x DropReason) Enum() *DropReason {
p := new(DropReason)
*p = x
return p
}
func (x DropReason) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (DropReason) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[7].Descriptor()
}
func (DropReason) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[7]
}
func (x DropReason) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use DropReason.Descriptor instead.
func (DropReason) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{7}
}
type TrafficDirection int32
const (
TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN TrafficDirection = 0
TrafficDirection_INGRESS TrafficDirection = 1
TrafficDirection_EGRESS TrafficDirection = 2
)
// Enum value maps for TrafficDirection.
var (
TrafficDirection_name = map[int32]string{
0: "TRAFFIC_DIRECTION_UNKNOWN",
1: "INGRESS",
2: "EGRESS",
}
TrafficDirection_value = map[string]int32{
"TRAFFIC_DIRECTION_UNKNOWN": 0,
"INGRESS": 1,
"EGRESS": 2,
}
)
func (x TrafficDirection) Enum() *TrafficDirection {
p := new(TrafficDirection)
*p = x
return p
}
func (x TrafficDirection) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[8].Descriptor()
}
func (TrafficDirection) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[8]
}
func (x TrafficDirection) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TrafficDirection.Descriptor instead.
func (TrafficDirection) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{8}
}
// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h.
type DebugCapturePoint int32
const (
DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN DebugCapturePoint = 0
DebugCapturePoint_DBG_CAPTURE_DELIVERY DebugCapturePoint = 4
DebugCapturePoint_DBG_CAPTURE_FROM_LB DebugCapturePoint = 5
DebugCapturePoint_DBG_CAPTURE_AFTER_V46 DebugCapturePoint = 6
DebugCapturePoint_DBG_CAPTURE_AFTER_V64 DebugCapturePoint = 7
DebugCapturePoint_DBG_CAPTURE_PROXY_PRE DebugCapturePoint = 8
DebugCapturePoint_DBG_CAPTURE_PROXY_POST DebugCapturePoint = 9
DebugCapturePoint_DBG_CAPTURE_SNAT_PRE DebugCapturePoint = 10
DebugCapturePoint_DBG_CAPTURE_SNAT_POST DebugCapturePoint = 11
)
// Enum value maps for DebugCapturePoint.
var (
DebugCapturePoint_name = map[int32]string{
0: "DBG_CAPTURE_POINT_UNKNOWN",
4: "DBG_CAPTURE_DELIVERY",
5: "DBG_CAPTURE_FROM_LB",
6: "DBG_CAPTURE_AFTER_V46",
7: "DBG_CAPTURE_AFTER_V64",
8: "DBG_CAPTURE_PROXY_PRE",
9: "DBG_CAPTURE_PROXY_POST",
10: "DBG_CAPTURE_SNAT_PRE",
11: "DBG_CAPTURE_SNAT_POST",
}
DebugCapturePoint_value = map[string]int32{
"DBG_CAPTURE_POINT_UNKNOWN": 0,
"DBG_CAPTURE_DELIVERY": 4,
"DBG_CAPTURE_FROM_LB": 5,
"DBG_CAPTURE_AFTER_V46": 6,
"DBG_CAPTURE_AFTER_V64": 7,
"DBG_CAPTURE_PROXY_PRE": 8,
"DBG_CAPTURE_PROXY_POST": 9,
"DBG_CAPTURE_SNAT_PRE": 10,
"DBG_CAPTURE_SNAT_POST": 11,
}
)
func (x DebugCapturePoint) Enum() *DebugCapturePoint {
p := new(DebugCapturePoint)
*p = x
return p
}
func (x DebugCapturePoint) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (DebugCapturePoint) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[9].Descriptor()
}
func (DebugCapturePoint) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[9]
}
func (x DebugCapturePoint) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use DebugCapturePoint.Descriptor instead.
func (DebugCapturePoint) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{9}
}
// EventType constants are based on the ones from <linux/perf_event.h>.
type EventType int32
const (
EventType_UNKNOWN EventType = 0
// EventSample is equivalent to PERF_RECORD_SAMPLE.
EventType_EventSample EventType = 9
// RecordLost is equivalent to PERF_RECORD_LOST.
EventType_RecordLost EventType = 2
)
// Enum value maps for EventType.
var (
EventType_name = map[int32]string{
0: "UNKNOWN",
9: "EventSample",
2: "RecordLost",
}
EventType_value = map[string]int32{
"UNKNOWN": 0,
"EventSample": 9,
"RecordLost": 2,
}
)
func (x EventType) Enum() *EventType {
p := new(EventType)
*p = x
return p
}
func (x EventType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (EventType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[10].Descriptor()
}
func (EventType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[10]
}
func (x EventType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use EventType.Descriptor instead.
func (EventType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{10}
}
type LostEventSource int32
const (
LostEventSource_UNKNOWN_LOST_EVENT_SOURCE LostEventSource = 0
// PERF_EVENT_RING_BUFFER indicates that events were dropped in the BPF
// perf event ring buffer, indicating that the userspace agent did not keep up
// with the events produced by the datapath.
LostEventSource_PERF_EVENT_RING_BUFFER LostEventSource = 1
// OBSERVER_EVENTS_QUEUE indicates that events were dropped because the
// Hubble events queue was full, indicating that the Hubble observer did
// not keep up.
LostEventSource_OBSERVER_EVENTS_QUEUE LostEventSource = 2
// HUBBLE_RING_BUFFER indicates that the event was dropped because it could
// not be read from Hubble's ring buffer in time before being overwritten.
LostEventSource_HUBBLE_RING_BUFFER LostEventSource = 3
)
// Enum value maps for LostEventSource.
var (
LostEventSource_name = map[int32]string{
0: "UNKNOWN_LOST_EVENT_SOURCE",
1: "PERF_EVENT_RING_BUFFER",
2: "OBSERVER_EVENTS_QUEUE",
3: "HUBBLE_RING_BUFFER",
}
LostEventSource_value = map[string]int32{
"UNKNOWN_LOST_EVENT_SOURCE": 0,
"PERF_EVENT_RING_BUFFER": 1,
"OBSERVER_EVENTS_QUEUE": 2,
"HUBBLE_RING_BUFFER": 3,
}
)
func (x LostEventSource) Enum() *LostEventSource {
p := new(LostEventSource)
*p = x
return p
}
func (x LostEventSource) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (LostEventSource) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[11].Descriptor()
}
func (LostEventSource) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[11]
}
func (x LostEventSource) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use LostEventSource.Descriptor instead.
func (LostEventSource) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{11}
}
// AgentEventType is the type of agent event. These values are shared with type
// AgentNotification in pkg/monitor/api/types.go.
type AgentEventType int32
const (
AgentEventType_AGENT_EVENT_UNKNOWN AgentEventType = 0
AgentEventType_AGENT_STARTED AgentEventType = 2
AgentEventType_POLICY_UPDATED AgentEventType = 3
AgentEventType_POLICY_DELETED AgentEventType = 4
AgentEventType_ENDPOINT_REGENERATE_SUCCESS AgentEventType = 5
AgentEventType_ENDPOINT_REGENERATE_FAILURE AgentEventType = 6
AgentEventType_ENDPOINT_CREATED AgentEventType = 7
AgentEventType_ENDPOINT_DELETED AgentEventType = 8
AgentEventType_IPCACHE_UPSERTED AgentEventType = 9
AgentEventType_IPCACHE_DELETED AgentEventType = 10
AgentEventType_SERVICE_UPSERTED AgentEventType = 11
AgentEventType_SERVICE_DELETED AgentEventType = 12
)
// Enum value maps for AgentEventType.
var (
AgentEventType_name = map[int32]string{
0: "AGENT_EVENT_UNKNOWN",
2: "AGENT_STARTED",
3: "POLICY_UPDATED",
4: "POLICY_DELETED",
5: "ENDPOINT_REGENERATE_SUCCESS",
6: "ENDPOINT_REGENERATE_FAILURE",
7: "ENDPOINT_CREATED",
8: "ENDPOINT_DELETED",
9: "IPCACHE_UPSERTED",
10: "IPCACHE_DELETED",
11: "SERVICE_UPSERTED",
12: "SERVICE_DELETED",
}
AgentEventType_value = map[string]int32{
"AGENT_EVENT_UNKNOWN": 0,
"AGENT_STARTED": 2,
"POLICY_UPDATED": 3,
"POLICY_DELETED": 4,
"ENDPOINT_REGENERATE_SUCCESS": 5,
"ENDPOINT_REGENERATE_FAILURE": 6,
"ENDPOINT_CREATED": 7,
"ENDPOINT_DELETED": 8,
"IPCACHE_UPSERTED": 9,
"IPCACHE_DELETED": 10,
"SERVICE_UPSERTED": 11,
"SERVICE_DELETED": 12,
}
)
func (x AgentEventType) Enum() *AgentEventType {
p := new(AgentEventType)
*p = x
return p
}
func (x AgentEventType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (AgentEventType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[12].Descriptor()
}
func (AgentEventType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[12]
}
func (x AgentEventType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AgentEventType.Descriptor instead.
func (AgentEventType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{12}
}
// This mirrors enum xlate_point in bpf/lib/trace_sock.h
type SocketTranslationPoint int32
const (
SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN SocketTranslationPoint = 0
SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_FWD SocketTranslationPoint = 1 // Pre service translation
SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_FWD SocketTranslationPoint = 2 // Post service translation
SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_REV SocketTranslationPoint = 3 // Pre reverse service translation
SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_REV SocketTranslationPoint = 4 // Post reverse service translation
)
// Enum value maps for SocketTranslationPoint.
var (
SocketTranslationPoint_name = map[int32]string{
0: "SOCK_XLATE_POINT_UNKNOWN",
1: "SOCK_XLATE_POINT_PRE_DIRECTION_FWD",
2: "SOCK_XLATE_POINT_POST_DIRECTION_FWD",
3: "SOCK_XLATE_POINT_PRE_DIRECTION_REV",
4: "SOCK_XLATE_POINT_POST_DIRECTION_REV",
}
SocketTranslationPoint_value = map[string]int32{
"SOCK_XLATE_POINT_UNKNOWN": 0,
"SOCK_XLATE_POINT_PRE_DIRECTION_FWD": 1,
"SOCK_XLATE_POINT_POST_DIRECTION_FWD": 2,
"SOCK_XLATE_POINT_PRE_DIRECTION_REV": 3,
"SOCK_XLATE_POINT_POST_DIRECTION_REV": 4,
}
)
func (x SocketTranslationPoint) Enum() *SocketTranslationPoint {
p := new(SocketTranslationPoint)
*p = x
return p
}
func (x SocketTranslationPoint) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (SocketTranslationPoint) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[13].Descriptor()
}
func (SocketTranslationPoint) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[13]
}
func (x SocketTranslationPoint) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use SocketTranslationPoint.Descriptor instead.
func (SocketTranslationPoint) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{13}
}
// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h.
type DebugEventType int32
const (
DebugEventType_DBG_EVENT_UNKNOWN DebugEventType = 0
DebugEventType_DBG_GENERIC DebugEventType = 1
DebugEventType_DBG_LOCAL_DELIVERY DebugEventType = 2
DebugEventType_DBG_ENCAP DebugEventType = 3
DebugEventType_DBG_LXC_FOUND DebugEventType = 4
DebugEventType_DBG_POLICY_DENIED DebugEventType = 5
DebugEventType_DBG_CT_LOOKUP DebugEventType = 6
DebugEventType_DBG_CT_LOOKUP_REV DebugEventType = 7
DebugEventType_DBG_CT_MATCH DebugEventType = 8
DebugEventType_DBG_CT_CREATED DebugEventType = 9
DebugEventType_DBG_CT_CREATED2 DebugEventType = 10
DebugEventType_DBG_ICMP6_HANDLE DebugEventType = 11
DebugEventType_DBG_ICMP6_REQUEST DebugEventType = 12
DebugEventType_DBG_ICMP6_NS DebugEventType = 13
DebugEventType_DBG_ICMP6_TIME_EXCEEDED DebugEventType = 14
DebugEventType_DBG_CT_VERDICT DebugEventType = 15
DebugEventType_DBG_DECAP DebugEventType = 16
DebugEventType_DBG_PORT_MAP DebugEventType = 17
DebugEventType_DBG_ERROR_RET DebugEventType = 18
DebugEventType_DBG_TO_HOST DebugEventType = 19
DebugEventType_DBG_TO_STACK DebugEventType = 20
DebugEventType_DBG_PKT_HASH DebugEventType = 21
DebugEventType_DBG_LB6_LOOKUP_FRONTEND DebugEventType = 22
DebugEventType_DBG_LB6_LOOKUP_FRONTEND_FAIL DebugEventType = 23
DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT DebugEventType = 24
DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS DebugEventType = 25
DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL DebugEventType = 26
DebugEventType_DBG_LB6_LOOKUP_BACKEND_FAIL DebugEventType = 27
DebugEventType_DBG_LB6_REVERSE_NAT_LOOKUP DebugEventType = 28
DebugEventType_DBG_LB6_REVERSE_NAT DebugEventType = 29
DebugEventType_DBG_LB4_LOOKUP_FRONTEND DebugEventType = 30
DebugEventType_DBG_LB4_LOOKUP_FRONTEND_FAIL DebugEventType = 31
DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT DebugEventType = 32
DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS DebugEventType = 33
DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL DebugEventType = 34
DebugEventType_DBG_LB4_LOOKUP_BACKEND_FAIL DebugEventType = 35
DebugEventType_DBG_LB4_REVERSE_NAT_LOOKUP DebugEventType = 36
DebugEventType_DBG_LB4_REVERSE_NAT DebugEventType = 37
DebugEventType_DBG_LB4_LOOPBACK_SNAT DebugEventType = 38
DebugEventType_DBG_LB4_LOOPBACK_SNAT_REV DebugEventType = 39
DebugEventType_DBG_CT_LOOKUP4 DebugEventType = 40
DebugEventType_DBG_RR_BACKEND_SLOT_SEL DebugEventType = 41
DebugEventType_DBG_REV_PROXY_LOOKUP DebugEventType = 42
DebugEventType_DBG_REV_PROXY_FOUND DebugEventType = 43
DebugEventType_DBG_REV_PROXY_UPDATE DebugEventType = 44
DebugEventType_DBG_L4_POLICY DebugEventType = 45
DebugEventType_DBG_NETDEV_IN_CLUSTER DebugEventType = 46
DebugEventType_DBG_NETDEV_ENCAP4 DebugEventType = 47
DebugEventType_DBG_CT_LOOKUP4_1 DebugEventType = 48
DebugEventType_DBG_CT_LOOKUP4_2 DebugEventType = 49
DebugEventType_DBG_CT_CREATED4 DebugEventType = 50
DebugEventType_DBG_CT_LOOKUP6_1 DebugEventType = 51
DebugEventType_DBG_CT_LOOKUP6_2 DebugEventType = 52
DebugEventType_DBG_CT_CREATED6 DebugEventType = 53
DebugEventType_DBG_SKIP_PROXY DebugEventType = 54
DebugEventType_DBG_L4_CREATE DebugEventType = 55
DebugEventType_DBG_IP_ID_MAP_FAILED4 DebugEventType = 56
DebugEventType_DBG_IP_ID_MAP_FAILED6 DebugEventType = 57
DebugEventType_DBG_IP_ID_MAP_SUCCEED4 DebugEventType = 58
DebugEventType_DBG_IP_ID_MAP_SUCCEED6 DebugEventType = 59
DebugEventType_DBG_LB_STALE_CT DebugEventType = 60
DebugEventType_DBG_INHERIT_IDENTITY DebugEventType = 61
DebugEventType_DBG_SK_LOOKUP4 DebugEventType = 62
DebugEventType_DBG_SK_LOOKUP6 DebugEventType = 63
DebugEventType_DBG_SK_ASSIGN DebugEventType = 64
DebugEventType_DBG_L7_LB DebugEventType = 65
DebugEventType_DBG_SKIP_POLICY DebugEventType = 66
)
// Enum value maps for DebugEventType.
var (
DebugEventType_name = map[int32]string{
0: "DBG_EVENT_UNKNOWN",
1: "DBG_GENERIC",
2: "DBG_LOCAL_DELIVERY",
3: "DBG_ENCAP",
4: "DBG_LXC_FOUND",
5: "DBG_POLICY_DENIED",
6: "DBG_CT_LOOKUP",
7: "DBG_CT_LOOKUP_REV",
8: "DBG_CT_MATCH",
9: "DBG_CT_CREATED",
10: "DBG_CT_CREATED2",
11: "DBG_ICMP6_HANDLE",
12: "DBG_ICMP6_REQUEST",
13: "DBG_ICMP6_NS",
14: "DBG_ICMP6_TIME_EXCEEDED",
15: "DBG_CT_VERDICT",
16: "DBG_DECAP",
17: "DBG_PORT_MAP",
18: "DBG_ERROR_RET",
19: "DBG_TO_HOST",
20: "DBG_TO_STACK",
21: "DBG_PKT_HASH",
22: "DBG_LB6_LOOKUP_FRONTEND",
23: "DBG_LB6_LOOKUP_FRONTEND_FAIL",
24: "DBG_LB6_LOOKUP_BACKEND_SLOT",
25: "DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS",
26: "DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL",
27: "DBG_LB6_LOOKUP_BACKEND_FAIL",
28: "DBG_LB6_REVERSE_NAT_LOOKUP",
29: "DBG_LB6_REVERSE_NAT",
30: "DBG_LB4_LOOKUP_FRONTEND",
31: "DBG_LB4_LOOKUP_FRONTEND_FAIL",
32: "DBG_LB4_LOOKUP_BACKEND_SLOT",
33: "DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS",
34: "DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL",
35: "DBG_LB4_LOOKUP_BACKEND_FAIL",
36: "DBG_LB4_REVERSE_NAT_LOOKUP",
37: "DBG_LB4_REVERSE_NAT",
38: "DBG_LB4_LOOPBACK_SNAT",
39: "DBG_LB4_LOOPBACK_SNAT_REV",
40: "DBG_CT_LOOKUP4",
41: "DBG_RR_BACKEND_SLOT_SEL",
42: "DBG_REV_PROXY_LOOKUP",
43: "DBG_REV_PROXY_FOUND",
44: "DBG_REV_PROXY_UPDATE",
45: "DBG_L4_POLICY",
46: "DBG_NETDEV_IN_CLUSTER",
47: "DBG_NETDEV_ENCAP4",
48: "DBG_CT_LOOKUP4_1",
49: "DBG_CT_LOOKUP4_2",
50: "DBG_CT_CREATED4",
51: "DBG_CT_LOOKUP6_1",
52: "DBG_CT_LOOKUP6_2",
53: "DBG_CT_CREATED6",
54: "DBG_SKIP_PROXY",
55: "DBG_L4_CREATE",
56: "DBG_IP_ID_MAP_FAILED4",
57: "DBG_IP_ID_MAP_FAILED6",
58: "DBG_IP_ID_MAP_SUCCEED4",
59: "DBG_IP_ID_MAP_SUCCEED6",
60: "DBG_LB_STALE_CT",
61: "DBG_INHERIT_IDENTITY",
62: "DBG_SK_LOOKUP4",
63: "DBG_SK_LOOKUP6",
64: "DBG_SK_ASSIGN",
65: "DBG_L7_LB",
66: "DBG_SKIP_POLICY",
}
DebugEventType_value = map[string]int32{
"DBG_EVENT_UNKNOWN": 0,
"DBG_GENERIC": 1,
"DBG_LOCAL_DELIVERY": 2,
"DBG_ENCAP": 3,
"DBG_LXC_FOUND": 4,
"DBG_POLICY_DENIED": 5,
"DBG_CT_LOOKUP": 6,
"DBG_CT_LOOKUP_REV": 7,
"DBG_CT_MATCH": 8,
"DBG_CT_CREATED": 9,
"DBG_CT_CREATED2": 10,
"DBG_ICMP6_HANDLE": 11,
"DBG_ICMP6_REQUEST": 12,
"DBG_ICMP6_NS": 13,
"DBG_ICMP6_TIME_EXCEEDED": 14,
"DBG_CT_VERDICT": 15,
"DBG_DECAP": 16,
"DBG_PORT_MAP": 17,
"DBG_ERROR_RET": 18,
"DBG_TO_HOST": 19,
"DBG_TO_STACK": 20,
"DBG_PKT_HASH": 21,
"DBG_LB6_LOOKUP_FRONTEND": 22,
"DBG_LB6_LOOKUP_FRONTEND_FAIL": 23,
"DBG_LB6_LOOKUP_BACKEND_SLOT": 24,
"DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS": 25,
"DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL": 26,
"DBG_LB6_LOOKUP_BACKEND_FAIL": 27,
"DBG_LB6_REVERSE_NAT_LOOKUP": 28,
"DBG_LB6_REVERSE_NAT": 29,
"DBG_LB4_LOOKUP_FRONTEND": 30,
"DBG_LB4_LOOKUP_FRONTEND_FAIL": 31,
"DBG_LB4_LOOKUP_BACKEND_SLOT": 32,
"DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS": 33,
"DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL": 34,
"DBG_LB4_LOOKUP_BACKEND_FAIL": 35,
"DBG_LB4_REVERSE_NAT_LOOKUP": 36,
"DBG_LB4_REVERSE_NAT": 37,
"DBG_LB4_LOOPBACK_SNAT": 38,
"DBG_LB4_LOOPBACK_SNAT_REV": 39,
"DBG_CT_LOOKUP4": 40,
"DBG_RR_BACKEND_SLOT_SEL": 41,
"DBG_REV_PROXY_LOOKUP": 42,
"DBG_REV_PROXY_FOUND": 43,
"DBG_REV_PROXY_UPDATE": 44,
"DBG_L4_POLICY": 45,
"DBG_NETDEV_IN_CLUSTER": 46,
"DBG_NETDEV_ENCAP4": 47,
"DBG_CT_LOOKUP4_1": 48,
"DBG_CT_LOOKUP4_2": 49,
"DBG_CT_CREATED4": 50,
"DBG_CT_LOOKUP6_1": 51,
"DBG_CT_LOOKUP6_2": 52,
"DBG_CT_CREATED6": 53,
"DBG_SKIP_PROXY": 54,
"DBG_L4_CREATE": 55,
"DBG_IP_ID_MAP_FAILED4": 56,
"DBG_IP_ID_MAP_FAILED6": 57,
"DBG_IP_ID_MAP_SUCCEED4": 58,
"DBG_IP_ID_MAP_SUCCEED6": 59,
"DBG_LB_STALE_CT": 60,
"DBG_INHERIT_IDENTITY": 61,
"DBG_SK_LOOKUP4": 62,
"DBG_SK_LOOKUP6": 63,
"DBG_SK_ASSIGN": 64,
"DBG_L7_LB": 65,
"DBG_SKIP_POLICY": 66,
}
)
func (x DebugEventType) Enum() *DebugEventType {
p := new(DebugEventType)
*p = x
return p
}
func (x DebugEventType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (DebugEventType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[14].Descriptor()
}
func (DebugEventType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[14]
}
func (x DebugEventType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use DebugEventType.Descriptor instead.
func (DebugEventType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{14}
}
type Flow struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// uuid is a universally unique identifier for this flow.
Uuid string `protobuf:"bytes,34,opt,name=uuid,proto3" json:"uuid,omitempty"`
Verdict Verdict `protobuf:"varint,2,opt,name=verdict,proto3,enum=flow.Verdict" json:"verdict,omitempty"`
// only applicable to Verdict = DROPPED.
// deprecated in favor of drop_reason_desc.
//
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason uint32 `protobuf:"varint,3,opt,name=drop_reason,json=dropReason,proto3" json:"drop_reason,omitempty"`
// auth_type is the authentication type specified for the flow in Cilium Network Policy.
// Only set on policy verdict events.
AuthType AuthType `protobuf:"varint,35,opt,name=auth_type,json=authType,proto3,enum=flow.AuthType" json:"auth_type,omitempty"`
// l2
Ethernet *Ethernet `protobuf:"bytes,4,opt,name=ethernet,proto3" json:"ethernet,omitempty"`
// l3
IP *IP `protobuf:"bytes,5,opt,name=IP,proto3" json:"IP,omitempty"`
// l4
L4 *Layer4 `protobuf:"bytes,6,opt,name=l4,proto3" json:"l4,omitempty"`
Source *Endpoint `protobuf:"bytes,8,opt,name=source,proto3" json:"source,omitempty"`
Destination *Endpoint `protobuf:"bytes,9,opt,name=destination,proto3" json:"destination,omitempty"`
Type FlowType `protobuf:"varint,10,opt,name=Type,proto3,enum=flow.FlowType" json:"Type,omitempty"`
// NodeName is the name of the node from which this Flow was captured.
NodeName string `protobuf:"bytes,11,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// node labels in `foo=bar` format.
NodeLabels []string `protobuf:"bytes,37,rep,name=node_labels,json=nodeLabels,proto3" json:"node_labels,omitempty"`
// all names the source IP can have.
SourceNames []string `protobuf:"bytes,13,rep,name=source_names,json=sourceNames,proto3" json:"source_names,omitempty"`
// all names the destination IP can have.
DestinationNames []string `protobuf:"bytes,14,rep,name=destination_names,json=destinationNames,proto3" json:"destination_names,omitempty"`
// L7 information. This field is set if and only if FlowType is L7.
L7 *Layer7 `protobuf:"bytes,15,opt,name=l7,proto3" json:"l7,omitempty"`
// Deprecated. This suffers from false negatives due to protobuf not being
// able to distinguish between the value being false and it being absent.
// Please use is_reply instead.
//
// Deprecated: Marked as deprecated in flow/flow.proto.
Reply bool `protobuf:"varint,16,opt,name=reply,proto3" json:"reply,omitempty"`
// EventType of the originating Cilium event
EventType *CiliumEventType `protobuf:"bytes,19,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"`
// source_service contains the service name of the source
SourceService *Service `protobuf:"bytes,20,opt,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"`
// destination_service contains the service name of the destination
DestinationService *Service `protobuf:"bytes,21,opt,name=destination_service,json=destinationService,proto3" json:"destination_service,omitempty"`
// traffic_direction of the connection, e.g. ingress or egress
TrafficDirection TrafficDirection `protobuf:"varint,22,opt,name=traffic_direction,json=trafficDirection,proto3,enum=flow.TrafficDirection" json:"traffic_direction,omitempty"`
// policy_match_type is only applicable to the cilium event type PolicyVerdict
// https://github.com/cilium/cilium/blob/e831859b5cc336c6d964a6d35bbd34d1840e21b9/pkg/monitor/datapath_policy.go#L50
PolicyMatchType uint32 `protobuf:"varint,23,opt,name=policy_match_type,json=policyMatchType,proto3" json:"policy_match_type,omitempty"`
// Only applicable to cilium trace notifications, blank for other types.
TraceObservationPoint TraceObservationPoint `protobuf:"varint,24,opt,name=trace_observation_point,json=traceObservationPoint,proto3,enum=flow.TraceObservationPoint" json:"trace_observation_point,omitempty"`
// Cilium datapath trace reason info.
TraceReason TraceReason `protobuf:"varint,36,opt,name=trace_reason,json=traceReason,proto3,enum=flow.TraceReason" json:"trace_reason,omitempty"`
// only applicable to Verdict = DROPPED.
DropReasonDesc DropReason `protobuf:"varint,25,opt,name=drop_reason_desc,json=dropReasonDesc,proto3,enum=flow.DropReason" json:"drop_reason_desc,omitempty"`
// is_reply indicates that this was a packet (L4) or message (L7) in the
// reply direction. May be absent (in which case it is unknown whether it
// is a reply or not).
IsReply *wrapperspb.BoolValue `protobuf:"bytes,26,opt,name=is_reply,json=isReply,proto3" json:"is_reply,omitempty"`
// Only applicable to cilium debug capture events, blank for other types
DebugCapturePoint DebugCapturePoint `protobuf:"varint,27,opt,name=debug_capture_point,json=debugCapturePoint,proto3,enum=flow.DebugCapturePoint" json:"debug_capture_point,omitempty"`
// interface is the network interface on which this flow was observed
Interface *NetworkInterface `protobuf:"bytes,28,opt,name=interface,proto3" json:"interface,omitempty"`
// proxy_port indicates the port of the proxy to which the flow was forwarded
ProxyPort uint32 `protobuf:"varint,29,opt,name=proxy_port,json=proxyPort,proto3" json:"proxy_port,omitempty"`
// trace_context contains information about a trace related to the flow, if
// any.
TraceContext *TraceContext `protobuf:"bytes,30,opt,name=trace_context,json=traceContext,proto3" json:"trace_context,omitempty"`
// sock_xlate_point is the socket translation point.
// Only applicable to TraceSock notifications, blank for other types
SockXlatePoint SocketTranslationPoint `protobuf:"varint,31,opt,name=sock_xlate_point,json=sockXlatePoint,proto3,enum=flow.SocketTranslationPoint" json:"sock_xlate_point,omitempty"`
// socket_cookie is the Linux kernel socket cookie for this flow.
// Only applicable to TraceSock notifications, zero for other types
SocketCookie uint64 `protobuf:"varint,32,opt,name=socket_cookie,json=socketCookie,proto3" json:"socket_cookie,omitempty"`
// cgroup_id of the process which emitted this event.
// Only applicable to TraceSock notifications, zero for other types
CgroupId uint64 `protobuf:"varint,33,opt,name=cgroup_id,json=cgroupId,proto3" json:"cgroup_id,omitempty"`
// This is a temporary workaround to support the summary field for pb.Flow without
// duplicating logic from the old parser. This field will be removed once we
// fully migrate to the new parser.
//
// Deprecated: Marked as deprecated in flow/flow.proto.
Summary string `protobuf:"bytes,100000,opt,name=Summary,proto3" json:"Summary,omitempty"`
// extensions can be used to add arbitrary additional metadata to flows.
// This can be used to extend functionality for other Hubble compatible
// APIs, or experiment with new functionality without needing to change the public API.
Extensions *anypb.Any `protobuf:"bytes,150000,opt,name=extensions,proto3" json:"extensions,omitempty"`
// The CiliumNetworkPolicies allowing the egress of the flow.
EgressAllowedBy []*Policy `protobuf:"bytes,21001,rep,name=egress_allowed_by,json=egressAllowedBy,proto3" json:"egress_allowed_by,omitempty"`
// The CiliumNetworkPolicies allowing the ingress of the flow.
IngressAllowedBy []*Policy `protobuf:"bytes,21002,rep,name=ingress_allowed_by,json=ingressAllowedBy,proto3" json:"ingress_allowed_by,omitempty"`
// The CiliumNetworkPolicies denying the egress of the flow.
EgressDeniedBy []*Policy `protobuf:"bytes,21004,rep,name=egress_denied_by,json=egressDeniedBy,proto3" json:"egress_denied_by,omitempty"`
// The CiliumNetworkPolicies denying the ingress of the flow.
IngressDeniedBy []*Policy `protobuf:"bytes,21005,rep,name=ingress_denied_by,json=ingressDeniedBy,proto3" json:"ingress_denied_by,omitempty"`
}
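// Illustrative sketch (not part of the generated code): is_reply is a wrapped
// bool precisely so that "unknown" can be distinguished from "false", as the
// field comment above explains. The helper name isReplyTristate is
// hypothetical.
func isReplyTristate(f *Flow) (isReply, known bool) {
	v := f.GetIsReply()
	if v == nil {
		// Reply direction is unknown for this flow.
		return false, false
	}
	return v.GetValue(), true
}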
func (x *Flow) Reset() {
*x = Flow{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Flow) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Flow) ProtoMessage() {}
func (x *Flow) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Flow.ProtoReflect.Descriptor instead.
func (*Flow) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{0}
}
func (x *Flow) GetTime() *timestamppb.Timestamp {
if x != nil {
return x.Time
}
return nil
}
func (x *Flow) GetUuid() string {
if x != nil {
return x.Uuid
}
return ""
}
func (x *Flow) GetVerdict() Verdict {
if x != nil {
return x.Verdict
}
return Verdict_VERDICT_UNKNOWN
}
// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *Flow) GetDropReason() uint32 {
if x != nil {
return x.DropReason
}
return 0
}
func (x *Flow) GetAuthType() AuthType {
if x != nil {
return x.AuthType
}
return AuthType_DISABLED
}
func (x *Flow) GetEthernet() *Ethernet {
if x != nil {
return x.Ethernet
}
return nil
}
func (x *Flow) GetIP() *IP {
if x != nil {
return x.IP
}
return nil
}
func (x *Flow) GetL4() *Layer4 {
if x != nil {
return x.L4
}
return nil
}
func (x *Flow) GetSource() *Endpoint {
if x != nil {
return x.Source
}
return nil
}
func (x *Flow) GetDestination() *Endpoint {
if x != nil {
return x.Destination
}
return nil
}
func (x *Flow) GetType() FlowType {
if x != nil {
return x.Type
}
return FlowType_UNKNOWN_TYPE
}
func (x *Flow) GetNodeName() string {
if x != nil {
return x.NodeName
}
return ""
}
func (x *Flow) GetNodeLabels() []string {
if x != nil {
return x.NodeLabels
}
return nil
}
func (x *Flow) GetSourceNames() []string {
if x != nil {
return x.SourceNames
}
return nil
}
func (x *Flow) GetDestinationNames() []string {
if x != nil {
return x.DestinationNames
}
return nil
}
func (x *Flow) GetL7() *Layer7 {
if x != nil {
return x.L7
}
return nil
}
// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *Flow) GetReply() bool {
if x != nil {
return x.Reply
}
return false
}
func (x *Flow) GetEventType() *CiliumEventType {
if x != nil {
return x.EventType
}
return nil
}
func (x *Flow) GetSourceService() *Service {
if x != nil {
return x.SourceService
}
return nil
}
func (x *Flow) GetDestinationService() *Service {
if x != nil {
return x.DestinationService
}
return nil
}
func (x *Flow) GetTrafficDirection() TrafficDirection {
if x != nil {
return x.TrafficDirection
}
return TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN
}
func (x *Flow) GetPolicyMatchType() uint32 {
if x != nil {
return x.PolicyMatchType
}
return 0
}
func (x *Flow) GetTraceObservationPoint() TraceObservationPoint {
if x != nil {
return x.TraceObservationPoint
}
return TraceObservationPoint_UNKNOWN_POINT
}
func (x *Flow) GetTraceReason() TraceReason {
if x != nil {
return x.TraceReason
}
return TraceReason_TRACE_REASON_UNKNOWN
}
func (x *Flow) GetDropReasonDesc() DropReason {
if x != nil {
return x.DropReasonDesc
}
return DropReason_DROP_REASON_UNKNOWN
}
func (x *Flow) GetIsReply() *wrapperspb.BoolValue {
if x != nil {
return x.IsReply
}
return nil
}
func (x *Flow) GetDebugCapturePoint() DebugCapturePoint {
if x != nil {
return x.DebugCapturePoint
}
return DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN
}
func (x *Flow) GetInterface() *NetworkInterface {
if x != nil {
return x.Interface
}
return nil
}
func (x *Flow) GetProxyPort() uint32 {
if x != nil {
return x.ProxyPort
}
return 0
}
func (x *Flow) GetTraceContext() *TraceContext {
if x != nil {
return x.TraceContext
}
return nil
}
func (x *Flow) GetSockXlatePoint() SocketTranslationPoint {
if x != nil {
return x.SockXlatePoint
}
return SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN
}
func (x *Flow) GetSocketCookie() uint64 {
if x != nil {
return x.SocketCookie
}
return 0
}
func (x *Flow) GetCgroupId() uint64 {
if x != nil {
return x.CgroupId
}
return 0
}
// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *Flow) GetSummary() string {
if x != nil {
return x.Summary
}
return ""
}
func (x *Flow) GetExtensions() *anypb.Any {
if x != nil {
return x.Extensions
}
return nil
}
func (x *Flow) GetEgressAllowedBy() []*Policy {
if x != nil {
return x.EgressAllowedBy
}
return nil
}
func (x *Flow) GetIngressAllowedBy() []*Policy {
if x != nil {
return x.IngressAllowedBy
}
return nil
}
func (x *Flow) GetEgressDeniedBy() []*Policy {
if x != nil {
return x.EgressDeniedBy
}
return nil
}
func (x *Flow) GetIngressDeniedBy() []*Policy {
if x != nil {
return x.IngressDeniedBy
}
return nil
}
type Layer4 struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Protocol:
//
// *Layer4_TCP
// *Layer4_UDP
// *Layer4_ICMPv4
// *Layer4_ICMPv6
// *Layer4_SCTP
Protocol isLayer4_Protocol `protobuf_oneof:"protocol"`
}
func (x *Layer4) Reset() {
*x = Layer4{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Layer4) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Layer4) ProtoMessage() {}
func (x *Layer4) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Layer4.ProtoReflect.Descriptor instead.
func (*Layer4) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{1}
}
func (m *Layer4) GetProtocol() isLayer4_Protocol {
if m != nil {
return m.Protocol
}
return nil
}
func (x *Layer4) GetTCP() *TCP {
if x, ok := x.GetProtocol().(*Layer4_TCP); ok {
return x.TCP
}
return nil
}
func (x *Layer4) GetUDP() *UDP {
if x, ok := x.GetProtocol().(*Layer4_UDP); ok {
return x.UDP
}
return nil
}
func (x *Layer4) GetICMPv4() *ICMPv4 {
if x, ok := x.GetProtocol().(*Layer4_ICMPv4); ok {
return x.ICMPv4
}
return nil
}
func (x *Layer4) GetICMPv6() *ICMPv6 {
if x, ok := x.GetProtocol().(*Layer4_ICMPv6); ok {
return x.ICMPv6
}
return nil
}
func (x *Layer4) GetSCTP() *SCTP {
if x, ok := x.GetProtocol().(*Layer4_SCTP); ok {
return x.SCTP
}
return nil
}
type isLayer4_Protocol interface {
isLayer4_Protocol()
}
type Layer4_TCP struct {
TCP *TCP `protobuf:"bytes,1,opt,name=TCP,proto3,oneof"`
}
type Layer4_UDP struct {
UDP *UDP `protobuf:"bytes,2,opt,name=UDP,proto3,oneof"`
}
type Layer4_ICMPv4 struct {
// ICMP is technically not L4, but mutually exclusive with the above
ICMPv4 *ICMPv4 `protobuf:"bytes,3,opt,name=ICMPv4,proto3,oneof"`
}
type Layer4_ICMPv6 struct {
ICMPv6 *ICMPv6 `protobuf:"bytes,4,opt,name=ICMPv6,proto3,oneof"`
}
type Layer4_SCTP struct {
SCTP *SCTP `protobuf:"bytes,5,opt,name=SCTP,proto3,oneof"`
}
func (*Layer4_TCP) isLayer4_Protocol() {}
func (*Layer4_UDP) isLayer4_Protocol() {}
func (*Layer4_ICMPv4) isLayer4_Protocol() {}
func (*Layer4_ICMPv6) isLayer4_Protocol() {}
func (*Layer4_SCTP) isLayer4_Protocol() {}
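// Illustrative sketch (not part of the generated code): dispatching on the
// Layer4 protocol oneof via the wrapper types above. The helper name
// layer4ProtocolName is hypothetical.
func layer4ProtocolName(l4 *Layer4) string {
	// GetProtocol is nil-safe, so a nil *Layer4 falls through to the default.
	switch l4.GetProtocol().(type) {
	case *Layer4_TCP:
		return "TCP"
	case *Layer4_UDP:
		return "UDP"
	case *Layer4_ICMPv4:
		return "ICMPv4"
	case *Layer4_ICMPv6:
		return "ICMPv6"
	case *Layer4_SCTP:
		return "SCTP"
	default:
		return "unknown"
	}
}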
// Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141):
type Layer7 struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type L7FlowType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.L7FlowType" json:"type,omitempty"`
// Latency of the response
LatencyNs uint64 `protobuf:"varint,2,opt,name=latency_ns,json=latencyNs,proto3" json:"latency_ns,omitempty"`
// L7 field. This field is set if and only if FlowType is L7.
//
// Types that are assignable to Record:
//
// *Layer7_Dns
// *Layer7_Http
// *Layer7_Kafka
Record isLayer7_Record `protobuf_oneof:"record"`
}
func (x *Layer7) Reset() {
*x = Layer7{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Layer7) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Layer7) ProtoMessage() {}
func (x *Layer7) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Layer7.ProtoReflect.Descriptor instead.
func (*Layer7) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{2}
}
func (x *Layer7) GetType() L7FlowType {
if x != nil {
return x.Type
}
return L7FlowType_UNKNOWN_L7_TYPE
}
func (x *Layer7) GetLatencyNs() uint64 {
if x != nil {
return x.LatencyNs
}
return 0
}
func (m *Layer7) GetRecord() isLayer7_Record {
if m != nil {
return m.Record
}
return nil
}
func (x *Layer7) GetDns() *DNS {
if x, ok := x.GetRecord().(*Layer7_Dns); ok {
return x.Dns
}
return nil
}
func (x *Layer7) GetHttp() *HTTP {
if x, ok := x.GetRecord().(*Layer7_Http); ok {
return x.Http
}
return nil
}
func (x *Layer7) GetKafka() *Kafka {
if x, ok := x.GetRecord().(*Layer7_Kafka); ok {
return x.Kafka
}
return nil
}
type isLayer7_Record interface {
isLayer7_Record()
}
type Layer7_Dns struct {
Dns *DNS `protobuf:"bytes,100,opt,name=dns,proto3,oneof"`
}
type Layer7_Http struct {
Http *HTTP `protobuf:"bytes,101,opt,name=http,proto3,oneof"`
}
type Layer7_Kafka struct {
Kafka *Kafka `protobuf:"bytes,102,opt,name=kafka,proto3,oneof"`
}
func (*Layer7_Dns) isLayer7_Record() {}
func (*Layer7_Http) isLayer7_Record() {}
func (*Layer7_Kafka) isLayer7_Record() {}
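// An illustrative sketch (helper name and summary strings are hypothetical):
// the record oneof carries exactly one L7 payload, so consumers typically
// switch on GetRecord(). Only getters defined in this file are used.
func exampleLayer7Summary(l7 *Layer7) string {
	switch r := l7.GetRecord().(type) {
	case *Layer7_Dns:
		return "dns query: " + r.Dns.GetQuery()
	case *Layer7_Http:
		return "http " + r.Http.GetMethod() + " " + r.Http.GetUrl()
	case *Layer7_Kafka:
		return "kafka topic: " + r.Kafka.GetTopic()
	}
	return "unknown l7 record"
}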
// TraceContext contains trace context propagation data, i.e. information about a
// distributed trace.
// For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/).
type TraceContext struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// parent identifies the incoming request in a tracing system.
Parent *TraceParent `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
}
func (x *TraceContext) Reset() {
*x = TraceContext{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TraceContext) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TraceContext) ProtoMessage() {}
func (x *TraceContext) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TraceContext.ProtoReflect.Descriptor instead.
func (*TraceContext) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{3}
}
func (x *TraceContext) GetParent() *TraceParent {
if x != nil {
return x.Parent
}
return nil
}
// TraceParent identifies the incoming request in a tracing system.
type TraceParent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// trace_id is a unique value that identifies a trace. It is a byte array
// represented as a hex string.
TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
}
func (x *TraceParent) Reset() {
*x = TraceParent{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TraceParent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TraceParent) ProtoMessage() {}
func (x *TraceParent) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TraceParent.ProtoReflect.Descriptor instead.
func (*TraceParent) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{4}
}
func (x *TraceParent) GetTraceId() string {
if x != nil {
return x.TraceId
}
return ""
}
type Endpoint struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ID uint32 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Identity uint32 `protobuf:"varint,2,opt,name=identity,proto3" json:"identity,omitempty"`
ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"`
// labels in `foo=bar` format.
Labels []string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
PodName string `protobuf:"bytes,5,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"`
Workloads []*Workload `protobuf:"bytes,6,rep,name=workloads,proto3" json:"workloads,omitempty"`
}
func (x *Endpoint) Reset() {
*x = Endpoint{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Endpoint) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Endpoint) ProtoMessage() {}
func (x *Endpoint) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead.
func (*Endpoint) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{5}
}
func (x *Endpoint) GetID() uint32 {
if x != nil {
return x.ID
}
return 0
}
func (x *Endpoint) GetIdentity() uint32 {
if x != nil {
return x.Identity
}
return 0
}
func (x *Endpoint) GetClusterName() string {
if x != nil {
return x.ClusterName
}
return ""
}
func (x *Endpoint) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
func (x *Endpoint) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *Endpoint) GetPodName() string {
if x != nil {
return x.PodName
}
return ""
}
func (x *Endpoint) GetWorkloads() []*Workload {
if x != nil {
return x.Workloads
}
return nil
}
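// An illustrative sketch (helper name is hypothetical): formatting an Endpoint
// as "namespace/pod" via the nil-safe getters above. Endpoints without a pod
// name (for example, peers outside Kubernetes) are assumed to be reported by
// namespace only.
func exampleEndpointDisplayName(ep *Endpoint) string {
	if ep.GetPodName() == "" {
		return ep.GetNamespace()
	}
	return ep.GetNamespace() + "/" + ep.GetPodName()
}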
type Workload struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"`
}
func (x *Workload) Reset() {
*x = Workload{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Workload) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Workload) ProtoMessage() {}
func (x *Workload) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Workload.ProtoReflect.Descriptor instead.
func (*Workload) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{6}
}
func (x *Workload) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Workload) GetKind() string {
if x != nil {
return x.Kind
}
return ""
}
type TCP struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SourcePort uint32 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
DestinationPort uint32 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
Flags *TCPFlags `protobuf:"bytes,3,opt,name=flags,proto3" json:"flags,omitempty"`
}
func (x *TCP) Reset() {
*x = TCP{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TCP) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TCP) ProtoMessage() {}
func (x *TCP) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TCP.ProtoReflect.Descriptor instead.
func (*TCP) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{7}
}
func (x *TCP) GetSourcePort() uint32 {
if x != nil {
return x.SourcePort
}
return 0
}
func (x *TCP) GetDestinationPort() uint32 {
if x != nil {
return x.DestinationPort
}
return 0
}
func (x *TCP) GetFlags() *TCPFlags {
if x != nil {
return x.Flags
}
return nil
}
type IP struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
// source_xlated is the post-translation source IP when the flow was SNATed
// (in that case, source is the original source IP).
SourceXlated string `protobuf:"bytes,5,opt,name=source_xlated,json=sourceXlated,proto3" json:"source_xlated,omitempty"`
Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
IpVersion IPVersion `protobuf:"varint,3,opt,name=ipVersion,proto3,enum=flow.IPVersion" json:"ipVersion,omitempty"`
// This field indicates whether the TraceReasonEncryptMask is set or not.
// https://github.com/cilium/cilium/blob/ba0ed147bd5bb342f67b1794c2ad13c6e99d5236/pkg/monitor/datapath_trace.go#L27
Encrypted bool `protobuf:"varint,4,opt,name=encrypted,proto3" json:"encrypted,omitempty"`
}
func (x *IP) Reset() {
*x = IP{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *IP) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IP) ProtoMessage() {}
func (x *IP) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IP.ProtoReflect.Descriptor instead.
func (*IP) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{8}
}
func (x *IP) GetSource() string {
if x != nil {
return x.Source
}
return ""
}
func (x *IP) GetSourceXlated() string {
if x != nil {
return x.SourceXlated
}
return ""
}
func (x *IP) GetDestination() string {
if x != nil {
return x.Destination
}
return ""
}
func (x *IP) GetIpVersion() IPVersion {
if x != nil {
return x.IpVersion
}
return IPVersion_IP_NOT_USED
}
func (x *IP) GetEncrypted() bool {
if x != nil {
return x.Encrypted
}
return false
}
type Ethernet struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
}
func (x *Ethernet) Reset() {
*x = Ethernet{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Ethernet) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Ethernet) ProtoMessage() {}
func (x *Ethernet) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Ethernet.ProtoReflect.Descriptor instead.
func (*Ethernet) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{9}
}
func (x *Ethernet) GetSource() string {
if x != nil {
return x.Source
}
return ""
}
func (x *Ethernet) GetDestination() string {
if x != nil {
return x.Destination
}
return ""
}
type TCPFlags struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
FIN bool `protobuf:"varint,1,opt,name=FIN,proto3" json:"FIN,omitempty"`
SYN bool `protobuf:"varint,2,opt,name=SYN,proto3" json:"SYN,omitempty"`
RST bool `protobuf:"varint,3,opt,name=RST,proto3" json:"RST,omitempty"`
PSH bool `protobuf:"varint,4,opt,name=PSH,proto3" json:"PSH,omitempty"`
ACK bool `protobuf:"varint,5,opt,name=ACK,proto3" json:"ACK,omitempty"`
URG bool `protobuf:"varint,6,opt,name=URG,proto3" json:"URG,omitempty"`
ECE bool `protobuf:"varint,7,opt,name=ECE,proto3" json:"ECE,omitempty"`
CWR bool `protobuf:"varint,8,opt,name=CWR,proto3" json:"CWR,omitempty"`
NS bool `protobuf:"varint,9,opt,name=NS,proto3" json:"NS,omitempty"`
}
func (x *TCPFlags) Reset() {
*x = TCPFlags{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TCPFlags) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TCPFlags) ProtoMessage() {}
func (x *TCPFlags) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TCPFlags.ProtoReflect.Descriptor instead.
func (*TCPFlags) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{10}
}
func (x *TCPFlags) GetFIN() bool {
if x != nil {
return x.FIN
}
return false
}
func (x *TCPFlags) GetSYN() bool {
if x != nil {
return x.SYN
}
return false
}
func (x *TCPFlags) GetRST() bool {
if x != nil {
return x.RST
}
return false
}
func (x *TCPFlags) GetPSH() bool {
if x != nil {
return x.PSH
}
return false
}
func (x *TCPFlags) GetACK() bool {
if x != nil {
return x.ACK
}
return false
}
func (x *TCPFlags) GetURG() bool {
if x != nil {
return x.URG
}
return false
}
func (x *TCPFlags) GetECE() bool {
if x != nil {
return x.ECE
}
return false
}
func (x *TCPFlags) GetCWR() bool {
if x != nil {
return x.CWR
}
return false
}
func (x *TCPFlags) GetNS() bool {
if x != nil {
return x.NS
}
return false
}
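// An illustrative sketch (helper name is hypothetical): TCPFlags exposes one
// boolean per TCP header bit. A SYN without an ACK marks the first packet of a
// handshake, a common way to count new connections from flow data.
func exampleIsHandshakeSyn(f *TCPFlags) bool {
	return f.GetSYN() && !f.GetACK()
}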
type UDP struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SourcePort uint32 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
DestinationPort uint32 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
}
func (x *UDP) Reset() {
*x = UDP{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UDP) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UDP) ProtoMessage() {}
func (x *UDP) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UDP.ProtoReflect.Descriptor instead.
func (*UDP) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{11}
}
func (x *UDP) GetSourcePort() uint32 {
if x != nil {
return x.SourcePort
}
return 0
}
func (x *UDP) GetDestinationPort() uint32 {
if x != nil {
return x.DestinationPort
}
return 0
}
type SCTP struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SourcePort uint32 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
DestinationPort uint32 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
}
func (x *SCTP) Reset() {
*x = SCTP{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SCTP) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SCTP) ProtoMessage() {}
func (x *SCTP) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SCTP.ProtoReflect.Descriptor instead.
func (*SCTP) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{12}
}
func (x *SCTP) GetSourcePort() uint32 {
if x != nil {
return x.SourcePort
}
return 0
}
func (x *SCTP) GetDestinationPort() uint32 {
if x != nil {
return x.DestinationPort
}
return 0
}
type ICMPv4 struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
Code uint32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
}
func (x *ICMPv4) Reset() {
*x = ICMPv4{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ICMPv4) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ICMPv4) ProtoMessage() {}
func (x *ICMPv4) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ICMPv4.ProtoReflect.Descriptor instead.
func (*ICMPv4) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{13}
}
func (x *ICMPv4) GetType() uint32 {
if x != nil {
return x.Type
}
return 0
}
func (x *ICMPv4) GetCode() uint32 {
if x != nil {
return x.Code
}
return 0
}
type ICMPv6 struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
Code uint32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
}
func (x *ICMPv6) Reset() {
*x = ICMPv6{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ICMPv6) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ICMPv6) ProtoMessage() {}
func (x *ICMPv6) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ICMPv6.ProtoReflect.Descriptor instead.
func (*ICMPv6) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{14}
}
func (x *ICMPv6) GetType() uint32 {
if x != nil {
return x.Type
}
return 0
}
func (x *ICMPv6) GetCode() uint32 {
if x != nil {
return x.Code
}
return 0
}
type Policy struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
Labels []string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"`
Revision uint64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
}
func (x *Policy) Reset() {
*x = Policy{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Policy) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Policy) ProtoMessage() {}
func (x *Policy) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Policy.ProtoReflect.Descriptor instead.
func (*Policy) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{15}
}
func (x *Policy) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Policy) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
func (x *Policy) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *Policy) GetRevision() uint64 {
if x != nil {
return x.Revision
}
return 0
}
// EventTypeFilter is a filter describing a particular event type.
type EventTypeFilter struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// type is the primary flow type as defined by:
// github.com/cilium/cilium/pkg/monitor/api.MessageType*
Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
// match_sub_type is set to true when matching on the sub_type should
// be done. This flag is required as 0 is a valid sub_type.
MatchSubType bool `protobuf:"varint,2,opt,name=match_sub_type,json=matchSubType,proto3" json:"match_sub_type,omitempty"`
// sub_type is the secondary type, e.g.
// - github.com/cilium/cilium/pkg/monitor/api.Trace*
SubType int32 `protobuf:"varint,3,opt,name=sub_type,json=subType,proto3" json:"sub_type,omitempty"`
}
func (x *EventTypeFilter) Reset() {
*x = EventTypeFilter{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *EventTypeFilter) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EventTypeFilter) ProtoMessage() {}
func (x *EventTypeFilter) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EventTypeFilter.ProtoReflect.Descriptor instead.
func (*EventTypeFilter) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{16}
}
func (x *EventTypeFilter) GetType() int32 {
if x != nil {
return x.Type
}
return 0
}
func (x *EventTypeFilter) GetMatchSubType() bool {
if x != nil {
return x.MatchSubType
}
return false
}
func (x *EventTypeFilter) GetSubType() int32 {
if x != nil {
return x.SubType
}
return 0
}
// CiliumEventType from which the flow originated.
type CiliumEventType struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// type of event the flow originated from, i.e.
// github.com/cilium/cilium/pkg/monitor/api.MessageType*
Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
// sub_type may indicate more details depending on type, e.g.
// - github.com/cilium/cilium/pkg/monitor/api.Trace*
// - github.com/cilium/cilium/pkg/monitor/api.Drop*
// - github.com/cilium/cilium/pkg/monitor/api.DbgCapture*
SubType int32 `protobuf:"varint,2,opt,name=sub_type,json=subType,proto3" json:"sub_type,omitempty"`
}
func (x *CiliumEventType) Reset() {
*x = CiliumEventType{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CiliumEventType) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CiliumEventType) ProtoMessage() {}
func (x *CiliumEventType) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CiliumEventType.ProtoReflect.Descriptor instead.
func (*CiliumEventType) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{17}
}
func (x *CiliumEventType) GetType() int32 {
if x != nil {
return x.Type
}
return 0
}
func (x *CiliumEventType) GetSubType() int32 {
if x != nil {
return x.SubType
}
return 0
}
// FlowFilter represent an individual flow filter. All fields are optional. If
// multiple fields are set, then all fields must match for the filter to match.
type FlowFilter struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// uuid filters by a list of flow uuids.
Uuid []string `protobuf:"bytes,29,rep,name=uuid,proto3" json:"uuid,omitempty"`
// source_ip filters by a list of source ips. Each of the source ips can be
// specified as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g.
// "1.1.1.0/24").
SourceIp []string `protobuf:"bytes,1,rep,name=source_ip,json=sourceIp,proto3" json:"source_ip,omitempty"`
// source_ip_xlated filters by a list of IPs. Each of the IPs can be specified
// as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g.
// "1.1.1.0/24").
SourceIpXlated []string `protobuf:"bytes,34,rep,name=source_ip_xlated,json=sourceIpXlated,proto3" json:"source_ip_xlated,omitempty"`
// source_pod filters by a list of source pod name prefixes, optionally
// within a given namespace (e.g. "xwing", "kube-system/coredns-").
// The pod name can be omitted to only filter by namespace
// (e.g. "kube-system/") or the namespace can be omitted to filter for
// pods in any namespace (e.g. "/xwing")
SourcePod []string `protobuf:"bytes,2,rep,name=source_pod,json=sourcePod,proto3" json:"source_pod,omitempty"`
// source_fqdn filters by a list of source fully qualified domain names
SourceFqdn []string `protobuf:"bytes,7,rep,name=source_fqdn,json=sourceFqdn,proto3" json:"source_fqdn,omitempty"`
// source_labels filters on a list of source label selectors. Selectors
// support the full Kubernetes label selector syntax.
SourceLabel []string `protobuf:"bytes,10,rep,name=source_label,json=sourceLabel,proto3" json:"source_label,omitempty"`
// source_service filters on a list of source service names. This field
// supports the same syntax as the source_pod field.
SourceService []string `protobuf:"bytes,16,rep,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"`
// source_workload filters by a list of source workloads.
SourceWorkload []*Workload `protobuf:"bytes,26,rep,name=source_workload,json=sourceWorkload,proto3" json:"source_workload,omitempty"`
// destination_ip filters by a list of destination ips. Each of the
// destination ips can be specified as an exact match (e.g. "1.1.1.1") or
// as a CIDR range (e.g. "1.1.1.0/24").
DestinationIp []string `protobuf:"bytes,3,rep,name=destination_ip,json=destinationIp,proto3" json:"destination_ip,omitempty"`
// destination_pod filters by a list of destination pod names
DestinationPod []string `protobuf:"bytes,4,rep,name=destination_pod,json=destinationPod,proto3" json:"destination_pod,omitempty"`
// destination_fqdn filters by a list of destination fully qualified domain names
DestinationFqdn []string `protobuf:"bytes,8,rep,name=destination_fqdn,json=destinationFqdn,proto3" json:"destination_fqdn,omitempty"`
// destination_label filters on a list of destination label selectors
DestinationLabel []string `protobuf:"bytes,11,rep,name=destination_label,json=destinationLabel,proto3" json:"destination_label,omitempty"`
// destination_service filters on a list of destination service names
DestinationService []string `protobuf:"bytes,17,rep,name=destination_service,json=destinationService,proto3" json:"destination_service,omitempty"`
// destination_workload filters by a list of destination workloads.
DestinationWorkload []*Workload `protobuf:"bytes,27,rep,name=destination_workload,json=destinationWorkload,proto3" json:"destination_workload,omitempty"`
// traffic_direction filters flow by direction of the connection, e.g.
// ingress or egress.
TrafficDirection []TrafficDirection `protobuf:"varint,30,rep,packed,name=traffic_direction,json=trafficDirection,proto3,enum=flow.TrafficDirection" json:"traffic_direction,omitempty"`
// only return Flows that were classified with a particular verdict.
Verdict []Verdict `protobuf:"varint,5,rep,packed,name=verdict,proto3,enum=flow.Verdict" json:"verdict,omitempty"`
// only applicable to Verdict = DROPPED (e.g. "POLICY_DENIED", "UNSUPPORTED_L3_PROTOCOL")
DropReasonDesc []DropReason `protobuf:"varint,33,rep,packed,name=drop_reason_desc,json=dropReasonDesc,proto3,enum=flow.DropReason" json:"drop_reason_desc,omitempty"`
// interface is the network interface on which this flow was observed.
Interface []*NetworkInterface `protobuf:"bytes,35,rep,name=interface,proto3" json:"interface,omitempty"`
// event_type is the list of event types to filter on
EventType []*EventTypeFilter `protobuf:"bytes,6,rep,name=event_type,json=eventType,proto3" json:"event_type,omitempty"`
// http_status_code is a list of string prefixes (e.g. "4+", "404", "5+")
// to filter on the HTTP status code
HttpStatusCode []string `protobuf:"bytes,9,rep,name=http_status_code,json=httpStatusCode,proto3" json:"http_status_code,omitempty"`
// protocol filters flows by L4 or L7 protocol (e.g. "tcp", "http")
Protocol []string `protobuf:"bytes,12,rep,name=protocol,proto3" json:"protocol,omitempty"`
// source_port filters flows by L4 source port
SourcePort []string `protobuf:"bytes,13,rep,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
// destination_port filters flows by L4 destination port
DestinationPort []string `protobuf:"bytes,14,rep,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
// reply filters flows based on the direction of the flow.
Reply []bool `protobuf:"varint,15,rep,packed,name=reply,proto3" json:"reply,omitempty"`
// dns_query filters L7 DNS flows by query patterns (RE2 regex), e.g. 'kube.*local'.
DnsQuery []string `protobuf:"bytes,18,rep,name=dns_query,json=dnsQuery,proto3" json:"dns_query,omitempty"`
// source_identity filters by the security identity of the source endpoint.
SourceIdentity []uint32 `protobuf:"varint,19,rep,packed,name=source_identity,json=sourceIdentity,proto3" json:"source_identity,omitempty"`
// destination_identity filters by the security identity of the destination endpoint.
DestinationIdentity []uint32 `protobuf:"varint,20,rep,packed,name=destination_identity,json=destinationIdentity,proto3" json:"destination_identity,omitempty"`
// GET, POST, PUT, etc. methods. This type of field is well suited for an
// enum but every single existing place is using a string already.
HttpMethod []string `protobuf:"bytes,21,rep,name=http_method,json=httpMethod,proto3" json:"http_method,omitempty"`
// http_path is a list of regular expressions to filter on the HTTP path.
HttpPath []string `protobuf:"bytes,22,rep,name=http_path,json=httpPath,proto3" json:"http_path,omitempty"`
// http_url is a list of regular expressions to filter on the HTTP URL.
HttpUrl []string `protobuf:"bytes,31,rep,name=http_url,json=httpUrl,proto3" json:"http_url,omitempty"`
// http_header is a list of key:value pairs to filter on the HTTP headers.
HttpHeader []*HTTPHeader `protobuf:"bytes,32,rep,name=http_header,json=httpHeader,proto3" json:"http_header,omitempty"`
// tcp_flags filters flows based on TCP header flags
TcpFlags []*TCPFlags `protobuf:"bytes,23,rep,name=tcp_flags,json=tcpFlags,proto3" json:"tcp_flags,omitempty"`
// node_name is a list of patterns to filter on the node name, e.g. "k8s*",
// "test-cluster/*.domain.com", "cluster-name/" etc.
NodeName []string `protobuf:"bytes,24,rep,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// node_labels filters on a list of node label selectors. Selectors support
// the full Kubernetes label selector syntax.
NodeLabels []string `protobuf:"bytes,36,rep,name=node_labels,json=nodeLabels,proto3" json:"node_labels,omitempty"`
// filter based on IP version (ipv4 or ipv6)
IpVersion []IPVersion `protobuf:"varint,25,rep,packed,name=ip_version,json=ipVersion,proto3,enum=flow.IPVersion" json:"ip_version,omitempty"`
// trace_id filters flows by trace ID
TraceId []string `protobuf:"bytes,28,rep,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
// experimental contains filters that are not stable yet. Support for
// experimental features is always optional and subject to change.
Experimental *FlowFilter_Experimental `protobuf:"bytes,999,opt,name=experimental,proto3" json:"experimental,omitempty"`
}
func (x *FlowFilter) Reset() {
*x = FlowFilter{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FlowFilter) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlowFilter) ProtoMessage() {}
func (x *FlowFilter) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlowFilter.ProtoReflect.Descriptor instead.
func (*FlowFilter) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{18}
}
func (x *FlowFilter) GetUuid() []string {
if x != nil {
return x.Uuid
}
return nil
}
func (x *FlowFilter) GetSourceIp() []string {
if x != nil {
return x.SourceIp
}
return nil
}
func (x *FlowFilter) GetSourceIpXlated() []string {
if x != nil {
return x.SourceIpXlated
}
return nil
}
func (x *FlowFilter) GetSourcePod() []string {
if x != nil {
return x.SourcePod
}
return nil
}
func (x *FlowFilter) GetSourceFqdn() []string {
if x != nil {
return x.SourceFqdn
}
return nil
}
func (x *FlowFilter) GetSourceLabel() []string {
if x != nil {
return x.SourceLabel
}
return nil
}
func (x *FlowFilter) GetSourceService() []string {
if x != nil {
return x.SourceService
}
return nil
}
func (x *FlowFilter) GetSourceWorkload() []*Workload {
if x != nil {
return x.SourceWorkload
}
return nil
}
func (x *FlowFilter) GetDestinationIp() []string {
if x != nil {
return x.DestinationIp
}
return nil
}
func (x *FlowFilter) GetDestinationPod() []string {
if x != nil {
return x.DestinationPod
}
return nil
}
func (x *FlowFilter) GetDestinationFqdn() []string {
if x != nil {
return x.DestinationFqdn
}
return nil
}
func (x *FlowFilter) GetDestinationLabel() []string {
if x != nil {
return x.DestinationLabel
}
return nil
}
func (x *FlowFilter) GetDestinationService() []string {
if x != nil {
return x.DestinationService
}
return nil
}
func (x *FlowFilter) GetDestinationWorkload() []*Workload {
if x != nil {
return x.DestinationWorkload
}
return nil
}
func (x *FlowFilter) GetTrafficDirection() []TrafficDirection {
if x != nil {
return x.TrafficDirection
}
return nil
}
func (x *FlowFilter) GetVerdict() []Verdict {
if x != nil {
return x.Verdict
}
return nil
}
func (x *FlowFilter) GetDropReasonDesc() []DropReason {
if x != nil {
return x.DropReasonDesc
}
return nil
}
func (x *FlowFilter) GetInterface() []*NetworkInterface {
if x != nil {
return x.Interface
}
return nil
}
func (x *FlowFilter) GetEventType() []*EventTypeFilter {
if x != nil {
return x.EventType
}
return nil
}
func (x *FlowFilter) GetHttpStatusCode() []string {
if x != nil {
return x.HttpStatusCode
}
return nil
}
func (x *FlowFilter) GetProtocol() []string {
if x != nil {
return x.Protocol
}
return nil
}
func (x *FlowFilter) GetSourcePort() []string {
if x != nil {
return x.SourcePort
}
return nil
}
func (x *FlowFilter) GetDestinationPort() []string {
if x != nil {
return x.DestinationPort
}
return nil
}
func (x *FlowFilter) GetReply() []bool {
if x != nil {
return x.Reply
}
return nil
}
func (x *FlowFilter) GetDnsQuery() []string {
if x != nil {
return x.DnsQuery
}
return nil
}
func (x *FlowFilter) GetSourceIdentity() []uint32 {
if x != nil {
return x.SourceIdentity
}
return nil
}
func (x *FlowFilter) GetDestinationIdentity() []uint32 {
if x != nil {
return x.DestinationIdentity
}
return nil
}
func (x *FlowFilter) GetHttpMethod() []string {
if x != nil {
return x.HttpMethod
}
return nil
}
func (x *FlowFilter) GetHttpPath() []string {
if x != nil {
return x.HttpPath
}
return nil
}
func (x *FlowFilter) GetHttpUrl() []string {
if x != nil {
return x.HttpUrl
}
return nil
}
func (x *FlowFilter) GetHttpHeader() []*HTTPHeader {
if x != nil {
return x.HttpHeader
}
return nil
}
func (x *FlowFilter) GetTcpFlags() []*TCPFlags {
if x != nil {
return x.TcpFlags
}
return nil
}
func (x *FlowFilter) GetNodeName() []string {
if x != nil {
return x.NodeName
}
return nil
}
func (x *FlowFilter) GetNodeLabels() []string {
if x != nil {
return x.NodeLabels
}
return nil
}
func (x *FlowFilter) GetIpVersion() []IPVersion {
if x != nil {
return x.IpVersion
}
return nil
}
func (x *FlowFilter) GetTraceId() []string {
if x != nil {
return x.TraceId
}
return nil
}
func (x *FlowFilter) GetExperimental() *FlowFilter_Experimental {
if x != nil {
return x.Experimental
}
return nil
}
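// An illustrative sketch (helper name and filter values are hypothetical):
// building a FlowFilter that combines several of the fields documented above.
// All set fields must match, so this selects TCP flows from pods in the
// kube-system namespace (namespace-only prefix, pod name omitted) whose HTTP
// status code starts with 5.
func exampleFlowFilter() *FlowFilter {
	return &FlowFilter{
		SourcePod:      []string{"kube-system/"},
		Protocol:       []string{"tcp"},
		HttpStatusCode: []string{"5+"},
	}
}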
// DNS flow. This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264):
type DNS struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// DNS name that's being looked up: e.g. "isovalent.com."
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
// List of IP addresses in the DNS response.
Ips []string `protobuf:"bytes,2,rep,name=ips,proto3" json:"ips,omitempty"`
// TTL in the DNS response.
Ttl uint32 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
// List of CNames in the DNS response.
Cnames []string `protobuf:"bytes,4,rep,name=cnames,proto3" json:"cnames,omitempty"`
// Corresponds to DNSDataSource defined in:
//
// https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L253
ObservationSource string `protobuf:"bytes,5,opt,name=observation_source,json=observationSource,proto3" json:"observation_source,omitempty"`
// Return code of the DNS request defined in:
//
// https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6
Rcode uint32 `protobuf:"varint,6,opt,name=rcode,proto3" json:"rcode,omitempty"`
// String representation of qtypes defined in:
//
// https://tools.ietf.org/html/rfc1035#section-3.2.3
Qtypes []string `protobuf:"bytes,7,rep,name=qtypes,proto3" json:"qtypes,omitempty"`
// String representation of rrtypes defined in:
// https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4
Rrtypes []string `protobuf:"bytes,8,rep,name=rrtypes,proto3" json:"rrtypes,omitempty"`
}
func (x *DNS) Reset() {
*x = DNS{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DNS) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DNS) ProtoMessage() {}
func (x *DNS) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DNS.ProtoReflect.Descriptor instead.
func (*DNS) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{19}
}
func (x *DNS) GetQuery() string {
if x != nil {
return x.Query
}
return ""
}
func (x *DNS) GetIps() []string {
if x != nil {
return x.Ips
}
return nil
}
func (x *DNS) GetTtl() uint32 {
if x != nil {
return x.Ttl
}
return 0
}
func (x *DNS) GetCnames() []string {
if x != nil {
return x.Cnames
}
return nil
}
func (x *DNS) GetObservationSource() string {
if x != nil {
return x.ObservationSource
}
return ""
}
func (x *DNS) GetRcode() uint32 {
if x != nil {
return x.Rcode
}
return 0
}
func (x *DNS) GetQtypes() []string {
if x != nil {
return x.Qtypes
}
return nil
}
func (x *DNS) GetRrtypes() []string {
if x != nil {
return x.Rrtypes
}
return nil
}
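// An illustrative sketch (helper name is hypothetical): treating a DNS record
// as successfully answered when rcode is 0 (NOERROR in the IANA registry
// linked above) and at least one address was returned.
func exampleDNSAnswered(d *DNS) bool {
	return d.GetRcode() == 0 && len(d.GetIps()) > 0
}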
type HTTPHeader struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (x *HTTPHeader) Reset() {
*x = HTTPHeader{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *HTTPHeader) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HTTPHeader) ProtoMessage() {}
func (x *HTTPHeader) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HTTPHeader.ProtoReflect.Descriptor instead.
func (*HTTPHeader) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{20}
}
func (x *HTTPHeader) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *HTTPHeader) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
// L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type.
type HTTP struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"`
Protocol string `protobuf:"bytes,4,opt,name=protocol,proto3" json:"protocol,omitempty"`
Headers []*HTTPHeader `protobuf:"bytes,5,rep,name=headers,proto3" json:"headers,omitempty"`
}
func (x *HTTP) Reset() {
*x = HTTP{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *HTTP) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HTTP) ProtoMessage() {}
func (x *HTTP) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HTTP.ProtoReflect.Descriptor instead.
func (*HTTP) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{21}
}
func (x *HTTP) GetCode() uint32 {
if x != nil {
return x.Code
}
return 0
}
func (x *HTTP) GetMethod() string {
if x != nil {
return x.Method
}
return ""
}
func (x *HTTP) GetUrl() string {
if x != nil {
return x.Url
}
return ""
}
func (x *HTTP) GetProtocol() string {
if x != nil {
return x.Protocol
}
return ""
}
func (x *HTTP) GetHeaders() []*HTTPHeader {
if x != nil {
return x.Headers
}
return nil
}
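// An illustrative sketch (helper name is hypothetical): classifying an HTTP
// record as a server-side error by status code, mirroring the "5+" prefix
// filter shown for FlowFilter.http_status_code above.
func exampleIsServerError(h *HTTP) bool {
	return h.GetCode() >= 500 && h.GetCode() < 600
}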
// L7 information for Kafka flows. It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type.
type Kafka struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
ApiVersion int32 `protobuf:"varint,2,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
ApiKey string `protobuf:"bytes,3,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
CorrelationId int32 `protobuf:"varint,4,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"`
Topic string `protobuf:"bytes,5,opt,name=topic,proto3" json:"topic,omitempty"`
}
func (x *Kafka) Reset() {
*x = Kafka{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Kafka) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Kafka) ProtoMessage() {}
func (x *Kafka) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Kafka.ProtoReflect.Descriptor instead.
func (*Kafka) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{22}
}
func (x *Kafka) GetErrorCode() int32 {
if x != nil {
return x.ErrorCode
}
return 0
}
func (x *Kafka) GetApiVersion() int32 {
if x != nil {
return x.ApiVersion
}
return 0
}
func (x *Kafka) GetApiKey() string {
if x != nil {
return x.ApiKey
}
return ""
}
func (x *Kafka) GetCorrelationId() int32 {
if x != nil {
return x.CorrelationId
}
return 0
}
func (x *Kafka) GetTopic() string {
if x != nil {
return x.Topic
}
return ""
}
type Service struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
}
func (x *Service) Reset() {
*x = Service{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Service) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Service) ProtoMessage() {}
func (x *Service) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Service.ProtoReflect.Descriptor instead.
func (*Service) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{23}
}
func (x *Service) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Service) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
// LostEvent is a message which notifies consumers about a loss of events
// that happened before the events were captured by Hubble.
type LostEvent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// source is the location where events got lost.
Source LostEventSource `protobuf:"varint,1,opt,name=source,proto3,enum=flow.LostEventSource" json:"source,omitempty"`
// num_events_lost is the number of events that have been lost at the source.
NumEventsLost uint64 `protobuf:"varint,2,opt,name=num_events_lost,json=numEventsLost,proto3" json:"num_events_lost,omitempty"`
// cpu on which the event was lost if the source of lost events is
// PERF_EVENT_RING_BUFFER.
Cpu *wrapperspb.Int32Value `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"`
}
func (x *LostEvent) Reset() {
*x = LostEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *LostEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LostEvent) ProtoMessage() {}
func (x *LostEvent) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LostEvent.ProtoReflect.Descriptor instead.
func (*LostEvent) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{24}
}
func (x *LostEvent) GetSource() LostEventSource {
if x != nil {
return x.Source
}
return LostEventSource_UNKNOWN_LOST_EVENT_SOURCE
}
func (x *LostEvent) GetNumEventsLost() uint64 {
if x != nil {
return x.NumEventsLost
}
return 0
}
func (x *LostEvent) GetCpu() *wrapperspb.Int32Value {
if x != nil {
return x.Cpu
}
return nil
}
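// An illustrative sketch (helper name is hypothetical): cpu is a wrapped int32
// so that "not set" can be distinguished from CPU 0. The helper returns the
// CPU number together with whether it was present.
func exampleLostEventCPU(ev *LostEvent) (int32, bool) {
	if cpu := ev.GetCpu(); cpu != nil {
		return cpu.GetValue(), true
	}
	return 0, false
}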
type AgentEvent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type AgentEventType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.AgentEventType" json:"type,omitempty"`
// Types that are assignable to Notification:
//
// *AgentEvent_Unknown
// *AgentEvent_AgentStart
// *AgentEvent_PolicyUpdate
// *AgentEvent_EndpointRegenerate
// *AgentEvent_EndpointUpdate
// *AgentEvent_IpcacheUpdate
// *AgentEvent_ServiceUpsert
// *AgentEvent_ServiceDelete
Notification isAgentEvent_Notification `protobuf_oneof:"notification"`
}
func (x *AgentEvent) Reset() {
*x = AgentEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AgentEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AgentEvent) ProtoMessage() {}
func (x *AgentEvent) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AgentEvent.ProtoReflect.Descriptor instead.
func (*AgentEvent) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{25}
}
func (x *AgentEvent) GetType() AgentEventType {
if x != nil {
return x.Type
}
return AgentEventType_AGENT_EVENT_UNKNOWN
}
func (m *AgentEvent) GetNotification() isAgentEvent_Notification {
if m != nil {
return m.Notification
}
return nil
}
func (x *AgentEvent) GetUnknown() *AgentEventUnknown {
if x, ok := x.GetNotification().(*AgentEvent_Unknown); ok {
return x.Unknown
}
return nil
}
func (x *AgentEvent) GetAgentStart() *TimeNotification {
if x, ok := x.GetNotification().(*AgentEvent_AgentStart); ok {
return x.AgentStart
}
return nil
}
func (x *AgentEvent) GetPolicyUpdate() *PolicyUpdateNotification {
if x, ok := x.GetNotification().(*AgentEvent_PolicyUpdate); ok {
return x.PolicyUpdate
}
return nil
}
func (x *AgentEvent) GetEndpointRegenerate() *EndpointRegenNotification {
if x, ok := x.GetNotification().(*AgentEvent_EndpointRegenerate); ok {
return x.EndpointRegenerate
}
return nil
}
func (x *AgentEvent) GetEndpointUpdate() *EndpointUpdateNotification {
if x, ok := x.GetNotification().(*AgentEvent_EndpointUpdate); ok {
return x.EndpointUpdate
}
return nil
}
func (x *AgentEvent) GetIpcacheUpdate() *IPCacheNotification {
if x, ok := x.GetNotification().(*AgentEvent_IpcacheUpdate); ok {
return x.IpcacheUpdate
}
return nil
}
func (x *AgentEvent) GetServiceUpsert() *ServiceUpsertNotification {
if x, ok := x.GetNotification().(*AgentEvent_ServiceUpsert); ok {
return x.ServiceUpsert
}
return nil
}
func (x *AgentEvent) GetServiceDelete() *ServiceDeleteNotification {
if x, ok := x.GetNotification().(*AgentEvent_ServiceDelete); ok {
return x.ServiceDelete
}
return nil
}
type isAgentEvent_Notification interface {
isAgentEvent_Notification()
}
type AgentEvent_Unknown struct {
Unknown *AgentEventUnknown `protobuf:"bytes,100,opt,name=unknown,proto3,oneof"`
}
type AgentEvent_AgentStart struct {
AgentStart *TimeNotification `protobuf:"bytes,101,opt,name=agent_start,json=agentStart,proto3,oneof"`
}
type AgentEvent_PolicyUpdate struct {
// used for POLICY_UPDATED and POLICY_DELETED
PolicyUpdate *PolicyUpdateNotification `protobuf:"bytes,102,opt,name=policy_update,json=policyUpdate,proto3,oneof"`
}
type AgentEvent_EndpointRegenerate struct {
// used for ENDPOINT_REGENERATE_SUCCESS and ENDPOINT_REGENERATE_FAILURE
EndpointRegenerate *EndpointRegenNotification `protobuf:"bytes,103,opt,name=endpoint_regenerate,json=endpointRegenerate,proto3,oneof"`
}
type AgentEvent_EndpointUpdate struct {
// used for ENDPOINT_CREATED and ENDPOINT_DELETED
EndpointUpdate *EndpointUpdateNotification `protobuf:"bytes,104,opt,name=endpoint_update,json=endpointUpdate,proto3,oneof"`
}
type AgentEvent_IpcacheUpdate struct {
// used for IPCACHE_UPSERTED and IPCACHE_DELETED
IpcacheUpdate *IPCacheNotification `protobuf:"bytes,105,opt,name=ipcache_update,json=ipcacheUpdate,proto3,oneof"`
}
type AgentEvent_ServiceUpsert struct {
ServiceUpsert *ServiceUpsertNotification `protobuf:"bytes,106,opt,name=service_upsert,json=serviceUpsert,proto3,oneof"`
}
type AgentEvent_ServiceDelete struct {
ServiceDelete *ServiceDeleteNotification `protobuf:"bytes,107,opt,name=service_delete,json=serviceDelete,proto3,oneof"`
}
func (*AgentEvent_Unknown) isAgentEvent_Notification() {}
func (*AgentEvent_AgentStart) isAgentEvent_Notification() {}
func (*AgentEvent_PolicyUpdate) isAgentEvent_Notification() {}
func (*AgentEvent_EndpointRegenerate) isAgentEvent_Notification() {}
func (*AgentEvent_EndpointUpdate) isAgentEvent_Notification() {}
func (*AgentEvent_IpcacheUpdate) isAgentEvent_Notification() {}
func (*AgentEvent_ServiceUpsert) isAgentEvent_Notification() {}
func (*AgentEvent_ServiceDelete) isAgentEvent_Notification() {}
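// An illustrative sketch (helper name and labels are hypothetical): dispatching
// on the notification oneof. Only a few variants are handled; the remaining
// ones follow the same pattern.
func exampleAgentEventKind(ev *AgentEvent) string {
	switch ev.GetNotification().(type) {
	case *AgentEvent_AgentStart:
		return "agent-start"
	case *AgentEvent_PolicyUpdate:
		return "policy-update"
	case *AgentEvent_ServiceUpsert:
		return "service-upsert"
	case *AgentEvent_ServiceDelete:
		return "service-delete"
	default:
		return "other"
	}
}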
type AgentEventUnknown struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Notification string `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"`
}
func (x *AgentEventUnknown) Reset() {
*x = AgentEventUnknown{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AgentEventUnknown) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AgentEventUnknown) ProtoMessage() {}
func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AgentEventUnknown.ProtoReflect.Descriptor instead.
func (*AgentEventUnknown) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{26}
}
func (x *AgentEventUnknown) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *AgentEventUnknown) GetNotification() string {
if x != nil {
return x.Notification
}
return ""
}
type TimeNotification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
}
func (x *TimeNotification) Reset() {
*x = TimeNotification{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TimeNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNotification) ProtoMessage() {}
func (x *TimeNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNotification.ProtoReflect.Descriptor instead.
func (*TimeNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{27}
}
func (x *TimeNotification) GetTime() *timestamppb.Timestamp {
if x != nil {
return x.Time
}
return nil
}
type PolicyUpdateNotification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Labels []string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"`
Revision uint64 `protobuf:"varint,2,opt,name=revision,proto3" json:"revision,omitempty"`
RuleCount int64 `protobuf:"varint,3,opt,name=rule_count,json=ruleCount,proto3" json:"rule_count,omitempty"`
}
func (x *PolicyUpdateNotification) Reset() {
*x = PolicyUpdateNotification{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PolicyUpdateNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PolicyUpdateNotification) ProtoMessage() {}
func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PolicyUpdateNotification.ProtoReflect.Descriptor instead.
func (*PolicyUpdateNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{28}
}
func (x *PolicyUpdateNotification) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *PolicyUpdateNotification) GetRevision() uint64 {
if x != nil {
return x.Revision
}
return 0
}
func (x *PolicyUpdateNotification) GetRuleCount() int64 {
if x != nil {
return x.RuleCount
}
return 0
}
type EndpointRegenNotification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
}
func (x *EndpointRegenNotification) Reset() {
*x = EndpointRegenNotification{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *EndpointRegenNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EndpointRegenNotification) ProtoMessage() {}
func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EndpointRegenNotification.ProtoReflect.Descriptor instead.
func (*EndpointRegenNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{29}
}
func (x *EndpointRegenNotification) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *EndpointRegenNotification) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *EndpointRegenNotification) GetError() string {
if x != nil {
return x.Error
}
return ""
}
type EndpointUpdateNotification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
PodName string `protobuf:"bytes,4,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"`
Namespace string `protobuf:"bytes,5,opt,name=namespace,proto3" json:"namespace,omitempty"`
}
func (x *EndpointUpdateNotification) Reset() {
*x = EndpointUpdateNotification{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *EndpointUpdateNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EndpointUpdateNotification) ProtoMessage() {}
func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EndpointUpdateNotification.ProtoReflect.Descriptor instead.
func (*EndpointUpdateNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{30}
}
func (x *EndpointUpdateNotification) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *EndpointUpdateNotification) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *EndpointUpdateNotification) GetError() string {
if x != nil {
return x.Error
}
return ""
}
func (x *EndpointUpdateNotification) GetPodName() string {
if x != nil {
return x.PodName
}
return ""
}
func (x *EndpointUpdateNotification) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
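// IPCacheNotification describes a change to an ipcache entry: the affected
// CIDR, its new security identity and (if known) the previous one, the
// associated host IPs, encryption key, and pod metadata. Within AgentEvent it
// backs the ipcache_update notification.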
type IPCacheNotification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Cidr string `protobuf:"bytes,1,opt,name=cidr,proto3" json:"cidr,omitempty"`
Identity uint32 `protobuf:"varint,2,opt,name=identity,proto3" json:"identity,omitempty"`
OldIdentity *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=old_identity,json=oldIdentity,proto3" json:"old_identity,omitempty"`
HostIp string `protobuf:"bytes,4,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"`
OldHostIp string `protobuf:"bytes,5,opt,name=old_host_ip,json=oldHostIp,proto3" json:"old_host_ip,omitempty"`
EncryptKey uint32 `protobuf:"varint,6,opt,name=encrypt_key,json=encryptKey,proto3" json:"encrypt_key,omitempty"`
Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"`
PodName string `protobuf:"bytes,8,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"`
}
func (x *IPCacheNotification) Reset() {
*x = IPCacheNotification{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *IPCacheNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IPCacheNotification) ProtoMessage() {}
func (x *IPCacheNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IPCacheNotification.ProtoReflect.Descriptor instead.
func (*IPCacheNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{31}
}
func (x *IPCacheNotification) GetCidr() string {
if x != nil {
return x.Cidr
}
return ""
}
func (x *IPCacheNotification) GetIdentity() uint32 {
if x != nil {
return x.Identity
}
return 0
}
func (x *IPCacheNotification) GetOldIdentity() *wrapperspb.UInt32Value {
if x != nil {
return x.OldIdentity
}
return nil
}
func (x *IPCacheNotification) GetHostIp() string {
if x != nil {
return x.HostIp
}
return ""
}
func (x *IPCacheNotification) GetOldHostIp() string {
if x != nil {
return x.OldHostIp
}
return ""
}
func (x *IPCacheNotification) GetEncryptKey() uint32 {
if x != nil {
return x.EncryptKey
}
return 0
}
func (x *IPCacheNotification) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
func (x *IPCacheNotification) GetPodName() string {
if x != nil {
return x.PodName
}
return ""
}
type ServiceUpsertNotificationAddr struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
}
func (x *ServiceUpsertNotificationAddr) Reset() {
*x = ServiceUpsertNotificationAddr{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ServiceUpsertNotificationAddr) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServiceUpsertNotificationAddr) ProtoMessage() {}
func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServiceUpsertNotificationAddr.ProtoReflect.Descriptor instead.
func (*ServiceUpsertNotificationAddr) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{32}
}
func (x *ServiceUpsertNotificationAddr) GetIp() string {
if x != nil {
return x.Ip
}
return ""
}
func (x *ServiceUpsertNotificationAddr) GetPort() uint32 {
if x != nil {
return x.Port
}
return 0
}
type ServiceUpsertNotification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
FrontendAddress *ServiceUpsertNotificationAddr `protobuf:"bytes,2,opt,name=frontend_address,json=frontendAddress,proto3" json:"frontend_address,omitempty"`
BackendAddresses []*ServiceUpsertNotificationAddr `protobuf:"bytes,3,rep,name=backend_addresses,json=backendAddresses,proto3" json:"backend_addresses,omitempty"`
Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"`
// Deprecated: Marked as deprecated in flow/flow.proto.
TrafficPolicy string `protobuf:"bytes,5,opt,name=traffic_policy,json=trafficPolicy,proto3" json:"traffic_policy,omitempty"`
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"`
ExtTrafficPolicy string `protobuf:"bytes,8,opt,name=ext_traffic_policy,json=extTrafficPolicy,proto3" json:"ext_traffic_policy,omitempty"`
IntTrafficPolicy string `protobuf:"bytes,9,opt,name=int_traffic_policy,json=intTrafficPolicy,proto3" json:"int_traffic_policy,omitempty"`
}
func (x *ServiceUpsertNotification) Reset() {
*x = ServiceUpsertNotification{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ServiceUpsertNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServiceUpsertNotification) ProtoMessage() {}
func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServiceUpsertNotification.ProtoReflect.Descriptor instead.
func (*ServiceUpsertNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{33}
}
func (x *ServiceUpsertNotification) GetId() uint32 {
if x != nil {
return x.Id
}
return 0
}
func (x *ServiceUpsertNotification) GetFrontendAddress() *ServiceUpsertNotificationAddr {
if x != nil {
return x.FrontendAddress
}
return nil
}
func (x *ServiceUpsertNotification) GetBackendAddresses() []*ServiceUpsertNotificationAddr {
if x != nil {
return x.BackendAddresses
}
return nil
}
func (x *ServiceUpsertNotification) GetType() string {
if x != nil {
return x.Type
}
return ""
}
// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *ServiceUpsertNotification) GetTrafficPolicy() string {
if x != nil {
return x.TrafficPolicy
}
return ""
}
func (x *ServiceUpsertNotification) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ServiceUpsertNotification) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
func (x *ServiceUpsertNotification) GetExtTrafficPolicy() string {
if x != nil {
return x.ExtTrafficPolicy
}
return ""
}
func (x *ServiceUpsertNotification) GetIntTrafficPolicy() string {
if x != nil {
return x.IntTrafficPolicy
}
return ""
}
type ServiceDeleteNotification struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *ServiceDeleteNotification) Reset() {
*x = ServiceDeleteNotification{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ServiceDeleteNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServiceDeleteNotification) ProtoMessage() {}
func (x *ServiceDeleteNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServiceDeleteNotification.ProtoReflect.Descriptor instead.
func (*ServiceDeleteNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{34}
}
func (x *ServiceDeleteNotification) GetId() uint32 {
if x != nil {
return x.Id
}
return 0
}
type NetworkInterface struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
}
func (x *NetworkInterface) Reset() {
*x = NetworkInterface{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NetworkInterface) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NetworkInterface) ProtoMessage() {}
func (x *NetworkInterface) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NetworkInterface.ProtoReflect.Descriptor instead.
func (*NetworkInterface) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{35}
}
func (x *NetworkInterface) GetIndex() uint32 {
if x != nil {
return x.Index
}
return 0
}
func (x *NetworkInterface) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type DebugEvent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type DebugEventType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.DebugEventType" json:"type,omitempty"`
Source *Endpoint `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
Hash *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"`
Arg1 *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=arg1,proto3" json:"arg1,omitempty"`
Arg2 *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=arg2,proto3" json:"arg2,omitempty"`
Arg3 *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=arg3,proto3" json:"arg3,omitempty"`
Message string `protobuf:"bytes,7,opt,name=message,proto3" json:"message,omitempty"`
Cpu *wrapperspb.Int32Value `protobuf:"bytes,8,opt,name=cpu,proto3" json:"cpu,omitempty"`
}
func (x *DebugEvent) Reset() {
*x = DebugEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DebugEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DebugEvent) ProtoMessage() {}
func (x *DebugEvent) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DebugEvent.ProtoReflect.Descriptor instead.
func (*DebugEvent) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{36}
}
func (x *DebugEvent) GetType() DebugEventType {
if x != nil {
return x.Type
}
return DebugEventType_DBG_EVENT_UNKNOWN
}
func (x *DebugEvent) GetSource() *Endpoint {
if x != nil {
return x.Source
}
return nil
}
func (x *DebugEvent) GetHash() *wrapperspb.UInt32Value {
if x != nil {
return x.Hash
}
return nil
}
func (x *DebugEvent) GetArg1() *wrapperspb.UInt32Value {
if x != nil {
return x.Arg1
}
return nil
}
func (x *DebugEvent) GetArg2() *wrapperspb.UInt32Value {
if x != nil {
return x.Arg2
}
return nil
}
func (x *DebugEvent) GetArg3() *wrapperspb.UInt32Value {
if x != nil {
return x.Arg3
}
return nil
}
func (x *DebugEvent) GetMessage() string {
if x != nil {
return x.Message
}
return ""
}
func (x *DebugEvent) GetCpu() *wrapperspb.Int32Value {
if x != nil {
return x.Cpu
}
return nil
}
// Experimental contains filters that are not stable yet. Support for
// experimental features is always optional and subject to change.
type FlowFilter_Experimental struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// cel_expression takes a Common Expression Language (CEL) expression that
// returns a boolean, used to determine whether the filter matches.
// You can use the `_flow` variable to access fields on the flow using
// the flow.Flow protobuf field names.
// See https://github.com/google/cel-spec/blob/v0.14.0/doc/intro.md#introduction
// for more details on CEL and accessing the protobuf fields in CEL.
// Using CEL has a performance cost compared to other filters, so prefer
// non-CEL filters when possible, and try to specify CEL filters last in
// the list of FlowFilters. A construction sketch follows the accessor
// methods for this message below.
CelExpression []string `protobuf:"bytes,1,rep,name=cel_expression,json=celExpression,proto3" json:"cel_expression,omitempty"`
}
func (x *FlowFilter_Experimental) Reset() {
*x = FlowFilter_Experimental{}
if protoimpl.UnsafeEnabled {
mi := &file_flow_flow_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FlowFilter_Experimental) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlowFilter_Experimental) ProtoMessage() {}
func (x *FlowFilter_Experimental) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[37]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlowFilter_Experimental.ProtoReflect.Descriptor instead.
func (*FlowFilter_Experimental) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{18, 0}
}
func (x *FlowFilter_Experimental) GetCelExpression() []string {
if x != nil {
return x.CelExpression
}
return nil
}
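// NOTE (editorial, illustrative only): a minimal sketch of how a client might
// populate the experimental CEL filter. The variable name and the expression
// value below are hypothetical; any CEL expression over the `_flow` variable
// that evaluates to a boolean is accepted.
//
//	filter := &FlowFilter{
//		Experimental: &FlowFilter_Experimental{
//			CelExpression: []string{`_flow.node_name == "node-1"`},
//		},
//	}
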
var File_flow_flow_proto protoreflect.FileDescriptor
var file_flow_flow_proto_rawDesc = []byte{
0x0a, 0x0f, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x0e, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x2e, 0x0a, 0x04,
0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04,
0x75, 0x75, 0x69, 0x64, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64,
0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0e, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74,
0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0b, 0x64, 0x72, 0x6f,
0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02,
0x18, 0x01, 0x52, 0x0a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2b,
0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x01, 0x28,
0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70,
0x65, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x08, 0x65,
0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, 0x08, 0x65,
0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x05, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x02, 0x49,
0x50, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x34, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e,
0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, 0x52, 0x02, 0x6c, 0x34, 0x12,
0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52,
0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69,
0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66,
0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x64, 0x65,
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x54, 0x79, 0x70,
0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46,
0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a,
0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x6f,
0x64, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x25, 0x20, 0x03, 0x28, 0x09, 0x52,
0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28,
0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2b,
0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61,
0x6d, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69,
0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x02, 0x6c,
0x37, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c,
0x61, 0x79, 0x65, 0x72, 0x37, 0x52, 0x02, 0x6c, 0x37, 0x12, 0x18, 0x0a, 0x05, 0x72, 0x65, 0x70,
0x6c, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x05, 0x72, 0x65,
0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70,
0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43,
0x69, 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09,
0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0e, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
0x3e, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x66,
0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x12, 0x64, 0x65, 0x73,
0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
0x43, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f,
0x77, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6d,
0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65,
0x12, 0x53, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28,
0x0e, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62,
0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x15,
0x74, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x72,
0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x0b,
0x74, 0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x10, 0x64,
0x72, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18,
0x19, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x72, 0x6f,
0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61,
0x73, 0x6f, 0x6e, 0x44, 0x65, 0x73, 0x63, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65,
0x70, 0x6c, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x69, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47,
0x0a, 0x13, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x5f,
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50,
0x6f, 0x69, 0x6e, 0x74, 0x52, 0x11, 0x64, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75,
0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x66, 0x61, 0x63, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f,
0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61,
0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a,
0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0d,
0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1e, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65,
0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f,
0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x73, 0x6f, 0x63, 0x6b, 0x5f, 0x78, 0x6c,
0x61, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x1c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61,
0x6e, 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x73,
0x6f, 0x63, 0x6b, 0x58, 0x6c, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a,
0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x20,
0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6f, 0x6b,
0x69, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18,
0x21, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12,
0x1e, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0xa0, 0x8d, 0x06, 0x20, 0x01,
0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12,
0x36, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf0, 0x93,
0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3a, 0x0a, 0x11, 0x65, 0x67, 0x72, 0x65, 0x73,
0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x89, 0xa4, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69,
0x63, 0x79, 0x52, 0x0f, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65,
0x64, 0x42, 0x79, 0x12, 0x3c, 0x0a, 0x12, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61,
0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x8a, 0xa4, 0x01, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
0x10, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42,
0x79, 0x12, 0x38, 0x0a, 0x10, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x64, 0x65, 0x6e, 0x69,
0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x8c, 0xa4, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e,
0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x65, 0x67, 0x72,
0x65, 0x73, 0x73, 0x44, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x42, 0x79, 0x12, 0x3a, 0x0a, 0x11, 0x69,
0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x62, 0x79,
0x18, 0x8d, 0xa4, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x44,
0x65, 0x6e, 0x69, 0x65, 0x64, 0x42, 0x79, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08,
0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22,
0xc4, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, 0x12, 0x1d, 0x0a, 0x03, 0x54, 0x43,
0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54,
0x43, 0x50, 0x48, 0x00, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x1d, 0x0a, 0x03, 0x55, 0x44, 0x50,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x55, 0x44,
0x50, 0x48, 0x00, 0x52, 0x03, 0x55, 0x44, 0x50, 0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50,
0x76, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34,
0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x48, 0x00,
0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x20, 0x0a, 0x04, 0x53, 0x43, 0x54, 0x50,
0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x43,
0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x53, 0x43, 0x54, 0x50, 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72,
0x37, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70,
0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e,
0x63, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6c, 0x61, 0x74,
0x65, 0x6e, 0x63, 0x79, 0x4e, 0x73, 0x12, 0x1d, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x64, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x4e, 0x53, 0x48, 0x00,
0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x18, 0x65, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48,
0x00, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x23, 0x0a, 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61,
0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4b, 0x61,
0x66, 0x6b, 0x61, 0x48, 0x00, 0x52, 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x42, 0x08, 0x0a, 0x06,
0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0x39, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43,
0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x29, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72,
0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
0x74, 0x22, 0x28, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74,
0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xd8, 0x01, 0x0a, 0x08,
0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x74, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x19, 0x0a,
0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b,
0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x77, 0x6f, 0x72,
0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, 0x32, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f,
0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x77, 0x0a, 0x03, 0x54, 0x43,
0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f,
0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65,
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x24, 0x0a,
0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66,
0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c,
0x61, 0x67, 0x73, 0x22, 0xb0, 0x01, 0x0a, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x78, 0x6c, 0x61,
0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x58, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69,
0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x70, 0x56,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66,
0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69,
0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x6e, 0x63, 0x72,
0x79, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x6e, 0x63,
0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x22, 0x44, 0x0a, 0x08, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e,
0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x01, 0x0a,
0x08, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x46, 0x49, 0x4e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x46, 0x49, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x53,
0x59, 0x4e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x53, 0x59, 0x4e, 0x12, 0x10, 0x0a,
0x03, 0x52, 0x53, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x52, 0x53, 0x54, 0x12,
0x10, 0x0a, 0x03, 0x50, 0x53, 0x48, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x50, 0x53,
0x48, 0x12, 0x10, 0x0a, 0x03, 0x41, 0x43, 0x4b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03,
0x41, 0x43, 0x4b, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x47, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
0x52, 0x03, 0x55, 0x52, 0x47, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x43, 0x45, 0x18, 0x07, 0x20, 0x01,
0x28, 0x08, 0x52, 0x03, 0x45, 0x43, 0x45, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x57, 0x52, 0x18, 0x08,
0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x43, 0x57, 0x52, 0x12, 0x0e, 0x0a, 0x02, 0x4e, 0x53, 0x18,
0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x4e, 0x53, 0x22, 0x51, 0x0a, 0x03, 0x55, 0x44, 0x50,
0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72,
0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, 0x73,
0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x52, 0x0a, 0x04,
0x53, 0x43, 0x54, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70,
0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74,
0x22, 0x30, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12,
0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f,
0x64, 0x65, 0x22, 0x30, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x04,
0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04,
0x63, 0x6f, 0x64, 0x65, 0x22, 0x6e, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12,
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69,
0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69,
0x73, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6d,
0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70,
0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x0f,
0x43, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74,
0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0xba,
0x0c, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a,
0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69,
0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01,
0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x28,
0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x5f, 0x78, 0x6c, 0x61, 0x74,
0x65, 0x64, 0x18, 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x49, 0x70, 0x58, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, 0x20,
0x03, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72,
0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64,
0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20,
0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x49, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65, 0x73,
0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x64,
0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18,
0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0b, 0x20, 0x03, 0x28,
0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61,
0x62, 0x65, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x03, 0x28, 0x09,
0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1b, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f,
0x61, 0x64, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57,
0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x43, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66,
0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1e, 0x20, 0x03,
0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69,
0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66,
0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x07,
0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0d, 0x2e,
0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x52, 0x07, 0x76, 0x65,
0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x3a, 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x72, 0x65,
0x61, 0x73, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0e, 0x32,
0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f,
0x6e, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x44, 0x65, 0x73,
0x63, 0x12, 0x34, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x23,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77,
0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e,
0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74,
0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74,
0x65, 0x72, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a,
0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64,
0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x63, 0x6f, 0x6c, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x63, 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f,
0x72, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f,
0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12,
0x14, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x08, 0x52, 0x05,
0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x71, 0x75, 0x65,
0x72, 0x79, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x51, 0x75, 0x65,
0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x14, 0x64,
0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74,
0x69, 0x74, 0x79, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69,
0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1f,
0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x15, 0x20,
0x03, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12,
0x1b, 0x0a, 0x09, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x16, 0x20, 0x03,
0x28, 0x09, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x68, 0x12, 0x19, 0x0a, 0x08,
0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07,
0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x31, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f,
0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x20, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66,
0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a,
0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x63,
0x70, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x08, 0x74,
0x63, 0x70, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65,
0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6c, 0x61, 0x62,
0x65, 0x6c, 0x73, 0x18, 0x24, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x4c,
0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2e, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, 0x65,
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69,
0x64, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64,
0x12, 0x42, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
0x18, 0xe7, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46,
0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69,
0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65,
0x6e, 0x74, 0x61, 0x6c, 0x1a, 0x35, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65,
0x6e, 0x74, 0x61, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x65, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72,
0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x65,
0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xce, 0x01, 0x0a, 0x03,
0x44, 0x4e, 0x53, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73,
0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74,
0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x16, 0x0a,
0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x63,
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x71, 0x74,
0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x71, 0x74, 0x79, 0x70,
0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20,
0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x0a,
0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x63,
0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12,
0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54,
0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
0x73, 0x22, 0x9d, 0x01, 0x0a, 0x05, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65,
0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70,
0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61,
0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70,
0x69, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f,
0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74,
0x6f, 0x70, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69,
0x63, 0x22, 0x3b, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x91,
0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x06,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66,
0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e,
0x75, 0x6d, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6c, 0x6f, 0x73, 0x74, 0x18, 0x02,
0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4c,
0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63,
0x70, 0x75, 0x22, 0xf6, 0x04, 0x0a, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x75,
0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66,
0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e,
0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e,
0x12, 0x39, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52,
0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70,
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x66, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x12, 0x52, 0x0a, 0x13, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x72,
0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52,
0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x48, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
0x6e, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x20, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64,
0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x75,
0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68,
0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x5f, 0x75, 0x70, 0x73, 0x65, 0x72, 0x74, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70,
0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72,
0x74, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f,
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x6e,
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x11, 0x41,
0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e,
0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x74, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65,
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x04,
0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x6d, 0x0a, 0x18,
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a,
0x72, 0x75, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x19, 0x45,
0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x64, 0x70, 0x6f,
0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a,
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c,
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x99, 0x02, 0x0a,
0x13, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1e,
0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1f,
0x0a, 0x0b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x4b, 0x65, 0x79, 0x12,
0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a,
0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72,
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x9a, 0x03,
0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e,
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x10, 0x66,
0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x0f, 0x66, 0x72, 0x6f, 0x6e,
0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x50, 0x0a, 0x11, 0x62,
0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x10, 0x62, 0x61, 0x63,
0x6b, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x12, 0x0a,
0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
0x65, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x74,
0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2c,
0x0a, 0x12, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f,
0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x74, 0x54,
0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2c, 0x0a, 0x12,
0x69, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69,
0x63, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x54, 0x72, 0x61,
0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2b, 0x0a, 0x19, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3c, 0x0a, 0x10, 0x4e, 0x65, 0x74, 0x77, 0x6f,
0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69,
0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65,
0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef, 0x02, 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45,
0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45,
0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26,
0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x31,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x31, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72,
0x67, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x32, 0x12, 0x30, 0x0a, 0x04,
0x61, 0x72, 0x67, 0x33, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x33, 0x12, 0x18,
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18,
0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x2a, 0x39, 0x0a, 0x08, 0x46, 0x6c, 0x6f, 0x77, 0x54,
0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54,
0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x33, 0x5f, 0x4c, 0x34, 0x10, 0x01,
0x12, 0x06, 0x0a, 0x02, 0x4c, 0x37, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x43, 0x4b,
0x10, 0x03, 0x2a, 0x39, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c,
0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05,
0x53, 0x50, 0x49, 0x52, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x45, 0x53, 0x54, 0x5f,
0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x02, 0x2a, 0xea, 0x01,
0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f,
0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4f, 0x5f, 0x48,
0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43,
0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41,
0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x4f, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49,
0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x4e, 0x44,
0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f,
0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x52, 0x4f, 0x4d, 0x5f,
0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x53,
0x54, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4f,
0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d,
0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0a, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f,
0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0b, 0x2a, 0xa0, 0x01, 0x0a, 0x0b, 0x54,
0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x52,
0x41, 0x43, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4e, 0x45, 0x57, 0x10, 0x01, 0x12, 0x0f, 0x0a,
0x0b, 0x45, 0x53, 0x54, 0x41, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09,
0x0a, 0x05, 0x52, 0x45, 0x50, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x4c,
0x41, 0x54, 0x45, 0x44, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x08, 0x52, 0x45, 0x4f, 0x50, 0x45, 0x4e,
0x45, 0x44, 0x10, 0x05, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x52, 0x56, 0x36,
0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x52, 0x56, 0x36,
0x5f, 0x44, 0x45, 0x43, 0x41, 0x50, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, 0x52,
0x59, 0x50, 0x54, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x08, 0x2a, 0x48, 0x0a,
0x0a, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x55,
0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00,
0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a,
0x08, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x53,
0x41, 0x4d, 0x50, 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09, 0x49, 0x50, 0x56, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55,
0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x34, 0x10, 0x01, 0x12,
0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c, 0x0a, 0x07, 0x56, 0x65, 0x72,
0x64, 0x69, 0x63, 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x5f,
0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52,
0x57, 0x41, 0x52, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x4f, 0x50,
0x50, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03,
0x12, 0x09, 0x0a, 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x52,
0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x54,
0x52, 0x41, 0x43, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, 0x4e, 0x53,
0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x2a, 0x97, 0x11, 0x0a, 0x0a, 0x44, 0x72, 0x6f, 0x70,
0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52,
0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
0x1b, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43,
0x45, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x82, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x20, 0x0a, 0x17,
0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54,
0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x83, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x16,
0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
0x5f, 0x49, 0x50, 0x10, 0x84, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59,
0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x85, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x49, 0x4e,
0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x44, 0x52, 0x4f,
0x50, 0x50, 0x45, 0x44, 0x10, 0x86, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x43, 0x54, 0x5f, 0x54, 0x52,
0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c,
0x49, 0x44, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x87, 0x01, 0x12, 0x1c, 0x0a, 0x17,
0x43, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x41,
0x43, 0x4b, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x88, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x43, 0x54,
0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54,
0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x89, 0x01, 0x12, 0x2b, 0x0a, 0x22, 0x43, 0x54, 0x5f, 0x43, 0x41,
0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52,
0x59, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8a, 0x01,
0x1a, 0x02, 0x08, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52,
0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10,
0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x49, 0x53, 0x53, 0x45, 0x44, 0x5f, 0x54, 0x41, 0x49,
0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x8c, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x45, 0x52, 0x52,
0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x41,
0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x8e,
0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d,
0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x8f, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55,
0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x54, 0x59,
0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x91, 0x01, 0x12,
0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56,
0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x92, 0x01, 0x12, 0x20, 0x0a, 0x1b, 0x45, 0x52, 0x52,
0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x55,
0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x93, 0x01, 0x12, 0x28, 0x0a, 0x1f, 0x45,
0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f,
0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x94,
0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1e, 0x0a, 0x15, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44,
0x5f, 0x47, 0x45, 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x95,
0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
0x5f, 0x4c, 0x33, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45,
0x53, 0x53, 0x10, 0x96, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x4f,
0x52, 0x5f, 0x55, 0x4e, 0x52, 0x4f, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x50, 0x10,
0x97, 0x01, 0x12, 0x2a, 0x0a, 0x21, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e,
0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45,
0x52, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x98, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x27,
0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f,
0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x33, 0x5f, 0x43, 0x48, 0x45, 0x43,
0x4b, 0x53, 0x55, 0x4d, 0x10, 0x99, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52,
0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e,
0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x9a, 0x01,
0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52,
0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x9b, 0x01, 0x12, 0x22,
0x0a, 0x1d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x5f, 0x45,
0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10,
0x9c, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x49, 0x50, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e,
0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f,
0x52, 0x54, 0x45, 0x44, 0x10, 0x9d, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49,
0x43, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46,
0x4f, 0x55, 0x4e, 0x44, 0x10, 0x9e, 0x01, 0x12, 0x28, 0x0a, 0x23, 0x4e, 0x4f, 0x5f, 0x54, 0x55,
0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c,
0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0xa0,
0x01, 0x12, 0x23, 0x0a, 0x1e, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x4f, 0x5f, 0x49,
0x4e, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x49, 0x4e, 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59,
0x4d, 0x41, 0x50, 0x10, 0xa1, 0x01, 0x12, 0x2b, 0x0a, 0x26, 0x52, 0x45, 0x41, 0x43, 0x48, 0x45,
0x44, 0x5f, 0x45, 0x44, 0x54, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54,
0x49, 0x4e, 0x47, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e,
0x10, 0xa2, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43,
0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x4b, 0x49,
0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xa3, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x4c,
0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x49, 0x53, 0x5f, 0x55, 0x4e, 0x52,
0x45, 0x41, 0x43, 0x48, 0x41, 0x42, 0x4c, 0x45, 0x10, 0xa4, 0x01, 0x12, 0x3a, 0x0a, 0x35, 0x4e,
0x4f, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x45, 0x52,
0x46, 0x4f, 0x52, 0x4d, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x43, 0x49,
0x53, 0x49, 0x4f, 0x4e, 0x10, 0xa5, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50,
0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x32, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43,
0x4f, 0x4c, 0x10, 0xa6, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x50,
0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51,
0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa7, 0x01, 0x12, 0x2c, 0x0a, 0x27, 0x55, 0x4e, 0x53,
0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f,
0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45,
0x52, 0x41, 0x44, 0x45, 0x10, 0xa8, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x46, 0x49, 0x42, 0x5f, 0x4c,
0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xa9, 0x01, 0x12,
0x28, 0x0a, 0x23, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e,
0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x49, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x48,
0x49, 0x42, 0x49, 0x54, 0x45, 0x44, 0x10, 0xaa, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x49, 0x4e, 0x56,
0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0xab, 0x01,
0x12, 0x13, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x45, 0x4e, 0x44,
0x45, 0x52, 0x10, 0xac, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x54,
0x5f, 0x4e, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0xad, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x49, 0x53,
0x5f, 0x41, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x49, 0x50, 0x10, 0xae, 0x01, 0x12,
0x2e, 0x0a, 0x29, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c,
0x5f, 0x44, 0x41, 0x54, 0x41, 0x47, 0x52, 0x41, 0x4d, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45,
0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xaf, 0x01, 0x12,
0x1d, 0x0a, 0x18, 0x46, 0x4f, 0x52, 0x42, 0x49, 0x44, 0x44, 0x45, 0x4e, 0x5f, 0x49, 0x43, 0x4d,
0x50, 0x56, 0x36, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0xb0, 0x01, 0x12, 0x21,
0x0a, 0x1c, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x5f, 0x42, 0x59, 0x5f, 0x4c, 0x42, 0x5f, 0x53,
0x52, 0x43, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0xb1,
0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb2, 0x01, 0x12, 0x19, 0x0a, 0x14,
0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x46, 0x41,
0x49, 0x4c, 0x45, 0x44, 0x10, 0xb3, 0x01, 0x12, 0x31, 0x0a, 0x2c, 0x50, 0x52, 0x4f, 0x58, 0x59,
0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54,
0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x50,
0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xb4, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x50, 0x4f,
0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0xb5, 0x01, 0x12, 0x12, 0x0a, 0x0d,
0x56, 0x4c, 0x41, 0x4e, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x45, 0x44, 0x10, 0xb6, 0x01,
0x12, 0x10, 0x0a, 0x0b, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x56, 0x4e, 0x49, 0x10,
0xb7, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x54, 0x43,
0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0xb8, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x4e, 0x4f,
0x5f, 0x53, 0x49, 0x44, 0x10, 0xb9, 0x01, 0x12, 0x1b, 0x0a, 0x12, 0x4d, 0x49, 0x53, 0x53, 0x49,
0x4e, 0x47, 0x5f, 0x53, 0x52, 0x56, 0x36, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xba, 0x01,
0x1a, 0x02, 0x08, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x34, 0x36, 0x10, 0xbb, 0x01,
0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x36, 0x34, 0x10, 0xbc, 0x01, 0x12, 0x12, 0x0a, 0x0d,
0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0xbd, 0x01,
0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f,
0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x4e,
0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbf, 0x01, 0x12, 0x17,
0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45,
0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x55, 0x4e, 0x53, 0x55, 0x50,
0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f,
0x46, 0x4f, 0x52, 0x5f, 0x44, 0x53, 0x52, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0xc1, 0x01,
0x12, 0x16, 0x0a, 0x11, 0x4e, 0x4f, 0x5f, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41,
0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0xc2, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x45, 0x4e,
0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x10,
0xc3, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x54, 0x54, 0x4c, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44,
0x45, 0x44, 0x10, 0xc4, 0x01, 0x12, 0x0f, 0x0a, 0x0a, 0x4e, 0x4f, 0x5f, 0x4e, 0x4f, 0x44, 0x45,
0x5f, 0x49, 0x44, 0x10, 0xc5, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52,
0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0xc6, 0x01, 0x12, 0x11,
0x0a, 0x0c, 0x49, 0x47, 0x4d, 0x50, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x44, 0x10, 0xc7,
0x01, 0x12, 0x14, 0x0a, 0x0f, 0x49, 0x47, 0x4d, 0x50, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52,
0x49, 0x42, 0x45, 0x44, 0x10, 0xc8, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4d, 0x55, 0x4c, 0x54, 0x49,
0x43, 0x41, 0x53, 0x54, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x44, 0x10, 0xc9, 0x01, 0x12,
0x18, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x4e, 0x4f, 0x54,
0x5f, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0xca, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x44, 0x52, 0x4f,
0x50, 0x5f, 0x45, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0xcb,
0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43,
0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10,
0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x2a, 0x8d, 0x02,
0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f,
0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55,
0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52,
0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13,
0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x46, 0x52, 0x4f, 0x4d,
0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50,
0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x34, 0x36, 0x10, 0x06,
0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f,
0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, 0x10, 0x07, 0x12, 0x19, 0x0a, 0x15, 0x44,
0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59,
0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41,
0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x4f, 0x53, 0x54,
0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52,
0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x15,
0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54,
0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, 0x08, 0x01, 0x10, 0x03, 0x2a, 0x39, 0x0a,
0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74,
0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f,
0x72, 0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x0f, 0x4c, 0x6f, 0x73, 0x74,
0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x55,
0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, 0x53, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e,
0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x45,
0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55,
0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x42, 0x53, 0x45, 0x52, 0x56,
0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, 0x10,
0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x49, 0x4e, 0x47,
0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, 0x2a, 0xae, 0x02, 0x0a, 0x0e, 0x41, 0x67,
0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13,
0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x53,
0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49,
0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e,
0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x04,
0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47,
0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10,
0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45,
0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45,
0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x43,
0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50,
0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x14,
0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54,
0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f,
0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x52,
0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0b, 0x12,
0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54,
0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01, 0x2a, 0xd8, 0x01, 0x0a, 0x16, 0x53,
0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c,
0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54,
0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45,
0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x53,
0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f,
0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46,
0x57, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41,
0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52,
0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23,
0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54,
0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0x81, 0x0d, 0x0a, 0x0e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45,
0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f,
0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, 0x10, 0x01,
0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x45,
0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f,
0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c,
0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42,
0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10,
0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c,
0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x44,
0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x08, 0x12, 0x12, 0x0a,
0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10,
0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41,
0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43,
0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x10, 0x0b, 0x12, 0x15, 0x0a, 0x11,
0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53,
0x54, 0x10, 0x0c, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36,
0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d,
0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44,
0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x56, 0x45, 0x52,
0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x44, 0x45,
0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x52,
0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x45,
0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, 0x12, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42,
0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x13, 0x12, 0x10, 0x0a, 0x0c, 0x44,
0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x14, 0x12, 0x10, 0x0a,
0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x15, 0x12,
0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55,
0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x16, 0x12, 0x20, 0x0a, 0x1c,
0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46,
0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x17, 0x12, 0x1f,
0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50,
0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x18, 0x12,
0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55,
0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53,
0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f,
0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45,
0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10,
0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f,
0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c,
0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45,
0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50,
0x10, 0x1c, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45,
0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x44,
0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52,
0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f,
0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54,
0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1f, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42,
0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43,
0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x20, 0x12, 0x27, 0x0a, 0x23, 0x44,
0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41,
0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45,
0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f,
0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53,
0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x22, 0x12, 0x1f, 0x0a,
0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f,
0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x23, 0x12, 0x1e,
0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53,
0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x24, 0x12, 0x17,
0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53,
0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4c,
0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54,
0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f,
0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x52, 0x45, 0x56, 0x10,
0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x52, 0x5f,
0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4c,
0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52,
0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x2a, 0x12, 0x17, 0x0a, 0x13,
0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x46, 0x4f,
0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56,
0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x2c, 0x12,
0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59,
0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56,
0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, 0x2e, 0x12, 0x15, 0x0a,
0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, 0x45, 0x4e, 0x43, 0x41,
0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c,
0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, 0x30, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42,
0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x32, 0x10, 0x31,
0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54,
0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f,
0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, 0x10, 0x33, 0x12, 0x14, 0x0a, 0x10, 0x44,
0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x32, 0x10,
0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41,
0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b,
0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x36, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42,
0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x37, 0x12, 0x19, 0x0a,
0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46,
0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f,
0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44,
0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44,
0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x34, 0x10, 0x3a, 0x12,
0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50,
0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, 0x10, 0x3b, 0x12, 0x13, 0x0a, 0x0f, 0x44,
0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x43, 0x54, 0x10, 0x3c,
0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x4e, 0x48, 0x45, 0x52, 0x49, 0x54, 0x5f,
0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0x3d, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42,
0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x3e, 0x12, 0x12,
0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36,
0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x41, 0x53, 0x53,
0x49, 0x47, 0x4e, 0x10, 0x40, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x37, 0x5f,
0x4c, 0x42, 0x10, 0x41, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x49, 0x50,
0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x42, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63,
0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x6c, 0x6f,
0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_flow_flow_proto_rawDescOnce sync.Once
file_flow_flow_proto_rawDescData = file_flow_flow_proto_rawDesc
)
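// file_flow_flow_proto_rawDescGZIP returns a GZIP-compressed form of the raw
// descriptor bytes above. The compression is performed once, on first use,
// guarded by file_flow_flow_proto_rawDescOnce.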
func file_flow_flow_proto_rawDescGZIP() []byte {
file_flow_flow_proto_rawDescOnce.Do(func() {
file_flow_flow_proto_rawDescData = protoimpl.X.CompressGZIP(file_flow_flow_proto_rawDescData)
})
return file_flow_flow_proto_rawDescData
}
var file_flow_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 15)
var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 38)
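// file_flow_flow_proto_goTypes lists every enum and message type generated
// from flow.proto, in descriptor order. The trailing comment on each entry
// gives its index, which is how file_flow_flow_proto_depIdxs below refers to
// these types.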
var file_flow_flow_proto_goTypes = []any{
(FlowType)(0), // 0: flow.FlowType
(AuthType)(0), // 1: flow.AuthType
(TraceObservationPoint)(0), // 2: flow.TraceObservationPoint
(TraceReason)(0), // 3: flow.TraceReason
(L7FlowType)(0), // 4: flow.L7FlowType
(IPVersion)(0), // 5: flow.IPVersion
(Verdict)(0), // 6: flow.Verdict
(DropReason)(0), // 7: flow.DropReason
(TrafficDirection)(0), // 8: flow.TrafficDirection
(DebugCapturePoint)(0), // 9: flow.DebugCapturePoint
(EventType)(0), // 10: flow.EventType
(LostEventSource)(0), // 11: flow.LostEventSource
(AgentEventType)(0), // 12: flow.AgentEventType
(SocketTranslationPoint)(0), // 13: flow.SocketTranslationPoint
(DebugEventType)(0), // 14: flow.DebugEventType
(*Flow)(nil), // 15: flow.Flow
(*Layer4)(nil), // 16: flow.Layer4
(*Layer7)(nil), // 17: flow.Layer7
(*TraceContext)(nil), // 18: flow.TraceContext
(*TraceParent)(nil), // 19: flow.TraceParent
(*Endpoint)(nil), // 20: flow.Endpoint
(*Workload)(nil), // 21: flow.Workload
(*TCP)(nil), // 22: flow.TCP
(*IP)(nil), // 23: flow.IP
(*Ethernet)(nil), // 24: flow.Ethernet
(*TCPFlags)(nil), // 25: flow.TCPFlags
(*UDP)(nil), // 26: flow.UDP
(*SCTP)(nil), // 27: flow.SCTP
(*ICMPv4)(nil), // 28: flow.ICMPv4
(*ICMPv6)(nil), // 29: flow.ICMPv6
(*Policy)(nil), // 30: flow.Policy
(*EventTypeFilter)(nil), // 31: flow.EventTypeFilter
(*CiliumEventType)(nil), // 32: flow.CiliumEventType
(*FlowFilter)(nil), // 33: flow.FlowFilter
(*DNS)(nil), // 34: flow.DNS
(*HTTPHeader)(nil), // 35: flow.HTTPHeader
(*HTTP)(nil), // 36: flow.HTTP
(*Kafka)(nil), // 37: flow.Kafka
(*Service)(nil), // 38: flow.Service
(*LostEvent)(nil), // 39: flow.LostEvent
(*AgentEvent)(nil), // 40: flow.AgentEvent
(*AgentEventUnknown)(nil), // 41: flow.AgentEventUnknown
(*TimeNotification)(nil), // 42: flow.TimeNotification
(*PolicyUpdateNotification)(nil), // 43: flow.PolicyUpdateNotification
(*EndpointRegenNotification)(nil), // 44: flow.EndpointRegenNotification
(*EndpointUpdateNotification)(nil), // 45: flow.EndpointUpdateNotification
(*IPCacheNotification)(nil), // 46: flow.IPCacheNotification
(*ServiceUpsertNotificationAddr)(nil), // 47: flow.ServiceUpsertNotificationAddr
(*ServiceUpsertNotification)(nil), // 48: flow.ServiceUpsertNotification
(*ServiceDeleteNotification)(nil), // 49: flow.ServiceDeleteNotification
(*NetworkInterface)(nil), // 50: flow.NetworkInterface
(*DebugEvent)(nil), // 51: flow.DebugEvent
(*FlowFilter_Experimental)(nil), // 52: flow.FlowFilter.Experimental
(*timestamppb.Timestamp)(nil), // 53: google.protobuf.Timestamp
(*wrapperspb.BoolValue)(nil), // 54: google.protobuf.BoolValue
(*anypb.Any)(nil), // 55: google.protobuf.Any
(*wrapperspb.Int32Value)(nil), // 56: google.protobuf.Int32Value
(*wrapperspb.UInt32Value)(nil), // 57: google.protobuf.UInt32Value
}
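// file_flow_flow_proto_depIdxs encodes type dependencies: each entry is an
// index into file_flow_flow_proto_goTypes, and the trailing comment names the
// field that depends on that type. The sentinel entries at the end mark the
// sub-list boundaries for methods and extensions, which are empty here since
// flow.proto declares no services or extensions.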
var file_flow_flow_proto_depIdxs = []int32{
53, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp
6, // 1: flow.Flow.verdict:type_name -> flow.Verdict
1, // 2: flow.Flow.auth_type:type_name -> flow.AuthType
24, // 3: flow.Flow.ethernet:type_name -> flow.Ethernet
23, // 4: flow.Flow.IP:type_name -> flow.IP
16, // 5: flow.Flow.l4:type_name -> flow.Layer4
20, // 6: flow.Flow.source:type_name -> flow.Endpoint
20, // 7: flow.Flow.destination:type_name -> flow.Endpoint
0, // 8: flow.Flow.Type:type_name -> flow.FlowType
17, // 9: flow.Flow.l7:type_name -> flow.Layer7
32, // 10: flow.Flow.event_type:type_name -> flow.CiliumEventType
38, // 11: flow.Flow.source_service:type_name -> flow.Service
38, // 12: flow.Flow.destination_service:type_name -> flow.Service
8, // 13: flow.Flow.traffic_direction:type_name -> flow.TrafficDirection
2, // 14: flow.Flow.trace_observation_point:type_name -> flow.TraceObservationPoint
3, // 15: flow.Flow.trace_reason:type_name -> flow.TraceReason
7, // 16: flow.Flow.drop_reason_desc:type_name -> flow.DropReason
54, // 17: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue
9, // 18: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint
50, // 19: flow.Flow.interface:type_name -> flow.NetworkInterface
18, // 20: flow.Flow.trace_context:type_name -> flow.TraceContext
13, // 21: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint
55, // 22: flow.Flow.extensions:type_name -> google.protobuf.Any
30, // 23: flow.Flow.egress_allowed_by:type_name -> flow.Policy
30, // 24: flow.Flow.ingress_allowed_by:type_name -> flow.Policy
30, // 25: flow.Flow.egress_denied_by:type_name -> flow.Policy
30, // 26: flow.Flow.ingress_denied_by:type_name -> flow.Policy
22, // 27: flow.Layer4.TCP:type_name -> flow.TCP
26, // 28: flow.Layer4.UDP:type_name -> flow.UDP
28, // 29: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4
29, // 30: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6
27, // 31: flow.Layer4.SCTP:type_name -> flow.SCTP
4, // 32: flow.Layer7.type:type_name -> flow.L7FlowType
34, // 33: flow.Layer7.dns:type_name -> flow.DNS
36, // 34: flow.Layer7.http:type_name -> flow.HTTP
37, // 35: flow.Layer7.kafka:type_name -> flow.Kafka
19, // 36: flow.TraceContext.parent:type_name -> flow.TraceParent
21, // 37: flow.Endpoint.workloads:type_name -> flow.Workload
25, // 38: flow.TCP.flags:type_name -> flow.TCPFlags
5, // 39: flow.IP.ipVersion:type_name -> flow.IPVersion
21, // 40: flow.FlowFilter.source_workload:type_name -> flow.Workload
21, // 41: flow.FlowFilter.destination_workload:type_name -> flow.Workload
8, // 42: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection
6, // 43: flow.FlowFilter.verdict:type_name -> flow.Verdict
7, // 44: flow.FlowFilter.drop_reason_desc:type_name -> flow.DropReason
50, // 45: flow.FlowFilter.interface:type_name -> flow.NetworkInterface
31, // 46: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter
35, // 47: flow.FlowFilter.http_header:type_name -> flow.HTTPHeader
25, // 48: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags
5, // 49: flow.FlowFilter.ip_version:type_name -> flow.IPVersion
52, // 50: flow.FlowFilter.experimental:type_name -> flow.FlowFilter.Experimental
35, // 51: flow.HTTP.headers:type_name -> flow.HTTPHeader
11, // 52: flow.LostEvent.source:type_name -> flow.LostEventSource
56, // 53: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value
12, // 54: flow.AgentEvent.type:type_name -> flow.AgentEventType
41, // 55: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown
42, // 56: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification
43, // 57: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification
44, // 58: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification
45, // 59: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification
46, // 60: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification
48, // 61: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification
49, // 62: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification
53, // 63: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp
57, // 64: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value
47, // 65: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr
47, // 66: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr
14, // 67: flow.DebugEvent.type:type_name -> flow.DebugEventType
20, // 68: flow.DebugEvent.source:type_name -> flow.Endpoint
57, // 69: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value
57, // 70: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value
57, // 71: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value
57, // 72: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value
56, // 73: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value
74, // [74:74] is the sub-list for method output_type
74, // [74:74] is the sub-list for method input_type
74, // [74:74] is the sub-list for extension type_name
74, // [74:74] is the sub-list for extension extendee
0, // [0:74] is the sub-list for field type_name
}
func init() { file_flow_flow_proto_init() }
func file_flow_flow_proto_init() {
if File_flow_flow_proto != nil {
return
}
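// When the unsafe-based fast path is unavailable, register exporter functions
// so the protobuf runtime can access each generated struct's internal state,
// sizeCache and unknownFields fields via reflection.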
if !protoimpl.UnsafeEnabled {
file_flow_flow_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Flow); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*Layer4); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Layer7); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*TraceContext); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*TraceParent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Endpoint); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*Workload); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*TCP); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*IP); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*Ethernet); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*TCPFlags); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*UDP); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*SCTP); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*ICMPv4); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*ICMPv6); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*Policy); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*EventTypeFilter); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*CiliumEventType); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*FlowFilter); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*DNS); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*HTTPHeader); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[21].Exporter = func(v any, i int) any {
switch v := v.(*HTTP); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*Kafka); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[23].Exporter = func(v any, i int) any {
switch v := v.(*Service); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[24].Exporter = func(v any, i int) any {
switch v := v.(*LostEvent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[25].Exporter = func(v any, i int) any {
switch v := v.(*AgentEvent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[26].Exporter = func(v any, i int) any {
switch v := v.(*AgentEventUnknown); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[27].Exporter = func(v any, i int) any {
switch v := v.(*TimeNotification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[28].Exporter = func(v any, i int) any {
switch v := v.(*PolicyUpdateNotification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[29].Exporter = func(v any, i int) any {
switch v := v.(*EndpointRegenNotification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[30].Exporter = func(v any, i int) any {
switch v := v.(*EndpointUpdateNotification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[31].Exporter = func(v any, i int) any {
switch v := v.(*IPCacheNotification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[32].Exporter = func(v any, i int) any {
switch v := v.(*ServiceUpsertNotificationAddr); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[33].Exporter = func(v any, i int) any {
switch v := v.(*ServiceUpsertNotification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[34].Exporter = func(v any, i int) any {
switch v := v.(*ServiceDeleteNotification); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[35].Exporter = func(v any, i int) any {
switch v := v.(*NetworkInterface); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[36].Exporter = func(v any, i int) any {
switch v := v.(*DebugEvent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_flow_flow_proto_msgTypes[37].Exporter = func(v any, i int) any {
switch v := v.(*FlowFilter_Experimental); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_flow_flow_proto_msgTypes[1].OneofWrappers = []any{
(*Layer4_TCP)(nil),
(*Layer4_UDP)(nil),
(*Layer4_ICMPv4)(nil),
(*Layer4_ICMPv6)(nil),
(*Layer4_SCTP)(nil),
}
file_flow_flow_proto_msgTypes[2].OneofWrappers = []any{
(*Layer7_Dns)(nil),
(*Layer7_Http)(nil),
(*Layer7_Kafka)(nil),
}
file_flow_flow_proto_msgTypes[25].OneofWrappers = []any{
(*AgentEvent_Unknown)(nil),
(*AgentEvent_AgentStart)(nil),
(*AgentEvent_PolicyUpdate)(nil),
(*AgentEvent_EndpointRegenerate)(nil),
(*AgentEvent_EndpointUpdate)(nil),
(*AgentEvent_IpcacheUpdate)(nil),
(*AgentEvent_ServiceUpsert)(nil),
(*AgentEvent_ServiceDelete)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_flow_flow_proto_rawDesc,
NumEnums: 15,
NumMessages: 38,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_flow_flow_proto_goTypes,
DependencyIndexes: file_flow_flow_proto_depIdxs,
EnumInfos: file_flow_flow_proto_enumTypes,
MessageInfos: file_flow_flow_proto_msgTypes,
}.Build()
File_flow_flow_proto = out.File
file_flow_flow_proto_rawDesc = nil
file_flow_flow_proto_goTypes = nil
file_flow_flow_proto_depIdxs = nil
}
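// Illustrative sketch, not generated code: the oneof wrapper types registered
// above (Layer4_TCP, Layer4_UDP, ...) are how Layer4's protocol oneof is
// populated. Field names (Protocol, SourcePort, DestinationPort) are assumed
// from Cilium's flow.proto and may differ if the schema changes.
func layer4WithTCPExample() *Layer4 {
return &Layer4{
Protocol: &Layer4_TCP{
TCP: &TCP{SourcePort: 34567, DestinationPort: 443},
},
}
}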
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go-json. DO NOT EDIT.
// source: flow/flow.proto
package flow
import (
"google.golang.org/protobuf/encoding/protojson"
)
// MarshalJSON implements json.Marshaler
func (msg *Flow) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Flow) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
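// Illustrative sketch, not generated code: round-tripping a Flow through the
// JSON helpers above. Because MarshalJSON sets UseProtoNames, keys follow the
// proto field names (snake_case, e.g. "node_name") rather than protojson's
// default lowerCamelCase.
func flowJSONRoundTripExample(in *Flow) (*Flow, error) {
b, err := in.MarshalJSON()
if err != nil {
return nil, err
}
out := &Flow{}
if err := out.UnmarshalJSON(b); err != nil {
return nil, err
}
return out, nil
}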
// MarshalJSON implements json.Marshaler
func (msg *Layer4) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Layer4) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *Layer7) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Layer7) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *TraceContext) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *TraceContext) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *TraceParent) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *TraceParent) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *Endpoint) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Endpoint) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *Workload) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Workload) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *TCP) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *TCP) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *IP) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *IP) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *Ethernet) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Ethernet) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *TCPFlags) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *TCPFlags) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *UDP) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *UDP) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *SCTP) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *SCTP) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *ICMPv4) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *ICMPv4) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *ICMPv6) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *ICMPv6) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *Policy) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Policy) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *EventTypeFilter) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *EventTypeFilter) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *CiliumEventType) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *CiliumEventType) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *FlowFilter) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *FlowFilter) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *FlowFilter_Experimental) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *FlowFilter_Experimental) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *DNS) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *DNS) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *HTTPHeader) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *HTTPHeader) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *HTTP) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *HTTP) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *Kafka) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Kafka) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *Service) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *Service) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *LostEvent) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *LostEvent) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *AgentEvent) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *AgentEvent) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *AgentEventUnknown) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *AgentEventUnknown) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *TimeNotification) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *TimeNotification) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *PolicyUpdateNotification) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *PolicyUpdateNotification) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *EndpointRegenNotification) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *EndpointRegenNotification) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *EndpointUpdateNotification) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *EndpointUpdateNotification) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *IPCacheNotification) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *IPCacheNotification) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *ServiceUpsertNotificationAddr) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *ServiceUpsertNotificationAddr) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *ServiceUpsertNotification) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *ServiceUpsertNotification) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *ServiceDeleteNotification) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *ServiceDeleteNotification) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *NetworkInterface) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *NetworkInterface) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *DebugEvent) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
UseProtoNames: true,
}.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *DebugEvent) UnmarshalJSON(b []byte) error {
return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Address IP address
//
// swagger:model Address
type Address string
// Validate validates this address
func (m Address) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this address based on context it is used
func (m Address) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
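// Illustrative sketch, not generated code: Address is a plain string alias, so
// construction is a simple conversion and Validate/ContextValidate are no-ops
// for this model. The value below is a made-up example.
var addressExample = Address("192.0.2.10")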
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// AddressPair Addressing information of an endpoint
//
// swagger:model AddressPair
type AddressPair struct {
// IPv4 address
IPV4 string `json:"ipv4,omitempty"`
// UUID of IPv4 expiration timer
IPV4ExpirationUUID string `json:"ipv4-expiration-uuid,omitempty"`
// IPAM pool from which this IPv4 address was allocated
IPV4PoolName string `json:"ipv4-pool-name,omitempty"`
// IPv6 address
IPV6 string `json:"ipv6,omitempty"`
// UUID of IPv6 expiration timer
IPV6ExpirationUUID string `json:"ipv6-expiration-uuid,omitempty"`
// IPAM pool from which this IPv6 address was allocated
IPV6PoolName string `json:"ipv6-pool-name,omitempty"`
}
// Validate validates this address pair
func (m *AddressPair) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this address pair based on context it is used
func (m *AddressPair) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *AddressPair) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *AddressPair) UnmarshalBinary(b []byte) error {
var res AddressPair
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
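// Illustrative sketch, not generated code: the struct tags above mean a
// populated AddressPair marshals with kebab-case keys such as
// "ipv4-expiration-uuid" and "ipv4-pool-name". All values below are made up.
var addressPairExample = AddressPair{
IPV4: "10.0.1.23",
IPV4PoolName: "default-ipv4-pool",
IPV6: "fd00::23",
IPV6PoolName: "default-ipv6-pool",
}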
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// AllocationMap Map of allocated IPs
//
// swagger:model AllocationMap
type AllocationMap map[string]string
// Validate validates this allocation map
func (m AllocationMap) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this allocation map based on context it is used
func (m AllocationMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// AttachMode Core datapath attachment mode
//
// swagger:model AttachMode
type AttachMode string
func NewAttachMode(value AttachMode) *AttachMode {
return &value
}
// Pointer returns a pointer to a freshly-allocated AttachMode.
func (m AttachMode) Pointer() *AttachMode {
return &m
}
const (
// AttachModeTc captures enum value "tc"
AttachModeTc AttachMode = "tc"
// AttachModeTcx captures enum value "tcx"
AttachModeTcx AttachMode = "tcx"
)
// for schema
var attachModeEnum []interface{}
func init() {
var res []AttachMode
if err := json.Unmarshal([]byte(`["tc","tcx"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
attachModeEnum = append(attachModeEnum, v)
}
}
func (m AttachMode) validateAttachModeEnum(path, location string, value AttachMode) error {
if err := validate.EnumCase(path, location, value, attachModeEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this attach mode
func (m AttachMode) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateAttachModeEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this attach mode based on context it is used
func (m AttachMode) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
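// Illustrative sketch, not generated code: Validate accepts only the enum
// values declared above ("tc", "tcx"); anything else fails the EnumCase check.
// The function name is hypothetical; strfmt.Default is the registry callers
// usually pass.
func attachModeValidationExample() error {
good := NewAttachMode(AttachModeTcx)
if err := good.Validate(strfmt.Default); err != nil {
return err // not expected: "tcx" is a declared enum value
}
bad := AttachMode("xdp")
return bad.Validate(strfmt.Default) // returns a validation error: "xdp" is not in the enum
}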
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMap BPF map definition and content
//
// swagger:model BPFMap
type BPFMap struct {
// Contents of cache
Cache []*BPFMapEntry `json:"cache"`
// Path to BPF map
Path string `json:"path,omitempty"`
}
// Validate validates this b p f map
func (m *BPFMap) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCache(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMap) validateCache(formats strfmt.Registry) error {
if swag.IsZero(m.Cache) { // not required
return nil
}
for i := 0; i < len(m.Cache); i++ {
if swag.IsZero(m.Cache[i]) { // not required
continue
}
if m.Cache[i] != nil {
if err := m.Cache[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cache" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cache" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this b p f map based on the context it is used
func (m *BPFMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCache(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMap) contextValidateCache(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Cache); i++ {
if m.Cache[i] != nil {
if swag.IsZero(m.Cache[i]) { // not required
return nil
}
if err := m.Cache[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cache" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cache" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BPFMap) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMap) UnmarshalBinary(b []byte) error {
var res BPFMap
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BPFMapEntry BPF map cache entry
//
// swagger:model BPFMapEntry
type BPFMapEntry struct {
// Desired action to be performed
// Enum: [ok insert delete]
DesiredAction string `json:"desired-action,omitempty"`
// Key of map entry
Key string `json:"key,omitempty"`
// Last error seen while performing desired action
LastError string `json:"last-error,omitempty"`
// Value of map entry
Value string `json:"value,omitempty"`
}
// Validate validates this b p f map entry
func (m *BPFMapEntry) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDesiredAction(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var bPFMapEntryTypeDesiredActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","insert","delete"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bPFMapEntryTypeDesiredActionPropEnum = append(bPFMapEntryTypeDesiredActionPropEnum, v)
}
}
const (
// BPFMapEntryDesiredActionOk captures enum value "ok"
BPFMapEntryDesiredActionOk string = "ok"
// BPFMapEntryDesiredActionInsert captures enum value "insert"
BPFMapEntryDesiredActionInsert string = "insert"
// BPFMapEntryDesiredActionDelete captures enum value "delete"
BPFMapEntryDesiredActionDelete string = "delete"
)
// prop value enum
func (m *BPFMapEntry) validateDesiredActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bPFMapEntryTypeDesiredActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *BPFMapEntry) validateDesiredAction(formats strfmt.Registry) error {
if swag.IsZero(m.DesiredAction) { // not required
return nil
}
// value enum
if err := m.validateDesiredActionEnum("desired-action", "body", m.DesiredAction); err != nil {
return err
}
return nil
}
// ContextValidate validates this b p f map entry based on context it is used
func (m *BPFMapEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapEntry) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapEntry) UnmarshalBinary(b []byte) error {
var res BPFMapEntry
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
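// Illustrative sketch, not generated code: a BPFMap whose cache entries use
// the DesiredAction enum constants defined above. BPFMap.Validate walks each
// entry and reports failures under a "cache.<index>" name. The path and values
// are made up; strfmt.Default is the registry callers usually pass.
func bpfMapValidateExample() error {
m := &BPFMap{
Path: "/sys/fs/bpf/tc/globals/example_map",
Cache: []*BPFMapEntry{
{Key: "10.0.0.1:80", Value: "42", DesiredAction: BPFMapEntryDesiredActionOk},
},
}
return m.Validate(strfmt.Default)
}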
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapList List of BPF Maps
//
// swagger:model BPFMapList
type BPFMapList struct {
// Array of open BPF maps
Maps []*BPFMap `json:"maps"`
}
// Validate validates this b p f map list
func (m *BPFMapList) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMaps(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapList) validateMaps(formats strfmt.Registry) error {
if swag.IsZero(m.Maps) { // not required
return nil
}
for i := 0; i < len(m.Maps); i++ {
if swag.IsZero(m.Maps[i]) { // not required
continue
}
if m.Maps[i] != nil {
if err := m.Maps[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this b p f map list based on the context it is used
func (m *BPFMapList) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMaps(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapList) contextValidateMaps(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Maps); i++ {
if m.Maps[i] != nil {
if swag.IsZero(m.Maps[i]) { // not required
return nil
}
if err := m.Maps[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapList) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapList) UnmarshalBinary(b []byte) error {
var res BPFMapList
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapProperties BPF map properties
//
// swagger:model BPFMapProperties
type BPFMapProperties struct {
// Name of the BPF map
Name string `json:"name,omitempty"`
// Size of the BPF map
Size int64 `json:"size,omitempty"`
}
// Validate validates this b p f map properties
func (m *BPFMapProperties) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this b p f map properties based on context it is used
func (m *BPFMapProperties) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapProperties) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapProperties) UnmarshalBinary(b []byte) error {
var res BPFMapProperties
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapStatus BPF map status
//
// +k8s:deepcopy-gen=true
//
// swagger:model BPFMapStatus
type BPFMapStatus struct {
// Ratio of total system memory to use for dynamic sizing of BPF maps
DynamicSizeRatio float64 `json:"dynamic-size-ratio,omitempty"`
// BPF maps
Maps []*BPFMapProperties `json:"maps"`
}
// Validate validates this b p f map status
func (m *BPFMapStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMaps(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapStatus) validateMaps(formats strfmt.Registry) error {
if swag.IsZero(m.Maps) { // not required
return nil
}
for i := 0; i < len(m.Maps); i++ {
if swag.IsZero(m.Maps[i]) { // not required
continue
}
if m.Maps[i] != nil {
if err := m.Maps[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this b p f map status based on the context it is used
func (m *BPFMapStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMaps(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapStatus) contextValidateMaps(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Maps); i++ {
if m.Maps[i] != nil {
if swag.IsZero(m.Maps[i]) { // not required
return nil
}
if err := m.Maps[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapStatus) UnmarshalBinary(b []byte) error {
var res BPFMapStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BackendAddress Service backend address
//
// swagger:model BackendAddress
type BackendAddress struct {
// Layer 3 address
// Required: true
IP *string `json:"ip"`
// Optional name of the node on which this backend runs
NodeName string `json:"nodeName,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Indicates whether this backend is preferred in the context of clustermesh service affinity. The value is set based
// on the related annotation of the global service. Applicable to the active state only.
Preferred bool `json:"preferred,omitempty"`
// State of the backend for load-balancing service traffic
// Enum: [active terminating quarantined maintenance]
State string `json:"state,omitempty"`
// Backend weight
Weight *uint16 `json:"weight,omitempty"`
// Optional name of the zone in which this backend runs
Zone string `json:"zone,omitempty"`
}
// Validate validates this backend address
func (m *BackendAddress) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIP(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BackendAddress) validateIP(formats strfmt.Registry) error {
if err := validate.Required("ip", "body", m.IP); err != nil {
return err
}
return nil
}
var backendAddressTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["active","terminating","quarantined","maintenance"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
backendAddressTypeStatePropEnum = append(backendAddressTypeStatePropEnum, v)
}
}
const (
// BackendAddressStateActive captures enum value "active"
BackendAddressStateActive string = "active"
// BackendAddressStateTerminating captures enum value "terminating"
BackendAddressStateTerminating string = "terminating"
// BackendAddressStateQuarantined captures enum value "quarantined"
BackendAddressStateQuarantined string = "quarantined"
// BackendAddressStateMaintenance captures enum value "maintenance"
BackendAddressStateMaintenance string = "maintenance"
)
// prop value enum
func (m *BackendAddress) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, backendAddressTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *BackendAddress) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this backend address based on context it is used
func (m *BackendAddress) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BackendAddress) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BackendAddress) UnmarshalBinary(b []byte) error {
var res BackendAddress
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
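// Illustrative sketch, not generated code: IP is the only required field and
// is a pointer, so it is typically set via swag.String; State is restricted by
// Validate to the enum constants above. All values are made up.
var backendAddressExample = BackendAddress{
IP: swag.String("10.244.1.15"),
Port: 8080,
State: BackendAddressStateActive,
}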
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BandwidthManager Status of bandwidth manager
//
// +k8s:deepcopy-gen=true
//
// swagger:model BandwidthManager
type BandwidthManager struct {
// congestion control
// Enum: [cubic bbr]
CongestionControl string `json:"congestionControl,omitempty"`
// devices
Devices []string `json:"devices"`
// Is bandwidth manager enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this bandwidth manager
func (m *BandwidthManager) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCongestionControl(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var bandwidthManagerTypeCongestionControlPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["cubic","bbr"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bandwidthManagerTypeCongestionControlPropEnum = append(bandwidthManagerTypeCongestionControlPropEnum, v)
}
}
const (
// BandwidthManagerCongestionControlCubic captures enum value "cubic"
BandwidthManagerCongestionControlCubic string = "cubic"
// BandwidthManagerCongestionControlBbr captures enum value "bbr"
BandwidthManagerCongestionControlBbr string = "bbr"
)
// prop value enum
func (m *BandwidthManager) validateCongestionControlEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bandwidthManagerTypeCongestionControlPropEnum, true); err != nil {
return err
}
return nil
}
func (m *BandwidthManager) validateCongestionControl(formats strfmt.Registry) error {
if swag.IsZero(m.CongestionControl) { // not required
return nil
}
// value enum
if err := m.validateCongestionControlEnum("congestionControl", "body", m.CongestionControl); err != nil {
return err
}
return nil
}
// ContextValidate validates this bandwidth manager based on context it is used
func (m *BandwidthManager) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BandwidthManager) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BandwidthManager) UnmarshalBinary(b []byte) error {
var res BandwidthManager
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
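// Illustrative sketch, not generated code: CongestionControl is restricted by
// Validate to the enum constants above, so they are the safe way to populate
// it. The device name and enabled flag are made up.
var bandwidthManagerExample = BandwidthManager{
CongestionControl: BandwidthManagerCongestionControlBbr,
Devices: []string{"eth0"},
Enabled: true,
}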
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpFamily Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
//
// swagger:model BgpFamily
type BgpFamily struct {
// Address Family Indicator (AFI) of the path
Afi string `json:"afi,omitempty"`
// Subsequent Address Family Indicator (SAFI) of the path
Safi string `json:"safi,omitempty"`
}
// Validate validates this bgp family
func (m *BgpFamily) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp family based on context it is used
func (m *BgpFamily) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpFamily) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpFamily) UnmarshalBinary(b []byte) error {
var res BgpFamily
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpGracefulRestart BGP graceful restart parameters negotiated with the peer.
//
// +k8s:deepcopy-gen=true
//
// swagger:model BgpGracefulRestart
type BgpGracefulRestart struct {
// When set, graceful restart capability is negotiated for all AFI/SAFIs of
// this peer.
Enabled bool `json:"enabled,omitempty"`
// This is the time advertised to the peer for the BGP session to be re-established
// after a restart. After this period, the peer will remove stale routes.
// (RFC 4724 section 4.2)
RestartTimeSeconds int64 `json:"restart-time-seconds,omitempty"`
}
// Validate validates this bgp graceful restart
func (m *BgpGracefulRestart) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp graceful restart based on context it is used
func (m *BgpGracefulRestart) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpGracefulRestart) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpGracefulRestart) UnmarshalBinary(b []byte) error {
var res BgpGracefulRestart
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpNlri Network Layer Reachability Information (NLRI) of the path
//
// swagger:model BgpNlri
type BgpNlri struct {
// Base64-encoded NLRI in the BGP UPDATE message format
Base64 string `json:"base64,omitempty"`
}
// Validate validates this bgp nlri
func (m *BgpNlri) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp nlri based on context it is used
func (m *BgpNlri) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpNlri) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpNlri) UnmarshalBinary(b []byte) error {
var res BgpNlri
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPath Single BGP routing Path containing BGP Network Layer Reachability Information (NLRI) and path attributes
//
// swagger:model BgpPath
type BgpPath struct {
// Age of the path (time since its creation) in nanoseconds
AgeNanoseconds int64 `json:"age-nanoseconds,omitempty"`
// True value flags the best path towards the destination prefix
Best bool `json:"best,omitempty"`
// Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
Family *BgpFamily `json:"family,omitempty"`
// Network Layer Reachability Information of the path
Nlri *BgpNlri `json:"nlri,omitempty"`
// List of BGP path attributes specific for the path
PathAttributes []*BgpPathAttribute `json:"path-attributes"`
// True value marks the path as stale
Stale bool `json:"stale,omitempty"`
}
// Validate validates this bgp path
func (m *BgpPath) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFamily(formats); err != nil {
res = append(res, err)
}
if err := m.validateNlri(formats); err != nil {
res = append(res, err)
}
if err := m.validatePathAttributes(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPath) validateFamily(formats strfmt.Registry) error {
if swag.IsZero(m.Family) { // not required
return nil
}
if m.Family != nil {
if err := m.Family.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("family")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("family")
}
return err
}
}
return nil
}
func (m *BgpPath) validateNlri(formats strfmt.Registry) error {
if swag.IsZero(m.Nlri) { // not required
return nil
}
if m.Nlri != nil {
if err := m.Nlri.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nlri")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nlri")
}
return err
}
}
return nil
}
func (m *BgpPath) validatePathAttributes(formats strfmt.Registry) error {
if swag.IsZero(m.PathAttributes) { // not required
return nil
}
for i := 0; i < len(m.PathAttributes); i++ {
if swag.IsZero(m.PathAttributes[i]) { // not required
continue
}
if m.PathAttributes[i] != nil {
if err := m.PathAttributes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this bgp path based on the context it is used
func (m *BgpPath) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFamily(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNlri(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePathAttributes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPath) contextValidateFamily(ctx context.Context, formats strfmt.Registry) error {
if m.Family != nil {
if swag.IsZero(m.Family) { // not required
return nil
}
if err := m.Family.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("family")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("family")
}
return err
}
}
return nil
}
func (m *BgpPath) contextValidateNlri(ctx context.Context, formats strfmt.Registry) error {
if m.Nlri != nil {
if swag.IsZero(m.Nlri) { // not required
return nil
}
if err := m.Nlri.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nlri")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nlri")
}
return err
}
}
return nil
}
func (m *BgpPath) contextValidatePathAttributes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.PathAttributes); i++ {
if m.PathAttributes[i] != nil {
if swag.IsZero(m.PathAttributes[i]) { // not required
return nil
}
if err := m.PathAttributes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpPath) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPath) UnmarshalBinary(b []byte) error {
var res BgpPath
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPathAttribute Single BGP path attribute specific for the path
//
// swagger:model BgpPathAttribute
type BgpPathAttribute struct {
// Base64-encoded BGP path attribute in the BGP UPDATE message format
Base64 string `json:"base64,omitempty"`
}
// Validate validates this bgp path attribute
func (m *BgpPathAttribute) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp path attribute based on context it is used
func (m *BgpPathAttribute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpPathAttribute) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPathAttribute) UnmarshalBinary(b []byte) error {
var res BgpPathAttribute
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
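// Illustrative sketch, not generated code: a BgpPath assembled from the
// building blocks above (BgpFamily, BgpNlri, BgpPathAttribute). The base64
// strings are placeholders, not real BGP UPDATE encodings, and the AFI/SAFI
// values are just common examples.
var bgpPathExample = BgpPath{
Best: true,
Family: &BgpFamily{Afi: "ipv4", Safi: "unicast"},
Nlri: &BgpNlri{Base64: "IAAAAA=="},
PathAttributes: []*BgpPathAttribute{
{Base64: "QAEBAA=="},
},
}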
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpPeer State of a BGP Peer
//
// +k8s:deepcopy-gen=true
//
// swagger:model BgpPeer
type BgpPeer struct {
// Applied initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds.
// The applied value holds the value that is in effect on the current BGP session.
//
AppliedHoldTimeSeconds int64 `json:"applied-hold-time-seconds,omitempty"`
// Applied initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds.
// The applied value holds the value that is in effect on the current BGP session.
//
AppliedKeepAliveTimeSeconds int64 `json:"applied-keep-alive-time-seconds,omitempty"`
// Configured initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds.
// The configured value will be used for negotiation with the peer during the BGP session establishment.
//
ConfiguredHoldTimeSeconds int64 `json:"configured-hold-time-seconds,omitempty"`
// Configured initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds.
// The applied value may be different than the configured value, as it depends on the negotiated hold time interval.
//
ConfiguredKeepAliveTimeSeconds int64 `json:"configured-keep-alive-time-seconds,omitempty"`
// Initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8) in seconds
ConnectRetryTimeSeconds int64 `json:"connect-retry-time-seconds,omitempty"`
// Time To Live (TTL) value used in BGP packets sent to the eBGP neighbor.
// 1 implies that the eBGP multi-hop feature is disabled (only a single hop is allowed).
//
EbgpMultihopTTL int64 `json:"ebgp-multihop-ttl,omitempty"`
// BGP peer address family state
Families []*BgpPeerFamilies `json:"families"`
// Graceful restart capability
GracefulRestart *BgpGracefulRestart `json:"graceful-restart,omitempty"`
// Local AS Number
LocalAsn int64 `json:"local-asn,omitempty"`
// IP Address of peer
PeerAddress string `json:"peer-address,omitempty"`
// Peer AS Number
PeerAsn int64 `json:"peer-asn,omitempty"`
// TCP port number of peer
// Maximum: 65535
// Minimum: 1
PeerPort int64 `json:"peer-port,omitempty"`
// BGP peer operational state as described here
// https://www.rfc-editor.org/rfc/rfc4271#section-8.2.2
//
SessionState string `json:"session-state,omitempty"`
// Set when a TCP password is configured for communications with this peer
TCPPasswordEnabled bool `json:"tcp-password-enabled,omitempty"`
// BGP peer connection uptime in nanoseconds.
UptimeNanoseconds int64 `json:"uptime-nanoseconds,omitempty"`
}
// Validate validates this bgp peer
func (m *BgpPeer) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFamilies(formats); err != nil {
res = append(res, err)
}
if err := m.validateGracefulRestart(formats); err != nil {
res = append(res, err)
}
if err := m.validatePeerPort(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPeer) validateFamilies(formats strfmt.Registry) error {
if swag.IsZero(m.Families) { // not required
return nil
}
for i := 0; i < len(m.Families); i++ {
if swag.IsZero(m.Families[i]) { // not required
continue
}
if m.Families[i] != nil {
if err := m.Families[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("families" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("families" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *BgpPeer) validateGracefulRestart(formats strfmt.Registry) error {
if swag.IsZero(m.GracefulRestart) { // not required
return nil
}
if m.GracefulRestart != nil {
if err := m.GracefulRestart.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("graceful-restart")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("graceful-restart")
}
return err
}
}
return nil
}
func (m *BgpPeer) validatePeerPort(formats strfmt.Registry) error {
if swag.IsZero(m.PeerPort) { // not required
return nil
}
if err := validate.MinimumInt("peer-port", "body", m.PeerPort, 1, false); err != nil {
return err
}
if err := validate.MaximumInt("peer-port", "body", m.PeerPort, 65535, false); err != nil {
return err
}
return nil
}
// ContextValidate validates this bgp peer based on the context in which it is used
func (m *BgpPeer) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFamilies(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateGracefulRestart(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPeer) contextValidateFamilies(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Families); i++ {
if m.Families[i] != nil {
if swag.IsZero(m.Families[i]) { // not required
return nil
}
if err := m.Families[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("families" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("families" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *BgpPeer) contextValidateGracefulRestart(ctx context.Context, formats strfmt.Registry) error {
if m.GracefulRestart != nil {
if swag.IsZero(m.GracefulRestart) { // not required
return nil
}
if err := m.GracefulRestart.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("graceful-restart")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("graceful-restart")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpPeer) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPeer) UnmarshalBinary(b []byte) error {
var res BgpPeer
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
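// exampleValidateBgpPeer is an illustrative sketch, not part of the generated API:
// it constructs a BgpPeer whose peer-port violates the documented 1..65535 range and
// runs Validate against the default format registry, which returns a composite
// validation error naming the "peer-port" field. The peer address and ASN are hypothetical.
func exampleValidateBgpPeer() error {
    peer := &BgpPeer{
        PeerAddress: "192.0.2.1", // hypothetical peer
        PeerAsn:     64512,
        PeerPort:    70000, // out of range: the maximum allowed value is 65535
    }
    return peer.Validate(strfmt.Default)
}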
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPeerFamilies BGP AFI SAFI state of the peer
//
// +k8s:deepcopy-gen=true
//
// swagger:model BgpPeerFamilies
type BgpPeerFamilies struct {
// Number of routes accepted from the peer of this address family
Accepted int64 `json:"accepted,omitempty"`
// Number of routes advertised of this address family to the peer
Advertised int64 `json:"advertised,omitempty"`
// BGP address family indicator
Afi string `json:"afi,omitempty"`
// Number of routes received from the peer of this address family
Received int64 `json:"received,omitempty"`
// BGP subsequent address family indicator
Safi string `json:"safi,omitempty"`
}
// Validate validates this bgp peer families
func (m *BgpPeerFamilies) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp peer families based on the context in which it is used
func (m *BgpPeerFamilies) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpPeerFamilies) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPeerFamilies) UnmarshalBinary(b []byte) error {
var res BgpPeerFamilies
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpRoute Single BGP route retrieved from the RIB of the underlying router
//
// swagger:model BgpRoute
type BgpRoute struct {
// IP address specifying a BGP neighbor if the source table type is adj-rib-in or adj-rib-out
Neighbor string `json:"neighbor,omitempty"`
// List of routing paths leading towards the prefix
Paths []*BgpPath `json:"paths"`
// IP prefix of the route
Prefix string `json:"prefix,omitempty"`
// Autonomous System Number (ASN) identifying a BGP virtual router instance
RouterAsn int64 `json:"router-asn,omitempty"`
}
// Validate validates this bgp route
func (m *BgpRoute) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePaths(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoute) validatePaths(formats strfmt.Registry) error {
if swag.IsZero(m.Paths) { // not required
return nil
}
for i := 0; i < len(m.Paths); i++ {
if swag.IsZero(m.Paths[i]) { // not required
continue
}
if m.Paths[i] != nil {
if err := m.Paths[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("paths" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("paths" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this bgp route based on the context in which it is used
func (m *BgpRoute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidatePaths(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoute) contextValidatePaths(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Paths); i++ {
if m.Paths[i] != nil {
if swag.IsZero(m.Paths[i]) { // not required
return nil
}
if err := m.Paths[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("paths" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("paths" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoute) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoute) UnmarshalBinary(b []byte) error {
var res BgpRoute
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
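// exampleValidateBgpRoute is an illustrative sketch (not generated code): it shows the
// two-phase validation used by these models, first the plain Validate pass and then
// ContextValidate, which descends into the nested BgpPath entries of the route. The
// prefix and ASN are hypothetical, and the empty BgpPath violates no constraints.
func exampleValidateBgpRoute(ctx context.Context) error {
    route := &BgpRoute{
        Prefix:    "10.0.0.0/24", // hypothetical prefix
        RouterAsn: 64512,
        Paths:     []*BgpPath{{}},
    }
    if err := route.Validate(strfmt.Default); err != nil {
        return err
    }
    return route.ContextValidate(ctx, strfmt.Default)
}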
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpRoutePolicy Single BGP route policy retrieved from the underlying router
//
// swagger:model BgpRoutePolicy
type BgpRoutePolicy struct {
// Name of the route policy
Name string `json:"name,omitempty"`
// Autonomous System Number (ASN) identifying a BGP virtual router instance
RouterAsn int64 `json:"router-asn,omitempty"`
// List of the route policy statements
Statements []*BgpRoutePolicyStatement `json:"statements"`
// Type of the route policy
// Enum: [export import]
Type string `json:"type,omitempty"`
}
// Validate validates this bgp route policy
func (m *BgpRoutePolicy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateStatements(formats); err != nil {
res = append(res, err)
}
if err := m.validateType(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicy) validateStatements(formats strfmt.Registry) error {
if swag.IsZero(m.Statements) { // not required
return nil
}
for i := 0; i < len(m.Statements); i++ {
if swag.IsZero(m.Statements[i]) { // not required
continue
}
if m.Statements[i] != nil {
if err := m.Statements[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statements" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statements" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
var bgpRoutePolicyTypeTypePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["export","import"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bgpRoutePolicyTypeTypePropEnum = append(bgpRoutePolicyTypeTypePropEnum, v)
}
}
const (
// BgpRoutePolicyTypeExport captures enum value "export"
BgpRoutePolicyTypeExport string = "export"
// BgpRoutePolicyTypeImport captures enum value "import"
BgpRoutePolicyTypeImport string = "import"
)
// prop value enum
func (m *BgpRoutePolicy) validateTypeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bgpRoutePolicyTypeTypePropEnum, true); err != nil {
return err
}
return nil
}
func (m *BgpRoutePolicy) validateType(formats strfmt.Registry) error {
if swag.IsZero(m.Type) { // not required
return nil
}
// value enum
if err := m.validateTypeEnum("type", "body", m.Type); err != nil {
return err
}
return nil
}
// ContextValidate validates this bgp route policy based on the context in which it is used
func (m *BgpRoutePolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateStatements(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicy) contextValidateStatements(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Statements); i++ {
if m.Statements[i] != nil {
if swag.IsZero(m.Statements[i]) { // not required
return nil
}
if err := m.Statements[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statements" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statements" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoutePolicy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoutePolicy) UnmarshalBinary(b []byte) error {
var res BgpRoutePolicy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
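// exampleValidateBgpRoutePolicyType is an illustrative sketch, not part of the
// generated API: it demonstrates the "type" enum check. Using one of the generated
// constants (BgpRoutePolicyTypeExport or BgpRoutePolicyTypeImport) passes validation,
// while any other string is rejected by validateType. The policy name is hypothetical.
func exampleValidateBgpRoutePolicyType() []error {
    valid := &BgpRoutePolicy{Name: "example-policy", Type: BgpRoutePolicyTypeExport}
    invalid := &BgpRoutePolicy{Name: "example-policy", Type: "forward"} // not in the [export import] enum
    // The first Validate returns nil; the second returns a composite validation
    // error naming the "type" field.
    return []error{valid.Validate(strfmt.Default), invalid.Validate(strfmt.Default)}
}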
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpRoutePolicyPrefixMatch Matches a CIDR prefix in a BGP route policy
//
// swagger:model BgpRoutePolicyPrefixMatch
type BgpRoutePolicyPrefixMatch struct {
// CIDR prefix to match with
Cidr string `json:"cidr,omitempty"`
// Maximal prefix length that will match if it falls under CIDR
PrefixLenMax int64 `json:"prefix-len-max,omitempty"`
// Minimal prefix length that will match if it falls under CIDR
PrefixLenMin int64 `json:"prefix-len-min,omitempty"`
}
// Validate validates this bgp route policy prefix match
func (m *BgpRoutePolicyPrefixMatch) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp route policy prefix match based on the context in which it is used
func (m *BgpRoutePolicyPrefixMatch) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoutePolicyPrefixMatch) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoutePolicyPrefixMatch) UnmarshalBinary(b []byte) error {
var res BgpRoutePolicyPrefixMatch
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpRoutePolicyStatement Single BGP route policy statement
//
// swagger:model BgpRoutePolicyStatement
type BgpRoutePolicyStatement struct {
// List of BGP standard community values to be added to the matched route
AddCommunities []string `json:"add-communities"`
// List of BGP large community values to be added to the matched route
AddLargeCommunities []string `json:"add-large-communities"`
// Matches any of the provided BGP neighbor IP addresses. If empty, matches all neighbors.
MatchNeighbors []string `json:"match-neighbors"`
// Matches any of the provided prefixes. If empty, matches all prefixes.
MatchPrefixes []*BgpRoutePolicyPrefixMatch `json:"match-prefixes"`
// RIB processing action taken on the matched route
// Enum: [none accept reject]
RouteAction string `json:"route-action,omitempty"`
// BGP local preference value to be set on the matched route
SetLocalPreference int64 `json:"set-local-preference,omitempty"`
}
// Validate validates this bgp route policy statement
func (m *BgpRoutePolicyStatement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMatchPrefixes(formats); err != nil {
res = append(res, err)
}
if err := m.validateRouteAction(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicyStatement) validateMatchPrefixes(formats strfmt.Registry) error {
if swag.IsZero(m.MatchPrefixes) { // not required
return nil
}
for i := 0; i < len(m.MatchPrefixes); i++ {
if swag.IsZero(m.MatchPrefixes[i]) { // not required
continue
}
if m.MatchPrefixes[i] != nil {
if err := m.MatchPrefixes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
var bgpRoutePolicyStatementTypeRouteActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["none","accept","reject"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bgpRoutePolicyStatementTypeRouteActionPropEnum = append(bgpRoutePolicyStatementTypeRouteActionPropEnum, v)
}
}
const (
// BgpRoutePolicyStatementRouteActionNone captures enum value "none"
BgpRoutePolicyStatementRouteActionNone string = "none"
// BgpRoutePolicyStatementRouteActionAccept captures enum value "accept"
BgpRoutePolicyStatementRouteActionAccept string = "accept"
// BgpRoutePolicyStatementRouteActionReject captures enum value "reject"
BgpRoutePolicyStatementRouteActionReject string = "reject"
)
// prop value enum
func (m *BgpRoutePolicyStatement) validateRouteActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bgpRoutePolicyStatementTypeRouteActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *BgpRoutePolicyStatement) validateRouteAction(formats strfmt.Registry) error {
if swag.IsZero(m.RouteAction) { // not required
return nil
}
// value enum
if err := m.validateRouteActionEnum("route-action", "body", m.RouteAction); err != nil {
return err
}
return nil
}
// ContextValidate validates this bgp route policy statement based on the context in which it is used
func (m *BgpRoutePolicyStatement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMatchPrefixes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicyStatement) contextValidateMatchPrefixes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.MatchPrefixes); i++ {
if m.MatchPrefixes[i] != nil {
if swag.IsZero(m.MatchPrefixes[i]) { // not required
return nil
}
if err := m.MatchPrefixes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoutePolicyStatement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoutePolicyStatement) UnmarshalBinary(b []byte) error {
var res BgpRoutePolicyStatement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
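// exampleBgpRoutePolicyStatement is an illustrative sketch (not generated code): it
// assembles a statement that matches a hypothetical prefix range and accepts matching
// routes, then validates it, exercising both the nested BgpRoutePolicyPrefixMatch
// validation and the route-action enum check. All values shown are hypothetical.
func exampleBgpRoutePolicyStatement() error {
    stmt := &BgpRoutePolicyStatement{
        MatchPrefixes: []*BgpRoutePolicyPrefixMatch{
            {Cidr: "10.0.0.0/8", PrefixLenMin: 16, PrefixLenMax: 24}, // hypothetical range
        },
        RouteAction:        BgpRoutePolicyStatementRouteActionAccept,
        SetLocalPreference: 200,
    }
    return stmt.Validate(strfmt.Default)
}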
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CIDRList List of CIDRs
//
// swagger:model CIDRList
type CIDRList struct {
// list
List []string `json:"list"`
// revision
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this CIDR list
func (m *CIDRList) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this CIDR list based on the context in which it is used
func (m *CIDRList) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *CIDRList) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CIDRList) UnmarshalBinary(b []byte) error {
var res CIDRList
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CIDRPolicy CIDR endpoint policy
//
// +k8s:deepcopy-gen=true
//
// swagger:model CIDRPolicy
type CIDRPolicy struct {
// List of CIDR egress rules
Egress []*PolicyRule `json:"egress"`
// List of CIDR ingress rules
Ingress []*PolicyRule `json:"ingress"`
}
// Validate validates this CIDR policy
func (m *CIDRPolicy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEgress(formats); err != nil {
res = append(res, err)
}
if err := m.validateIngress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CIDRPolicy) validateEgress(formats strfmt.Registry) error {
if swag.IsZero(m.Egress) { // not required
return nil
}
for i := 0; i < len(m.Egress); i++ {
if swag.IsZero(m.Egress[i]) { // not required
continue
}
if m.Egress[i] != nil {
if err := m.Egress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *CIDRPolicy) validateIngress(formats strfmt.Registry) error {
if swag.IsZero(m.Ingress) { // not required
return nil
}
for i := 0; i < len(m.Ingress); i++ {
if swag.IsZero(m.Ingress[i]) { // not required
continue
}
if m.Ingress[i] != nil {
if err := m.Ingress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this CIDR policy based on the context in which it is used
func (m *CIDRPolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateEgress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIngress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CIDRPolicy) contextValidateEgress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Egress); i++ {
if m.Egress[i] != nil {
if swag.IsZero(m.Egress[i]) { // not required
return nil
}
if err := m.Egress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *CIDRPolicy) contextValidateIngress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Ingress); i++ {
if m.Ingress[i] != nil {
if swag.IsZero(m.Ingress[i]) { // not required
return nil
}
if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *CIDRPolicy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CIDRPolicy) UnmarshalBinary(b []byte) error {
var res CIDRPolicy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// CNIChainingStatus Status of CNI chaining
//
// +k8s:deepcopy-gen=true
//
// swagger:model CNIChainingStatus
type CNIChainingStatus struct {
// mode
// Enum: [none aws-cni flannel generic-veth portmap]
Mode string `json:"mode,omitempty"`
}
// Validate validates this CNI chaining status
func (m *CNIChainingStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var cNIChainingStatusTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["none","aws-cni","flannel","generic-veth","portmap"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
cNIChainingStatusTypeModePropEnum = append(cNIChainingStatusTypeModePropEnum, v)
}
}
const (
// CNIChainingStatusModeNone captures enum value "none"
CNIChainingStatusModeNone string = "none"
// CNIChainingStatusModeAwsDashCni captures enum value "aws-cni"
CNIChainingStatusModeAwsDashCni string = "aws-cni"
// CNIChainingStatusModeFlannel captures enum value "flannel"
CNIChainingStatusModeFlannel string = "flannel"
// CNIChainingStatusModeGenericDashVeth captures enum value "generic-veth"
CNIChainingStatusModeGenericDashVeth string = "generic-veth"
// CNIChainingStatusModePortmap captures enum value "portmap"
CNIChainingStatusModePortmap string = "portmap"
)
// prop value enum
func (m *CNIChainingStatus) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, cNIChainingStatusTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *CNIChainingStatus) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this CNI chaining status based on the context in which it is used
func (m *CNIChainingStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *CNIChainingStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CNIChainingStatus) UnmarshalBinary(b []byte) error {
var res CNIChainingStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
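// exampleValidateCNIChainingStatus is an illustrative sketch, not part of the
// generated API: the mode field only accepts the values enumerated above, so
// validation succeeds for a generated constant and fails for anything else.
func exampleValidateCNIChainingStatus() []error {
    ok := &CNIChainingStatus{Mode: CNIChainingStatusModeAwsDashCni}
    bad := &CNIChainingStatus{Mode: "calico"} // not in the mode enum
    return []error{ok.Validate(strfmt.Default), bad.Validate(strfmt.Default)}
}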
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupContainerMetadata cgroup container metadata
//
// swagger:model CgroupContainerMetadata
type CgroupContainerMetadata struct {
// cgroup id
CgroupID uint64 `json:"cgroup-id,omitempty"`
// cgroup path
CgroupPath string `json:"cgroup-path,omitempty"`
}
// Validate validates this cgroup container metadata
func (m *CgroupContainerMetadata) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this cgroup container metadata based on the context in which it is used
func (m *CgroupContainerMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *CgroupContainerMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CgroupContainerMetadata) UnmarshalBinary(b []byte) error {
var res CgroupContainerMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupDumpMetadata cgroup full metadata
//
// swagger:model CgroupDumpMetadata
type CgroupDumpMetadata struct {
// pod metadatas
PodMetadatas []*CgroupPodMetadata `json:"pod-metadatas"`
}
// Validate validates this cgroup dump metadata
func (m *CgroupDumpMetadata) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePodMetadatas(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupDumpMetadata) validatePodMetadatas(formats strfmt.Registry) error {
if swag.IsZero(m.PodMetadatas) { // not required
return nil
}
for i := 0; i < len(m.PodMetadatas); i++ {
if swag.IsZero(m.PodMetadatas[i]) { // not required
continue
}
if m.PodMetadatas[i] != nil {
if err := m.PodMetadatas[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this cgroup dump metadata based on the context in which it is used
func (m *CgroupDumpMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidatePodMetadatas(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupDumpMetadata) contextValidatePodMetadatas(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.PodMetadatas); i++ {
if m.PodMetadatas[i] != nil {
if swag.IsZero(m.PodMetadatas[i]) { // not required
return nil
}
if err := m.PodMetadatas[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *CgroupDumpMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CgroupDumpMetadata) UnmarshalBinary(b []byte) error {
var res CgroupDumpMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupPodMetadata cgroup pod metadata
//
// swagger:model CgroupPodMetadata
type CgroupPodMetadata struct {
// containers
Containers []*CgroupContainerMetadata `json:"containers"`
// ips
Ips []string `json:"ips"`
// name
Name string `json:"name,omitempty"`
// namespace
Namespace string `json:"namespace,omitempty"`
}
// Validate validates this cgroup pod metadata
func (m *CgroupPodMetadata) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateContainers(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupPodMetadata) validateContainers(formats strfmt.Registry) error {
if swag.IsZero(m.Containers) { // not required
return nil
}
for i := 0; i < len(m.Containers); i++ {
if swag.IsZero(m.Containers[i]) { // not required
continue
}
if m.Containers[i] != nil {
if err := m.Containers[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("containers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("containers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this cgroup pod metadata based on the context in which it is used
func (m *CgroupPodMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateContainers(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupPodMetadata) contextValidateContainers(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Containers); i++ {
if m.Containers[i] != nil {
if swag.IsZero(m.Containers[i]) { // not required
return nil
}
if err := m.Containers[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("containers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("containers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *CgroupPodMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CgroupPodMetadata) UnmarshalBinary(b []byte) error {
var res CgroupPodMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ClockSource Status of BPF clock source
//
// +k8s:deepcopy-gen=true
//
// swagger:model ClockSource
type ClockSource struct {
// Kernel Hz
Hertz int64 `json:"hertz,omitempty"`
// Datapath clock source
// Enum: [ktime jiffies]
Mode string `json:"mode,omitempty"`
}
// Validate validates this clock source
func (m *ClockSource) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var clockSourceTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ktime","jiffies"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
clockSourceTypeModePropEnum = append(clockSourceTypeModePropEnum, v)
}
}
const (
// ClockSourceModeKtime captures enum value "ktime"
ClockSourceModeKtime string = "ktime"
// ClockSourceModeJiffies captures enum value "jiffies"
ClockSourceModeJiffies string = "jiffies"
)
// prop value enum
func (m *ClockSource) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, clockSourceTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *ClockSource) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this clock source based on the context in which it is used
func (m *ClockSource) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ClockSource) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClockSource) UnmarshalBinary(b []byte) error {
var res ClockSource
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
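// exampleValidateClockSource is an illustrative sketch (not generated code): it builds
// a ClockSource using one of the generated mode constants. The hertz value is a
// hypothetical kernel HZ setting; Validate only checks the mode against the
// [ktime jiffies] enum.
func exampleValidateClockSource() error {
    src := &ClockSource{
        Mode:  ClockSourceModeJiffies,
        Hertz: 250, // hypothetical kernel HZ value
    }
    return src.Validate(strfmt.Default)
}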
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterMeshStatus Status of ClusterMesh
//
// +k8s:deepcopy-gen=true
//
// swagger:model ClusterMeshStatus
type ClusterMeshStatus struct {
// List of remote clusters
Clusters []*RemoteCluster `json:"clusters"`
// Number of global services
NumGlobalServices int64 `json:"num-global-services,omitempty"`
}
// Validate validates this cluster mesh status
func (m *ClusterMeshStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateClusters(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterMeshStatus) validateClusters(formats strfmt.Registry) error {
if swag.IsZero(m.Clusters) { // not required
return nil
}
for i := 0; i < len(m.Clusters); i++ {
if swag.IsZero(m.Clusters[i]) { // not required
continue
}
if m.Clusters[i] != nil {
if err := m.Clusters[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clusters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clusters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this cluster mesh status based on the context in which it is used
func (m *ClusterMeshStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateClusters(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterMeshStatus) contextValidateClusters(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Clusters); i++ {
if m.Clusters[i] != nil {
if swag.IsZero(m.Clusters[i]) { // not required
return nil
}
if err := m.Clusters[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clusters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clusters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterMeshStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterMeshStatus) UnmarshalBinary(b []byte) error {
var res ClusterMeshStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterNodeStatus Status of cluster
//
// swagger:model ClusterNodeStatus
type ClusterNodeStatus struct {
// ID that should be used by the client to receive a diff from the previous request
ClientID int64 `json:"client-id,omitempty"`
// List of known nodes
NodesAdded []*NodeElement `json:"nodes-added"`
// List of known nodes
NodesRemoved []*NodeElement `json:"nodes-removed"`
// Name of local node (if available)
Self string `json:"self,omitempty"`
}
// Validate validates this cluster node status
func (m *ClusterNodeStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateNodesAdded(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodesRemoved(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodeStatus) validateNodesAdded(formats strfmt.Registry) error {
if swag.IsZero(m.NodesAdded) { // not required
return nil
}
for i := 0; i < len(m.NodesAdded); i++ {
if swag.IsZero(m.NodesAdded[i]) { // not required
continue
}
if m.NodesAdded[i] != nil {
if err := m.NodesAdded[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-added" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-added" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ClusterNodeStatus) validateNodesRemoved(formats strfmt.Registry) error {
if swag.IsZero(m.NodesRemoved) { // not required
return nil
}
for i := 0; i < len(m.NodesRemoved); i++ {
if swag.IsZero(m.NodesRemoved[i]) { // not required
continue
}
if m.NodesRemoved[i] != nil {
if err := m.NodesRemoved[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this cluster node status based on the context in which it is used
func (m *ClusterNodeStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateNodesAdded(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodesRemoved(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodeStatus) contextValidateNodesAdded(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.NodesAdded); i++ {
if m.NodesAdded[i] != nil {
if swag.IsZero(m.NodesAdded[i]) { // not required
return nil
}
if err := m.NodesAdded[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-added" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-added" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ClusterNodeStatus) contextValidateNodesRemoved(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.NodesRemoved); i++ {
if m.NodesRemoved[i] != nil {
if swag.IsZero(m.NodesRemoved[i]) { // not required
return nil
}
if err := m.NodesRemoved[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterNodeStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterNodeStatus) UnmarshalBinary(b []byte) error {
var res ClusterNodeStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterNodesResponse cluster nodes response
//
// swagger:model ClusterNodesResponse
type ClusterNodesResponse struct {
// List of known nodes
Nodes []*NodeElement `json:"nodes"`
// Name of local node (if available)
Self string `json:"self,omitempty"`
}
// Validate validates this cluster nodes response
func (m *ClusterNodesResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateNodes(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodesResponse) validateNodes(formats strfmt.Registry) error {
if swag.IsZero(m.Nodes) { // not required
return nil
}
for i := 0; i < len(m.Nodes); i++ {
if swag.IsZero(m.Nodes[i]) { // not required
continue
}
if m.Nodes[i] != nil {
if err := m.Nodes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this cluster nodes response based on the context in which it is used
func (m *ClusterNodesResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateNodes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodesResponse) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Nodes); i++ {
if m.Nodes[i] != nil {
if swag.IsZero(m.Nodes[i]) { // not required
return nil
}
if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterNodesResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterNodesResponse) UnmarshalBinary(b []byte) error {
var res ClusterNodesResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterStatus Status of cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model ClusterStatus
type ClusterStatus struct {
// Status of local cilium-health daemon
CiliumHealth *Status `json:"ciliumHealth,omitempty"`
// List of known nodes
Nodes []*NodeElement `json:"nodes"`
// Name of local node (if available)
Self string `json:"self,omitempty"`
}
// Validate validates this cluster status
func (m *ClusterStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCiliumHealth(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodes(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterStatus) validateCiliumHealth(formats strfmt.Registry) error {
if swag.IsZero(m.CiliumHealth) { // not required
return nil
}
if m.CiliumHealth != nil {
if err := m.CiliumHealth.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ciliumHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ciliumHealth")
}
return err
}
}
return nil
}
func (m *ClusterStatus) validateNodes(formats strfmt.Registry) error {
if swag.IsZero(m.Nodes) { // not required
return nil
}
for i := 0; i < len(m.Nodes); i++ {
if swag.IsZero(m.Nodes[i]) { // not required
continue
}
if m.Nodes[i] != nil {
if err := m.Nodes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this cluster status based on the context in which it is used
func (m *ClusterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCiliumHealth(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterStatus) contextValidateCiliumHealth(ctx context.Context, formats strfmt.Registry) error {
if m.CiliumHealth != nil {
if swag.IsZero(m.CiliumHealth) { // not required
return nil
}
if err := m.CiliumHealth.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ciliumHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ciliumHealth")
}
return err
}
}
return nil
}
func (m *ClusterStatus) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Nodes); i++ {
if m.Nodes[i] != nil {
if swag.IsZero(m.Nodes[i]) { // not required
return nil
}
if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterStatus) UnmarshalBinary(b []byte) error {
var res ClusterStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// ConfigurationMap Map of configuration key/value pairs.
//
// swagger:model ConfigurationMap
type ConfigurationMap map[string]string
// Validate validates this configuration map
func (m ConfigurationMap) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this configuration map based on the context in which it is used
func (m ConfigurationMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
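// exampleConfigurationMap is an illustrative sketch, not part of the generated API:
// ConfigurationMap is a plain map[string]string, so it can be built with a map literal
// and carries no validation constraints of its own. The keys and values shown are
// hypothetical configuration entries.
func exampleConfigurationMap() ConfigurationMap {
    return ConfigurationMap{
        "debug":                "true",   // hypothetical option
        "monitor-aggregation":  "medium", // hypothetical option
    }
}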
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ControllerStatus Status of a controller
//
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatus
type ControllerStatus struct {
// configuration
Configuration *ControllerStatusConfiguration `json:"configuration,omitempty"`
// Name of controller
Name string `json:"name,omitempty"`
// status
Status *ControllerStatusStatus `json:"status,omitempty"`
// UUID of controller
// Format: uuid
UUID strfmt.UUID `json:"uuid,omitempty"`
}
// Validate validates this controller status
func (m *ControllerStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if err := m.validateUUID(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatus) validateConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.Configuration) { // not required
return nil
}
if m.Configuration != nil {
if err := m.Configuration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("configuration")
}
return err
}
}
return nil
}
func (m *ControllerStatus) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
func (m *ControllerStatus) validateUUID(formats strfmt.Registry) error {
if swag.IsZero(m.UUID) { // not required
return nil
}
if err := validate.FormatOf("uuid", "body", "uuid", m.UUID.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this controller status based on the context in which it is used
func (m *ControllerStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatus) contextValidateConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.Configuration != nil {
if swag.IsZero(m.Configuration) { // not required
return nil
}
if err := m.Configuration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("configuration")
}
return err
}
}
return nil
}
func (m *ControllerStatus) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ControllerStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ControllerStatus) UnmarshalBinary(b []byte) error {
var res ControllerStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
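// exampleValidateControllerStatusUUID is an illustrative sketch (not generated code):
// the uuid field is declared with Format: uuid, so Validate checks it against the
// registered "uuid" format in the strfmt registry. The controller name is hypothetical.
func exampleValidateControllerStatusUUID() []error {
    ok := &ControllerStatus{Name: "example-controller", UUID: strfmt.UUID("123e4567-e89b-12d3-a456-426614174000")}
    bad := &ControllerStatus{Name: "example-controller", UUID: strfmt.UUID("not-a-uuid")}
    // The first Validate returns nil; the second reports a format violation for "uuid".
    return []error{ok.Validate(strfmt.Default), bad.Validate(strfmt.Default)}
}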
// ControllerStatusConfiguration Configuration of controller
//
// +deepequal-gen=true
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatusConfiguration
type ControllerStatusConfiguration struct {
// Retry on error
ErrorRetry bool `json:"error-retry,omitempty"`
// Base error retry back-off time
// Format: duration
ErrorRetryBase strfmt.Duration `json:"error-retry-base,omitempty"`
// Regular synchronization interval
// Format: duration
Interval strfmt.Duration `json:"interval,omitempty"`
}
// Validate validates this controller status configuration
func (m *ControllerStatusConfiguration) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateErrorRetryBase(formats); err != nil {
res = append(res, err)
}
if err := m.validateInterval(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatusConfiguration) validateErrorRetryBase(formats strfmt.Registry) error {
if swag.IsZero(m.ErrorRetryBase) { // not required
return nil
}
if err := validate.FormatOf("configuration"+"."+"error-retry-base", "body", "duration", m.ErrorRetryBase.String(), formats); err != nil {
return err
}
return nil
}
func (m *ControllerStatusConfiguration) validateInterval(formats strfmt.Registry) error {
if swag.IsZero(m.Interval) { // not required
return nil
}
if err := validate.FormatOf("configuration"+"."+"interval", "body", "duration", m.Interval.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this controller status configuration based on the context it is used in
func (m *ControllerStatusConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ControllerStatusConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ControllerStatusConfiguration) UnmarshalBinary(b []byte) error {
var res ControllerStatusConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
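// exampleControllerStatusConfiguration is an illustrative sketch and not part
// of the generated API: it decodes a configuration from its JSON wire form
// (durations travel as strings such as "5s") and validates the duration
// formats against the default strfmt registry. The literal values are
// assumptions chosen only for demonstration.
func exampleControllerStatusConfiguration() error {
    raw := []byte(`{"error-retry":true,"error-retry-base":"5s","interval":"1m"}`)
    var cfg ControllerStatusConfiguration
    if err := cfg.UnmarshalBinary(raw); err != nil { // swag.ReadJSON under the hood
        return err
    }
    return cfg.Validate(strfmt.Default) // checks the "duration" format of both fields
}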
// ControllerStatusStatus Current status of controller
//
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatusStatus
type ControllerStatusStatus struct {
// Number of consecutive errors since last success
ConsecutiveFailureCount int64 `json:"consecutive-failure-count,omitempty"`
// Total number of failed runs
FailureCount int64 `json:"failure-count,omitempty"`
// Error message of last failed run
LastFailureMsg string `json:"last-failure-msg,omitempty"`
// Timestamp of last error
// Format: date-time
LastFailureTimestamp strfmt.DateTime `json:"last-failure-timestamp,omitempty"`
// Timestamp of last success
// Format: date-time
LastSuccessTimestamp strfmt.DateTime `json:"last-success-timestamp,omitempty"`
// Total number of successful runs
SuccessCount int64 `json:"success-count,omitempty"`
}
// Validate validates this controller status status
func (m *ControllerStatusStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLastFailureTimestamp(formats); err != nil {
res = append(res, err)
}
if err := m.validateLastSuccessTimestamp(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatusStatus) validateLastFailureTimestamp(formats strfmt.Registry) error {
if swag.IsZero(m.LastFailureTimestamp) { // not required
return nil
}
if err := validate.FormatOf("status"+"."+"last-failure-timestamp", "body", "date-time", m.LastFailureTimestamp.String(), formats); err != nil {
return err
}
return nil
}
func (m *ControllerStatusStatus) validateLastSuccessTimestamp(formats strfmt.Registry) error {
if swag.IsZero(m.LastSuccessTimestamp) { // not required
return nil
}
if err := validate.FormatOf("status"+"."+"last-success-timestamp", "body", "date-time", m.LastSuccessTimestamp.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this controller status status based on the context it is used in
func (m *ControllerStatusStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ControllerStatusStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ControllerStatusStatus) UnmarshalBinary(b []byte) error {
var res ControllerStatusStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ControllerStatuses Collection of controller statuses
//
// swagger:model ControllerStatuses
type ControllerStatuses []*ControllerStatus
// Validate validates this controller statuses
func (m ControllerStatuses) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates these controller statuses based on the context they are used in
func (m ControllerStatuses) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
return nil
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
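// exampleControllerStatusesValidate is an illustrative sketch and not part of
// the generated API: the slice type validates each element in turn and reports
// failures under the element's index, while zero-valued and nil entries are
// treated as not required and skipped.
func exampleControllerStatusesValidate() error {
    statuses := ControllerStatuses{
        &ControllerStatus{}, // zero-valued entry: skipped by the IsZero check
        nil,                 // nil entry: also skipped
    }
    return statuses.Validate(strfmt.Default)
}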
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DaemonConfiguration Response to a daemon configuration request.
//
// swagger:model DaemonConfiguration
type DaemonConfiguration struct {
// Changeable configuration
Spec *DaemonConfigurationSpec `json:"spec,omitempty"`
// Current daemon configuration related status. Contains the addressing
// information, k8s, node monitor, and immutable and mutable
// configuration settings.
//
Status *DaemonConfigurationStatus `json:"status,omitempty"`
}
// Validate validates this daemon configuration
func (m *DaemonConfiguration) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfiguration) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *DaemonConfiguration) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validates this daemon configuration based on the context it is used in
func (m *DaemonConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *DaemonConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfiguration) UnmarshalBinary(b []byte) error {
var res DaemonConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// DaemonConfigurationSpec The controllable configuration of the daemon.
//
// swagger:model DaemonConfigurationSpec
type DaemonConfigurationSpec struct {
// Changeable configuration
Options ConfigurationMap `json:"options,omitempty"`
// The policy-enforcement mode
// Enum: [default always never]
PolicyEnforcement string `json:"policy-enforcement,omitempty"`
}
// Validate validates this daemon configuration spec
func (m *DaemonConfigurationSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateOptions(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicyEnforcement(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationSpec) validateOptions(formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if m.Options != nil {
if err := m.Options.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
}
return nil
}
var daemonConfigurationSpecTypePolicyEnforcementPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["default","always","never"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
daemonConfigurationSpecTypePolicyEnforcementPropEnum = append(daemonConfigurationSpecTypePolicyEnforcementPropEnum, v)
}
}
const (
// DaemonConfigurationSpecPolicyEnforcementDefault captures enum value "default"
DaemonConfigurationSpecPolicyEnforcementDefault string = "default"
// DaemonConfigurationSpecPolicyEnforcementAlways captures enum value "always"
DaemonConfigurationSpecPolicyEnforcementAlways string = "always"
// DaemonConfigurationSpecPolicyEnforcementNever captures enum value "never"
DaemonConfigurationSpecPolicyEnforcementNever string = "never"
)
// prop value enum
func (m *DaemonConfigurationSpec) validatePolicyEnforcementEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, daemonConfigurationSpecTypePolicyEnforcementPropEnum, true); err != nil {
return err
}
return nil
}
func (m *DaemonConfigurationSpec) validatePolicyEnforcement(formats strfmt.Registry) error {
if swag.IsZero(m.PolicyEnforcement) { // not required
return nil
}
// value enum
if err := m.validatePolicyEnforcementEnum("policy-enforcement", "body", m.PolicyEnforcement); err != nil {
return err
}
return nil
}
// ContextValidate validates this daemon configuration spec based on the context it is used in
func (m *DaemonConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateOptions(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if err := m.Options.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfigurationSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfigurationSpec) UnmarshalBinary(b []byte) error {
var res DaemonConfigurationSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
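// exampleDaemonConfigurationSpecEnum is an illustrative sketch and not part of
// the generated API: policy-enforcement is restricted to the enum
// ["default", "always", "never"], so any other value fails Validate.
func exampleDaemonConfigurationSpecEnum() {
    spec := &DaemonConfigurationSpec{PolicyEnforcement: DaemonConfigurationSpecPolicyEnforcementAlways}
    _ = spec.Validate(strfmt.Default) // nil: "always" is an allowed enum value
    spec.PolicyEnforcement = "audit"  // hypothetical value outside the enum
    _ = spec.Validate(strfmt.Default) // non-nil: the enum check rejects "audit"
}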
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DaemonConfigurationStatus Response to a daemon configuration request. Contains the addressing
// information, k8s, node monitor and immutable and mutable configuration
// settings.
//
// swagger:model DaemonConfigurationStatus
type DaemonConfigurationStatus struct {
// Maximum IPv4 GRO size on workload facing devices
GROIPV4MaxSize int64 `json:"GROIPv4MaxSize,omitempty"`
// Maximum IPv6 GRO size on workload facing devices
GROMaxSize int64 `json:"GROMaxSize,omitempty"`
// Maximum IPv4 GSO size on workload facing devices
GSOIPV4MaxSize int64 `json:"GSOIPv4MaxSize,omitempty"`
// Maximum IPv6 GSO size on workload facing devices
GSOMaxSize int64 `json:"GSOMaxSize,omitempty"`
// addressing
Addressing *NodeAddressing `json:"addressing,omitempty"`
// Config map which contains all the active daemon configurations
DaemonConfigurationMap map[string]interface{} `json:"daemonConfigurationMap,omitempty"`
// datapath mode
DatapathMode DatapathMode `json:"datapathMode,omitempty"`
// MTU on workload facing devices
DeviceMTU int64 `json:"deviceMTU,omitempty"`
// Configured compatibility mode for --egress-multi-home-ip-rule-compat
EgressMultiHomeIPRuleCompat bool `json:"egress-multi-home-ip-rule-compat,omitempty"`
// Enable route MTU for pod netns when CNI chaining is used
EnableRouteMTUForCNIChaining bool `json:"enableRouteMTUForCNIChaining,omitempty"`
// Immutable configuration (read-only)
Immutable ConfigurationMap `json:"immutable,omitempty"`
// Comma-separated list of IP ports that should be reserved in the workload network namespace
IPLocalReservedPorts string `json:"ipLocalReservedPorts,omitempty"`
// Configured IPAM mode
IpamMode string `json:"ipam-mode,omitempty"`
// k8s configuration
K8sConfiguration string `json:"k8s-configuration,omitempty"`
// k8s endpoint
K8sEndpoint string `json:"k8s-endpoint,omitempty"`
// kvstore configuration
KvstoreConfiguration *KVstoreConfiguration `json:"kvstoreConfiguration,omitempty"`
// masquerade
Masquerade bool `json:"masquerade,omitempty"`
// masquerade protocols
MasqueradeProtocols *DaemonConfigurationStatusMasqueradeProtocols `json:"masqueradeProtocols,omitempty"`
// Status of the node monitor
NodeMonitor *MonitorStatus `json:"nodeMonitor,omitempty"`
// Currently applied configuration
Realized *DaemonConfigurationSpec `json:"realized,omitempty"`
// MTU for network facing routes
RouteMTU int64 `json:"routeMTU,omitempty"`
}
// Validate validates this daemon configuration status
func (m *DaemonConfigurationStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateDatapathMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateImmutable(formats); err != nil {
res = append(res, err)
}
if err := m.validateKvstoreConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateMasqueradeProtocols(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodeMonitor(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationStatus) validateAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if m.Addressing != nil {
if err := m.Addressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateDatapathMode(formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapathMode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapathMode")
}
return err
}
return nil
}
func (m *DaemonConfigurationStatus) validateImmutable(formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if m.Immutable != nil {
if err := m.Immutable.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateKvstoreConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.KvstoreConfiguration) { // not required
return nil
}
if m.KvstoreConfiguration != nil {
if err := m.KvstoreConfiguration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstoreConfiguration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstoreConfiguration")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateMasqueradeProtocols(formats strfmt.Registry) error {
if swag.IsZero(m.MasqueradeProtocols) { // not required
return nil
}
if m.MasqueradeProtocols != nil {
if err := m.MasqueradeProtocols.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masqueradeProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masqueradeProtocols")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateNodeMonitor(formats strfmt.Registry) error {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if m.NodeMonitor != nil {
if err := m.NodeMonitor.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validates this daemon configuration status based on the context it is used in
func (m *DaemonConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDatapathMode(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateImmutable(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKvstoreConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateMasqueradeProtocols(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodeMonitor(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.Addressing != nil {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if err := m.Addressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapathMode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapathMode")
}
return err
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if err := m.Immutable.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateKvstoreConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.KvstoreConfiguration != nil {
if swag.IsZero(m.KvstoreConfiguration) { // not required
return nil
}
if err := m.KvstoreConfiguration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstoreConfiguration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstoreConfiguration")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateMasqueradeProtocols(ctx context.Context, formats strfmt.Registry) error {
if m.MasqueradeProtocols != nil {
if swag.IsZero(m.MasqueradeProtocols) { // not required
return nil
}
if err := m.MasqueradeProtocols.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masqueradeProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masqueradeProtocols")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error {
if m.NodeMonitor != nil {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfigurationStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfigurationStatus) UnmarshalBinary(b []byte) error {
var res DaemonConfigurationStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// DaemonConfigurationStatusMasqueradeProtocols Status of masquerading feature
//
// swagger:model DaemonConfigurationStatusMasqueradeProtocols
type DaemonConfigurationStatusMasqueradeProtocols struct {
// Status of masquerading for IPv4 traffic
IPV4 bool `json:"ipv4,omitempty"`
// Status of masquerading for IPv6 traffic
IPV6 bool `json:"ipv6,omitempty"`
}
// Validate validates this daemon configuration status masquerade protocols
func (m *DaemonConfigurationStatusMasqueradeProtocols) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this daemon configuration status masquerade protocols based on the context it is used in
func (m *DaemonConfigurationStatusMasqueradeProtocols) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfigurationStatusMasqueradeProtocols) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfigurationStatusMasqueradeProtocols) UnmarshalBinary(b []byte) error {
var res DaemonConfigurationStatusMasqueradeProtocols
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// DatapathMode Datapath mode
//
// swagger:model DatapathMode
type DatapathMode string
func NewDatapathMode(value DatapathMode) *DatapathMode {
return &value
}
// Pointer returns a pointer to a freshly-allocated DatapathMode.
func (m DatapathMode) Pointer() *DatapathMode {
return &m
}
const (
// DatapathModeVeth captures enum value "veth"
DatapathModeVeth DatapathMode = "veth"
// DatapathModeNetkit captures enum value "netkit"
DatapathModeNetkit DatapathMode = "netkit"
// DatapathModeNetkitDashL2 captures enum value "netkit-l2"
DatapathModeNetkitDashL2 DatapathMode = "netkit-l2"
)
// for schema
var datapathModeEnum []interface{}
func init() {
var res []DatapathMode
if err := json.Unmarshal([]byte(`["veth","netkit","netkit-l2"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
datapathModeEnum = append(datapathModeEnum, v)
}
}
func (m DatapathMode) validateDatapathModeEnum(path, location string, value DatapathMode) error {
if err := validate.EnumCase(path, location, value, datapathModeEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this datapath mode
func (m DatapathMode) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateDatapathModeEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this datapath mode based on the context it is used in
func (m DatapathMode) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
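// exampleDatapathModeValidation is an illustrative sketch and not part of the
// generated API: DatapathMode is a string enum, and Validate accepts only
// "veth", "netkit" and "netkit-l2".
func exampleDatapathModeValidation() {
    m := DatapathModeNetkit
    _ = m.Validate(strfmt.Default) // nil: "netkit" is a defined mode
    m = DatapathMode("ipvlan")     // hypothetical mode, not part of the enum
    _ = m.Validate(strfmt.Default) // non-nil: the value is outside the enum
}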
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DebugInfo groups debugging-related information about the agent
//
// swagger:model DebugInfo
type DebugInfo struct {
// cilium memory map
CiliumMemoryMap string `json:"cilium-memory-map,omitempty"`
// cilium nodemonitor memory map
CiliumNodemonitorMemoryMap string `json:"cilium-nodemonitor-memory-map,omitempty"`
// cilium status
CiliumStatus *StatusResponse `json:"cilium-status,omitempty"`
// cilium version
CiliumVersion string `json:"cilium-version,omitempty"`
// encryption
Encryption *DebugInfoEncryption `json:"encryption,omitempty"`
// endpoint list
EndpointList []*Endpoint `json:"endpoint-list"`
// environment variables
EnvironmentVariables []string `json:"environment-variables"`
// kernel version
KernelVersion string `json:"kernel-version,omitempty"`
// policy
Policy *Policy `json:"policy,omitempty"`
// service list
ServiceList []*Service `json:"service-list"`
// subsystem
Subsystem map[string]string `json:"subsystem,omitempty"`
}
// Validate validates this debug info
func (m *DebugInfo) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCiliumStatus(formats); err != nil {
res = append(res, err)
}
if err := m.validateEncryption(formats); err != nil {
res = append(res, err)
}
if err := m.validateEndpointList(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateServiceList(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfo) validateCiliumStatus(formats strfmt.Registry) error {
if swag.IsZero(m.CiliumStatus) { // not required
return nil
}
if m.CiliumStatus != nil {
if err := m.CiliumStatus.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium-status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium-status")
}
return err
}
}
return nil
}
func (m *DebugInfo) validateEncryption(formats strfmt.Registry) error {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if m.Encryption != nil {
if err := m.Encryption.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *DebugInfo) validateEndpointList(formats strfmt.Registry) error {
if swag.IsZero(m.EndpointList) { // not required
return nil
}
for i := 0; i < len(m.EndpointList); i++ {
if swag.IsZero(m.EndpointList[i]) { // not required
continue
}
if m.EndpointList[i] != nil {
if err := m.EndpointList[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *DebugInfo) validatePolicy(formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if m.Policy != nil {
if err := m.Policy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *DebugInfo) validateServiceList(formats strfmt.Registry) error {
if swag.IsZero(m.ServiceList) { // not required
return nil
}
for i := 0; i < len(m.ServiceList); i++ {
if swag.IsZero(m.ServiceList[i]) { // not required
continue
}
if m.ServiceList[i] != nil {
if err := m.ServiceList[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("service-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("service-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this debug info based on the context it is used in
func (m *DebugInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCiliumStatus(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateEncryption(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateEndpointList(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateServiceList(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfo) contextValidateCiliumStatus(ctx context.Context, formats strfmt.Registry) error {
if m.CiliumStatus != nil {
if swag.IsZero(m.CiliumStatus) { // not required
return nil
}
if err := m.CiliumStatus.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium-status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium-status")
}
return err
}
}
return nil
}
func (m *DebugInfo) contextValidateEncryption(ctx context.Context, formats strfmt.Registry) error {
if m.Encryption != nil {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if err := m.Encryption.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *DebugInfo) contextValidateEndpointList(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.EndpointList); i++ {
if m.EndpointList[i] != nil {
if swag.IsZero(m.EndpointList[i]) { // not required
return nil
}
if err := m.EndpointList[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *DebugInfo) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
if m.Policy != nil {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *DebugInfo) contextValidateServiceList(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.ServiceList); i++ {
if m.ServiceList[i] != nil {
if swag.IsZero(m.ServiceList[i]) { // not required
return nil
}
if err := m.ServiceList[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("service-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("service-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DebugInfo) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DebugInfo) UnmarshalBinary(b []byte) error {
var res DebugInfo
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// DebugInfoEncryption debug info encryption
//
// swagger:model DebugInfoEncryption
type DebugInfoEncryption struct {
// Status of the WireGuard agent
Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}
// Validate validates this debug info encryption
func (m *DebugInfoEncryption) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateWireguard(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfoEncryption) validateWireguard(formats strfmt.Registry) error {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if m.Wireguard != nil {
if err := m.Wireguard.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption" + "." + "wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption" + "." + "wireguard")
}
return err
}
}
return nil
}
// ContextValidate validates this debug info encryption based on the context it is used in
func (m *DebugInfoEncryption) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateWireguard(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfoEncryption) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error {
if m.Wireguard != nil {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if err := m.Wireguard.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption" + "." + "wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption" + "." + "wireguard")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DebugInfoEncryption) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DebugInfoEncryption) UnmarshalBinary(b []byte) error {
var res DebugInfoEncryption
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// DNSLookup An IP -> DNS mapping, with metadata
//
// swagger:model DNSLookup
type DNSLookup struct {
// The endpoint that made this lookup, or 0 for the agent itself.
EndpointID int64 `json:"endpoint-id,omitempty"`
// The absolute time when this data will expire in this cache
// Format: date-time
ExpirationTime strfmt.DateTime `json:"expiration-time,omitempty"`
// DNS name
Fqdn string `json:"fqdn,omitempty"`
// IP addresses returned in this lookup
Ips []string `json:"ips"`
// The absolute time when this data was received
// Format: date-time
LookupTime strfmt.DateTime `json:"lookup-time,omitempty"`
// The reason this FQDN IP association exists. Either a DNS lookup or an ongoing connection to an IP that was created by a DNS lookup.
Source string `json:"source,omitempty"`
// The TTL in the DNS response
TTL int64 `json:"ttl,omitempty"`
}
// Validate validates this DNS lookup
func (m *DNSLookup) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExpirationTime(formats); err != nil {
res = append(res, err)
}
if err := m.validateLookupTime(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DNSLookup) validateExpirationTime(formats strfmt.Registry) error {
if swag.IsZero(m.ExpirationTime) { // not required
return nil
}
if err := validate.FormatOf("expiration-time", "body", "date-time", m.ExpirationTime.String(), formats); err != nil {
return err
}
return nil
}
func (m *DNSLookup) validateLookupTime(formats strfmt.Registry) error {
if swag.IsZero(m.LookupTime) { // not required
return nil
}
if err := validate.FormatOf("lookup-time", "body", "date-time", m.LookupTime.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this DNS lookup based on the context it is used in
func (m *DNSLookup) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *DNSLookup) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DNSLookup) UnmarshalBinary(b []byte) error {
var res DNSLookup
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
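// exampleDNSLookupValidation is an illustrative sketch and not part of the
// generated API: timestamps in a DNSLookup use the "date-time" format, so they
// must be RFC 3339 strings on the wire for Validate to succeed. The lookup
// data below is made up for demonstration.
func exampleDNSLookupValidation() error {
    raw := []byte(`{"fqdn":"example.com.","ips":["192.0.2.1"],"ttl":60,"lookup-time":"2024-01-01T00:00:00Z"}`)
    var l DNSLookup
    if err := l.UnmarshalBinary(raw); err != nil {
        return err
    }
    return l.Validate(strfmt.Default) // checks the date-time formats
}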
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EncryptionStatus Status of transparent encryption
//
// +k8s:deepcopy-gen=true
//
// swagger:model EncryptionStatus
type EncryptionStatus struct {
// Status of the IPsec agent
Ipsec *IPsecStatus `json:"ipsec,omitempty"`
// mode
// Enum: [Disabled IPsec Wireguard]
Mode string `json:"mode,omitempty"`
// Human readable error/warning message
Msg string `json:"msg,omitempty"`
// Status of the WireGuard agent
Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}
// Validate validates this encryption status
func (m *EncryptionStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIpsec(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateWireguard(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EncryptionStatus) validateIpsec(formats strfmt.Registry) error {
if swag.IsZero(m.Ipsec) { // not required
return nil
}
if m.Ipsec != nil {
if err := m.Ipsec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipsec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipsec")
}
return err
}
}
return nil
}
var encryptionStatusTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Disabled","IPsec","Wireguard"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
encryptionStatusTypeModePropEnum = append(encryptionStatusTypeModePropEnum, v)
}
}
const (
// EncryptionStatusModeDisabled captures enum value "Disabled"
EncryptionStatusModeDisabled string = "Disabled"
// EncryptionStatusModeIPsec captures enum value "IPsec"
EncryptionStatusModeIPsec string = "IPsec"
// EncryptionStatusModeWireguard captures enum value "Wireguard"
EncryptionStatusModeWireguard string = "Wireguard"
)
// prop value enum
func (m *EncryptionStatus) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, encryptionStatusTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *EncryptionStatus) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
func (m *EncryptionStatus) validateWireguard(formats strfmt.Registry) error {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if m.Wireguard != nil {
if err := m.Wireguard.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("wireguard")
}
return err
}
}
return nil
}
// ContextValidate validates this encryption status based on the context it is used in
func (m *EncryptionStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateIpsec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateWireguard(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EncryptionStatus) contextValidateIpsec(ctx context.Context, formats strfmt.Registry) error {
if m.Ipsec != nil {
if swag.IsZero(m.Ipsec) { // not required
return nil
}
if err := m.Ipsec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipsec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipsec")
}
return err
}
}
return nil
}
func (m *EncryptionStatus) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error {
if m.Wireguard != nil {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if err := m.Wireguard.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("wireguard")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EncryptionStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EncryptionStatus) UnmarshalBinary(b []byte) error {
var res EncryptionStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
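// exampleEncryptionStatusMode is an illustrative sketch and not part of the
// generated API: the mode field is limited to the enum
// ["Disabled", "IPsec", "Wireguard"], and the generated check is
// case-sensitive (validate.EnumCase is called with caseSensitive=true).
func exampleEncryptionStatusMode() {
    s := &EncryptionStatus{Mode: EncryptionStatusModeWireguard}
    _ = s.Validate(strfmt.Default) // nil: "Wireguard" is an allowed mode
    s.Mode = "wireguard"           // wrong case, so the enum check fails
    _ = s.Validate(strfmt.Default) // non-nil: "wireguard" is not in the enum
}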
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Endpoint An endpoint is a namespaced network interface to which cilium applies policies
//
// swagger:model Endpoint
type Endpoint struct {
// The cilium-agent-local ID of the endpoint
ID int64 `json:"id,omitempty"`
// The desired configuration state of the endpoint
Spec *EndpointConfigurationSpec `json:"spec,omitempty"`
// The desired and realized configuration state of the endpoint
Status *EndpointStatus `json:"status,omitempty"`
}
// Validate validates this endpoint
func (m *Endpoint) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Endpoint) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Endpoint) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validates this endpoint based on the context it is used in
func (m *Endpoint) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Endpoint) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Endpoint) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Endpoint) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Endpoint) UnmarshalBinary(b []byte) error {
var res Endpoint
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointBatchDeleteRequest Properties selecting a batch of endpoints to delete.
//
// swagger:model EndpointBatchDeleteRequest
type EndpointBatchDeleteRequest struct {
// ID assigned by container runtime
ContainerID string `json:"container-id,omitempty"`
}
// Validate validates this endpoint batch delete request
func (m *EndpointBatchDeleteRequest) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this endpoint batch delete request based on the context it is used in
func (m *EndpointBatchDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *EndpointBatchDeleteRequest) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointBatchDeleteRequest) UnmarshalBinary(b []byte) error {
var res EndpointBatchDeleteRequest
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointChangeRequest Structure which contains the mutable elements of an Endpoint.
//
// swagger:model EndpointChangeRequest
type EndpointChangeRequest struct {
// addressing
Addressing *AddressPair `json:"addressing,omitempty"`
// ID assigned by container runtime
ContainerID string `json:"container-id,omitempty"`
// Name of network device in container netns
ContainerInterfaceName string `json:"container-interface-name,omitempty"`
// Name assigned to container
ContainerName string `json:"container-name,omitempty"`
// datapath configuration
DatapathConfiguration *EndpointDatapathConfiguration `json:"datapath-configuration,omitempty"`
// ID of datapath tail call map
DatapathMapID int64 `json:"datapath-map-id,omitempty"`
// Disables lookup using legacy endpoint identifiers (container name, container id, pod name) for this endpoint
DisableLegacyIdentifiers bool `json:"disable-legacy-identifiers,omitempty"`
// Docker endpoint ID
DockerEndpointID string `json:"docker-endpoint-id,omitempty"`
// Docker network ID
DockerNetworkID string `json:"docker-network-id,omitempty"`
// MAC address
HostMac string `json:"host-mac,omitempty"`
// Local endpoint ID
ID int64 `json:"id,omitempty"`
// Index of network device in host netns
InterfaceIndex int64 `json:"interface-index,omitempty"`
// Name of network device in host netns
InterfaceName string `json:"interface-name,omitempty"`
// Kubernetes namespace name
K8sNamespace string `json:"k8s-namespace,omitempty"`
// Kubernetes pod name
K8sPodName string `json:"k8s-pod-name,omitempty"`
// Kubernetes pod UID
K8sUID string `json:"k8s-uid,omitempty"`
// Labels describing the identity
Labels Labels `json:"labels,omitempty"`
// MAC address
Mac string `json:"mac,omitempty"`
// Network namespace cookie
NetnsCookie string `json:"netns-cookie,omitempty"`
// Process ID of the workload belonging to this endpoint
Pid int64 `json:"pid,omitempty"`
// Whether policy enforcement is enabled or not
PolicyEnabled bool `json:"policy-enabled,omitempty"`
// Properties is used to store information about the endpoint at creation. Useful for tests.
Properties map[string]interface{} `json:"properties,omitempty"`
// Current state of endpoint
// Required: true
State *EndpointState `json:"state"`
// Whether to build an endpoint synchronously
//
SyncBuildEndpoint bool `json:"sync-build-endpoint,omitempty"`
}
// Validate validates this endpoint change request
func (m *EndpointChangeRequest) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateDatapathConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointChangeRequest) validateAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if m.Addressing != nil {
if err := m.Addressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) validateDatapathConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.DatapathConfiguration) { // not required
return nil
}
if m.DatapathConfiguration != nil {
if err := m.DatapathConfiguration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-configuration")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
func (m *EndpointChangeRequest) validateState(formats strfmt.Registry) error {
if err := validate.Required("state", "body", m.State); err != nil {
return err
}
if m.State != nil {
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// ContextValidate validates this endpoint change request based on the context it is used in
func (m *EndpointChangeRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDatapathConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointChangeRequest) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.Addressing != nil {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if err := m.Addressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) contextValidateDatapathConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.DatapathConfiguration != nil {
if swag.IsZero(m.DatapathConfiguration) { // not required
return nil
}
if err := m.DatapathConfiguration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-configuration")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
func (m *EndpointChangeRequest) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if m.State != nil {
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointChangeRequest) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointChangeRequest) UnmarshalBinary(b []byte) error {
var res EndpointChangeRequest
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
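// Illustrative sketch, not part of the generated API: State is the only field
// marked required above, so validating a request without it returns a
// CompositeValidationError carrying a required-field failure for "state",
// while a request with a valid state passes. The function name and field
// values are placeholders for demonstration only.
func exampleEndpointChangeRequestValidation() (missingErr, okErr error) {
    missing := &EndpointChangeRequest{SyncBuildEndpoint: true}
    withState := &EndpointChangeRequest{
        State:             NewEndpointState(EndpointStateWaitingDashForDashIdentity),
        SyncBuildEndpoint: true,
    }
    return missing.Validate(strfmt.Default), withState.Validate(strfmt.Default)
}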
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointConfigurationSpec An endpoint's configuration
//
// swagger:model EndpointConfigurationSpec
type EndpointConfigurationSpec struct {
// the endpoint's labels
LabelConfiguration *LabelConfigurationSpec `json:"label-configuration,omitempty"`
// Changeable configuration
Options ConfigurationMap `json:"options,omitempty"`
}
// Validate validates this endpoint configuration spec
func (m *EndpointConfigurationSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabelConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateOptions(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationSpec) validateLabelConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.LabelConfiguration) { // not required
return nil
}
if m.LabelConfiguration != nil {
if err := m.LabelConfiguration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("label-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("label-configuration")
}
return err
}
}
return nil
}
func (m *EndpointConfigurationSpec) validateOptions(formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if m.Options != nil {
if err := m.Options.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
}
return nil
}
// ContextValidate validates this endpoint configuration spec based on the context it is used in
func (m *EndpointConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabelConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateOptions(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationSpec) contextValidateLabelConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.LabelConfiguration != nil {
if swag.IsZero(m.LabelConfiguration) { // not required
return nil
}
if err := m.LabelConfiguration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("label-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("label-configuration")
}
return err
}
}
return nil
}
func (m *EndpointConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if err := m.Options.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointConfigurationSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointConfigurationSpec) UnmarshalBinary(b []byte) error {
var res EndpointConfigurationSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
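// Illustrative sketch, not part of the generated API: a minimal
// EndpointConfigurationSpec; both fields are optional, so an empty label
// configuration and an empty ConfigurationMap (assumed constructible with an
// empty composite literal) validate cleanly. Names and values are placeholders.
func exampleEndpointConfigurationSpecValidation() error {
    spec := &EndpointConfigurationSpec{
        LabelConfiguration: &LabelConfigurationSpec{},
        Options:            ConfigurationMap{},
    }
    return spec.Validate(strfmt.Default)
}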
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointConfigurationStatus The status of an endpoint's configuration
//
// swagger:model EndpointConfigurationStatus
type EndpointConfigurationStatus struct {
// Most recent error, if applicable
Error Error `json:"error,omitempty"`
// Immutable configuration (read-only)
Immutable ConfigurationMap `json:"immutable,omitempty"`
// Currently applied changeable configuration
Realized *EndpointConfigurationSpec `json:"realized,omitempty"`
}
// Validate validates this endpoint configuration status
func (m *EndpointConfigurationStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateError(formats); err != nil {
res = append(res, err)
}
if err := m.validateImmutable(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationStatus) validateError(formats strfmt.Registry) error {
if swag.IsZero(m.Error) { // not required
return nil
}
if err := m.Error.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("error")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("error")
}
return err
}
return nil
}
func (m *EndpointConfigurationStatus) validateImmutable(formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if m.Immutable != nil {
if err := m.Immutable.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
}
return nil
}
func (m *EndpointConfigurationStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validates this endpoint configuration status based on the context it is used in
func (m *EndpointConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateError(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateImmutable(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationStatus) contextValidateError(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Error) { // not required
return nil
}
if err := m.Error.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("error")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("error")
}
return err
}
return nil
}
func (m *EndpointConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if err := m.Immutable.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
return nil
}
func (m *EndpointConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointConfigurationStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointConfigurationStatus) UnmarshalBinary(b []byte) error {
var res EndpointConfigurationStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
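// Illustrative sketch, not part of the generated API: the status embeds the
// spec model defined above as its Realized field, so validation recurses into
// it; Error and Immutable are left at their zero values here. The function
// name and values are placeholders.
func exampleEndpointConfigurationStatusValidation() error {
    status := &EndpointConfigurationStatus{
        Realized: &EndpointConfigurationSpec{
            Options: ConfigurationMap{},
        },
    }
    return status.Validate(strfmt.Default)
}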
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointDatapathConfiguration Datapath configuration to be used for the endpoint
//
// swagger:model EndpointDatapathConfiguration
type EndpointDatapathConfiguration struct {
// Disable source IP verification for the endpoint.
//
DisableSipVerification bool `json:"disable-sip-verification,omitempty"`
// Indicates that IPAM is done external to Cilium. This prevents the IP from being released, and re-allocation of the IP address is skipped on restore.
//
ExternalIpam bool `json:"external-ipam,omitempty"`
// Installs a route in the Linux routing table pointing to the endpoint's interface device.
//
InstallEndpointRoute bool `json:"install-endpoint-route,omitempty"`
// Enable ARP passthrough mode
RequireArpPassthrough bool `json:"require-arp-passthrough,omitempty"`
// Endpoint requires a host-facing egress program to be attached to implement ingress policy and reverse NAT.
//
RequireEgressProg bool `json:"require-egress-prog,omitempty"`
// Endpoint requires BPF routing to be enabled; when disabled, routing is delegated to the Linux routing stack.
//
RequireRouting *bool `json:"require-routing,omitempty"`
}
// Validate validates this endpoint datapath configuration
func (m *EndpointDatapathConfiguration) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this endpoint datapath configuration based on the context it is used in
func (m *EndpointDatapathConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *EndpointDatapathConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointDatapathConfiguration) UnmarshalBinary(b []byte) error {
var res EndpointDatapathConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
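// Illustrative sketch, not part of the generated API: RequireRouting is a
// *bool so that an explicit false can be distinguished from the field being
// omitted entirely; this hypothetical helper sets it explicitly and serializes
// the model through MarshalBinary (JSON under the hood).
func exampleEndpointDatapathConfigurationMarshal() ([]byte, error) {
    requireRouting := false
    cfg := &EndpointDatapathConfiguration{
        ExternalIpam:   true,
        RequireRouting: &requireRouting, // explicit false, distinct from unset
    }
    return cfg.MarshalBinary()
}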
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointHealth Health of the endpoint
//
// +deepequal-gen=true
//
// swagger:model EndpointHealth
type EndpointHealth struct {
// bpf
Bpf EndpointHealthStatus `json:"bpf,omitempty"`
// Is this endpoint reachable
Connected bool `json:"connected,omitempty"`
// overall health
OverallHealth EndpointHealthStatus `json:"overallHealth,omitempty"`
// policy
Policy EndpointHealthStatus `json:"policy,omitempty"`
}
// Validate validates this endpoint health
func (m *EndpointHealth) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBpf(formats); err != nil {
res = append(res, err)
}
if err := m.validateOverallHealth(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicy(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointHealth) validateBpf(formats strfmt.Registry) error {
if swag.IsZero(m.Bpf) { // not required
return nil
}
if err := m.Bpf.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf")
}
return err
}
return nil
}
func (m *EndpointHealth) validateOverallHealth(formats strfmt.Registry) error {
if swag.IsZero(m.OverallHealth) { // not required
return nil
}
if err := m.OverallHealth.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("overallHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("overallHealth")
}
return err
}
return nil
}
func (m *EndpointHealth) validatePolicy(formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
return nil
}
// ContextValidate validates this endpoint health based on the context it is used in
func (m *EndpointHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBpf(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateOverallHealth(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicy(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointHealth) contextValidateBpf(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Bpf) { // not required
return nil
}
if err := m.Bpf.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf")
}
return err
}
return nil
}
func (m *EndpointHealth) contextValidateOverallHealth(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.OverallHealth) { // not required
return nil
}
if err := m.OverallHealth.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("overallHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("overallHealth")
}
return err
}
return nil
}
func (m *EndpointHealth) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointHealth) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointHealth) UnmarshalBinary(b []byte) error {
var res EndpointHealth
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
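// Illustrative sketch, not part of the generated API: an EndpointHealth value
// built from the EndpointHealthStatus enum constants declared in this package;
// the particular statuses are arbitrary placeholders.
func exampleEndpointHealthValidation() error {
    health := &EndpointHealth{
        Bpf:           EndpointHealthStatusOK,
        Connected:     true,
        OverallHealth: EndpointHealthStatusOK,
        Policy:        EndpointHealthStatusPending,
    }
    return health.Validate(strfmt.Default)
}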
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointHealthStatus A common set of statuses for endpoint health:
//   - "OK" = All components operational
//   - "Bootstrap" = This component is being created
//   - "Pending" = A change is being processed to be applied
//   - "Warning" = This component is not applying up-to-date policies (but is still applying the previous version)
//   - "Failure" = An error has occurred and no policy is being applied
//   - "Disabled" = This endpoint is disabled and will not handle traffic
//
// swagger:model EndpointHealthStatus
type EndpointHealthStatus string
func NewEndpointHealthStatus(value EndpointHealthStatus) *EndpointHealthStatus {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointHealthStatus.
func (m EndpointHealthStatus) Pointer() *EndpointHealthStatus {
return &m
}
const (
// EndpointHealthStatusOK captures enum value "OK"
EndpointHealthStatusOK EndpointHealthStatus = "OK"
// EndpointHealthStatusBootstrap captures enum value "Bootstrap"
EndpointHealthStatusBootstrap EndpointHealthStatus = "Bootstrap"
// EndpointHealthStatusPending captures enum value "Pending"
EndpointHealthStatusPending EndpointHealthStatus = "Pending"
// EndpointHealthStatusWarning captures enum value "Warning"
EndpointHealthStatusWarning EndpointHealthStatus = "Warning"
// EndpointHealthStatusFailure captures enum value "Failure"
EndpointHealthStatusFailure EndpointHealthStatus = "Failure"
// EndpointHealthStatusDisabled captures enum value "Disabled"
EndpointHealthStatusDisabled EndpointHealthStatus = "Disabled"
)
// for schema
var endpointHealthStatusEnum []interface{}
func init() {
var res []EndpointHealthStatus
if err := json.Unmarshal([]byte(`["OK","Bootstrap","Pending","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointHealthStatusEnum = append(endpointHealthStatusEnum, v)
}
}
func (m EndpointHealthStatus) validateEndpointHealthStatusEnum(path, location string, value EndpointHealthStatus) error {
if err := validate.EnumCase(path, location, value, endpointHealthStatusEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this endpoint health status
func (m EndpointHealthStatus) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointHealthStatusEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint health status based on the context it is used in
func (m EndpointHealthStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
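// Illustrative sketch, not part of the generated API: Validate only accepts
// the values registered in endpointHealthStatusEnum, so a made-up status such
// as "Degraded" fails while the generated constants pass.
func exampleEndpointHealthStatusValidation() (okErr, badErr error) {
    okErr = EndpointHealthStatusOK.Validate(strfmt.Default)            // nil
    badErr = EndpointHealthStatus("Degraded").Validate(strfmt.Default) // enum violation
    return okErr, badErr
}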
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointIdentifiers Unique identifiers for this endpoint from outside cilium
//
// +deepequal-gen=true
//
// swagger:model EndpointIdentifiers
type EndpointIdentifiers struct {
// ID assigned to this attachment by container runtime
CniAttachmentID string `json:"cni-attachment-id,omitempty"`
// ID assigned by container runtime (deprecated, may not be unique)
ContainerID string `json:"container-id,omitempty"`
// Name assigned to container (deprecated, may not be unique)
ContainerName string `json:"container-name,omitempty"`
// Docker endpoint ID
DockerEndpointID string `json:"docker-endpoint-id,omitempty"`
// Docker network ID
DockerNetworkID string `json:"docker-network-id,omitempty"`
// K8s namespace for this endpoint (deprecated, may not be unique)
K8sNamespace string `json:"k8s-namespace,omitempty"`
// K8s pod name for this endpoint (deprecated, may not be unique)
K8sPodName string `json:"k8s-pod-name,omitempty"`
// K8s pod for this endpoint (deprecated, may not be unique)
PodName string `json:"pod-name,omitempty"`
}
// Validate validates this endpoint identifiers
func (m *EndpointIdentifiers) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates these endpoint identifiers based on the context it is used in
func (m *EndpointIdentifiers) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *EndpointIdentifiers) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointIdentifiers) UnmarshalBinary(b []byte) error {
var res EndpointIdentifiers
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
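// Illustrative sketch, not part of the generated API: UnmarshalBinary reads
// the JSON wire form, so a payload keyed by the json tags declared above
// (cni-attachment-id, k8s-namespace, k8s-pod-name, ...) populates the
// corresponding fields. The payload is a made-up example.
func exampleEndpointIdentifiersFromJSON() (*EndpointIdentifiers, error) {
    payload := []byte(`{"cni-attachment-id":"eth0-1234","k8s-namespace":"default","k8s-pod-name":"example-pod"}`)
    ids := &EndpointIdentifiers{}
    if err := ids.UnmarshalBinary(payload); err != nil {
        return nil, err
    }
    return ids, nil
}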
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointNetworking Networking properties of the endpoint
//
// swagger:model EndpointNetworking
type EndpointNetworking struct {
// IPv4/IPv6 addresses assigned to this endpoint
Addressing []*AddressPair `json:"addressing"`
// Name of network device in container netns
ContainerInterfaceName string `json:"container-interface-name,omitempty"`
// host addressing
HostAddressing *NodeAddressing `json:"host-addressing,omitempty"`
// MAC address of the host-side interface
HostMac string `json:"host-mac,omitempty"`
// Index of network device in host netns
InterfaceIndex int64 `json:"interface-index,omitempty"`
// Name of network device in host netns
InterfaceName string `json:"interface-name,omitempty"`
// MAC address of the endpoint's interface
Mac string `json:"mac,omitempty"`
}
// Validate validates this endpoint networking
func (m *EndpointNetworking) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostAddressing(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointNetworking) validateAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.Addressing) { // not required
return nil
}
for i := 0; i < len(m.Addressing); i++ {
if swag.IsZero(m.Addressing[i]) { // not required
continue
}
if m.Addressing[i] != nil {
if err := m.Addressing[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointNetworking) validateHostAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.HostAddressing) { // not required
return nil
}
if m.HostAddressing != nil {
if err := m.HostAddressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
// ContextValidate validates this endpoint networking based on the context it is used in
func (m *EndpointNetworking) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointNetworking) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Addressing); i++ {
if m.Addressing[i] != nil {
if swag.IsZero(m.Addressing[i]) { // not required
continue // skip zero-valued entries but keep validating the rest of the slice
}
if err := m.Addressing[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointNetworking) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.HostAddressing != nil {
if swag.IsZero(m.HostAddressing) { // not required
return nil
}
if err := m.HostAddressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointNetworking) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointNetworking) UnmarshalBinary(b []byte) error {
var res EndpointNetworking
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
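// Illustrative sketch, not part of the generated API: Addressing is a slice of
// *AddressPair and Validate walks each non-nil entry, prefixing any error with
// the element index (for example "addressing.0"). An empty AddressPair is used
// here only to show the shape; real entries carry the endpoint's IP addresses,
// and the interface name is a hypothetical value.
func exampleEndpointNetworkingValidation() error {
    networking := &EndpointNetworking{
        Addressing:    []*AddressPair{{}},
        InterfaceName: "lxc12345",
    }
    return networking.Validate(strfmt.Default)
}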
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointPolicy Policy information of an endpoint
//
// +k8s:deepcopy-gen=true
//
// swagger:model EndpointPolicy
type EndpointPolicy struct {
// List of identities to which this endpoint is allowed to communicate
//
AllowedEgressIdentities []int64 `json:"allowed-egress-identities"`
// List of identities allowed to communicate to this endpoint
//
AllowedIngressIdentities []int64 `json:"allowed-ingress-identities"`
// Build number of calculated policy in use
Build int64 `json:"build,omitempty"`
// cidr policy
CidrPolicy *CIDRPolicy `json:"cidr-policy,omitempty"`
// List of identities to which this endpoint is not allowed to communicate
//
DeniedEgressIdentities []int64 `json:"denied-egress-identities"`
// List of identities not allowed to communicate to this endpoint
//
DeniedIngressIdentities []int64 `json:"denied-ingress-identities"`
// Own identity of endpoint
ID int64 `json:"id,omitempty"`
// l4
L4 *L4Policy `json:"l4,omitempty"`
// Whether policy enforcement is enabled (ingress, egress, both or none)
PolicyEnabled EndpointPolicyEnabled `json:"policy-enabled,omitempty"`
// The agent-local policy revision
PolicyRevision int64 `json:"policy-revision,omitempty"`
}
// Validate validates this endpoint policy
func (m *EndpointPolicy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCidrPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateL4(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicyEnabled(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicy) validateCidrPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.CidrPolicy) { // not required
return nil
}
if m.CidrPolicy != nil {
if err := m.CidrPolicy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cidr-policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cidr-policy")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) validateL4(formats strfmt.Registry) error {
if swag.IsZero(m.L4) { // not required
return nil
}
if m.L4 != nil {
if err := m.L4.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("l4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("l4")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) validatePolicyEnabled(formats strfmt.Registry) error {
if swag.IsZero(m.PolicyEnabled) { // not required
return nil
}
if err := m.PolicyEnabled.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy-enabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy-enabled")
}
return err
}
return nil
}
// ContextValidate validates this endpoint policy based on the context it is used in
func (m *EndpointPolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCidrPolicy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateL4(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicyEnabled(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicy) contextValidateCidrPolicy(ctx context.Context, formats strfmt.Registry) error {
if m.CidrPolicy != nil {
if swag.IsZero(m.CidrPolicy) { // not required
return nil
}
if err := m.CidrPolicy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cidr-policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cidr-policy")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) contextValidateL4(ctx context.Context, formats strfmt.Registry) error {
if m.L4 != nil {
if swag.IsZero(m.L4) { // not required
return nil
}
if err := m.L4.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("l4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("l4")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) contextValidatePolicyEnabled(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.PolicyEnabled) { // not required
return nil
}
if err := m.PolicyEnabled.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy-enabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy-enabled")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointPolicy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointPolicy) UnmarshalBinary(b []byte) error {
var res EndpointPolicy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
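// Illustrative sketch, not part of the generated API: a minimal EndpointPolicy
// combining identity allow-lists with the EndpointPolicyEnabled enum; the
// identity numbers and revision are arbitrary placeholders.
func exampleEndpointPolicyValidation() error {
    policy := &EndpointPolicy{
        AllowedIngressIdentities: []int64{1, 42},
        DeniedEgressIdentities:   []int64{},
        PolicyEnabled:            EndpointPolicyEnabledIngress,
        PolicyRevision:           7,
    }
    return policy.Validate(strfmt.Default)
}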
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointPolicyEnabled Whether policy enforcement is enabled (ingress, egress, both or none)
//
// swagger:model EndpointPolicyEnabled
type EndpointPolicyEnabled string
func NewEndpointPolicyEnabled(value EndpointPolicyEnabled) *EndpointPolicyEnabled {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointPolicyEnabled.
func (m EndpointPolicyEnabled) Pointer() *EndpointPolicyEnabled {
return &m
}
const (
// EndpointPolicyEnabledNone captures enum value "none"
EndpointPolicyEnabledNone EndpointPolicyEnabled = "none"
// EndpointPolicyEnabledIngress captures enum value "ingress"
EndpointPolicyEnabledIngress EndpointPolicyEnabled = "ingress"
// EndpointPolicyEnabledEgress captures enum value "egress"
EndpointPolicyEnabledEgress EndpointPolicyEnabled = "egress"
// EndpointPolicyEnabledBoth captures enum value "both"
EndpointPolicyEnabledBoth EndpointPolicyEnabled = "both"
// EndpointPolicyEnabledAuditDashIngress captures enum value "audit-ingress"
EndpointPolicyEnabledAuditDashIngress EndpointPolicyEnabled = "audit-ingress"
// EndpointPolicyEnabledAuditDashEgress captures enum value "audit-egress"
EndpointPolicyEnabledAuditDashEgress EndpointPolicyEnabled = "audit-egress"
// EndpointPolicyEnabledAuditDashBoth captures enum value "audit-both"
EndpointPolicyEnabledAuditDashBoth EndpointPolicyEnabled = "audit-both"
)
// for schema
var endpointPolicyEnabledEnum []interface{}
func init() {
var res []EndpointPolicyEnabled
if err := json.Unmarshal([]byte(`["none","ingress","egress","both","audit-ingress","audit-egress","audit-both"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointPolicyEnabledEnum = append(endpointPolicyEnabledEnum, v)
}
}
func (m EndpointPolicyEnabled) validateEndpointPolicyEnabledEnum(path, location string, value EndpointPolicyEnabled) error {
if err := validate.EnumCase(path, location, value, endpointPolicyEnabledEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this endpoint policy enabled
func (m EndpointPolicyEnabled) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointPolicyEnabledEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint policy enabled based on the context it is used in
func (m EndpointPolicyEnabled) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
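// Illustrative sketch, not part of the generated API: Pointer is convenient
// when a caller needs a *EndpointPolicyEnabled, and Validate rejects anything
// outside the enum list above (the audit-* modes are valid members).
func exampleEndpointPolicyEnabledPointer() (*EndpointPolicyEnabled, error) {
    mode := EndpointPolicyEnabledAuditDashIngress.Pointer()
    return mode, mode.Validate(strfmt.Default)
}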
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointPolicyStatus Policy information of an endpoint
//
// swagger:model EndpointPolicyStatus
type EndpointPolicyStatus struct {
// The policy revision currently enforced in the proxy for this endpoint
ProxyPolicyRevision int64 `json:"proxy-policy-revision,omitempty"`
// Statistics of the proxy redirects configured for this endpoint
ProxyStatistics []*ProxyStatistics `json:"proxy-statistics"`
// The policy in the datapath for this endpoint
Realized *EndpointPolicy `json:"realized,omitempty"`
// The policy that should apply to this endpoint
Spec *EndpointPolicy `json:"spec,omitempty"`
}
// Validate validates this endpoint policy status
func (m *EndpointPolicyStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProxyStatistics(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicyStatus) validateProxyStatistics(formats strfmt.Registry) error {
if swag.IsZero(m.ProxyStatistics) { // not required
return nil
}
for i := 0; i < len(m.ProxyStatistics); i++ {
if swag.IsZero(m.ProxyStatistics[i]) { // not required
continue
}
if m.ProxyStatistics[i] != nil {
if err := m.ProxyStatistics[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointPolicyStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointPolicyStatus) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
// ContextValidate validates this endpoint policy status based on the context it is used in
func (m *EndpointPolicyStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateProxyStatistics(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicyStatus) contextValidateProxyStatistics(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.ProxyStatistics); i++ {
if m.ProxyStatistics[i] != nil {
if swag.IsZero(m.ProxyStatistics[i]) { // not required
continue // skip zero-valued entries but keep validating the rest of the slice
}
if err := m.ProxyStatistics[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointPolicyStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointPolicyStatus) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointPolicyStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointPolicyStatus) UnmarshalBinary(b []byte) error {
var res EndpointPolicyStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
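// Illustrative sketch, not part of the generated API: per the field comments
// above, Spec is the policy that should apply and Realized is the policy in
// the datapath, so a typical status carries both; empty EndpointPolicy values
// are used as placeholders.
func exampleEndpointPolicyStatusValidation() error {
    status := &EndpointPolicyStatus{
        Spec:     &EndpointPolicy{},
        Realized: &EndpointPolicy{},
    }
    return status.Validate(strfmt.Default)
}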
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointState State of endpoint
//
// swagger:model EndpointState
type EndpointState string
func NewEndpointState(value EndpointState) *EndpointState {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointState.
func (m EndpointState) Pointer() *EndpointState {
return &m
}
const (
// EndpointStateWaitingDashForDashIdentity captures enum value "waiting-for-identity"
EndpointStateWaitingDashForDashIdentity EndpointState = "waiting-for-identity"
// EndpointStateNotDashReady captures enum value "not-ready"
EndpointStateNotDashReady EndpointState = "not-ready"
// EndpointStateWaitingDashToDashRegenerate captures enum value "waiting-to-regenerate"
EndpointStateWaitingDashToDashRegenerate EndpointState = "waiting-to-regenerate"
// EndpointStateRegenerating captures enum value "regenerating"
EndpointStateRegenerating EndpointState = "regenerating"
// EndpointStateRestoring captures enum value "restoring"
EndpointStateRestoring EndpointState = "restoring"
// EndpointStateReady captures enum value "ready"
EndpointStateReady EndpointState = "ready"
// EndpointStateDisconnecting captures enum value "disconnecting"
EndpointStateDisconnecting EndpointState = "disconnecting"
// EndpointStateDisconnected captures enum value "disconnected"
EndpointStateDisconnected EndpointState = "disconnected"
// EndpointStateInvalid captures enum value "invalid"
EndpointStateInvalid EndpointState = "invalid"
)
// for schema
var endpointStateEnum []interface{}
func init() {
var res []EndpointState
if err := json.Unmarshal([]byte(`["waiting-for-identity","not-ready","waiting-to-regenerate","regenerating","restoring","ready","disconnecting","disconnected","invalid"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointStateEnum = append(endpointStateEnum, v)
}
}
func (m EndpointState) validateEndpointStateEnum(path, location string, value EndpointState) error {
if err := validate.EnumCase(path, location, value, endpointStateEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this endpoint state
func (m EndpointState) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointStateEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint state based on the context it is used in
func (m EndpointState) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
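// Illustrative sketch, not part of the generated API: NewEndpointState yields
// the pointer form required by fields such as EndpointChangeRequest.State and
// EndpointStatus.State, and Validate rejects values outside the enum (the
// state "sleeping" below is made up).
func exampleEndpointStateValidation() error {
    state := NewEndpointState(EndpointStateReady)
    if err := state.Validate(strfmt.Default); err != nil {
        return err // not expected: "ready" is a registered enum value
    }
    return EndpointState("sleeping").Validate(strfmt.Default) // enum violation
}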
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointStatus The current state and configuration of the endpoint, its policy & datapath, and subcomponents
//
// swagger:model EndpointStatus
type EndpointStatus struct {
// Status of internal controllers attached to this endpoint
Controllers ControllerStatuses `json:"controllers,omitempty"`
// Unique identifiers for this endpoint from outside cilium
ExternalIdentifiers *EndpointIdentifiers `json:"external-identifiers,omitempty"`
// Summary overall endpoint & subcomponent health
Health *EndpointHealth `json:"health,omitempty"`
// The security identity for this endpoint
Identity *Identity `json:"identity,omitempty"`
// Labels applied to this endpoint
Labels *LabelConfigurationStatus `json:"labels,omitempty"`
// Most recent status log. See endpoint/{id}/log for the complete log.
Log EndpointStatusLog `json:"log,omitempty"`
// List of named ports that can be used in Network Policy
NamedPorts NamedPorts `json:"namedPorts,omitempty"`
// Networking properties of the endpoint
Networking *EndpointNetworking `json:"networking,omitempty"`
// The policy applied to this endpoint from the policy repository
Policy *EndpointPolicyStatus `json:"policy,omitempty"`
// The configuration in effect on this endpoint
Realized *EndpointConfigurationSpec `json:"realized,omitempty"`
// Current state of endpoint
// Required: true
State *EndpointState `json:"state"`
}
// Validate validates this endpoint status
func (m *EndpointStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateControllers(formats); err != nil {
res = append(res, err)
}
if err := m.validateExternalIdentifiers(formats); err != nil {
res = append(res, err)
}
if err := m.validateHealth(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if err := m.validateLog(formats); err != nil {
res = append(res, err)
}
if err := m.validateNamedPorts(formats); err != nil {
res = append(res, err)
}
if err := m.validateNetworking(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointStatus) validateControllers(formats strfmt.Registry) error {
if swag.IsZero(m.Controllers) { // not required
return nil
}
if err := m.Controllers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *EndpointStatus) validateExternalIdentifiers(formats strfmt.Registry) error {
if swag.IsZero(m.ExternalIdentifiers) { // not required
return nil
}
if m.ExternalIdentifiers != nil {
if err := m.ExternalIdentifiers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("external-identifiers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("external-identifiers")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateHealth(formats strfmt.Registry) error {
if swag.IsZero(m.Health) { // not required
return nil
}
if m.Health != nil {
if err := m.Health.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateIdentity(formats strfmt.Registry) error {
if swag.IsZero(m.Identity) { // not required
return nil
}
if m.Identity != nil {
if err := m.Identity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if m.Labels != nil {
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateLog(formats strfmt.Registry) error {
if swag.IsZero(m.Log) { // not required
return nil
}
if err := m.Log.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("log")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("log")
}
return err
}
return nil
}
func (m *EndpointStatus) validateNamedPorts(formats strfmt.Registry) error {
if swag.IsZero(m.NamedPorts) { // not required
return nil
}
if err := m.NamedPorts.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("namedPorts")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("namedPorts")
}
return err
}
return nil
}
func (m *EndpointStatus) validateNetworking(formats strfmt.Registry) error {
if swag.IsZero(m.Networking) { // not required
return nil
}
if m.Networking != nil {
if err := m.Networking.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("networking")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("networking")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validatePolicy(formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if m.Policy != nil {
if err := m.Policy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateState(formats strfmt.Registry) error {
if err := validate.Required("state", "body", m.State); err != nil {
return err
}
if m.State != nil {
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// ContextValidate validates this endpoint status based on the context it is used in
func (m *EndpointStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateControllers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateExternalIdentifiers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHealth(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIdentity(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLog(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNamedPorts(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNetworking(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointStatus) contextValidateControllers(ctx context.Context, formats strfmt.Registry) error {
if err := m.Controllers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *EndpointStatus) contextValidateExternalIdentifiers(ctx context.Context, formats strfmt.Registry) error {
if m.ExternalIdentifiers != nil {
if swag.IsZero(m.ExternalIdentifiers) { // not required
return nil
}
if err := m.ExternalIdentifiers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("external-identifiers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("external-identifiers")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateHealth(ctx context.Context, formats strfmt.Registry) error {
if m.Health != nil {
if swag.IsZero(m.Health) { // not required
return nil
}
if err := m.Health.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error {
if m.Identity != nil {
if swag.IsZero(m.Identity) { // not required
return nil
}
if err := m.Identity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if m.Labels != nil {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateLog(ctx context.Context, formats strfmt.Registry) error {
if err := m.Log.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("log")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("log")
}
return err
}
return nil
}
func (m *EndpointStatus) contextValidateNamedPorts(ctx context.Context, formats strfmt.Registry) error {
if err := m.NamedPorts.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("namedPorts")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("namedPorts")
}
return err
}
return nil
}
func (m *EndpointStatus) contextValidateNetworking(ctx context.Context, formats strfmt.Registry) error {
if m.Networking != nil {
if swag.IsZero(m.Networking) { // not required
return nil
}
if err := m.Networking.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("networking")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("networking")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
if m.Policy != nil {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if m.State != nil {
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointStatus) UnmarshalBinary(b []byte) error {
var res EndpointStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
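// Illustrative sketch, not part of the generated API: ContextValidate is the
// context-aware counterpart of Validate and is typically called with a
// request-scoped context (or context.Background()) plus strfmt.Default. State
// is the only required field; the other values here are minimal placeholders.
func exampleEndpointStatusContextValidation(ctx context.Context) error {
    status := &EndpointStatus{
        State: NewEndpointState(EndpointStateReady),
        Log:   EndpointStatusLog{},
    }
    if err := status.Validate(strfmt.Default); err != nil {
        return err
    }
    return status.ContextValidate(ctx, strfmt.Default)
}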
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointStatusChange Indication of a change of status
//
// +deepequal-gen=true
//
// swagger:model EndpointStatusChange
type EndpointStatusChange struct {
// Code indicates the type of status change
// Enum: [ok failed]
Code string `json:"code,omitempty"`
// Status message
Message string `json:"message,omitempty"`
// state
State EndpointState `json:"state,omitempty"`
// Timestamp when status change occurred
Timestamp string `json:"timestamp,omitempty"`
}
// Validate validates this endpoint status change
func (m *EndpointStatusChange) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCode(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var endpointStatusChangeTypeCodePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","failed"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointStatusChangeTypeCodePropEnum = append(endpointStatusChangeTypeCodePropEnum, v)
}
}
const (
// EndpointStatusChangeCodeOk captures enum value "ok"
EndpointStatusChangeCodeOk string = "ok"
// EndpointStatusChangeCodeFailed captures enum value "failed"
EndpointStatusChangeCodeFailed string = "failed"
)
// prop value enum
func (m *EndpointStatusChange) validateCodeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, endpointStatusChangeTypeCodePropEnum, true); err != nil {
return err
}
return nil
}
func (m *EndpointStatusChange) validateCode(formats strfmt.Registry) error {
if swag.IsZero(m.Code) { // not required
return nil
}
// value enum
if err := m.validateCodeEnum("code", "body", m.Code); err != nil {
return err
}
return nil
}
func (m *EndpointStatusChange) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
return nil
}
// ContextValidate validate this endpoint status change based on the context it is used
func (m *EndpointStatusChange) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointStatusChange) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointStatusChange) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointStatusChange) UnmarshalBinary(b []byte) error {
var res EndpointStatusChange
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
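// Code example (illustrative sketch, not generated by go-swagger): how a caller
// might build an EndpointStatusChange from the enum constants above and run
// Validate against the default format registry. The models import path used
// here is an assumption.
package models_test
import (
"fmt"
"github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/strfmt"
)
func ExampleEndpointStatusChange_Validate() {
change := &models.EndpointStatusChange{
Code:      models.EndpointStatusChangeCodeOk, // only "ok" and "failed" pass the enum check
Message:   "regenerated endpoint program",
Timestamp: "2024-01-01T00:00:00Z",
}
fmt.Println(change.Validate(strfmt.Default) == nil)
change.Code = "pending" // outside the enum, rejected by validateCodeEnum
fmt.Println(change.Validate(strfmt.Default) == nil)
// Output:
// true
// false
}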
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointStatusLog Status log of endpoint
//
// swagger:model EndpointStatusLog
type EndpointStatusLog []*EndpointStatusChange
// Validate validates this endpoint status log
func (m EndpointStatusLog) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this endpoint status log based on the context it is used
func (m EndpointStatusLog) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
return nil
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
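// Code example (illustrative sketch, not generated by go-swagger):
// EndpointStatusLog is a slice type, so Validate walks every
// *EndpointStatusChange and reports errors under the element index. The models
// import path used here is an assumption.
package models_test
import (
"fmt"
"github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/strfmt"
)
func ExampleEndpointStatusLog_Validate() {
log := models.EndpointStatusLog{
{Code: models.EndpointStatusChangeCodeOk},
{Code: "bogus"}, // fails the code enum; the error is reported under index "1"
}
fmt.Println(log.Validate(strfmt.Default) != nil)
// Output: true
}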
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Error error
//
// swagger:model Error
type Error string
// Validate validates this error
func (m Error) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this error based on context it is used
func (m Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// FrontendAddress Layer 4 address. The protocol is currently ignored; all
// services behave as if protocol "any" were specified. To restrict traffic to
// a particular protocol, use policy.
//
// swagger:model FrontendAddress
type FrontendAddress struct {
// Layer 3 address
IP string `json:"ip,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Layer 4 protocol
// Enum: [tcp udp any]
Protocol string `json:"protocol,omitempty"`
// Load balancing scope for frontend address
// Enum: [external internal]
Scope string `json:"scope,omitempty"`
}
// Validate validates this frontend address
func (m *FrontendAddress) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if err := m.validateScope(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var frontendAddressTypeProtocolPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["tcp","udp","any"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
frontendAddressTypeProtocolPropEnum = append(frontendAddressTypeProtocolPropEnum, v)
}
}
const (
// FrontendAddressProtocolTCP captures enum value "tcp"
FrontendAddressProtocolTCP string = "tcp"
// FrontendAddressProtocolUDP captures enum value "udp"
FrontendAddressProtocolUDP string = "udp"
// FrontendAddressProtocolAny captures enum value "any"
FrontendAddressProtocolAny string = "any"
)
// prop value enum
func (m *FrontendAddress) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, frontendAddressTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
func (m *FrontendAddress) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
var frontendAddressTypeScopePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["external","internal"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
frontendAddressTypeScopePropEnum = append(frontendAddressTypeScopePropEnum, v)
}
}
const (
// FrontendAddressScopeExternal captures enum value "external"
FrontendAddressScopeExternal string = "external"
// FrontendAddressScopeInternal captures enum value "internal"
FrontendAddressScopeInternal string = "internal"
)
// prop value enum
func (m *FrontendAddress) validateScopeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, frontendAddressTypeScopePropEnum, true); err != nil {
return err
}
return nil
}
func (m *FrontendAddress) validateScope(formats strfmt.Registry) error {
if swag.IsZero(m.Scope) { // not required
return nil
}
// value enum
if err := m.validateScopeEnum("scope", "body", m.Scope); err != nil {
return err
}
return nil
}
// ContextValidate validates this frontend address based on context it is used
func (m *FrontendAddress) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *FrontendAddress) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *FrontendAddress) UnmarshalBinary(b []byte) error {
var res FrontendAddress
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
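// Code example (illustrative sketch, not generated by go-swagger): only the
// protocol and scope fields of FrontendAddress are enum-constrained; IP and
// port are passed through as-is. The models import path used here is an
// assumption.
package models_test
import (
"fmt"
"github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/strfmt"
)
func ExampleFrontendAddress_Validate() {
fa := &models.FrontendAddress{
IP:       "10.0.0.1",
Port:     8080,
Protocol: models.FrontendAddressProtocolTCP,
Scope:    models.FrontendAddressScopeExternal,
}
fmt.Println(fa.Validate(strfmt.Default) == nil)
fa.Scope = "cluster" // not one of [external internal]
fmt.Println(fa.Validate(strfmt.Default) == nil)
// Output:
// true
// false
}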
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// FrontendMapping Mapping of frontend to backend pods of an LRP
//
// swagger:model FrontendMapping
type FrontendMapping struct {
// Pod backends of an LRP
Backends []*LRPBackend `json:"backends"`
// frontend address
FrontendAddress *FrontendAddress `json:"frontend-address,omitempty"`
}
// Validate validates this frontend mapping
func (m *FrontendMapping) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBackends(formats); err != nil {
res = append(res, err)
}
if err := m.validateFrontendAddress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *FrontendMapping) validateBackends(formats strfmt.Registry) error {
if swag.IsZero(m.Backends) { // not required
return nil
}
for i := 0; i < len(m.Backends); i++ {
if swag.IsZero(m.Backends[i]) { // not required
continue
}
if m.Backends[i] != nil {
if err := m.Backends[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backends" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backends" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *FrontendMapping) validateFrontendAddress(formats strfmt.Registry) error {
if swag.IsZero(m.FrontendAddress) { // not required
return nil
}
if m.FrontendAddress != nil {
if err := m.FrontendAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// ContextValidate validate this frontend mapping based on the context it is used
func (m *FrontendMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBackends(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFrontendAddress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *FrontendMapping) contextValidateBackends(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Backends); i++ {
if m.Backends[i] != nil {
if swag.IsZero(m.Backends[i]) { // not required
return nil
}
if err := m.Backends[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backends" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backends" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *FrontendMapping) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error {
if m.FrontendAddress != nil {
if swag.IsZero(m.FrontendAddress) { // not required
return nil
}
if err := m.FrontendAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *FrontendMapping) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *FrontendMapping) UnmarshalBinary(b []byte) error {
var res FrontendMapping
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HostFirewall Status of the host firewall
//
// +k8s:deepcopy-gen=true
//
// swagger:model HostFirewall
type HostFirewall struct {
// devices
Devices []string `json:"devices"`
// mode
// Enum: [Disabled Enabled]
Mode string `json:"mode,omitempty"`
}
// Validate validates this host firewall
func (m *HostFirewall) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var hostFirewallTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Disabled","Enabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hostFirewallTypeModePropEnum = append(hostFirewallTypeModePropEnum, v)
}
}
const (
// HostFirewallModeDisabled captures enum value "Disabled"
HostFirewallModeDisabled string = "Disabled"
// HostFirewallModeEnabled captures enum value "Enabled"
HostFirewallModeEnabled string = "Enabled"
)
// prop value enum
func (m *HostFirewall) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hostFirewallTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HostFirewall) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this host firewall based on context it is used
func (m *HostFirewall) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HostFirewall) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HostFirewall) UnmarshalBinary(b []byte) error {
var res HostFirewall
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HubbleStatus Status of the Hubble server
//
// +k8s:deepcopy-gen=true
//
// swagger:model HubbleStatus
type HubbleStatus struct {
// metrics
Metrics *HubbleStatusMetrics `json:"metrics,omitempty"`
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// observer
Observer *HubbleStatusObserver `json:"observer,omitempty"`
// State the component is in
// Enum: [Ok Warning Failure Disabled]
State string `json:"state,omitempty"`
}
// Validate validates this hubble status
func (m *HubbleStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMetrics(formats); err != nil {
res = append(res, err)
}
if err := m.validateObserver(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *HubbleStatus) validateMetrics(formats strfmt.Registry) error {
if swag.IsZero(m.Metrics) { // not required
return nil
}
if m.Metrics != nil {
if err := m.Metrics.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metrics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metrics")
}
return err
}
}
return nil
}
func (m *HubbleStatus) validateObserver(formats strfmt.Registry) error {
if swag.IsZero(m.Observer) { // not required
return nil
}
if m.Observer != nil {
if err := m.Observer.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("observer")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("observer")
}
return err
}
}
return nil
}
var hubbleStatusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hubbleStatusTypeStatePropEnum = append(hubbleStatusTypeStatePropEnum, v)
}
}
const (
// HubbleStatusStateOk captures enum value "Ok"
HubbleStatusStateOk string = "Ok"
// HubbleStatusStateWarning captures enum value "Warning"
HubbleStatusStateWarning string = "Warning"
// HubbleStatusStateFailure captures enum value "Failure"
HubbleStatusStateFailure string = "Failure"
// HubbleStatusStateDisabled captures enum value "Disabled"
HubbleStatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *HubbleStatus) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hubbleStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HubbleStatus) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validate this hubble status based on the context it is used
func (m *HubbleStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMetrics(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateObserver(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *HubbleStatus) contextValidateMetrics(ctx context.Context, formats strfmt.Registry) error {
if m.Metrics != nil {
if swag.IsZero(m.Metrics) { // not required
return nil
}
if err := m.Metrics.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metrics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metrics")
}
return err
}
}
return nil
}
func (m *HubbleStatus) contextValidateObserver(ctx context.Context, formats strfmt.Registry) error {
if m.Observer != nil {
if swag.IsZero(m.Observer) { // not required
return nil
}
if err := m.Observer.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("observer")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("observer")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *HubbleStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatus) UnmarshalBinary(b []byte) error {
var res HubbleStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// HubbleStatusMetrics Status of the Hubble metrics server
//
// swagger:model HubbleStatusMetrics
type HubbleStatusMetrics struct {
// State of the Hubble metrics
// Enum: [Ok Warning Failure Disabled]
State string `json:"state,omitempty"`
}
// Validate validates this hubble status metrics
func (m *HubbleStatusMetrics) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var hubbleStatusMetricsTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hubbleStatusMetricsTypeStatePropEnum = append(hubbleStatusMetricsTypeStatePropEnum, v)
}
}
const (
// HubbleStatusMetricsStateOk captures enum value "Ok"
HubbleStatusMetricsStateOk string = "Ok"
// HubbleStatusMetricsStateWarning captures enum value "Warning"
HubbleStatusMetricsStateWarning string = "Warning"
// HubbleStatusMetricsStateFailure captures enum value "Failure"
HubbleStatusMetricsStateFailure string = "Failure"
// HubbleStatusMetricsStateDisabled captures enum value "Disabled"
HubbleStatusMetricsStateDisabled string = "Disabled"
)
// prop value enum
func (m *HubbleStatusMetrics) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hubbleStatusMetricsTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HubbleStatusMetrics) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("metrics"+"."+"state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this hubble status metrics based on context it is used
func (m *HubbleStatusMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HubbleStatusMetrics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatusMetrics) UnmarshalBinary(b []byte) error {
var res HubbleStatusMetrics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// HubbleStatusObserver Status of the Hubble observer
//
// +k8s:deepcopy-gen=true
//
// swagger:model HubbleStatusObserver
type HubbleStatusObserver struct {
// Current number of flows this Hubble observer stores
CurrentFlows int64 `json:"current-flows,omitempty"`
// Maximum number of flows this Hubble observer is able to store
MaxFlows int64 `json:"max-flows,omitempty"`
// Total number of flows this Hubble observer has seen
SeenFlows int64 `json:"seen-flows,omitempty"`
// Uptime of this Hubble observer instance
// Format: duration
Uptime strfmt.Duration `json:"uptime,omitempty"`
}
// Validate validates this hubble status observer
func (m *HubbleStatusObserver) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateUptime(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *HubbleStatusObserver) validateUptime(formats strfmt.Registry) error {
if swag.IsZero(m.Uptime) { // not required
return nil
}
if err := validate.FormatOf("observer"+"."+"uptime", "body", "duration", m.Uptime.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this hubble status observer based on context it is used
func (m *HubbleStatusObserver) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HubbleStatusObserver) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatusObserver) UnmarshalBinary(b []byte) error {
var res HubbleStatusObserver
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
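// Code example (illustrative sketch, not generated by go-swagger): validating a
// HubbleStatus with a nested observer and round-tripping it through
// MarshalBinary/UnmarshalBinary, which are thin wrappers over swag.WriteJSON
// and swag.ReadJSON. The observer uptime is a strfmt.Duration checked against
// the "duration" format. The models import path used here is an assumption.
package models_test
import (
"fmt"
"time"
"github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/strfmt"
)
func ExampleHubbleStatus() {
in := &models.HubbleStatus{
State: models.HubbleStatusStateOk,
Observer: &models.HubbleStatusObserver{
CurrentFlows: 1024,
MaxFlows:     4096,
SeenFlows:    1000000,
Uptime:       strfmt.Duration(90 * time.Minute),
},
}
// The state enum and the nested observer's "duration" format are both checked.
fmt.Println(in.Validate(strfmt.Default) == nil)
raw, err := in.MarshalBinary()
if err != nil {
panic(err)
}
var out models.HubbleStatus
if err := out.UnmarshalBinary(raw); err != nil {
panic(err)
}
fmt.Println(out.State, out.Observer.SeenFlows)
// Output:
// true
// Ok 1000000
}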
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPsecStatus Status of the IPsec agent
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPsecStatus
type IPsecStatus struct {
// IPsec decryption interfaces
DecryptInterfaces []string `json:"decrypt-interfaces"`
// IPsec error count
ErrorCount int64 `json:"error-count,omitempty"`
// IPsec keys in use
KeysInUse int64 `json:"keys-in-use,omitempty"`
// IPsec max sequence number
MaxSeqNumber string `json:"max-seq-number,omitempty"`
// IPsec XFRM errors
XfrmErrors map[string]int64 `json:"xfrm-errors,omitempty"`
}
// Validate validates this IPsec status
func (m *IPsecStatus) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPsec status based on context it is used
func (m *IPsecStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPsecStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPsecStatus) UnmarshalBinary(b []byte) error {
var res IPsecStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Identity Security identity
//
// swagger:model Identity
type Identity struct {
// Unique identifier
ID int64 `json:"id,omitempty"`
// Labels describing the identity
Labels Labels `json:"labels,omitempty"`
// SHA256 of labels
LabelsSHA256 string `json:"labelsSHA256,omitempty"`
}
// Validate validates this identity
func (m *Identity) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Identity) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validate this identity based on the context it is used
func (m *Identity) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Identity) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *Identity) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Identity) UnmarshalBinary(b []byte) error {
var res Identity
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IdentityEndpoints Security identities owned by endpoints on the local node
//
// swagger:model IdentityEndpoints
type IdentityEndpoints struct {
// Security identity
Identity *Identity `json:"identity,omitempty"`
// number of endpoints consuming this identity locally (should always be > 0)
RefCount int64 `json:"refCount,omitempty"`
}
// Validate validates this identity endpoints
func (m *IdentityEndpoints) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IdentityEndpoints) validateIdentity(formats strfmt.Registry) error {
if swag.IsZero(m.Identity) { // not required
return nil
}
if m.Identity != nil {
if err := m.Identity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// ContextValidate validate this identity endpoints based on the context it is used
func (m *IdentityEndpoints) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateIdentity(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IdentityEndpoints) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error {
if m.Identity != nil {
if swag.IsZero(m.Identity) { // not required
return nil
}
if err := m.Identity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IdentityEndpoints) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IdentityEndpoints) UnmarshalBinary(b []byte) error {
var res IdentityEndpoints
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
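// Code example (illustrative sketch, not generated by go-swagger):
// IdentityEndpoints pairs a security Identity with a local reference count;
// nested validation is delegated to Identity.Validate and name-prefixed with
// "identity". The models import path used here is an assumption.
package models_test
import (
"fmt"
"github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/strfmt"
)
func ExampleIdentityEndpoints_Validate() {
ie := &models.IdentityEndpoints{
Identity: &models.Identity{ID: 12345},
RefCount: 3,
}
fmt.Println(ie.Validate(strfmt.Default) == nil)
// Output: true
}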
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IdentityRange Status of identity range of the cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model IdentityRange
type IdentityRange struct {
// Maximum identity of the cluster
MaxIdentity int64 `json:"max-identity,omitempty"`
// Minimum identity of the cluster
MinIdentity int64 `json:"min-identity,omitempty"`
}
// Validate validates this identity range
func (m *IdentityRange) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this identity range based on context it is used
func (m *IdentityRange) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IdentityRange) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IdentityRange) UnmarshalBinary(b []byte) error {
var res IdentityRange
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPAMAddressResponse IPAM configuration of an individual address family
//
// swagger:model IPAMAddressResponse
type IPAMAddressResponse struct {
// List of CIDRs out of which IPs are allocated
Cidrs []string `json:"cidrs"`
// The UUID for the expiration timer. Set when expiration has been
// enabled while allocating.
//
ExpirationUUID string `json:"expiration-uuid,omitempty"`
// IP of gateway
Gateway string `json:"gateway,omitempty"`
// InterfaceNumber is a field for generically identifying an interface. This is only useful in ENI mode.
//
InterfaceNumber string `json:"interface-number,omitempty"`
// Allocated IP for endpoint
IP string `json:"ip,omitempty"`
// MAC of master interface if address is a slave/secondary of a master interface
MasterMac string `json:"master-mac,omitempty"`
}
// Validate validates this IPAM address response
func (m *IPAMAddressResponse) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPAM address response based on context it is used
func (m *IPAMAddressResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPAMAddressResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMAddressResponse) UnmarshalBinary(b []byte) error {
var res IPAMAddressResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// IPAMResponse IPAM configuration of an endpoint
//
// swagger:model IPAMResponse
type IPAMResponse struct {
// address
// Required: true
Address *AddressPair `json:"address"`
// host addressing
// Required: true
HostAddressing *NodeAddressing `json:"host-addressing"`
// ipv4
IPV4 *IPAMAddressResponse `json:"ipv4,omitempty"`
// ipv6
IPV6 *IPAMAddressResponse `json:"ipv6,omitempty"`
}
// Validate validates this IPAM response
func (m *IPAMResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV4(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV6(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMResponse) validateAddress(formats strfmt.Registry) error {
if err := validate.Required("address", "body", m.Address); err != nil {
return err
}
if m.Address != nil {
if err := m.Address.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("address")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateHostAddressing(formats strfmt.Registry) error {
if err := validate.Required("host-addressing", "body", m.HostAddressing); err != nil {
return err
}
if m.HostAddressing != nil {
if err := m.HostAddressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateIPV4(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if m.IPV4 != nil {
if err := m.IPV4.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateIPV6(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if m.IPV6 != nil {
if err := m.IPV6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// ContextValidate validate this IPAM response based on the context it is used
func (m *IPAMResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV4(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV6(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMResponse) contextValidateAddress(ctx context.Context, formats strfmt.Registry) error {
if m.Address != nil {
if err := m.Address.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("address")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.HostAddressing != nil {
if err := m.HostAddressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error {
if m.IPV4 != nil {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if err := m.IPV4.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error {
if m.IPV6 != nil {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if err := m.IPV6.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IPAMResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMResponse) UnmarshalBinary(b []byte) error {
var res IPAMResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
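// Code example (illustrative sketch, not generated by go-swagger): address and
// host-addressing are Required in the swagger spec, so validation fails when
// either pointer is nil, before any nested validation runs. The models import
// path used here is an assumption; AddressPair is another generated model from
// the same package.
package models_test
import (
"fmt"
"github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/strfmt"
)
func ExampleIPAMResponse_Validate() {
resp := &models.IPAMResponse{
Address: &models.AddressPair{},
// HostAddressing left nil on purpose: validate.Required rejects it.
}
fmt.Println(resp.Validate(strfmt.Default) != nil)
// Output: true
}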
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPAMStatus Status of IP address management
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPAMStatus
type IPAMStatus struct {
// allocations
Allocations AllocationMap `json:"allocations,omitempty"`
// ipv4
IPV4 []string `json:"ipv4"`
// ipv6
IPV6 []string `json:"ipv6"`
// status
Status string `json:"status,omitempty"`
}
// Validate validates this IPAM status
func (m *IPAMStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAllocations(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMStatus) validateAllocations(formats strfmt.Registry) error {
if swag.IsZero(m.Allocations) { // not required
return nil
}
if m.Allocations != nil {
if err := m.Allocations.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("allocations")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("allocations")
}
return err
}
}
return nil
}
// ContextValidate validate this IPAM status based on the context it is used
func (m *IPAMStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAllocations(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMStatus) contextValidateAllocations(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Allocations) { // not required
return nil
}
if err := m.Allocations.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("allocations")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("allocations")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *IPAMStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMStatus) UnmarshalBinary(b []byte) error {
var res IPAMStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// IPListEntry IP entry with metadata
//
// swagger:model IPListEntry
type IPListEntry struct {
// Key of the entry in the form of a CIDR range
// Required: true
Cidr *string `json:"cidr"`
// The context ID for the encryption session
EncryptKey int64 `json:"encryptKey,omitempty"`
// IP address of the host
HostIP string `json:"hostIP,omitempty"`
// Numerical identity assigned to the IP
// Required: true
Identity *int64 `json:"identity"`
// metadata
Metadata *IPListEntryMetadata `json:"metadata,omitempty"`
}
// Validate validates this IP list entry
func (m *IPListEntry) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCidr(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if err := m.validateMetadata(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPListEntry) validateCidr(formats strfmt.Registry) error {
if err := validate.Required("cidr", "body", m.Cidr); err != nil {
return err
}
return nil
}
func (m *IPListEntry) validateIdentity(formats strfmt.Registry) error {
if err := validate.Required("identity", "body", m.Identity); err != nil {
return err
}
return nil
}
func (m *IPListEntry) validateMetadata(formats strfmt.Registry) error {
if swag.IsZero(m.Metadata) { // not required
return nil
}
if m.Metadata != nil {
if err := m.Metadata.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metadata")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metadata")
}
return err
}
}
return nil
}
// ContextValidate validate this IP list entry based on the context it is used
func (m *IPListEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMetadata(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPListEntry) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error {
if m.Metadata != nil {
if swag.IsZero(m.Metadata) { // not required
return nil
}
if err := m.Metadata.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metadata")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metadata")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IPListEntry) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPListEntry) UnmarshalBinary(b []byte) error {
var res IPListEntry
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
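// Code example (illustrative sketch, not generated by go-swagger): cidr and
// identity are required, which is why they are generated as pointer fields;
// swag.String and swag.Int64 are the usual helpers for filling them in. The
// models import path used here is an assumption.
package models_test
import (
"fmt"
"github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
func ExampleIPListEntry_Validate() {
entry := &models.IPListEntry{
Cidr:     swag.String("10.244.1.0/24"),
Identity: swag.Int64(4242),
Metadata: &models.IPListEntryMetadata{Source: "k8s"},
}
fmt.Println(entry.Validate(strfmt.Default) == nil)
entry.Cidr = nil // required field missing -> validation error
fmt.Println(entry.Validate(strfmt.Default) == nil)
// Output:
// true
// false
}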
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPListEntryMetadata Additional metadata assigned to an IP list entry
//
// swagger:model IPListEntryMetadata
type IPListEntryMetadata struct {
// Name assigned to the IP (e.g. Kubernetes pod name)
Name string `json:"name,omitempty"`
// Namespace of the IP (e.g. Kubernetes namespace)
Namespace string `json:"namespace,omitempty"`
// Source of the IP entry and its metadata
// Example: k8s
Source string `json:"source,omitempty"`
}
// Validate validates this IP list entry metadata
func (m *IPListEntryMetadata) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IP list entry metadata based on context it is used
func (m *IPListEntryMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPListEntryMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPListEntryMetadata) UnmarshalBinary(b []byte) error {
var res IPListEntryMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPV4BigTCP Status of IPv4 BIG TCP
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPV4BigTCP
type IPV4BigTCP struct {
// Is IPv4 BIG TCP enabled
Enabled bool `json:"enabled,omitempty"`
// Maximum IPv4 GRO size
MaxGRO int64 `json:"maxGRO,omitempty"`
// Maximum IPv4 GSO size
MaxGSO int64 `json:"maxGSO,omitempty"`
}
// Validate validates this IPv4 BIG TCP
func (m *IPV4BigTCP) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPv4 BIG TCP based on context it is used
func (m *IPV4BigTCP) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPV4BigTCP) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPV4BigTCP) UnmarshalBinary(b []byte) error {
var res IPV4BigTCP
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPV6BigTCP Status of IPv6 BIG TCP
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPV6BigTCP
type IPV6BigTCP struct {
// Is IPv6 BIG TCP enabled
Enabled bool `json:"enabled,omitempty"`
// Maximum IPv6 GRO size
MaxGRO int64 `json:"maxGRO,omitempty"`
// Maximum IPv6 GSO size
MaxGSO int64 `json:"maxGSO,omitempty"`
}
// Validate validates this IPv6 BIG TCP
func (m *IPV6BigTCP) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPv6 BIG TCP based on context it is used
func (m *IPV6BigTCP) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPV6BigTCP) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPV6BigTCP) UnmarshalBinary(b []byte) error {
var res IPV6BigTCP
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// K8sStatus Status of Kubernetes integration
//
// +k8s:deepcopy-gen=true
//
// swagger:model K8sStatus
type K8sStatus struct {
// k8s api versions
K8sAPIVersions []string `json:"k8s-api-versions"`
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// State the component is in
// Enum: [Ok Warning Failure Disabled]
State string `json:"state,omitempty"`
}
// Validate validates this k8s status
func (m *K8sStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var k8sStatusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
k8sStatusTypeStatePropEnum = append(k8sStatusTypeStatePropEnum, v)
}
}
const (
// K8sStatusStateOk captures enum value "Ok"
K8sStatusStateOk string = "Ok"
// K8sStatusStateWarning captures enum value "Warning"
K8sStatusStateWarning string = "Warning"
// K8sStatusStateFailure captures enum value "Failure"
K8sStatusStateFailure string = "Failure"
// K8sStatusStateDisabled captures enum value "Disabled"
K8sStatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *K8sStatus) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, k8sStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *K8sStatus) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this k8s status based on context it is used
func (m *K8sStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *K8sStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *K8sStatus) UnmarshalBinary(b []byte) error {
var res K8sStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// KVstoreConfiguration Configuration used for the kvstore
//
// swagger:model KVstoreConfiguration
type KVstoreConfiguration struct {
// Configuration options
Options map[string]string `json:"options,omitempty"`
// Type of kvstore
Type string `json:"type,omitempty"`
}
// Validate validates this kvstore configuration
func (m *KVstoreConfiguration) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kvstore configuration based on context it is used
func (m *KVstoreConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KVstoreConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KVstoreConfiguration) UnmarshalBinary(b []byte) error {
var res KVstoreConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// KubeProxyReplacement Status of kube-proxy replacement
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacement
type KubeProxyReplacement struct {
//
//
// +k8s:deepcopy-gen=true
DeviceList []*KubeProxyReplacementDeviceListItems0 `json:"deviceList"`
// devices
Devices []string `json:"devices"`
// direct routing device
DirectRoutingDevice string `json:"directRoutingDevice,omitempty"`
// features
Features *KubeProxyReplacementFeatures `json:"features,omitempty"`
// mode
// Enum: [True False]
Mode string `json:"mode,omitempty"`
}
// Validate validates this kube proxy replacement
func (m *KubeProxyReplacement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDeviceList(formats); err != nil {
res = append(res, err)
}
if err := m.validateFeatures(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacement) validateDeviceList(formats strfmt.Registry) error {
if swag.IsZero(m.DeviceList) { // not required
return nil
}
for i := 0; i < len(m.DeviceList); i++ {
if swag.IsZero(m.DeviceList[i]) { // not required
continue
}
if m.DeviceList[i] != nil {
if err := m.DeviceList[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("deviceList" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("deviceList" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *KubeProxyReplacement) validateFeatures(formats strfmt.Registry) error {
if swag.IsZero(m.Features) { // not required
return nil
}
if m.Features != nil {
if err := m.Features.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features")
}
return err
}
}
return nil
}
var kubeProxyReplacementTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["True","False"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementTypeModePropEnum = append(kubeProxyReplacementTypeModePropEnum, v)
}
}
const (
// KubeProxyReplacementModeTrue captures enum value "True"
KubeProxyReplacementModeTrue string = "True"
// KubeProxyReplacementModeFalse captures enum value "False"
KubeProxyReplacementModeFalse string = "False"
)
// prop value enum
func (m *KubeProxyReplacement) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacement) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validate this kube proxy replacement based on the context it is used
func (m *KubeProxyReplacement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDeviceList(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFeatures(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacement) contextValidateDeviceList(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.DeviceList); i++ {
if m.DeviceList[i] != nil {
if swag.IsZero(m.DeviceList[i]) { // not required
continue
}
if err := m.DeviceList[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("deviceList" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("deviceList" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *KubeProxyReplacement) contextValidateFeatures(ctx context.Context, formats strfmt.Registry) error {
if m.Features != nil {
if swag.IsZero(m.Features) { // not required
return nil
}
if err := m.Features.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacement) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
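// Illustrative sketch (not part of the generated code): the Mode field is
// constrained to the [True False] enum declared above, so Validate rejects
// any other value. strfmt.Default is assumed as the format registry and
// "Maybe" is an intentionally invalid placeholder.
func exampleKubeProxyReplacementModeValidation() {
	ok := &KubeProxyReplacement{Mode: KubeProxyReplacementModeTrue}
	_ = ok.Validate(strfmt.Default) // nil: "True" is an allowed enum value

	bad := &KubeProxyReplacement{Mode: "Maybe"}
	_ = bad.Validate(strfmt.Default) // non-nil: "Maybe" is outside the enum
}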
// KubeProxyReplacementDeviceListItems0
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementDeviceListItems0
type KubeProxyReplacementDeviceListItems0 struct {
//
//
// +k8s:deepcopy-gen=true
IP []string `json:"ip"`
// name
Name string `json:"name,omitempty"`
}
// Validate validates this kube proxy replacement device list items0
func (m *KubeProxyReplacementDeviceListItems0) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement device list items0 based on context it is used
func (m *KubeProxyReplacementDeviceListItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementDeviceListItems0) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementDeviceListItems0) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementDeviceListItems0
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeatures
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeatures
type KubeProxyReplacementFeatures struct {
// flag bpf-lb-sock-hostns-only
BpfSocketLBHostnsOnly bool `json:"bpfSocketLBHostnsOnly,omitempty"`
// external i ps
ExternalIPs *KubeProxyReplacementFeaturesExternalIPs `json:"externalIPs,omitempty"`
// graceful termination
GracefulTermination *KubeProxyReplacementFeaturesGracefulTermination `json:"gracefulTermination,omitempty"`
// host port
HostPort *KubeProxyReplacementFeaturesHostPort `json:"hostPort,omitempty"`
// host reachable services
HostReachableServices *KubeProxyReplacementFeaturesHostReachableServices `json:"hostReachableServices,omitempty"`
// nat46 x64
Nat46X64 *KubeProxyReplacementFeaturesNat46X64 `json:"nat46X64,omitempty"`
// node port
NodePort *KubeProxyReplacementFeaturesNodePort `json:"nodePort,omitempty"`
// session affinity
SessionAffinity *KubeProxyReplacementFeaturesSessionAffinity `json:"sessionAffinity,omitempty"`
// socket l b
SocketLB *KubeProxyReplacementFeaturesSocketLB `json:"socketLB,omitempty"`
// socket l b tracing
SocketLBTracing *KubeProxyReplacementFeaturesSocketLBTracing `json:"socketLBTracing,omitempty"`
}
// Validate validates this kube proxy replacement features
func (m *KubeProxyReplacementFeatures) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExternalIPs(formats); err != nil {
res = append(res, err)
}
if err := m.validateGracefulTermination(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostPort(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostReachableServices(formats); err != nil {
res = append(res, err)
}
if err := m.validateNat46X64(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodePort(formats); err != nil {
res = append(res, err)
}
if err := m.validateSessionAffinity(formats); err != nil {
res = append(res, err)
}
if err := m.validateSocketLB(formats); err != nil {
res = append(res, err)
}
if err := m.validateSocketLBTracing(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateExternalIPs(formats strfmt.Registry) error {
if swag.IsZero(m.ExternalIPs) { // not required
return nil
}
if m.ExternalIPs != nil {
if err := m.ExternalIPs.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "externalIPs")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "externalIPs")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateGracefulTermination(formats strfmt.Registry) error {
if swag.IsZero(m.GracefulTermination) { // not required
return nil
}
if m.GracefulTermination != nil {
if err := m.GracefulTermination.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "gracefulTermination")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "gracefulTermination")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateHostPort(formats strfmt.Registry) error {
if swag.IsZero(m.HostPort) { // not required
return nil
}
if m.HostPort != nil {
if err := m.HostPort.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostPort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostPort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateHostReachableServices(formats strfmt.Registry) error {
if swag.IsZero(m.HostReachableServices) { // not required
return nil
}
if m.HostReachableServices != nil {
if err := m.HostReachableServices.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostReachableServices")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostReachableServices")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateNat46X64(formats strfmt.Registry) error {
if swag.IsZero(m.Nat46X64) { // not required
return nil
}
if m.Nat46X64 != nil {
if err := m.Nat46X64.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateNodePort(formats strfmt.Registry) error {
if swag.IsZero(m.NodePort) { // not required
return nil
}
if m.NodePort != nil {
if err := m.NodePort.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nodePort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nodePort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSessionAffinity(formats strfmt.Registry) error {
if swag.IsZero(m.SessionAffinity) { // not required
return nil
}
if m.SessionAffinity != nil {
if err := m.SessionAffinity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "sessionAffinity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "sessionAffinity")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSocketLB(formats strfmt.Registry) error {
if swag.IsZero(m.SocketLB) { // not required
return nil
}
if m.SocketLB != nil {
if err := m.SocketLB.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLB")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLB")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSocketLBTracing(formats strfmt.Registry) error {
if swag.IsZero(m.SocketLBTracing) { // not required
return nil
}
if m.SocketLBTracing != nil {
if err := m.SocketLBTracing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLBTracing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLBTracing")
}
return err
}
}
return nil
}
// ContextValidate validate this kube proxy replacement features based on the context it is used
func (m *KubeProxyReplacementFeatures) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateExternalIPs(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateGracefulTermination(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostPort(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostReachableServices(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNat46X64(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodePort(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSessionAffinity(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSocketLB(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSocketLBTracing(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateExternalIPs(ctx context.Context, formats strfmt.Registry) error {
if m.ExternalIPs != nil {
if swag.IsZero(m.ExternalIPs) { // not required
return nil
}
if err := m.ExternalIPs.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "externalIPs")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "externalIPs")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateGracefulTermination(ctx context.Context, formats strfmt.Registry) error {
if m.GracefulTermination != nil {
if swag.IsZero(m.GracefulTermination) { // not required
return nil
}
if err := m.GracefulTermination.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "gracefulTermination")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "gracefulTermination")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateHostPort(ctx context.Context, formats strfmt.Registry) error {
if m.HostPort != nil {
if swag.IsZero(m.HostPort) { // not required
return nil
}
if err := m.HostPort.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostPort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostPort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateHostReachableServices(ctx context.Context, formats strfmt.Registry) error {
if m.HostReachableServices != nil {
if swag.IsZero(m.HostReachableServices) { // not required
return nil
}
if err := m.HostReachableServices.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostReachableServices")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostReachableServices")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateNat46X64(ctx context.Context, formats strfmt.Registry) error {
if m.Nat46X64 != nil {
if swag.IsZero(m.Nat46X64) { // not required
return nil
}
if err := m.Nat46X64.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateNodePort(ctx context.Context, formats strfmt.Registry) error {
if m.NodePort != nil {
if swag.IsZero(m.NodePort) { // not required
return nil
}
if err := m.NodePort.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nodePort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nodePort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSessionAffinity(ctx context.Context, formats strfmt.Registry) error {
if m.SessionAffinity != nil {
if swag.IsZero(m.SessionAffinity) { // not required
return nil
}
if err := m.SessionAffinity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "sessionAffinity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "sessionAffinity")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSocketLB(ctx context.Context, formats strfmt.Registry) error {
if m.SocketLB != nil {
if swag.IsZero(m.SocketLB) { // not required
return nil
}
if err := m.SocketLB.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLB")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLB")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSocketLBTracing(ctx context.Context, formats strfmt.Registry) error {
if m.SocketLBTracing != nil {
if swag.IsZero(m.SocketLBTracing) { // not required
return nil
}
if err := m.SocketLBTracing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLBTracing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLBTracing")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeatures) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeatures) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeatures
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
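// Illustrative sketch (not part of the generated code): nested feature
// sub-objects are validated recursively, and a failing field is reported
// under the "features..." error path via ValidateName. strfmt.Default is
// assumed as the registry; "NAT64" is an intentionally invalid NodePort mode.
func exampleKubeProxyReplacementFeaturesValidation() error {
	f := &KubeProxyReplacementFeatures{
		ExternalIPs: &KubeProxyReplacementFeaturesExternalIPs{Enabled: true},
		NodePort: &KubeProxyReplacementFeaturesNodePort{
			Enabled: true,
			Mode:    "NAT64", // not in [SNAT DSR Hybrid], so Validate fails
		},
	}
	// The returned error points at the offending nested NodePort mode field.
	return f.Validate(strfmt.Default)
}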
// KubeProxyReplacementFeaturesExternalIPs
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesExternalIPs
type KubeProxyReplacementFeaturesExternalIPs struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features external i ps
func (m *KubeProxyReplacementFeaturesExternalIPs) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features external i ps based on context it is used
func (m *KubeProxyReplacementFeaturesExternalIPs) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesExternalIPs) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesExternalIPs) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesExternalIPs
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesGracefulTermination
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesGracefulTermination
type KubeProxyReplacementFeaturesGracefulTermination struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features graceful termination
func (m *KubeProxyReplacementFeaturesGracefulTermination) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features graceful termination based on context it is used
func (m *KubeProxyReplacementFeaturesGracefulTermination) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesGracefulTermination) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesGracefulTermination) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesGracefulTermination
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesHostPort
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesHostPort
type KubeProxyReplacementFeaturesHostPort struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features host port
func (m *KubeProxyReplacementFeaturesHostPort) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features host port based on context it is used
func (m *KubeProxyReplacementFeaturesHostPort) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostPort) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostPort) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesHostPort
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesHostReachableServices
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesHostReachableServices
type KubeProxyReplacementFeaturesHostReachableServices struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// protocols
Protocols []string `json:"protocols"`
}
// Validate validates this kube proxy replacement features host reachable services
func (m *KubeProxyReplacementFeaturesHostReachableServices) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features host reachable services based on context it is used
func (m *KubeProxyReplacementFeaturesHostReachableServices) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostReachableServices) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostReachableServices) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesHostReachableServices
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNat46X64
type KubeProxyReplacementFeaturesNat46X64 struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// gateway
Gateway *KubeProxyReplacementFeaturesNat46X64Gateway `json:"gateway,omitempty"`
// service
Service *KubeProxyReplacementFeaturesNat46X64Service `json:"service,omitempty"`
}
// Validate validates this kube proxy replacement features nat46 x64
func (m *KubeProxyReplacementFeaturesNat46X64) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateGateway(formats); err != nil {
res = append(res, err)
}
if err := m.validateService(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) validateGateway(formats strfmt.Registry) error {
if swag.IsZero(m.Gateway) { // not required
return nil
}
if m.Gateway != nil {
if err := m.Gateway.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) validateService(formats strfmt.Registry) error {
if swag.IsZero(m.Service) { // not required
return nil
}
if m.Service != nil {
if err := m.Service.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "service")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "service")
}
return err
}
}
return nil
}
// ContextValidate validate this kube proxy replacement features nat46 x64 based on the context it is used
func (m *KubeProxyReplacementFeaturesNat46X64) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateGateway(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateService(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateGateway(ctx context.Context, formats strfmt.Registry) error {
if m.Gateway != nil {
if swag.IsZero(m.Gateway) { // not required
return nil
}
if err := m.Gateway.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateService(ctx context.Context, formats strfmt.Registry) error {
if m.Service != nil {
if swag.IsZero(m.Service) { // not required
return nil
}
if err := m.Service.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "service")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "service")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNat46X64
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64Gateway
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNat46X64Gateway
type KubeProxyReplacementFeaturesNat46X64Gateway struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// prefixes
Prefixes []string `json:"prefixes"`
}
// Validate validates this kube proxy replacement features nat46 x64 gateway
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features nat46 x64 gateway based on context it is used
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNat46X64Gateway
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64Service
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNat46X64Service
type KubeProxyReplacementFeaturesNat46X64Service struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features nat46 x64 service
func (m *KubeProxyReplacementFeaturesNat46X64Service) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features nat46 x64 service based on context it is used
func (m *KubeProxyReplacementFeaturesNat46X64Service) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Service) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Service) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNat46X64Service
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNodePort
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNodePort
type KubeProxyReplacementFeaturesNodePort struct {
// acceleration
// Enum: [None Native Generic Best-Effort]
Acceleration string `json:"acceleration,omitempty"`
// algorithm
// Enum: [Random Maglev]
Algorithm string `json:"algorithm,omitempty"`
// dsr mode
// Enum: [IP Option/Extension IPIP Geneve]
DsrMode string `json:"dsrMode,omitempty"`
// enabled
Enabled bool `json:"enabled,omitempty"`
// lut size
LutSize int64 `json:"lutSize,omitempty"`
// mode
// Enum: [SNAT DSR Hybrid]
Mode string `json:"mode,omitempty"`
// port max
PortMax int64 `json:"portMax,omitempty"`
// port min
PortMin int64 `json:"portMin,omitempty"`
}
// Validate validates this kube proxy replacement features node port
func (m *KubeProxyReplacementFeaturesNodePort) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAcceleration(formats); err != nil {
res = append(res, err)
}
if err := m.validateAlgorithm(formats); err != nil {
res = append(res, err)
}
if err := m.validateDsrMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["None","Native","Generic","Best-Effort"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum = append(kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortAccelerationNone captures enum value "None"
KubeProxyReplacementFeaturesNodePortAccelerationNone string = "None"
// KubeProxyReplacementFeaturesNodePortAccelerationNative captures enum value "Native"
KubeProxyReplacementFeaturesNodePortAccelerationNative string = "Native"
// KubeProxyReplacementFeaturesNodePortAccelerationGeneric captures enum value "Generic"
KubeProxyReplacementFeaturesNodePortAccelerationGeneric string = "Generic"
// KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort captures enum value "Best-Effort"
KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort string = "Best-Effort"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateAccelerationEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateAcceleration(formats strfmt.Registry) error {
if swag.IsZero(m.Acceleration) { // not required
return nil
}
// value enum
if err := m.validateAccelerationEnum("features"+"."+"nodePort"+"."+"acceleration", "body", m.Acceleration); err != nil {
return err
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Random","Maglev"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum = append(kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortAlgorithmRandom captures enum value "Random"
KubeProxyReplacementFeaturesNodePortAlgorithmRandom string = "Random"
// KubeProxyReplacementFeaturesNodePortAlgorithmMaglev captures enum value "Maglev"
KubeProxyReplacementFeaturesNodePortAlgorithmMaglev string = "Maglev"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithmEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithm(formats strfmt.Registry) error {
if swag.IsZero(m.Algorithm) { // not required
return nil
}
// value enum
if err := m.validateAlgorithmEnum("features"+"."+"nodePort"+"."+"algorithm", "body", m.Algorithm); err != nil {
return err
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["IP Option/Extension","IPIP","Geneve"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension captures enum value "IP Option/Extension"
KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension string = "IP Option/Extension"
// KubeProxyReplacementFeaturesNodePortDsrModeIPIP captures enum value "IPIP"
KubeProxyReplacementFeaturesNodePortDsrModeIPIP string = "IPIP"
// KubeProxyReplacementFeaturesNodePortDsrModeGeneve captures enum value "Geneve"
KubeProxyReplacementFeaturesNodePortDsrModeGeneve string = "Geneve"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateDsrModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateDsrMode(formats strfmt.Registry) error {
if swag.IsZero(m.DsrMode) { // not required
return nil
}
// value enum
if err := m.validateDsrModeEnum("features"+"."+"nodePort"+"."+"dsrMode", "body", m.DsrMode); err != nil {
return err
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["SNAT","DSR","Hybrid"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeModePropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortModeSNAT captures enum value "SNAT"
KubeProxyReplacementFeaturesNodePortModeSNAT string = "SNAT"
// KubeProxyReplacementFeaturesNodePortModeDSR captures enum value "DSR"
KubeProxyReplacementFeaturesNodePortModeDSR string = "DSR"
// KubeProxyReplacementFeaturesNodePortModeHybrid captures enum value "Hybrid"
KubeProxyReplacementFeaturesNodePortModeHybrid string = "Hybrid"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("features"+"."+"nodePort"+"."+"mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this kube proxy replacement features node port based on context it is used
func (m *KubeProxyReplacementFeaturesNodePort) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNodePort) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNodePort) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNodePort
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
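// Illustrative sketch (not part of the generated code): the generated enum
// constants above can be used when building a NodePort feature status by
// hand, keeping Validate happy without hard-coding string literals.
// strfmt.Default is assumed; the port range values are placeholders.
func exampleNodePortFromEnumConstants() error {
	np := &KubeProxyReplacementFeaturesNodePort{
		Enabled:      true,
		Acceleration: KubeProxyReplacementFeaturesNodePortAccelerationNative,
		Algorithm:    KubeProxyReplacementFeaturesNodePortAlgorithmMaglev,
		Mode:         KubeProxyReplacementFeaturesNodePortModeSNAT,
		PortMin:      30000,
		PortMax:      32767,
	}
	return np.Validate(strfmt.Default) // nil: all enum fields use allowed values
}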
// KubeProxyReplacementFeaturesSessionAffinity
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesSessionAffinity
type KubeProxyReplacementFeaturesSessionAffinity struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features session affinity
func (m *KubeProxyReplacementFeaturesSessionAffinity) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features session affinity based on context it is used
func (m *KubeProxyReplacementFeaturesSessionAffinity) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSessionAffinity) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSessionAffinity) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesSessionAffinity
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesSocketLB
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesSocketLB
type KubeProxyReplacementFeaturesSocketLB struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features socket l b
func (m *KubeProxyReplacementFeaturesSocketLB) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features socket l b based on context it is used
func (m *KubeProxyReplacementFeaturesSocketLB) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLB) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLB) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesSocketLB
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesSocketLBTracing
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesSocketLBTracing
type KubeProxyReplacementFeaturesSocketLBTracing struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features socket l b tracing
func (m *KubeProxyReplacementFeaturesSocketLBTracing) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features socket l b tracing based on context it is used
func (m *KubeProxyReplacementFeaturesSocketLBTracing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLBTracing) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLBTracing) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesSocketLBTracing
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// L4Policy L4 endpoint policy
//
// +k8s:deepcopy-gen=true
//
// swagger:model L4Policy
type L4Policy struct {
// List of L4 egress rules
Egress []*PolicyRule `json:"egress"`
// List of L4 ingress rules
Ingress []*PolicyRule `json:"ingress"`
}
// Validate validates this l4 policy
func (m *L4Policy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEgress(formats); err != nil {
res = append(res, err)
}
if err := m.validateIngress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *L4Policy) validateEgress(formats strfmt.Registry) error {
if swag.IsZero(m.Egress) { // not required
return nil
}
for i := 0; i < len(m.Egress); i++ {
if swag.IsZero(m.Egress[i]) { // not required
continue
}
if m.Egress[i] != nil {
if err := m.Egress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *L4Policy) validateIngress(formats strfmt.Registry) error {
if swag.IsZero(m.Ingress) { // not required
return nil
}
for i := 0; i < len(m.Ingress); i++ {
if swag.IsZero(m.Ingress[i]) { // not required
continue
}
if m.Ingress[i] != nil {
if err := m.Ingress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this l4 policy based on the context it is used
func (m *L4Policy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateEgress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIngress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *L4Policy) contextValidateEgress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Egress); i++ {
if m.Egress[i] != nil {
if swag.IsZero(m.Egress[i]) { // not required
continue
}
if err := m.Egress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *L4Policy) contextValidateIngress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Ingress); i++ {
if m.Ingress[i] != nil {
if swag.IsZero(m.Ingress[i]) { // not required
continue
}
if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *L4Policy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *L4Policy) UnmarshalBinary(b []byte) error {
var res L4Policy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
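// Illustrative sketch (not part of the generated code): L4Policy validates
// each egress/ingress PolicyRule in turn and reports failures with
// index-based error names such as "egress.0". Nil slice entries are skipped.
// strfmt.Default is assumed as the format registry.
func exampleL4PolicyValidation() error {
	p := &L4Policy{
		Egress:  []*PolicyRule{nil}, // nil entries are tolerated
		Ingress: []*PolicyRule{},
	}
	return p.Validate(strfmt.Default)
}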
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LRPBackend Pod backend of an LRP
//
// swagger:model LRPBackend
type LRPBackend struct {
// backend address
BackendAddress *BackendAddress `json:"backend-address,omitempty"`
// Namespace and name of the backend pod
PodID string `json:"pod-id,omitempty"`
}
// Validate validates this l r p backend
func (m *LRPBackend) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBackendAddress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPBackend) validateBackendAddress(formats strfmt.Registry) error {
if swag.IsZero(m.BackendAddress) { // not required
return nil
}
if m.BackendAddress != nil {
if err := m.BackendAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-address")
}
return err
}
}
return nil
}
// ContextValidate validate this l r p backend based on the context it is used
func (m *LRPBackend) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBackendAddress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPBackend) contextValidateBackendAddress(ctx context.Context, formats strfmt.Registry) error {
if m.BackendAddress != nil {
if swag.IsZero(m.BackendAddress) { // not required
return nil
}
if err := m.BackendAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-address")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *LRPBackend) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LRPBackend) UnmarshalBinary(b []byte) error {
var res LRPBackend
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
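// Illustrative sketch (not part of the generated code): an LRPBackend with no
// backend-address passes Validate, since the nested BackendAddress is only
// validated when present. The pod-id value is a made-up example.
func exampleLRPBackendValidation() error {
	b := &LRPBackend{PodID: "default/nginx-0"}
	return b.Validate(strfmt.Default) // nil: backend-address is optional here
}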
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LRPSpec Configuration of an LRP
//
// swagger:model LRPSpec
type LRPSpec struct {
// mapping of frontends to pod backends
FrontendMappings []*FrontendMapping `json:"frontend-mappings"`
// LRP frontend type
FrontendType string `json:"frontend-type,omitempty"`
// LRP config type
LrpType string `json:"lrp-type,omitempty"`
// LRP service name
Name string `json:"name,omitempty"`
// LRP service namespace
Namespace string `json:"namespace,omitempty"`
// matching k8s service namespace and name
ServiceID string `json:"service-id,omitempty"`
// Unique identification
UID string `json:"uid,omitempty"`
}
// Validate validates this l r p spec
func (m *LRPSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFrontendMappings(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPSpec) validateFrontendMappings(formats strfmt.Registry) error {
if swag.IsZero(m.FrontendMappings) { // not required
return nil
}
for i := 0; i < len(m.FrontendMappings); i++ {
if swag.IsZero(m.FrontendMappings[i]) { // not required
continue
}
if m.FrontendMappings[i] != nil {
if err := m.FrontendMappings[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this l r p spec based on the context it is used
func (m *LRPSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFrontendMappings(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPSpec) contextValidateFrontendMappings(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.FrontendMappings); i++ {
if m.FrontendMappings[i] != nil {
if swag.IsZero(m.FrontendMappings[i]) { // not required
continue
}
if err := m.FrontendMappings[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *LRPSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LRPSpec) UnmarshalBinary(b []byte) error {
var res LRPSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
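// Illustrative sketch (not part of the generated code): frontend-mappings are
// validated element by element with index-based error names such as
// "frontend-mappings.0", and a spec with no mappings passes validation.
// The identifiers below are placeholders, not real LRP values.
func exampleLRPSpecValidation() error {
	s := &LRPSpec{
		Name:      "lrp-sample",
		Namespace: "default",
		LrpType:   "service",
	}
	return s.Validate(strfmt.Default)
}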
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Label Label is Cilium's representation of a container label
//
// swagger:model Label
type Label struct {
// key
Key string `json:"key,omitempty"`
// Source can be one of the above values (e.g. LabelSourceContainer)
Source string `json:"source,omitempty"`
// value
Value string `json:"value,omitempty"`
}
// Validate validates this label
func (m *Label) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this label based on context it is used
func (m *Label) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Label) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Label) UnmarshalBinary(b []byte) error {
var res Label
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
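// Illustrative sketch (not part of the generated code): a Label carries a
// key/value pair plus its source, and MarshalBinary emits the JSON form
// defined by the struct tags above. The "k8s" source string is an example
// value, not a constant exported by this package.
func exampleLabelJSON() ([]byte, error) {
	l := &Label{Key: "app", Value: "nginx", Source: "k8s"}
	return l.MarshalBinary() // JSON object with "key", "source", "value" fields
}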
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelArray LabelArray is an array of labels forming a set
//
// swagger:model LabelArray
type LabelArray []*Label
// Validate validates this label array
func (m LabelArray) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this label array based on the context it is used
func (m LabelArray) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
continue
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
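// Illustrative sketch (not part of the generated code): LabelArray is a slice
// type, so Validate is called on the value itself rather than on a pointer,
// and element errors are named by their index. strfmt.Default is assumed as
// the format registry.
func exampleLabelArrayValidation() error {
	arr := LabelArray{
		{Key: "app", Value: "nginx", Source: "k8s"},
		nil, // nil entries are skipped during validation
	}
	return arr.Validate(strfmt.Default)
}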
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfiguration Label configuration of an endpoint
//
// swagger:model LabelConfiguration
type LabelConfiguration struct {
// The user provided desired configuration
Spec *LabelConfigurationSpec `json:"spec,omitempty"`
// The current configuration
Status *LabelConfigurationStatus `json:"status,omitempty"`
}
// Validate validates this label configuration
func (m *LabelConfiguration) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfiguration) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *LabelConfiguration) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this label configuration based on the context it is used
func (m *LabelConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *LabelConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *LabelConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LabelConfiguration) UnmarshalBinary(b []byte) error {
var res LabelConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
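// Illustrative sketch (not part of the generated code): LabelConfiguration
// pairs the user-desired Spec with the realized Status; both are optional and
// only validated when set. strfmt.Default is assumed as the format registry.
func exampleLabelConfigurationValidation() error {
	c := &LabelConfiguration{
		Spec:   &LabelConfigurationSpec{},
		Status: nil, // status is optional and skipped when absent
	}
	return c.Validate(strfmt.Default)
}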
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfigurationSpec User desired Label configuration of an endpoint
//
// swagger:model LabelConfigurationSpec
type LabelConfigurationSpec struct {
// Custom labels in addition to orchestration system labels.
User Labels `json:"user,omitempty"`
}
// Validate validates this label configuration spec
func (m *LabelConfigurationSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateUser(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationSpec) validateUser(formats strfmt.Registry) error {
if swag.IsZero(m.User) { // not required
return nil
}
if err := m.User.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("user")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("user")
}
return err
}
return nil
}
// ContextValidate validate this label configuration spec based on the context it is used
func (m *LabelConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateUser(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationSpec) contextValidateUser(ctx context.Context, formats strfmt.Registry) error {
if err := m.User.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("user")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("user")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *LabelConfigurationSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LabelConfigurationSpec) UnmarshalBinary(b []byte) error {
var res LabelConfigurationSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
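// exampleLabelConfigurationSpec is a hand-written, illustrative sketch and not
// part of the go-swagger generated API. It shows how user-supplied labels might
// be attached to a LabelConfigurationSpec and validated; the label values are
// assumptions chosen for the example.
func exampleLabelConfigurationSpec() error {
	spec := &LabelConfigurationSpec{
		// Labels is a plain []string whose Validate is a no-op, so this call
		// always succeeds; the example only demonstrates the intended usage.
		User: Labels{"env:prod", "team:billing"},
	}
	return spec.Validate(strfmt.Default)
}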
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfigurationStatus Labels and label configuration of an endpoint
//
// swagger:model LabelConfigurationStatus
type LabelConfigurationStatus struct {
// All labels derived from the orchestration system
Derived Labels `json:"derived,omitempty"`
// Labels derived from orchestration system which have been disabled.
Disabled Labels `json:"disabled,omitempty"`
// The current configuration
Realized *LabelConfigurationSpec `json:"realized,omitempty"`
// Labels derived from orchestration system that are used in computing a security identity
SecurityRelevant Labels `json:"security-relevant,omitempty"`
}
// Validate validates this label configuration status
func (m *LabelConfigurationStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDerived(formats); err != nil {
res = append(res, err)
}
if err := m.validateDisabled(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if err := m.validateSecurityRelevant(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationStatus) validateDerived(formats strfmt.Registry) error {
if swag.IsZero(m.Derived) { // not required
return nil
}
if err := m.Derived.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("derived")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("derived")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) validateDisabled(formats strfmt.Registry) error {
if swag.IsZero(m.Disabled) { // not required
return nil
}
if err := m.Disabled.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("disabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("disabled")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *LabelConfigurationStatus) validateSecurityRelevant(formats strfmt.Registry) error {
if swag.IsZero(m.SecurityRelevant) { // not required
return nil
}
if err := m.SecurityRelevant.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("security-relevant")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("security-relevant")
}
return err
}
return nil
}
// ContextValidate validates this label configuration status based on the context in which it is used
func (m *LabelConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDerived(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDisabled(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSecurityRelevant(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateDerived(ctx context.Context, formats strfmt.Registry) error {
if err := m.Derived.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("derived")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("derived")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateDisabled(ctx context.Context, formats strfmt.Registry) error {
if err := m.Disabled.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("disabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("disabled")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateSecurityRelevant(ctx context.Context, formats strfmt.Registry) error {
if err := m.SecurityRelevant.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("security-relevant")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("security-relevant")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *LabelConfigurationStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LabelConfigurationStatus) UnmarshalBinary(b []byte) error {
var res LabelConfigurationStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Labels Set of labels
//
// swagger:model Labels
type Labels []string
// Validate validates these labels
func (m Labels) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates these labels based on the context in which they are used
func (m Labels) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// MapEvent Event on Map
//
// swagger:model MapEvent
type MapEvent struct {
// Action type for event
// Enum: [update delete]
Action string `json:"action,omitempty"`
// Desired action to be performed after this event
// Enum: [ok insert delete]
DesiredAction string `json:"desired-action,omitempty"`
// Map key on which the event occurred
Key string `json:"key,omitempty"`
// Last error seen while performing desired action
LastError string `json:"last-error,omitempty"`
// Timestamp when the event occurred
// Format: date-time
Timestamp strfmt.DateTime `json:"timestamp,omitempty"`
// Map value on which the event occurred
Value string `json:"value,omitempty"`
}
// Validate validates this map event
func (m *MapEvent) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAction(formats); err != nil {
res = append(res, err)
}
if err := m.validateDesiredAction(formats); err != nil {
res = append(res, err)
}
if err := m.validateTimestamp(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var mapEventTypeActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["update","delete"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
mapEventTypeActionPropEnum = append(mapEventTypeActionPropEnum, v)
}
}
const (
// MapEventActionUpdate captures enum value "update"
MapEventActionUpdate string = "update"
// MapEventActionDelete captures enum value "delete"
MapEventActionDelete string = "delete"
)
// prop value enum
func (m *MapEvent) validateActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, mapEventTypeActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *MapEvent) validateAction(formats strfmt.Registry) error {
if swag.IsZero(m.Action) { // not required
return nil
}
// value enum
if err := m.validateActionEnum("action", "body", m.Action); err != nil {
return err
}
return nil
}
var mapEventTypeDesiredActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","insert","delete"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
mapEventTypeDesiredActionPropEnum = append(mapEventTypeDesiredActionPropEnum, v)
}
}
const (
// MapEventDesiredActionOk captures enum value "ok"
MapEventDesiredActionOk string = "ok"
// MapEventDesiredActionInsert captures enum value "insert"
MapEventDesiredActionInsert string = "insert"
// MapEventDesiredActionDelete captures enum value "delete"
MapEventDesiredActionDelete string = "delete"
)
// prop value enum
func (m *MapEvent) validateDesiredActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, mapEventTypeDesiredActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *MapEvent) validateDesiredAction(formats strfmt.Registry) error {
if swag.IsZero(m.DesiredAction) { // not required
return nil
}
// value enum
if err := m.validateDesiredActionEnum("desired-action", "body", m.DesiredAction); err != nil {
return err
}
return nil
}
func (m *MapEvent) validateTimestamp(formats strfmt.Registry) error {
if swag.IsZero(m.Timestamp) { // not required
return nil
}
if err := validate.FormatOf("timestamp", "body", "date-time", m.Timestamp.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this map event based on the context in which it is used
func (m *MapEvent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MapEvent) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MapEvent) UnmarshalBinary(b []byte) error {
var res MapEvent
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
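// exampleMapEventValidation is a hand-written, illustrative sketch and not part
// of the generated API. It shows how the generated enum constants and Validate
// might be used together; the key and action values are assumptions.
func exampleMapEventValidation() error {
	ev := &MapEvent{
		// Using the generated constants avoids typos in the enum strings.
		Action:        MapEventActionUpdate,
		DesiredAction: MapEventDesiredActionOk,
		Key:           "example-key",
	}
	// Validate enforces the Enum and date-time Format constraints declared in
	// the swagger model.
	return ev.Validate(strfmt.Default)
}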
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Masquerading Status of masquerading
//
// +k8s:deepcopy-gen=true
//
// swagger:model Masquerading
type Masquerading struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// enabled protocols
EnabledProtocols *MasqueradingEnabledProtocols `json:"enabledProtocols,omitempty"`
// Is BPF ip-masq-agent enabled
IPMasqAgent bool `json:"ip-masq-agent,omitempty"`
// mode
// Enum: [BPF iptables]
Mode string `json:"mode,omitempty"`
// This field is obsolete, please use snat-exclusion-cidr-v4 or snat-exclusion-cidr-v6.
SnatExclusionCidr string `json:"snat-exclusion-cidr,omitempty"`
// SnatExclusionCIDRv4 exempts SNAT from being performed on any packet sent to
// an IPv4 address that belongs to this CIDR.
SnatExclusionCidrV4 string `json:"snat-exclusion-cidr-v4,omitempty"`
// SnatExclusionCIDRv6 exempts SNAT from being performed on any packet sent to
// an IPv6 address that belongs to this CIDR.
// For IPv6 we only do masquerading in iptables mode.
SnatExclusionCidrV6 string `json:"snat-exclusion-cidr-v6,omitempty"`
}
// Validate validates this masquerading
func (m *Masquerading) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEnabledProtocols(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Masquerading) validateEnabledProtocols(formats strfmt.Registry) error {
if swag.IsZero(m.EnabledProtocols) { // not required
return nil
}
if m.EnabledProtocols != nil {
if err := m.EnabledProtocols.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("enabledProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("enabledProtocols")
}
return err
}
}
return nil
}
var masqueradingTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["BPF","iptables"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
masqueradingTypeModePropEnum = append(masqueradingTypeModePropEnum, v)
}
}
const (
// MasqueradingModeBPF captures enum value "BPF"
MasqueradingModeBPF string = "BPF"
// MasqueradingModeIptables captures enum value "iptables"
MasqueradingModeIptables string = "iptables"
)
// prop value enum
func (m *Masquerading) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, masqueradingTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Masquerading) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this masquerading based on the context in which it is used
func (m *Masquerading) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateEnabledProtocols(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Masquerading) contextValidateEnabledProtocols(ctx context.Context, formats strfmt.Registry) error {
if m.EnabledProtocols != nil {
if swag.IsZero(m.EnabledProtocols) { // not required
return nil
}
if err := m.EnabledProtocols.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("enabledProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("enabledProtocols")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Masquerading) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Masquerading) UnmarshalBinary(b []byte) error {
var res Masquerading
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// MasqueradingEnabledProtocols Is masquerading enabled
//
// swagger:model MasqueradingEnabledProtocols
type MasqueradingEnabledProtocols struct {
// Is masquerading enabled for IPv4 traffic
IPV4 bool `json:"ipv4,omitempty"`
// Is masquerading enabled for IPv6 traffic
IPV6 bool `json:"ipv6,omitempty"`
}
// Validate validates this masquerading enabled protocols
func (m *MasqueradingEnabledProtocols) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this masquerading enabled protocols based on the context in which it is used
func (m *MasqueradingEnabledProtocols) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MasqueradingEnabledProtocols) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MasqueradingEnabledProtocols) UnmarshalBinary(b []byte) error {
var res MasqueradingEnabledProtocols
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
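// exampleMasqueradingStatus is a hand-written, illustrative sketch and not part
// of the generated API. It shows how the nested EnabledProtocols model is wired
// into Masquerading and how the mode enum is checked; the values are assumptions.
func exampleMasqueradingStatus() error {
	m := &Masquerading{
		Enabled: true,
		// MasqueradingModeBPF is one of the two generated enum values ("BPF", "iptables").
		Mode:             MasqueradingModeBPF,
		EnabledProtocols: &MasqueradingEnabledProtocols{IPV4: true, IPV6: false},
	}
	// Validate checks the mode against the enum and recurses into EnabledProtocols.
	return m.Validate(strfmt.Default)
}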
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// MessageForwardingStatistics Statistics of a message forwarding entity
//
// swagger:model MessageForwardingStatistics
type MessageForwardingStatistics struct {
// Number of messages denied
Denied int64 `json:"denied,omitempty"`
// Number of errors while parsing messages
Error int64 `json:"error,omitempty"`
// Number of messages forwarded
Forwarded int64 `json:"forwarded,omitempty"`
// Number of messages received
Received int64 `json:"received,omitempty"`
}
// Validate validates this message forwarding statistics
func (m *MessageForwardingStatistics) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this message forwarding statistics based on the context in which it is used
func (m *MessageForwardingStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MessageForwardingStatistics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MessageForwardingStatistics) UnmarshalBinary(b []byte) error {
var res MessageForwardingStatistics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Metric Metric information
//
// swagger:model Metric
type Metric struct {
// Labels of the metric
Labels map[string]string `json:"labels,omitempty"`
// Name of the metric
Name string `json:"name,omitempty"`
// Value of the metric
Value float64 `json:"value,omitempty"`
}
// Validate validates this metric
func (m *Metric) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this metric based on the context in which it is used
func (m *Metric) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Metric) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Metric) UnmarshalBinary(b []byte) error {
var res Metric
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// MonitorStatus Status of the node monitor
//
// swagger:model MonitorStatus
type MonitorStatus struct {
// Number of CPUs to listen on for events.
Cpus int64 `json:"cpus,omitempty"`
// Number of samples lost by perf.
Lost int64 `json:"lost,omitempty"`
// Number of pages used for the perf ring buffer.
Npages int64 `json:"npages,omitempty"`
// Page size used for the perf ring buffer.
Pagesize int64 `json:"pagesize,omitempty"`
// Number of unknown samples.
Unknown int64 `json:"unknown,omitempty"`
}
// Validate validates this monitor status
func (m *MonitorStatus) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this monitor status based on the context in which it is used
func (m *MonitorStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MonitorStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MonitorStatus) UnmarshalBinary(b []byte) error {
var res MonitorStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NameManager Internal state about DNS names in relation to policy subsystem
//
// swagger:model NameManager
type NameManager struct {
// Names to poll for DNS Poller
DNSPollNames []string `json:"DNSPollNames"`
// Mapping of FQDNSelectors to corresponding regular expressions
FQDNPolicySelectors []*SelectorEntry `json:"FQDNPolicySelectors"`
}
// Validate validates this name manager
func (m *NameManager) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFQDNPolicySelectors(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NameManager) validateFQDNPolicySelectors(formats strfmt.Registry) error {
if swag.IsZero(m.FQDNPolicySelectors) { // not required
return nil
}
for i := 0; i < len(m.FQDNPolicySelectors); i++ {
if swag.IsZero(m.FQDNPolicySelectors[i]) { // not required
continue
}
if m.FQDNPolicySelectors[i] != nil {
if err := m.FQDNPolicySelectors[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this name manager based on the context in which it is used
func (m *NameManager) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFQDNPolicySelectors(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NameManager) contextValidateFQDNPolicySelectors(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.FQDNPolicySelectors); i++ {
if m.FQDNPolicySelectors[i] != nil {
if swag.IsZero(m.FQDNPolicySelectors[i]) { // not required
continue
}
if err := m.FQDNPolicySelectors[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *NameManager) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NameManager) UnmarshalBinary(b []byte) error {
var res NameManager
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NamedPorts List of named Layer 4 port and protocol pairs which will be used in Network
// Policy specs.
//
// +deepequal-gen=true
// +k8s:deepcopy-gen=true
//
// swagger:model NamedPorts
type NamedPorts []*Port
// Validate validates these named ports
func (m NamedPorts) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates these named ports based on the context in which they are used
func (m NamedPorts) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
continue
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
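// exampleNamedPortsValidation is a hand-written, illustrative sketch and not
// part of the generated API. It shows that NamedPorts validates each *Port
// element and reports failures under the element's slice index; the ports
// listed here are assumptions.
func exampleNamedPortsValidation() error {
	ports := NamedPorts{
		{Name: "http", Port: 80, Protocol: PortProtocolTCP},
		{Name: "dns", Port: 53, Protocol: PortProtocolUDP},
	}
	return ports.Validate(strfmt.Default)
}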
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeAddressing Addressing information of a node for all address families
//
// +k8s:deepcopy-gen=true
//
// swagger:model NodeAddressing
type NodeAddressing struct {
// ipv4
IPV4 *NodeAddressingElement `json:"ipv4,omitempty"`
// ipv6
IPV6 *NodeAddressingElement `json:"ipv6,omitempty"`
}
// Validate validates this node addressing
func (m *NodeAddressing) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIPV4(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV6(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeAddressing) validateIPV4(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if m.IPV4 != nil {
if err := m.IPV4.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *NodeAddressing) validateIPV6(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if m.IPV6 != nil {
if err := m.IPV6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// ContextValidate validates this node addressing based on the context in which it is used
func (m *NodeAddressing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateIPV4(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV6(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeAddressing) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error {
if m.IPV4 != nil {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if err := m.IPV4.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *NodeAddressing) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error {
if m.IPV6 != nil {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if err := m.IPV6.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *NodeAddressing) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeAddressing) UnmarshalBinary(b []byte) error {
var res NodeAddressing
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeAddressingElement Addressing information
//
// swagger:model NodeAddressingElement
type NodeAddressingElement struct {
// Node address type, one of HostName, ExternalIP or InternalIP
AddressType string `json:"address-type,omitempty"`
// Address pool to be used for local endpoints
AllocRange string `json:"alloc-range,omitempty"`
// True if address family is enabled
Enabled bool `json:"enabled,omitempty"`
// IP address of node
IP string `json:"ip,omitempty"`
}
// Validate validates this node addressing element
func (m *NodeAddressingElement) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this node addressing element based on the context in which it is used
func (m *NodeAddressingElement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *NodeAddressingElement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeAddressingElement) UnmarshalBinary(b []byte) error {
var res NodeAddressingElement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeElement Known node in the cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model NodeElement
type NodeElement struct {
// Address used for probing cluster connectivity
HealthEndpointAddress *NodeAddressing `json:"health-endpoint-address,omitempty"`
// Source address for Ingress listener
IngressAddress *NodeAddressing `json:"ingress-address,omitempty"`
// Name of the node including the cluster association. This is typically
// <clustername>/<hostname>.
//
Name string `json:"name,omitempty"`
// Primary address used for intra-cluster communication
PrimaryAddress *NodeAddressing `json:"primary-address,omitempty"`
// Alternative addresses assigned to the node
SecondaryAddresses []*NodeAddressingElement `json:"secondary-addresses"`
// Source of the node configuration
Source string `json:"source,omitempty"`
}
// Validate validates this node element
func (m *NodeElement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateHealthEndpointAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateIngressAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validatePrimaryAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateSecondaryAddresses(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeElement) validateHealthEndpointAddress(formats strfmt.Registry) error {
if swag.IsZero(m.HealthEndpointAddress) { // not required
return nil
}
if m.HealthEndpointAddress != nil {
if err := m.HealthEndpointAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health-endpoint-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health-endpoint-address")
}
return err
}
}
return nil
}
func (m *NodeElement) validateIngressAddress(formats strfmt.Registry) error {
if swag.IsZero(m.IngressAddress) { // not required
return nil
}
if m.IngressAddress != nil {
if err := m.IngressAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress-address")
}
return err
}
}
return nil
}
func (m *NodeElement) validatePrimaryAddress(formats strfmt.Registry) error {
if swag.IsZero(m.PrimaryAddress) { // not required
return nil
}
if m.PrimaryAddress != nil {
if err := m.PrimaryAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("primary-address")
}
return err
}
}
return nil
}
func (m *NodeElement) validateSecondaryAddresses(formats strfmt.Registry) error {
if swag.IsZero(m.SecondaryAddresses) { // not required
return nil
}
for i := 0; i < len(m.SecondaryAddresses); i++ {
if swag.IsZero(m.SecondaryAddresses[i]) { // not required
continue
}
if m.SecondaryAddresses[i] != nil {
if err := m.SecondaryAddresses[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this node element based on the context in which it is used
func (m *NodeElement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateHealthEndpointAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIngressAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeElement) contextValidateHealthEndpointAddress(ctx context.Context, formats strfmt.Registry) error {
if m.HealthEndpointAddress != nil {
if swag.IsZero(m.HealthEndpointAddress) { // not required
return nil
}
if err := m.HealthEndpointAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health-endpoint-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health-endpoint-address")
}
return err
}
}
return nil
}
func (m *NodeElement) contextValidateIngressAddress(ctx context.Context, formats strfmt.Registry) error {
if m.IngressAddress != nil {
if swag.IsZero(m.IngressAddress) { // not required
return nil
}
if err := m.IngressAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress-address")
}
return err
}
}
return nil
}
func (m *NodeElement) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error {
if m.PrimaryAddress != nil {
if swag.IsZero(m.PrimaryAddress) { // not required
return nil
}
if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("primary-address")
}
return err
}
}
return nil
}
func (m *NodeElement) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.SecondaryAddresses); i++ {
if m.SecondaryAddresses[i] != nil {
if swag.IsZero(m.SecondaryAddresses[i]) { // not required
continue
}
if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *NodeElement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeElement) UnmarshalBinary(b []byte) error {
var res NodeElement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
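// exampleNodeElementValidation is a hand-written, illustrative sketch and not
// part of the generated API. It shows how errors from nested NodeAddressing
// values are re-rooted under the parent field name (for example
// "primary-address"); the node name and addresses are assumptions.
func exampleNodeElementValidation() error {
	node := &NodeElement{
		Name: "default/worker-1",
		PrimaryAddress: &NodeAddressing{
			IPV4: &NodeAddressingElement{Enabled: true, IP: "10.0.0.1"},
			IPV6: &NodeAddressingElement{Enabled: false},
		},
	}
	return node.Validate(strfmt.Default)
}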
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// NodeID Node ID with associated node IP addresses
//
// swagger:model NodeID
type NodeID struct {
// ID allocated by the agent for the node
// Required: true
ID *int64 `json:"id"`
// IP addresses of the node associated with the ID in the agent
// Required: true
Ips []string `json:"ips"`
}
// Validate validates this node ID
func (m *NodeID) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateID(formats); err != nil {
res = append(res, err)
}
if err := m.validateIps(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeID) validateID(formats strfmt.Registry) error {
if err := validate.Required("id", "body", m.ID); err != nil {
return err
}
return nil
}
func (m *NodeID) validateIps(formats strfmt.Registry) error {
if err := validate.Required("ips", "body", m.Ips); err != nil {
return err
}
return nil
}
// ContextValidate validates this node ID based on the context in which it is used
func (m *NodeID) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *NodeID) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeID) UnmarshalBinary(b []byte) error {
var res NodeID
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
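// exampleNodeIDValidation is a hand-written, illustrative sketch and not part
// of the generated API. It shows why ID is a *int64: required fields must
// distinguish "unset" from the zero value, so the pointer is filled in with
// swag.Int64. The ID and addresses are assumptions.
func exampleNodeIDValidation() error {
	n := &NodeID{
		ID:  swag.Int64(42),
		Ips: []string{"10.0.0.1", "fd00::1"},
	}
	// Validate fails when either required field is left unset.
	return n.Validate(strfmt.Default)
}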
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Policy Policy definition
//
// swagger:model Policy
type Policy struct {
// Policy definition as JSON.
Policy string `json:"policy,omitempty"`
// Revision number of the policy. Incremented each time the policy is
// changed in the agent's repository
//
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this policy
func (m *Policy) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy based on the context in which it is used
func (m *Policy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Policy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Policy) UnmarshalBinary(b []byte) error {
var res Policy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PolicyRule A policy rule including the rule labels it derives from
//
// +k8s:deepcopy-gen=true
//
// swagger:model PolicyRule
type PolicyRule struct {
// The policy rule labels identifying the policy rules this rule derives from
DerivedFromRules [][]string `json:"derived-from-rules"`
// The policy rule as json
Rule string `json:"rule,omitempty"`
// The policy rule labels identifying the policy rules this rule derives from, mapped by selector
RulesBySelector map[string][][]string `json:"rules-by-selector,omitempty"`
}
// Validate validates this policy rule
func (m *PolicyRule) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy rule based on the context in which it is used
func (m *PolicyRule) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *PolicyRule) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PolicyRule) UnmarshalBinary(b []byte) error {
var res PolicyRule
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PolicyTraceResult Response to a policy resolution process
//
// swagger:model PolicyTraceResult
type PolicyTraceResult struct {
// log
Log string `json:"log,omitempty"`
// verdict
Verdict string `json:"verdict,omitempty"`
}
// Validate validates this policy trace result
func (m *PolicyTraceResult) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy trace result based on the context in which it is used
func (m *PolicyTraceResult) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *PolicyTraceResult) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PolicyTraceResult) UnmarshalBinary(b []byte) error {
var res PolicyTraceResult
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Port Layer 4 port / protocol pair
//
// +deepequal-gen=true
//
// swagger:model Port
type Port struct {
// Optional layer 4 port name
Name string `json:"name,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Layer 4 protocol
// Enum: [TCP UDP SCTP ICMP ICMPV6 ANY]
Protocol string `json:"protocol,omitempty"`
}
// Validate validates this port
func (m *Port) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var portTypeProtocolPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["TCP","UDP","SCTP","ICMP","ICMPV6","ANY"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
portTypeProtocolPropEnum = append(portTypeProtocolPropEnum, v)
}
}
const (
// PortProtocolTCP captures enum value "TCP"
PortProtocolTCP string = "TCP"
// PortProtocolUDP captures enum value "UDP"
PortProtocolUDP string = "UDP"
// PortProtocolSCTP captures enum value "SCTP"
PortProtocolSCTP string = "SCTP"
// PortProtocolICMP captures enum value "ICMP"
PortProtocolICMP string = "ICMP"
// PortProtocolICMPV6 captures enum value "ICMPV6"
PortProtocolICMPV6 string = "ICMPV6"
// PortProtocolANY captures enum value "ANY"
PortProtocolANY string = "ANY"
)
// prop value enum
func (m *Port) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, portTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
func (m *Port) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
// ContextValidate validates this port based on the context in which it is used
func (m *Port) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Port) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Port) UnmarshalBinary(b []byte) error {
var res Port
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
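// examplePortProtocolCheck is a hand-written, illustrative sketch and not part
// of the generated API. It contrasts a protocol taken from the generated enum
// constants with an arbitrary string that the enum check rejects; the port
// numbers are assumptions.
func examplePortProtocolCheck() (validErr, invalidErr error) {
	ok := &Port{Name: "http", Port: 80, Protocol: PortProtocolTCP}
	// "GRE" is not in the [TCP UDP SCTP ICMP ICMPV6 ANY] enum, so Validate
	// returns an error for this value.
	bad := &Port{Name: "tunnel", Port: 4754, Protocol: "GRE"}
	return ok.Validate(strfmt.Default), bad.Validate(strfmt.Default)
}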
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Prefilter Collection of endpoints to be served
//
// swagger:model Prefilter
type Prefilter struct {
// spec
Spec *PrefilterSpec `json:"spec,omitempty"`
// status
Status *PrefilterStatus `json:"status,omitempty"`
}
// Validate validates this prefilter
func (m *Prefilter) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Prefilter) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Prefilter) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validates this prefilter based on the context in which it is used
func (m *Prefilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Prefilter) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Prefilter) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Prefilter) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Prefilter) UnmarshalBinary(b []byte) error {
var res Prefilter
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PrefilterSpec CIDR ranges implemented in the Prefilter
//
// swagger:model PrefilterSpec
type PrefilterSpec struct {
// deny
Deny []string `json:"deny"`
// revision
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this prefilter spec
func (m *PrefilterSpec) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this prefilter spec based on the context in which it is used
func (m *PrefilterSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *PrefilterSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PrefilterSpec) UnmarshalBinary(b []byte) error {
var res PrefilterSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PrefilterStatus CIDR ranges implemented in the Prefilter
//
// swagger:model PrefilterStatus
type PrefilterStatus struct {
// realized
Realized *PrefilterSpec `json:"realized,omitempty"`
}
// Validate validates this prefilter status
func (m *PrefilterStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *PrefilterStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validates this prefilter status based on the context in which it is used
func (m *PrefilterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *PrefilterStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *PrefilterStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PrefilterStatus) UnmarshalBinary(b []byte) error {
var res PrefilterStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
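// examplePrefilterRoundTrip is a hand-written, illustrative sketch and not part
// of the generated API. It shows the spec/status split used by these models:
// the desired deny CIDRs live in the spec, and status.realized mirrors the spec
// currently in effect (here the same value is reused). The CIDR and revision
// are assumptions.
func examplePrefilterRoundTrip() error {
	spec := &PrefilterSpec{Deny: []string{"10.0.0.0/8"}, Revision: 3}
	pf := &Prefilter{
		Spec:   spec,
		Status: &PrefilterStatus{Realized: spec},
	}
	return pf.Validate(strfmt.Default)
}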
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ProxyRedirect Configured proxy redirection state
//
// swagger:model ProxyRedirect
type ProxyRedirect struct {
// Name of the proxy redirect
Name string `json:"name,omitempty"`
// Name of the proxy this redirect points to
Proxy string `json:"proxy,omitempty"`
// Host port that this redirect points to
ProxyPort int64 `json:"proxy-port,omitempty"`
}
// Validate validates this proxy redirect
func (m *ProxyRedirect) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this proxy redirect based on the context in which it is used
func (m *ProxyRedirect) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ProxyRedirect) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProxyRedirect) UnmarshalBinary(b []byte) error {
var res ProxyRedirect
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ProxyStatistics Statistics of a set of proxy redirects for an endpoint
//
// +k8s:deepcopy-gen=true
//
// swagger:model ProxyStatistics
type ProxyStatistics struct {
// The port the proxy is listening on
AllocatedProxyPort int64 `json:"allocated-proxy-port,omitempty"`
// Location of where the redirect is installed
// Enum: [ingress egress]
Location string `json:"location,omitempty"`
// The port subject to the redirect
Port int64 `json:"port,omitempty"`
// Name of the L7 protocol
Protocol string `json:"protocol,omitempty"`
// Statistics of this set of proxy redirects
Statistics *RequestResponseStatistics `json:"statistics,omitempty"`
}
// Validate validates this proxy statistics
func (m *ProxyStatistics) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLocation(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatistics(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var proxyStatisticsTypeLocationPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ingress","egress"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
proxyStatisticsTypeLocationPropEnum = append(proxyStatisticsTypeLocationPropEnum, v)
}
}
const (
// ProxyStatisticsLocationIngress captures enum value "ingress"
ProxyStatisticsLocationIngress string = "ingress"
// ProxyStatisticsLocationEgress captures enum value "egress"
ProxyStatisticsLocationEgress string = "egress"
)
// prop value enum
func (m *ProxyStatistics) validateLocationEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, proxyStatisticsTypeLocationPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ProxyStatistics) validateLocation(formats strfmt.Registry) error {
if swag.IsZero(m.Location) { // not required
return nil
}
// value enum
if err := m.validateLocationEnum("location", "body", m.Location); err != nil {
return err
}
return nil
}
func (m *ProxyStatistics) validateStatistics(formats strfmt.Registry) error {
if swag.IsZero(m.Statistics) { // not required
return nil
}
if m.Statistics != nil {
if err := m.Statistics.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statistics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statistics")
}
return err
}
}
return nil
}
// ContextValidate validate this proxy statistics based on the context it is used
func (m *ProxyStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateStatistics(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ProxyStatistics) contextValidateStatistics(ctx context.Context, formats strfmt.Registry) error {
if m.Statistics != nil {
if swag.IsZero(m.Statistics) { // not required
return nil
}
if err := m.Statistics.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statistics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statistics")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ProxyStatistics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProxyStatistics) UnmarshalBinary(b []byte) error {
var res ProxyStatistics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
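// exampleProxyStatisticsValidate is an illustrative sketch, not generated code:
// Location is restricted to the "ingress"/"egress" enum, so using the generated
// constant keeps Validate happy, while an unknown value such as "both" would
// fail the enum check. The port and protocol values are hypothetical.
func exampleProxyStatisticsValidate() error {
	stats := &ProxyStatistics{
		AllocatedProxyPort: 15001,
		Location:           ProxyStatisticsLocationIngress,
		Port:               80,
		Protocol:           "http",
	}
	// strfmt.Default is the stock format registry that go-swagger callers pass in.
	return stats.Validate(strfmt.Default)
}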
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ProxyStatus Status of proxy
//
// +k8s:deepcopy-gen=true
//
// swagger:model ProxyStatus
type ProxyStatus struct {
// Deployment mode of Envoy L7 proxy
// Enum: [embedded external]
EnvoyDeploymentMode string `json:"envoy-deployment-mode,omitempty"`
// IP address that the proxy listens on
IP string `json:"ip,omitempty"`
// Port range used for proxying
PortRange string `json:"port-range,omitempty"`
// Detailed description of configured redirects
Redirects []*ProxyRedirect `json:"redirects"`
// Total number of listening proxy ports
TotalPorts int64 `json:"total-ports,omitempty"`
// Total number of ports configured to redirect to proxies
TotalRedirects int64 `json:"total-redirects,omitempty"`
}
// Validate validates this proxy status
func (m *ProxyStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEnvoyDeploymentMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateRedirects(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var proxyStatusTypeEnvoyDeploymentModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["embedded","external"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
proxyStatusTypeEnvoyDeploymentModePropEnum = append(proxyStatusTypeEnvoyDeploymentModePropEnum, v)
}
}
const (
// ProxyStatusEnvoyDeploymentModeEmbedded captures enum value "embedded"
ProxyStatusEnvoyDeploymentModeEmbedded string = "embedded"
// ProxyStatusEnvoyDeploymentModeExternal captures enum value "external"
ProxyStatusEnvoyDeploymentModeExternal string = "external"
)
// prop value enum
func (m *ProxyStatus) validateEnvoyDeploymentModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, proxyStatusTypeEnvoyDeploymentModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *ProxyStatus) validateEnvoyDeploymentMode(formats strfmt.Registry) error {
if swag.IsZero(m.EnvoyDeploymentMode) { // not required
return nil
}
// value enum
if err := m.validateEnvoyDeploymentModeEnum("envoy-deployment-mode", "body", m.EnvoyDeploymentMode); err != nil {
return err
}
return nil
}
func (m *ProxyStatus) validateRedirects(formats strfmt.Registry) error {
if swag.IsZero(m.Redirects) { // not required
return nil
}
for i := 0; i < len(m.Redirects); i++ {
if swag.IsZero(m.Redirects[i]) { // not required
continue
}
if m.Redirects[i] != nil {
if err := m.Redirects[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("redirects" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("redirects" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this proxy status based on the context it is used
func (m *ProxyStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRedirects(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ProxyStatus) contextValidateRedirects(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Redirects); i++ {
if m.Redirects[i] != nil {
if swag.IsZero(m.Redirects[i]) { // not required
return nil
}
if err := m.Redirects[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("redirects" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("redirects" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ProxyStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProxyStatus) UnmarshalBinary(b []byte) error {
var res ProxyStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
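// exampleProxyStatusNestedValidation is an illustrative sketch, not generated
// code: Validate recurses into every non-nil entry of Redirects, and a failing
// element would be reported under the "redirects.<index>" name. All values
// below are hypothetical.
func exampleProxyStatusNestedValidation() error {
	status := &ProxyStatus{
		EnvoyDeploymentMode: ProxyStatusEnvoyDeploymentModeEmbedded,
		IP:                  "127.0.0.1",
		Redirects: []*ProxyRedirect{
			{Name: "redirect-http", Proxy: "envoy", ProxyPort: 15001},
		},
		TotalRedirects: 1,
	}
	return status.Validate(strfmt.Default)
}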
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Recorder Collection of wildcard filters for pcap recorder
//
// swagger:model Recorder
type Recorder struct {
// spec
Spec *RecorderSpec `json:"spec,omitempty"`
// status
Status *RecorderStatus `json:"status,omitempty"`
}
// Validate validates this recorder
func (m *Recorder) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Recorder) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Recorder) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder based on the context it is used
func (m *Recorder) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Recorder) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Recorder) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Recorder) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Recorder) UnmarshalBinary(b []byte) error {
var res Recorder
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RecorderFilter n-tuple filter to match traffic to be recorded
//
// swagger:model RecorderFilter
type RecorderFilter struct {
// Layer 4 destination port, zero, or (in future) a range
DstPort string `json:"dst-port,omitempty"`
// Layer 3 destination CIDR
DstPrefix string `json:"dst-prefix,omitempty"`
// Layer 4 protocol
// Enum: [TCP UDP SCTP ANY]
Protocol string `json:"protocol,omitempty"`
// Layer 4 source port, zero, or (in future) a range
SrcPort string `json:"src-port,omitempty"`
// Layer 3 source CIDR
SrcPrefix string `json:"src-prefix,omitempty"`
}
// Validate validates this recorder filter
func (m *RecorderFilter) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var recorderFilterTypeProtocolPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["TCP","UDP","SCTP","ANY"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
recorderFilterTypeProtocolPropEnum = append(recorderFilterTypeProtocolPropEnum, v)
}
}
const (
// RecorderFilterProtocolTCP captures enum value "TCP"
RecorderFilterProtocolTCP string = "TCP"
// RecorderFilterProtocolUDP captures enum value "UDP"
RecorderFilterProtocolUDP string = "UDP"
// RecorderFilterProtocolSCTP captures enum value "SCTP"
RecorderFilterProtocolSCTP string = "SCTP"
// RecorderFilterProtocolANY captures enum value "ANY"
RecorderFilterProtocolANY string = "ANY"
)
// prop value enum
func (m *RecorderFilter) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, recorderFilterTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
func (m *RecorderFilter) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
// ContextValidate validates this recorder filter based on context it is used
func (m *RecorderFilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RecorderFilter) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderFilter) UnmarshalBinary(b []byte) error {
var res RecorderFilter
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
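// exampleRecorderFilterProtocolEnum is an illustrative sketch, not generated
// code: Protocol must be one of the TCP/UDP/SCTP/ANY enum values, and the
// check is case-sensitive, so the generated constants are the safe way to
// populate the field. The CIDRs and ports below are hypothetical.
func exampleRecorderFilterProtocolEnum() error {
	valid := &RecorderFilter{
		DstPort:   "80",
		DstPrefix: "10.0.0.0/8",
		Protocol:  RecorderFilterProtocolTCP,
		SrcPort:   "0",
		SrcPrefix: "0.0.0.0/0",
	}
	if err := valid.Validate(strfmt.Default); err != nil {
		return err // not expected: all values above are legal
	}
	// A lowercase protocol fails because the enum check is case-sensitive.
	invalid := &RecorderFilter{Protocol: "tcp"}
	return invalid.Validate(strfmt.Default) // returns a validation error
}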
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderMask Individual mask for pcap recorder
//
// swagger:model RecorderMask
type RecorderMask struct {
// status
Status *RecorderMaskStatus `json:"status,omitempty"`
}
// Validate validates this recorder mask
func (m *RecorderMask) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMask) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder mask based on the context it is used
func (m *RecorderMask) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMask) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderMask) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderMask) UnmarshalBinary(b []byte) error {
var res RecorderMask
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderMaskSpec Configuration of a recorder mask
//
// swagger:model RecorderMaskSpec
type RecorderMaskSpec struct {
// Layer 4 destination port mask
DstPortMask string `json:"dst-port-mask,omitempty"`
// Layer 3 destination IP mask
DstPrefixMask string `json:"dst-prefix-mask,omitempty"`
// Priority of this mask
Priority int64 `json:"priority,omitempty"`
// Layer 4 protocol mask
ProtocolMask string `json:"protocol-mask,omitempty"`
// Layer 4 source port mask
SrcPortMask string `json:"src-port-mask,omitempty"`
// Layer 3 source IP mask
SrcPrefixMask string `json:"src-prefix-mask,omitempty"`
// Number of users of this mask
Users int64 `json:"users,omitempty"`
}
// Validate validates this recorder mask spec
func (m *RecorderMaskSpec) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this recorder mask spec based on context it is used
func (m *RecorderMaskSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RecorderMaskSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderMaskSpec) UnmarshalBinary(b []byte) error {
var res RecorderMaskSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderMaskStatus Configuration of a recorder mask
//
// swagger:model RecorderMaskStatus
type RecorderMaskStatus struct {
// realized
Realized *RecorderMaskSpec `json:"realized,omitempty"`
}
// Validate validates this recorder mask status
func (m *RecorderMaskStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMaskStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder mask status based on the context it is used
func (m *RecorderMaskStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMaskStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderMaskStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderMaskStatus) UnmarshalBinary(b []byte) error {
var res RecorderMaskStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RecorderSpec Configuration of a recorder
//
// swagger:model RecorderSpec
type RecorderSpec struct {
// Maximum packet length or zero for full packet length
CaptureLength int64 `json:"capture-length,omitempty"`
// List of wildcard filters for given recorder
// Required: true
Filters []*RecorderFilter `json:"filters"`
// Unique identification
// Required: true
ID *int64 `json:"id"`
}
// Validate validates this recorder spec
func (m *RecorderSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFilters(formats); err != nil {
res = append(res, err)
}
if err := m.validateID(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderSpec) validateFilters(formats strfmt.Registry) error {
if err := validate.Required("filters", "body", m.Filters); err != nil {
return err
}
for i := 0; i < len(m.Filters); i++ {
if swag.IsZero(m.Filters[i]) { // not required
continue
}
if m.Filters[i] != nil {
if err := m.Filters[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("filters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("filters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *RecorderSpec) validateID(formats strfmt.Registry) error {
if err := validate.Required("id", "body", m.ID); err != nil {
return err
}
return nil
}
// ContextValidate validate this recorder spec based on the context it is used
func (m *RecorderSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFilters(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderSpec) contextValidateFilters(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Filters); i++ {
if m.Filters[i] != nil {
if swag.IsZero(m.Filters[i]) { // not required
return nil
}
if err := m.Filters[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("filters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("filters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderSpec) UnmarshalBinary(b []byte) error {
var res RecorderSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
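// exampleRecorderSpecRequired is an illustrative sketch, not generated code:
// both Filters and ID are required, which is why ID is a *int64 — a nil
// pointer (or a nil Filters slice) makes Validate return a Required error,
// while swag.Int64 yields a pointer to a literal. Values are hypothetical.
func exampleRecorderSpecRequired() error {
	spec := &RecorderSpec{
		CaptureLength: 0, // zero means full packet length
		Filters:       []*RecorderFilter{{Protocol: RecorderFilterProtocolANY}},
		ID:            swag.Int64(1),
	}
	return spec.Validate(strfmt.Default)
}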
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderStatus Configuration of a recorder
//
// swagger:model RecorderStatus
type RecorderStatus struct {
// realized
Realized *RecorderSpec `json:"realized,omitempty"`
}
// Validate validates this recorder status
func (m *RecorderStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder status based on the context it is used
func (m *RecorderStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderStatus) UnmarshalBinary(b []byte) error {
var res RecorderStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RemoteCluster Status of remote cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteCluster
type RemoteCluster struct {
// Cluster configuration exposed by the remote cluster
Config *RemoteClusterConfig `json:"config,omitempty"`
// Indicates whether the connection to the remote kvstore is established
Connected bool `json:"connected,omitempty"`
// Time of last failure that occurred while attempting to reach the cluster
// Format: date-time
LastFailure strfmt.DateTime `json:"last-failure,omitempty"`
// Name of the cluster
Name string `json:"name,omitempty"`
// Number of endpoints in the cluster
NumEndpoints int64 `json:"num-endpoints,omitempty"`
// Number of failures reaching the cluster
NumFailures int64 `json:"num-failures,omitempty"`
// Number of identities in the cluster
NumIdentities int64 `json:"num-identities,omitempty"`
// Number of nodes in the cluster
NumNodes int64 `json:"num-nodes,omitempty"`
// Number of services in the cluster
NumSharedServices int64 `json:"num-shared-services,omitempty"`
// Indicates readiness of the remote cluster
Ready bool `json:"ready,omitempty"`
// Status of the control plane
Status string `json:"status,omitempty"`
// Synchronization status about each resource type
Synced *RemoteClusterSynced `json:"synced,omitempty"`
}
// Validate validates this remote cluster
func (m *RemoteCluster) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateConfig(formats); err != nil {
res = append(res, err)
}
if err := m.validateLastFailure(formats); err != nil {
res = append(res, err)
}
if err := m.validateSynced(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RemoteCluster) validateConfig(formats strfmt.Registry) error {
if swag.IsZero(m.Config) { // not required
return nil
}
if m.Config != nil {
if err := m.Config.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("config")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("config")
}
return err
}
}
return nil
}
func (m *RemoteCluster) validateLastFailure(formats strfmt.Registry) error {
if swag.IsZero(m.LastFailure) { // not required
return nil
}
if err := validate.FormatOf("last-failure", "body", "date-time", m.LastFailure.String(), formats); err != nil {
return err
}
return nil
}
func (m *RemoteCluster) validateSynced(formats strfmt.Registry) error {
if swag.IsZero(m.Synced) { // not required
return nil
}
if m.Synced != nil {
if err := m.Synced.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("synced")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("synced")
}
return err
}
}
return nil
}
// ContextValidate validate this remote cluster based on the context it is used
func (m *RemoteCluster) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateConfig(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSynced(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RemoteCluster) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error {
if m.Config != nil {
if swag.IsZero(m.Config) { // not required
return nil
}
if err := m.Config.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("config")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("config")
}
return err
}
}
return nil
}
func (m *RemoteCluster) contextValidateSynced(ctx context.Context, formats strfmt.Registry) error {
if m.Synced != nil {
if swag.IsZero(m.Synced) { // not required
return nil
}
if err := m.Synced.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("synced")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("synced")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RemoteCluster) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RemoteCluster) UnmarshalBinary(b []byte) error {
var res RemoteCluster
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
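// exampleRemoteClusterLastFailure is an illustrative sketch, not generated
// code: LastFailure is a strfmt.DateTime, and Validate checks it against the
// "date-time" format in the registry; a zero value is skipped by the
// swag.IsZero guard, while a populated one must satisfy the registered
// format. The cluster data below is hypothetical.
func exampleRemoteClusterLastFailure() error {
	rc := &RemoteCluster{
		Name:        "cluster-2",
		Connected:   true,
		Ready:       true,
		LastFailure: strfmt.DateTime{}, // zero value: treated as "not set"
	}
	return rc.Validate(strfmt.Default)
}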
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RemoteClusterConfig Cluster configuration exposed by the remote cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteClusterConfig
type RemoteClusterConfig struct {
// The Cluster ID advertised by the remote cluster
ClusterID int64 `json:"cluster-id,omitempty"`
// Whether the remote cluster information is locally cached by kvstoremesh
Kvstoremesh bool `json:"kvstoremesh,omitempty"`
// Whether the configuration is required to be present
Required bool `json:"required,omitempty"`
// Whether the configuration has been correctly retrieved
Retrieved bool `json:"retrieved,omitempty"`
// Whether the remote cluster supports per-prefix "synced" canaries
SyncCanaries bool `json:"sync-canaries,omitempty"`
}
// Validate validates this remote cluster config
func (m *RemoteClusterConfig) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this remote cluster config based on context it is used
func (m *RemoteClusterConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RemoteClusterConfig) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RemoteClusterConfig) UnmarshalBinary(b []byte) error {
var res RemoteClusterConfig
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RemoteClusterSynced Status of the synchronization with the remote cluster, about each resource
// type. A given resource is considered to be synchronized if the initial
// list of entries has been completely received from the remote cluster, and
// new events are currently being watched.
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteClusterSynced
type RemoteClusterSynced struct {
// Endpoints synchronization status
Endpoints bool `json:"endpoints,omitempty"`
// Identities synchronization status
Identities bool `json:"identities,omitempty"`
// Nodes synchronization status
Nodes bool `json:"nodes,omitempty"`
// Services synchronization status
Services bool `json:"services,omitempty"`
}
// Validate validates this remote cluster synced
func (m *RemoteClusterSynced) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this remote cluster synced based on context it is used
func (m *RemoteClusterSynced) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RemoteClusterSynced) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RemoteClusterSynced) UnmarshalBinary(b []byte) error {
var res RemoteClusterSynced
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RequestResponseStatistics Statistics of a proxy redirect
//
// +k8s:deepcopy-gen=true
//
// swagger:model RequestResponseStatistics
type RequestResponseStatistics struct {
// requests
Requests *MessageForwardingStatistics `json:"requests,omitempty"`
// responses
Responses *MessageForwardingStatistics `json:"responses,omitempty"`
}
// Validate validates this request response statistics
func (m *RequestResponseStatistics) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRequests(formats); err != nil {
res = append(res, err)
}
if err := m.validateResponses(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RequestResponseStatistics) validateRequests(formats strfmt.Registry) error {
if swag.IsZero(m.Requests) { // not required
return nil
}
if m.Requests != nil {
if err := m.Requests.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("requests")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("requests")
}
return err
}
}
return nil
}
func (m *RequestResponseStatistics) validateResponses(formats strfmt.Registry) error {
if swag.IsZero(m.Responses) { // not required
return nil
}
if m.Responses != nil {
if err := m.Responses.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("responses")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("responses")
}
return err
}
}
return nil
}
// ContextValidate validate this request response statistics based on the context it is used
func (m *RequestResponseStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRequests(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateResponses(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RequestResponseStatistics) contextValidateRequests(ctx context.Context, formats strfmt.Registry) error {
if m.Requests != nil {
if swag.IsZero(m.Requests) { // not required
return nil
}
if err := m.Requests.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("requests")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("requests")
}
return err
}
}
return nil
}
func (m *RequestResponseStatistics) contextValidateResponses(ctx context.Context, formats strfmt.Registry) error {
if m.Responses != nil {
if swag.IsZero(m.Responses) { // not required
return nil
}
if err := m.Responses.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("responses")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("responses")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RequestResponseStatistics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RequestResponseStatistics) UnmarshalBinary(b []byte) error {
var res RequestResponseStatistics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Routing Status of routing
//
// +k8s:deepcopy-gen=true
//
// swagger:model Routing
type Routing struct {
// Datapath routing mode for cross-cluster connectivity
// Enum: [Native Tunnel]
InterHostRoutingMode string `json:"inter-host-routing-mode,omitempty"`
// Datapath routing mode for connectivity within the host
// Enum: [BPF Legacy]
IntraHostRoutingMode string `json:"intra-host-routing-mode,omitempty"`
// Tunnel protocol in use for cross-cluster connectivity
TunnelProtocol string `json:"tunnel-protocol,omitempty"`
}
// Validate validates this routing
func (m *Routing) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateInterHostRoutingMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateIntraHostRoutingMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var routingTypeInterHostRoutingModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Native","Tunnel"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
routingTypeInterHostRoutingModePropEnum = append(routingTypeInterHostRoutingModePropEnum, v)
}
}
const (
// RoutingInterHostRoutingModeNative captures enum value "Native"
RoutingInterHostRoutingModeNative string = "Native"
// RoutingInterHostRoutingModeTunnel captures enum value "Tunnel"
RoutingInterHostRoutingModeTunnel string = "Tunnel"
)
// prop value enum
func (m *Routing) validateInterHostRoutingModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, routingTypeInterHostRoutingModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Routing) validateInterHostRoutingMode(formats strfmt.Registry) error {
if swag.IsZero(m.InterHostRoutingMode) { // not required
return nil
}
// value enum
if err := m.validateInterHostRoutingModeEnum("inter-host-routing-mode", "body", m.InterHostRoutingMode); err != nil {
return err
}
return nil
}
var routingTypeIntraHostRoutingModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["BPF","Legacy"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
routingTypeIntraHostRoutingModePropEnum = append(routingTypeIntraHostRoutingModePropEnum, v)
}
}
const (
// RoutingIntraHostRoutingModeBPF captures enum value "BPF"
RoutingIntraHostRoutingModeBPF string = "BPF"
// RoutingIntraHostRoutingModeLegacy captures enum value "Legacy"
RoutingIntraHostRoutingModeLegacy string = "Legacy"
)
// prop value enum
func (m *Routing) validateIntraHostRoutingModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, routingTypeIntraHostRoutingModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Routing) validateIntraHostRoutingMode(formats strfmt.Registry) error {
if swag.IsZero(m.IntraHostRoutingMode) { // not required
return nil
}
// value enum
if err := m.validateIntraHostRoutingModeEnum("intra-host-routing-mode", "body", m.IntraHostRoutingMode); err != nil {
return err
}
return nil
}
// ContextValidate validates this routing based on context it is used
func (m *Routing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Routing) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Routing) UnmarshalBinary(b []byte) error {
var res Routing
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorCache cache of which identities match selectors in the policy repository
//
// swagger:model SelectorCache
type SelectorCache []*SelectorIdentityMapping
// Validate validates this selector cache
func (m SelectorCache) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this selector cache based on the context it is used
func (m SelectorCache) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
return nil
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
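// exampleSelectorCacheValidate is an illustrative sketch, not generated code:
// SelectorCache is itself a slice type, so Validate is invoked on the value
// rather than a pointer and recurses into each SelectorIdentityMapping,
// reporting failures under the element's index. The mapping content is
// hypothetical.
func exampleSelectorCacheValidate() error {
	cache := SelectorCache{
		&SelectorIdentityMapping{
			Identities: []int64{1001, 1002},
			Selector:   "matchLabels:{app: frontend}",
			Users:      1,
		},
	}
	return cache.Validate(strfmt.Default)
}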
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorEntry Mapping of FQDNSelector to corresponding regular expression
//
// swagger:model SelectorEntry
type SelectorEntry struct {
// String representation of regular expression form of FQDNSelector
RegexString string `json:"regexString,omitempty"`
// FQDNSelector in string representation
SelectorString string `json:"selectorString,omitempty"`
}
// Validate validates this selector entry
func (m *SelectorEntry) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this selector entry based on context it is used
func (m *SelectorEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *SelectorEntry) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *SelectorEntry) UnmarshalBinary(b []byte) error {
var res SelectorEntry
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorIdentityMapping mapping of selector to identities which match it
//
// swagger:model SelectorIdentityMapping
type SelectorIdentityMapping struct {
// identities mapping to this selector
Identities []int64 `json:"identities"`
// Labels are the metadata labels associated with the selector
Labels LabelArray `json:"labels,omitempty"`
// string form of selector
Selector string `json:"selector,omitempty"`
// number of users of this selector in the cache
Users int64 `json:"users,omitempty"`
}
// Validate validates this selector identity mapping
func (m *SelectorIdentityMapping) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *SelectorIdentityMapping) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validate this selector identity mapping based on the context it is used
func (m *SelectorIdentityMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *SelectorIdentityMapping) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *SelectorIdentityMapping) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *SelectorIdentityMapping) UnmarshalBinary(b []byte) error {
var res SelectorIdentityMapping
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Service Collection of endpoints to be served
//
// swagger:model Service
type Service struct {
// spec
Spec *ServiceSpec `json:"spec,omitempty"`
// status
Status *ServiceStatus `json:"status,omitempty"`
}
// Validate validates this service
func (m *Service) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Service) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Service) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this service based on the context it is used
func (m *Service) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Service) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Service) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Service) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Service) UnmarshalBinary(b []byte) error {
var res Service
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ServiceSpec Configuration of a service
//
// swagger:model ServiceSpec
type ServiceSpec struct {
// List of backend addresses
BackendAddresses []*BackendAddress `json:"backend-addresses"`
// flags
Flags *ServiceSpecFlags `json:"flags,omitempty"`
// Frontend address
// Required: true
FrontendAddress *FrontendAddress `json:"frontend-address"`
// Unique identification
ID int64 `json:"id,omitempty"`
// Update all services selecting the backends with their given states
// (id and frontend are ignored)
//
UpdateServices bool `json:"updateServices,omitempty"`
}
// Validate validates this service spec
func (m *ServiceSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBackendAddresses(formats); err != nil {
res = append(res, err)
}
if err := m.validateFlags(formats); err != nil {
res = append(res, err)
}
if err := m.validateFrontendAddress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceSpec) validateBackendAddresses(formats strfmt.Registry) error {
if swag.IsZero(m.BackendAddresses) { // not required
return nil
}
for i := 0; i < len(m.BackendAddresses); i++ {
if swag.IsZero(m.BackendAddresses[i]) { // not required
continue
}
if m.BackendAddresses[i] != nil {
if err := m.BackendAddresses[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ServiceSpec) validateFlags(formats strfmt.Registry) error {
if swag.IsZero(m.Flags) { // not required
return nil
}
if m.Flags != nil {
if err := m.Flags.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("flags")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("flags")
}
return err
}
}
return nil
}
func (m *ServiceSpec) validateFrontendAddress(formats strfmt.Registry) error {
if err := validate.Required("frontend-address", "body", m.FrontendAddress); err != nil {
return err
}
if m.FrontendAddress != nil {
if err := m.FrontendAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// ContextValidate validate this service spec based on the context it is used
func (m *ServiceSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBackendAddresses(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFlags(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFrontendAddress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceSpec) contextValidateBackendAddresses(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.BackendAddresses); i++ {
if m.BackendAddresses[i] != nil {
if swag.IsZero(m.BackendAddresses[i]) { // not required
return nil
}
if err := m.BackendAddresses[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ServiceSpec) contextValidateFlags(ctx context.Context, formats strfmt.Registry) error {
if m.Flags != nil {
if swag.IsZero(m.Flags) { // not required
return nil
}
if err := m.Flags.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("flags")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("flags")
}
return err
}
}
return nil
}
func (m *ServiceSpec) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error {
if m.FrontendAddress != nil {
if err := m.FrontendAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ServiceSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ServiceSpec) UnmarshalBinary(b []byte) error {
var res ServiceSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
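// exampleServiceSpecRequiredFrontend is an illustrative sketch, not generated
// code: FrontendAddress is the only required field of ServiceSpec, so leaving
// it nil makes Validate report a required-field error for "frontend-address";
// the remaining fields may stay at their zero values. The ID is hypothetical.
func exampleServiceSpecRequiredFrontend() error {
	spec := &ServiceSpec{
		ID:               1,
		BackendAddresses: nil,
		FrontendAddress:  nil, // missing on purpose: triggers the Required check
	}
	return spec.Validate(strfmt.Default)
}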
// ServiceSpecFlags Optional service configuration flags
//
// swagger:model ServiceSpecFlags
type ServiceSpecFlags struct {
// Service cluster
Cluster string `json:"cluster,omitempty"`
// Service external traffic policy
// Enum: [Cluster Local]
ExtTrafficPolicy string `json:"extTrafficPolicy,omitempty"`
// Service health check node port
HealthCheckNodePort uint16 `json:"healthCheckNodePort,omitempty"`
// Service internal traffic policy
// Enum: [Cluster Local]
IntTrafficPolicy string `json:"intTrafficPolicy,omitempty"`
// Service name (e.g. Kubernetes service name)
Name string `json:"name,omitempty"`
// Service namespace (e.g. Kubernetes namespace)
Namespace string `json:"namespace,omitempty"`
// Service protocol NAT policy
// Enum: [None Nat46 Nat64]
NatPolicy string `json:"natPolicy,omitempty"`
// Service external traffic policy (deprecated in favor of extTrafficPolicy)
// Enum: [Cluster Local]
TrafficPolicy string `json:"trafficPolicy,omitempty"`
// Service type
// Enum: [ClusterIP NodePort ExternalIPs HostPort LoadBalancer LocalRedirect]
Type string `json:"type,omitempty"`
}
// Validate validates this service spec flags
func (m *ServiceSpecFlags) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExtTrafficPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateIntTrafficPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateNatPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateTrafficPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateType(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var serviceSpecFlagsTypeExtTrafficPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeExtTrafficPolicyPropEnum = append(serviceSpecFlagsTypeExtTrafficPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsExtTrafficPolicyCluster captures enum value "Cluster"
ServiceSpecFlagsExtTrafficPolicyCluster string = "Cluster"
// ServiceSpecFlagsExtTrafficPolicyLocal captures enum value "Local"
ServiceSpecFlagsExtTrafficPolicyLocal string = "Local"
)
// prop value enum
func (m *ServiceSpecFlags) validateExtTrafficPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeExtTrafficPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateExtTrafficPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.ExtTrafficPolicy) { // not required
return nil
}
// value enum
if err := m.validateExtTrafficPolicyEnum("flags"+"."+"extTrafficPolicy", "body", m.ExtTrafficPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeIntTrafficPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeIntTrafficPolicyPropEnum = append(serviceSpecFlagsTypeIntTrafficPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsIntTrafficPolicyCluster captures enum value "Cluster"
ServiceSpecFlagsIntTrafficPolicyCluster string = "Cluster"
// ServiceSpecFlagsIntTrafficPolicyLocal captures enum value "Local"
ServiceSpecFlagsIntTrafficPolicyLocal string = "Local"
)
// prop value enum
func (m *ServiceSpecFlags) validateIntTrafficPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeIntTrafficPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateIntTrafficPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.IntTrafficPolicy) { // not required
return nil
}
// value enum
if err := m.validateIntTrafficPolicyEnum("flags"+"."+"intTrafficPolicy", "body", m.IntTrafficPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeNatPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["None","Nat46","Nat64"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeNatPolicyPropEnum = append(serviceSpecFlagsTypeNatPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsNatPolicyNone captures enum value "None"
ServiceSpecFlagsNatPolicyNone string = "None"
// ServiceSpecFlagsNatPolicyNat46 captures enum value "Nat46"
ServiceSpecFlagsNatPolicyNat46 string = "Nat46"
// ServiceSpecFlagsNatPolicyNat64 captures enum value "Nat64"
ServiceSpecFlagsNatPolicyNat64 string = "Nat64"
)
// prop value enum
func (m *ServiceSpecFlags) validateNatPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeNatPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateNatPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.NatPolicy) { // not required
return nil
}
// value enum
if err := m.validateNatPolicyEnum("flags"+"."+"natPolicy", "body", m.NatPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeTrafficPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeTrafficPolicyPropEnum = append(serviceSpecFlagsTypeTrafficPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsTrafficPolicyCluster captures enum value "Cluster"
ServiceSpecFlagsTrafficPolicyCluster string = "Cluster"
// ServiceSpecFlagsTrafficPolicyLocal captures enum value "Local"
ServiceSpecFlagsTrafficPolicyLocal string = "Local"
)
// prop value enum
func (m *ServiceSpecFlags) validateTrafficPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeTrafficPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateTrafficPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.TrafficPolicy) { // not required
return nil
}
// value enum
if err := m.validateTrafficPolicyEnum("flags"+"."+"trafficPolicy", "body", m.TrafficPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeTypePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ClusterIP","NodePort","ExternalIPs","HostPort","LoadBalancer","LocalRedirect"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeTypePropEnum = append(serviceSpecFlagsTypeTypePropEnum, v)
}
}
const (
// ServiceSpecFlagsTypeClusterIP captures enum value "ClusterIP"
ServiceSpecFlagsTypeClusterIP string = "ClusterIP"
// ServiceSpecFlagsTypeNodePort captures enum value "NodePort"
ServiceSpecFlagsTypeNodePort string = "NodePort"
// ServiceSpecFlagsTypeExternalIPs captures enum value "ExternalIPs"
ServiceSpecFlagsTypeExternalIPs string = "ExternalIPs"
// ServiceSpecFlagsTypeHostPort captures enum value "HostPort"
ServiceSpecFlagsTypeHostPort string = "HostPort"
// ServiceSpecFlagsTypeLoadBalancer captures enum value "LoadBalancer"
ServiceSpecFlagsTypeLoadBalancer string = "LoadBalancer"
// ServiceSpecFlagsTypeLocalRedirect captures enum value "LocalRedirect"
ServiceSpecFlagsTypeLocalRedirect string = "LocalRedirect"
)
// prop value enum
func (m *ServiceSpecFlags) validateTypeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeTypePropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateType(formats strfmt.Registry) error {
if swag.IsZero(m.Type) { // not required
return nil
}
// value enum
if err := m.validateTypeEnum("flags"+"."+"type", "body", m.Type); err != nil {
return err
}
return nil
}
// ContextValidate validates this service spec flags based on the context it is used in
func (m *ServiceSpecFlags) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ServiceSpecFlags) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ServiceSpecFlags) UnmarshalBinary(b []byte) error {
var res ServiceSpecFlags
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ServiceStatus Configuration of a service
//
// swagger:model ServiceStatus
type ServiceStatus struct {
// realized
Realized *ServiceSpec `json:"realized,omitempty"`
}
// Validate validates this service status
func (m *ServiceStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validates this service status based on the context it is used in
func (m *ServiceStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ServiceStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ServiceStatus) UnmarshalBinary(b []byte) error {
var res ServiceStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Srv6 Status of the SRv6
//
// +k8s:deepcopy-gen=true
//
// swagger:model Srv6
type Srv6 struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// srv6 encap mode
// Enum: [SRH Reduced]
Srv6EncapMode string `json:"srv6EncapMode,omitempty"`
}
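// exampleSrv6Validate is an illustrative sketch, not part of the generated
// go-swagger output. It shows that Srv6EncapMode must be one of the enum
// values captured by the Srv6Srv6EncapMode* constants below; an empty value
// also passes validation because the field is optional.
func exampleSrv6Validate() error {
	s := &Srv6{
		Enabled:       true,
		Srv6EncapMode: Srv6Srv6EncapModeSRH,
	}
	return s.Validate(strfmt.Default)
}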
// Validate validates this srv6
func (m *Srv6) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSrv6EncapMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var srv6TypeSrv6EncapModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["SRH","Reduced"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
srv6TypeSrv6EncapModePropEnum = append(srv6TypeSrv6EncapModePropEnum, v)
}
}
const (
// Srv6Srv6EncapModeSRH captures enum value "SRH"
Srv6Srv6EncapModeSRH string = "SRH"
// Srv6Srv6EncapModeReduced captures enum value "Reduced"
Srv6Srv6EncapModeReduced string = "Reduced"
)
// prop value enum
func (m *Srv6) validateSrv6EncapModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, srv6TypeSrv6EncapModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Srv6) validateSrv6EncapMode(formats strfmt.Registry) error {
if swag.IsZero(m.Srv6EncapMode) { // not required
return nil
}
// value enum
if err := m.validateSrv6EncapModeEnum("srv6EncapMode", "body", m.Srv6EncapMode); err != nil {
return err
}
return nil
}
// ContextValidate validates this srv6 based on the context it is used in
func (m *Srv6) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Srv6) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Srv6) UnmarshalBinary(b []byte) error {
var res Srv6
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// StateDBQuery StateDB query
//
// swagger:model StateDBQuery
type StateDBQuery struct {
// Index to query against
Index string `json:"index,omitempty"`
// Key to query with. Base64 encoded.
Key string `json:"key,omitempty"`
// Whether to perform a LowerBound (prefix) search instead of a full-matching Get
Lowerbound bool `json:"lowerbound,omitempty"`
// Name of the table to query
Table string `json:"table,omitempty"`
}
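// exampleStateDBQuery is an illustrative sketch, not part of the generated
// go-swagger output. The Key field carries the query key base64-encoded, so
// callers encode it before setting the field. The table, index, and key
// below are hypothetical example values; the literal is the base64 encoding
// of "default/kube-dns".
func exampleStateDBQuery() *StateDBQuery {
	return &StateDBQuery{
		Table:      "services",                 // hypothetical table name
		Index:      "name",                     // hypothetical index name
		Key:        "ZGVmYXVsdC9rdWJlLWRucw==", // base64("default/kube-dns")
		Lowerbound: false,                      // full-matching Get rather than a prefix search
	}
}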
// Validate validates this StateDB query
func (m *StateDBQuery) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this StateDB query based on the context it is used in
func (m *StateDBQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *StateDBQuery) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *StateDBQuery) UnmarshalBinary(b []byte) error {
var res StateDBQuery
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Status Status of an individual component
//
// swagger:model Status
type Status struct {
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// State the component is in
// Enum: [Ok Warning Failure Disabled]
State string `json:"state,omitempty"`
}
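// exampleStatus is an illustrative sketch, not part of the generated
// go-swagger output. It builds a Status with one of the StatusState*
// constants defined below; Validate rejects any State outside the
// [Ok Warning Failure Disabled] enum. The message text is a hypothetical
// example value.
func exampleStatus() error {
	s := &Status{
		State: StatusStateWarning,
		Msg:   "kvstore connection degraded",
	}
	return s.Validate(strfmt.Default)
}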
// Validate validates this status
func (m *Status) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var statusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
statusTypeStatePropEnum = append(statusTypeStatePropEnum, v)
}
}
const (
// StatusStateOk captures enum value "Ok"
StatusStateOk string = "Ok"
// StatusStateWarning captures enum value "Warning"
StatusStateWarning string = "Warning"
// StatusStateFailure captures enum value "Failure"
StatusStateFailure string = "Failure"
// StatusStateDisabled captures enum value "Disabled"
StatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *Status) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, statusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Status) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this status based on the context it is used in
func (m *Status) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Status) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Status) UnmarshalBinary(b []byte) error {
var res Status
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// StatusResponse Health and status information of daemon
//
// +k8s:deepcopy-gen=true
//
// swagger:model StatusResponse
type StatusResponse struct {
// Status of core datapath attachment mode
AttachMode AttachMode `json:"attach-mode,omitempty"`
// Status of Mutual Authentication certificate provider
AuthCertificateProvider *Status `json:"auth-certificate-provider,omitempty"`
// Status of bandwidth manager
BandwidthManager *BandwidthManager `json:"bandwidth-manager,omitempty"`
// Status of BPF maps
BpfMaps *BPFMapStatus `json:"bpf-maps,omitempty"`
// Status of Cilium daemon
Cilium *Status `json:"cilium,omitempty"`
// When supported by the API, this client ID should be used by the
// client when making another request to the server.
// See for example "/cluster/nodes".
//
ClientID int64 `json:"client-id,omitempty"`
// Status of clock source
ClockSource *ClockSource `json:"clock-source,omitempty"`
// Status of cluster
Cluster *ClusterStatus `json:"cluster,omitempty"`
// Status of ClusterMesh
ClusterMesh *ClusterMeshStatus `json:"cluster-mesh,omitempty"`
// Status of CNI chaining
CniChaining *CNIChainingStatus `json:"cni-chaining,omitempty"`
// Status of the CNI configuration file
CniFile *Status `json:"cni-file,omitempty"`
// Status of local container runtime
ContainerRuntime *Status `json:"container-runtime,omitempty"`
// Status of all endpoint controllers
Controllers ControllerStatuses `json:"controllers,omitempty"`
// Status of datapath mode
DatapathMode DatapathMode `json:"datapath-mode,omitempty"`
// Status of transparent encryption
Encryption *EncryptionStatus `json:"encryption,omitempty"`
// Status of the host firewall
HostFirewall *HostFirewall `json:"host-firewall,omitempty"`
// Status of Hubble server
Hubble *HubbleStatus `json:"hubble,omitempty"`
// Status of identity range of the cluster
IdentityRange *IdentityRange `json:"identity-range,omitempty"`
// Status of IP address management
Ipam *IPAMStatus `json:"ipam,omitempty"`
// Status of IPv4 BIG TCP
IPV4BigTCP *IPV4BigTCP `json:"ipv4-big-tcp,omitempty"`
// Status of IPv6 BIG TCP
IPV6BigTCP *IPV6BigTCP `json:"ipv6-big-tcp,omitempty"`
// Status of kube-proxy replacement
KubeProxyReplacement *KubeProxyReplacement `json:"kube-proxy-replacement,omitempty"`
// Status of Kubernetes integration
Kubernetes *K8sStatus `json:"kubernetes,omitempty"`
// Status of key/value datastore
Kvstore *Status `json:"kvstore,omitempty"`
// Status of masquerading
Masquerading *Masquerading `json:"masquerading,omitempty"`
// Status of the node monitor
NodeMonitor *MonitorStatus `json:"nodeMonitor,omitempty"`
// Status of proxy
Proxy *ProxyStatus `json:"proxy,omitempty"`
// Status of routing
Routing *Routing `json:"routing,omitempty"`
// Status of SRv6
Srv6 *Srv6 `json:"srv6,omitempty"`
// List of stale information in the status
Stale map[string]strfmt.DateTime `json:"stale,omitempty"`
}
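// exampleStatusResponseHealthy is an illustrative sketch, not part of the
// generated go-swagger output. It shows how a consumer might validate a
// StatusResponse and then inspect one nested component: every component
// pointer is optional, so a nil check is required before dereferencing.
// StatusStateOk is the enum constant defined with the Status model in this
// package.
func exampleStatusResponseHealthy(sr *StatusResponse) (bool, error) {
	if err := sr.Validate(strfmt.Default); err != nil {
		return false, err
	}
	if sr.Kvstore != nil && sr.Kvstore.State != StatusStateOk {
		return false, nil
	}
	return true, nil
}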
// Validate validates this status response
func (m *StatusResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAttachMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateAuthCertificateProvider(formats); err != nil {
res = append(res, err)
}
if err := m.validateBandwidthManager(formats); err != nil {
res = append(res, err)
}
if err := m.validateBpfMaps(formats); err != nil {
res = append(res, err)
}
if err := m.validateCilium(formats); err != nil {
res = append(res, err)
}
if err := m.validateClockSource(formats); err != nil {
res = append(res, err)
}
if err := m.validateCluster(formats); err != nil {
res = append(res, err)
}
if err := m.validateClusterMesh(formats); err != nil {
res = append(res, err)
}
if err := m.validateCniChaining(formats); err != nil {
res = append(res, err)
}
if err := m.validateCniFile(formats); err != nil {
res = append(res, err)
}
if err := m.validateContainerRuntime(formats); err != nil {
res = append(res, err)
}
if err := m.validateControllers(formats); err != nil {
res = append(res, err)
}
if err := m.validateDatapathMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateEncryption(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostFirewall(formats); err != nil {
res = append(res, err)
}
if err := m.validateHubble(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentityRange(formats); err != nil {
res = append(res, err)
}
if err := m.validateIpam(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV4BigTCP(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV6BigTCP(formats); err != nil {
res = append(res, err)
}
if err := m.validateKubeProxyReplacement(formats); err != nil {
res = append(res, err)
}
if err := m.validateKubernetes(formats); err != nil {
res = append(res, err)
}
if err := m.validateKvstore(formats); err != nil {
res = append(res, err)
}
if err := m.validateMasquerading(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodeMonitor(formats); err != nil {
res = append(res, err)
}
if err := m.validateProxy(formats); err != nil {
res = append(res, err)
}
if err := m.validateRouting(formats); err != nil {
res = append(res, err)
}
if err := m.validateSrv6(formats); err != nil {
res = append(res, err)
}
if err := m.validateStale(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *StatusResponse) validateAttachMode(formats strfmt.Registry) error {
if swag.IsZero(m.AttachMode) { // not required
return nil
}
if err := m.AttachMode.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("attach-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("attach-mode")
}
return err
}
return nil
}
func (m *StatusResponse) validateAuthCertificateProvider(formats strfmt.Registry) error {
if swag.IsZero(m.AuthCertificateProvider) { // not required
return nil
}
if m.AuthCertificateProvider != nil {
if err := m.AuthCertificateProvider.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("auth-certificate-provider")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("auth-certificate-provider")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateBandwidthManager(formats strfmt.Registry) error {
if swag.IsZero(m.BandwidthManager) { // not required
return nil
}
if m.BandwidthManager != nil {
if err := m.BandwidthManager.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bandwidth-manager")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bandwidth-manager")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateBpfMaps(formats strfmt.Registry) error {
if swag.IsZero(m.BpfMaps) { // not required
return nil
}
if m.BpfMaps != nil {
if err := m.BpfMaps.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf-maps")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf-maps")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCilium(formats strfmt.Registry) error {
if swag.IsZero(m.Cilium) { // not required
return nil
}
if m.Cilium != nil {
if err := m.Cilium.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateClockSource(formats strfmt.Registry) error {
if swag.IsZero(m.ClockSource) { // not required
return nil
}
if m.ClockSource != nil {
if err := m.ClockSource.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clock-source")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clock-source")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCluster(formats strfmt.Registry) error {
if swag.IsZero(m.Cluster) { // not required
return nil
}
if m.Cluster != nil {
if err := m.Cluster.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateClusterMesh(formats strfmt.Registry) error {
if swag.IsZero(m.ClusterMesh) { // not required
return nil
}
if m.ClusterMesh != nil {
if err := m.ClusterMesh.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster-mesh")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster-mesh")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCniChaining(formats strfmt.Registry) error {
if swag.IsZero(m.CniChaining) { // not required
return nil
}
if m.CniChaining != nil {
if err := m.CniChaining.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-chaining")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-chaining")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCniFile(formats strfmt.Registry) error {
if swag.IsZero(m.CniFile) { // not required
return nil
}
if m.CniFile != nil {
if err := m.CniFile.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-file")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-file")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateContainerRuntime(formats strfmt.Registry) error {
if swag.IsZero(m.ContainerRuntime) { // not required
return nil
}
if m.ContainerRuntime != nil {
if err := m.ContainerRuntime.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("container-runtime")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("container-runtime")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateControllers(formats strfmt.Registry) error {
if swag.IsZero(m.Controllers) { // not required
return nil
}
if err := m.Controllers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *StatusResponse) validateDatapathMode(formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-mode")
}
return err
}
return nil
}
func (m *StatusResponse) validateEncryption(formats strfmt.Registry) error {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if m.Encryption != nil {
if err := m.Encryption.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateHostFirewall(formats strfmt.Registry) error {
if swag.IsZero(m.HostFirewall) { // not required
return nil
}
if m.HostFirewall != nil {
if err := m.HostFirewall.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-firewall")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-firewall")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateHubble(formats strfmt.Registry) error {
if swag.IsZero(m.Hubble) { // not required
return nil
}
if m.Hubble != nil {
if err := m.Hubble.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("hubble")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("hubble")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIdentityRange(formats strfmt.Registry) error {
if swag.IsZero(m.IdentityRange) { // not required
return nil
}
if m.IdentityRange != nil {
if err := m.IdentityRange.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity-range")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity-range")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIpam(formats strfmt.Registry) error {
if swag.IsZero(m.Ipam) { // not required
return nil
}
if m.Ipam != nil {
if err := m.Ipam.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipam")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipam")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIPV4BigTCP(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4BigTCP) { // not required
return nil
}
if m.IPV4BigTCP != nil {
if err := m.IPV4BigTCP.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIPV6BigTCP(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6BigTCP) { // not required
return nil
}
if m.IPV6BigTCP != nil {
if err := m.IPV6BigTCP.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKubeProxyReplacement(formats strfmt.Registry) error {
if swag.IsZero(m.KubeProxyReplacement) { // not required
return nil
}
if m.KubeProxyReplacement != nil {
if err := m.KubeProxyReplacement.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kube-proxy-replacement")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kube-proxy-replacement")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKubernetes(formats strfmt.Registry) error {
if swag.IsZero(m.Kubernetes) { // not required
return nil
}
if m.Kubernetes != nil {
if err := m.Kubernetes.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kubernetes")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kubernetes")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKvstore(formats strfmt.Registry) error {
if swag.IsZero(m.Kvstore) { // not required
return nil
}
if m.Kvstore != nil {
if err := m.Kvstore.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstore")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstore")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateMasquerading(formats strfmt.Registry) error {
if swag.IsZero(m.Masquerading) { // not required
return nil
}
if m.Masquerading != nil {
if err := m.Masquerading.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masquerading")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masquerading")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateNodeMonitor(formats strfmt.Registry) error {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if m.NodeMonitor != nil {
if err := m.NodeMonitor.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateProxy(formats strfmt.Registry) error {
if swag.IsZero(m.Proxy) { // not required
return nil
}
if m.Proxy != nil {
if err := m.Proxy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateRouting(formats strfmt.Registry) error {
if swag.IsZero(m.Routing) { // not required
return nil
}
if m.Routing != nil {
if err := m.Routing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("routing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("routing")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateSrv6(formats strfmt.Registry) error {
if swag.IsZero(m.Srv6) { // not required
return nil
}
if m.Srv6 != nil {
if err := m.Srv6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("srv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("srv6")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateStale(formats strfmt.Registry) error {
if swag.IsZero(m.Stale) { // not required
return nil
}
for k := range m.Stale {
if err := validate.FormatOf("stale"+"."+k, "body", "date-time", m.Stale[k].String(), formats); err != nil {
return err
}
}
return nil
}
// ContextValidate validates this status response based on the context it is used in
func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAttachMode(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateAuthCertificateProvider(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateBandwidthManager(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateBpfMaps(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCilium(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateClockSource(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCluster(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateClusterMesh(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCniChaining(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCniFile(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateContainerRuntime(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateControllers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDatapathMode(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateEncryption(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostFirewall(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHubble(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIdentityRange(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIpam(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV4BigTCP(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV6BigTCP(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKubeProxyReplacement(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKubernetes(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKvstore(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateMasquerading(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodeMonitor(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateProxy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRouting(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSrv6(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *StatusResponse) contextValidateAttachMode(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.AttachMode) { // not required
return nil
}
if err := m.AttachMode.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("attach-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("attach-mode")
}
return err
}
return nil
}
func (m *StatusResponse) contextValidateAuthCertificateProvider(ctx context.Context, formats strfmt.Registry) error {
if m.AuthCertificateProvider != nil {
if swag.IsZero(m.AuthCertificateProvider) { // not required
return nil
}
if err := m.AuthCertificateProvider.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("auth-certificate-provider")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("auth-certificate-provider")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateBandwidthManager(ctx context.Context, formats strfmt.Registry) error {
if m.BandwidthManager != nil {
if swag.IsZero(m.BandwidthManager) { // not required
return nil
}
if err := m.BandwidthManager.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bandwidth-manager")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bandwidth-manager")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateBpfMaps(ctx context.Context, formats strfmt.Registry) error {
if m.BpfMaps != nil {
if swag.IsZero(m.BpfMaps) { // not required
return nil
}
if err := m.BpfMaps.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf-maps")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf-maps")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error {
if m.Cilium != nil {
if swag.IsZero(m.Cilium) { // not required
return nil
}
if err := m.Cilium.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateClockSource(ctx context.Context, formats strfmt.Registry) error {
if m.ClockSource != nil {
if swag.IsZero(m.ClockSource) { // not required
return nil
}
if err := m.ClockSource.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clock-source")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clock-source")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCluster(ctx context.Context, formats strfmt.Registry) error {
if m.Cluster != nil {
if swag.IsZero(m.Cluster) { // not required
return nil
}
if err := m.Cluster.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateClusterMesh(ctx context.Context, formats strfmt.Registry) error {
if m.ClusterMesh != nil {
if swag.IsZero(m.ClusterMesh) { // not required
return nil
}
if err := m.ClusterMesh.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster-mesh")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster-mesh")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCniChaining(ctx context.Context, formats strfmt.Registry) error {
if m.CniChaining != nil {
if swag.IsZero(m.CniChaining) { // not required
return nil
}
if err := m.CniChaining.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-chaining")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-chaining")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCniFile(ctx context.Context, formats strfmt.Registry) error {
if m.CniFile != nil {
if swag.IsZero(m.CniFile) { // not required
return nil
}
if err := m.CniFile.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-file")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-file")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateContainerRuntime(ctx context.Context, formats strfmt.Registry) error {
if m.ContainerRuntime != nil {
if swag.IsZero(m.ContainerRuntime) { // not required
return nil
}
if err := m.ContainerRuntime.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("container-runtime")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("container-runtime")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateControllers(ctx context.Context, formats strfmt.Registry) error {
if err := m.Controllers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *StatusResponse) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-mode")
}
return err
}
return nil
}
func (m *StatusResponse) contextValidateEncryption(ctx context.Context, formats strfmt.Registry) error {
if m.Encryption != nil {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if err := m.Encryption.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateHostFirewall(ctx context.Context, formats strfmt.Registry) error {
if m.HostFirewall != nil {
if swag.IsZero(m.HostFirewall) { // not required
return nil
}
if err := m.HostFirewall.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-firewall")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-firewall")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateHubble(ctx context.Context, formats strfmt.Registry) error {
if m.Hubble != nil {
if swag.IsZero(m.Hubble) { // not required
return nil
}
if err := m.Hubble.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("hubble")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("hubble")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIdentityRange(ctx context.Context, formats strfmt.Registry) error {
if m.IdentityRange != nil {
if swag.IsZero(m.IdentityRange) { // not required
return nil
}
if err := m.IdentityRange.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity-range")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity-range")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIpam(ctx context.Context, formats strfmt.Registry) error {
if m.Ipam != nil {
if swag.IsZero(m.Ipam) { // not required
return nil
}
if err := m.Ipam.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipam")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipam")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIPV4BigTCP(ctx context.Context, formats strfmt.Registry) error {
if m.IPV4BigTCP != nil {
if swag.IsZero(m.IPV4BigTCP) { // not required
return nil
}
if err := m.IPV4BigTCP.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIPV6BigTCP(ctx context.Context, formats strfmt.Registry) error {
if m.IPV6BigTCP != nil {
if swag.IsZero(m.IPV6BigTCP) { // not required
return nil
}
if err := m.IPV6BigTCP.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateKubeProxyReplacement(ctx context.Context, formats strfmt.Registry) error {
if m.KubeProxyReplacement != nil {
if swag.IsZero(m.KubeProxyReplacement) { // not required
return nil
}
if err := m.KubeProxyReplacement.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kube-proxy-replacement")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kube-proxy-replacement")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateKubernetes(ctx context.Context, formats strfmt.Registry) error {
if m.Kubernetes != nil {
if swag.IsZero(m.Kubernetes) { // not required
return nil
}
if err := m.Kubernetes.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kubernetes")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kubernetes")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateKvstore(ctx context.Context, formats strfmt.Registry) error {
if m.Kvstore != nil {
if swag.IsZero(m.Kvstore) { // not required
return nil
}
if err := m.Kvstore.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstore")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstore")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateMasquerading(ctx context.Context, formats strfmt.Registry) error {
if m.Masquerading != nil {
if swag.IsZero(m.Masquerading) { // not required
return nil
}
if err := m.Masquerading.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masquerading")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masquerading")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error {
if m.NodeMonitor != nil {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateProxy(ctx context.Context, formats strfmt.Registry) error {
if m.Proxy != nil {
if swag.IsZero(m.Proxy) { // not required
return nil
}
if err := m.Proxy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateRouting(ctx context.Context, formats strfmt.Registry) error {
if m.Routing != nil {
if swag.IsZero(m.Routing) { // not required
return nil
}
if err := m.Routing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("routing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("routing")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateSrv6(ctx context.Context, formats strfmt.Registry) error {
if m.Srv6 != nil {
if swag.IsZero(m.Srv6) { // not required
return nil
}
if err := m.Srv6.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("srv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("srv6")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *StatusResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *StatusResponse) UnmarshalBinary(b []byte) error {
var res StatusResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceFrom trace from
//
// swagger:model TraceFrom
type TraceFrom struct {
// labels
Labels Labels `json:"labels,omitempty"`
}
// Validate validates this trace from
func (m *TraceFrom) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceFrom) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validates this trace from based on the context it is used in
func (m *TraceFrom) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceFrom) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *TraceFrom) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *TraceFrom) UnmarshalBinary(b []byte) error {
var res TraceFrom
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceSelector Context describing a pair of source and destination identities
//
// swagger:model TraceSelector
type TraceSelector struct {
// from
From *TraceFrom `json:"from,omitempty"`
// to
To *TraceTo `json:"to,omitempty"`
// Enable verbose tracing.
//
Verbose bool `json:"verbose,omitempty"`
}
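// exampleTraceSelector is an illustrative sketch, not part of the generated
// go-swagger output. It assembles a TraceSelector from the TraceFrom and
// TraceTo models; the label strings are hypothetical example values, and the
// Labels model is assumed to be the list-of-strings model defined elsewhere
// in this package.
func exampleTraceSelector() *TraceSelector {
	return &TraceSelector{
		From: &TraceFrom{
			Labels: Labels{"k8s:app=frontend"}, // hypothetical source labels
		},
		To: &TraceTo{
			Labels: Labels{"k8s:app=backend"}, // hypothetical destination labels
		},
		Verbose: true,
	}
}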
// Validate validates this trace selector
func (m *TraceSelector) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFrom(formats); err != nil {
res = append(res, err)
}
if err := m.validateTo(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceSelector) validateFrom(formats strfmt.Registry) error {
if swag.IsZero(m.From) { // not required
return nil
}
if m.From != nil {
if err := m.From.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("from")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("from")
}
return err
}
}
return nil
}
func (m *TraceSelector) validateTo(formats strfmt.Registry) error {
if swag.IsZero(m.To) { // not required
return nil
}
if m.To != nil {
if err := m.To.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("to")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("to")
}
return err
}
}
return nil
}
// ContextValidate validates this trace selector based on the context it is used in
func (m *TraceSelector) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFrom(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateTo(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceSelector) contextValidateFrom(ctx context.Context, formats strfmt.Registry) error {
if m.From != nil {
if swag.IsZero(m.From) { // not required
return nil
}
if err := m.From.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("from")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("from")
}
return err
}
}
return nil
}
func (m *TraceSelector) contextValidateTo(ctx context.Context, formats strfmt.Registry) error {
if m.To != nil {
if swag.IsZero(m.To) { // not required
return nil
}
if err := m.To.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("to")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("to")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *TraceSelector) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *TraceSelector) UnmarshalBinary(b []byte) error {
var res TraceSelector
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceTo trace to
//
// swagger:model TraceTo
type TraceTo struct {
// List of Layer 4 port and protocol pairs which will be used in communication
// from the source identity to the destination identity.
//
Dports []*Port `json:"dports"`
// labels
Labels Labels `json:"labels,omitempty"`
}
// Validate validates this trace to
func (m *TraceTo) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDports(formats); err != nil {
res = append(res, err)
}
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceTo) validateDports(formats strfmt.Registry) error {
if swag.IsZero(m.Dports) { // not required
return nil
}
for i := 0; i < len(m.Dports); i++ {
if swag.IsZero(m.Dports[i]) { // not required
continue
}
if m.Dports[i] != nil {
if err := m.Dports[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("dports" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("dports" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *TraceTo) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validates this trace to based on the context it is used in
func (m *TraceTo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDports(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceTo) contextValidateDports(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Dports); i++ {
if m.Dports[i] != nil {
if swag.IsZero(m.Dports[i]) { // not required
return nil
}
if err := m.Dports[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("dports" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("dports" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *TraceTo) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *TraceTo) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *TraceTo) UnmarshalBinary(b []byte) error {
var res TraceTo
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
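// Illustrative sketch (not part of the generated sources): validating a
// TraceTo value. Errors from nested elements are reported with an
// index-qualified name such as "dports.0". The import path and the Port field
// values used here are assumptions for demonstration purposes only.
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/cilium/cilium/api/v1/models"
)

func main() {
	to := &models.TraceTo{
		Dports: []*models.Port{{Port: 8080, Protocol: "TCP"}},
	}
	if err := to.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err) // e.g. an error qualified with "dports.0"
		return
	}
	fmt.Println("trace destination is valid")
}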
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// WireguardInterface Status of a WireGuard interface
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardInterface
type WireguardInterface struct {
// Port on which the WireGuard endpoint is exposed
ListenPort int64 `json:"listen-port,omitempty"`
// Name of the interface
Name string `json:"name,omitempty"`
// Number of peers configured on this interface
PeerCount int64 `json:"peer-count,omitempty"`
// Optional list of WireGuard peers
Peers []*WireguardPeer `json:"peers"`
// Public key of this interface
PublicKey string `json:"public-key,omitempty"`
}
// Validate validates this wireguard interface
func (m *WireguardInterface) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePeers(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardInterface) validatePeers(formats strfmt.Registry) error {
if swag.IsZero(m.Peers) { // not required
return nil
}
for i := 0; i < len(m.Peers); i++ {
if swag.IsZero(m.Peers[i]) { // not required
continue
}
if m.Peers[i] != nil {
if err := m.Peers[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("peers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("peers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this wireguard interface based on the context it is used in
func (m *WireguardInterface) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidatePeers(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardInterface) contextValidatePeers(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Peers); i++ {
if m.Peers[i] != nil {
if swag.IsZero(m.Peers[i]) { // not required
return nil
}
if err := m.Peers[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("peers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("peers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *WireguardInterface) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *WireguardInterface) UnmarshalBinary(b []byte) error {
var res WireguardInterface
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// WireguardPeer Status of a WireGuard peer
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardPeer
type WireguardPeer struct {
// List of IPs which may be routed through this peer
AllowedIps []string `json:"allowed-ips"`
// Endpoint on which we are connected to this peer
Endpoint string `json:"endpoint,omitempty"`
// Timestamp of the last handshake with this peer
// Format: date-time
LastHandshakeTime strfmt.DateTime `json:"last-handshake-time,omitempty"`
// Public key of this peer
PublicKey string `json:"public-key,omitempty"`
// Number of received bytes
TransferRx int64 `json:"transfer-rx,omitempty"`
// Number of sent bytes
TransferTx int64 `json:"transfer-tx,omitempty"`
}
// Validate validates this wireguard peer
func (m *WireguardPeer) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLastHandshakeTime(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardPeer) validateLastHandshakeTime(formats strfmt.Registry) error {
if swag.IsZero(m.LastHandshakeTime) { // not required
return nil
}
if err := validate.FormatOf("last-handshake-time", "body", "date-time", m.LastHandshakeTime.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this wireguard peer based on the context it is used in
func (m *WireguardPeer) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *WireguardPeer) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *WireguardPeer) UnmarshalBinary(b []byte) error {
var res WireguardPeer
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// WireguardStatus Status of the WireGuard agent
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardStatus
type WireguardStatus struct {
// WireGuard interfaces managed by this Cilium instance
Interfaces []*WireguardInterface `json:"interfaces"`
// Node Encryption status
NodeEncryption string `json:"node-encryption,omitempty"`
}
// Validate validates this wireguard status
func (m *WireguardStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateInterfaces(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardStatus) validateInterfaces(formats strfmt.Registry) error {
if swag.IsZero(m.Interfaces) { // not required
return nil
}
for i := 0; i < len(m.Interfaces); i++ {
if swag.IsZero(m.Interfaces[i]) { // not required
continue
}
if m.Interfaces[i] != nil {
if err := m.Interfaces[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("interfaces" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("interfaces" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validates this wireguard status based on the context it is used in
func (m *WireguardStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateInterfaces(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardStatus) contextValidateInterfaces(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Interfaces); i++ {
if m.Interfaces[i] != nil {
if swag.IsZero(m.Interfaces[i]) { // not required
return nil
}
if err := m.Interfaces[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("interfaces" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("interfaces" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *WireguardStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *WireguardStatus) UnmarshalBinary(b []byte) error {
var res WireguardStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
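// Illustrative sketch (not part of the generated sources): the Wireguard*
// models nest (status -> interfaces -> peers), and Validate walks that tree,
// prefixing any error with its path, e.g. "interfaces.0.peers.0.last-handshake-time"
// for a malformed date-time. The import path and field values here are
// assumptions for demonstration purposes only.
package main

import (
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"

	"github.com/cilium/cilium/api/v1/models"
)

func main() {
	status := &models.WireguardStatus{
		Interfaces: []*models.WireguardInterface{{
			Name:       "cilium_wg0",
			ListenPort: 51871,
			PeerCount:  1,
			Peers: []*models.WireguardPeer{{
				PublicKey:         "base64-public-key",
				LastHandshakeTime: strfmt.DateTime(time.Now()),
			}},
		}},
	}
	if err := status.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println("WireGuard status is valid")
}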
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package models
import (
strfmt "github.com/go-openapi/strfmt"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BPFMapStatus) DeepCopyInto(out *BPFMapStatus) {
*out = *in
if in.Maps != nil {
in, out := &in.Maps, &out.Maps
*out = make([]*BPFMapProperties, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(BPFMapProperties)
**out = **in
}
}
}
return
}
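// Note on the generated pattern above: each statement of the form
// `in, out := &in.Maps, &out.Maps` intentionally shadows the outer in/out with
// pointers to the nested field, so the following `*out = make(...)` and the
// per-element loop operate on that field without repeating the full selector.
// Pointer elements are allocated fresh (new(BPFMapProperties)) and copied by
// value (**out = **in), so the top-level structure of the copy does not alias
// the source slice.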
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BPFMapStatus.
func (in *BPFMapStatus) DeepCopy() *BPFMapStatus {
if in == nil {
return nil
}
out := new(BPFMapStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BandwidthManager) DeepCopyInto(out *BandwidthManager) {
*out = *in
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthManager.
func (in *BandwidthManager) DeepCopy() *BandwidthManager {
if in == nil {
return nil
}
out := new(BandwidthManager)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BgpGracefulRestart) DeepCopyInto(out *BgpGracefulRestart) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BgpGracefulRestart.
func (in *BgpGracefulRestart) DeepCopy() *BgpGracefulRestart {
if in == nil {
return nil
}
out := new(BgpGracefulRestart)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BgpPeer) DeepCopyInto(out *BgpPeer) {
*out = *in
if in.Families != nil {
in, out := &in.Families, &out.Families
*out = make([]*BgpPeerFamilies, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(BgpPeerFamilies)
**out = **in
}
}
}
if in.GracefulRestart != nil {
in, out := &in.GracefulRestart, &out.GracefulRestart
*out = new(BgpGracefulRestart)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BgpPeer.
func (in *BgpPeer) DeepCopy() *BgpPeer {
if in == nil {
return nil
}
out := new(BgpPeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BgpPeerFamilies) DeepCopyInto(out *BgpPeerFamilies) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BgpPeerFamilies.
func (in *BgpPeerFamilies) DeepCopy() *BgpPeerFamilies {
if in == nil {
return nil
}
out := new(BgpPeerFamilies)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRPolicy) DeepCopyInto(out *CIDRPolicy) {
*out = *in
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = make([]*PolicyRule, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(PolicyRule)
(*in).DeepCopyInto(*out)
}
}
}
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]*PolicyRule, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(PolicyRule)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRPolicy.
func (in *CIDRPolicy) DeepCopy() *CIDRPolicy {
if in == nil {
return nil
}
out := new(CIDRPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CNIChainingStatus) DeepCopyInto(out *CNIChainingStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIChainingStatus.
func (in *CNIChainingStatus) DeepCopy() *CNIChainingStatus {
if in == nil {
return nil
}
out := new(CNIChainingStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClockSource) DeepCopyInto(out *ClockSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClockSource.
func (in *ClockSource) DeepCopy() *ClockSource {
if in == nil {
return nil
}
out := new(ClockSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterMeshStatus) DeepCopyInto(out *ClusterMeshStatus) {
*out = *in
if in.Clusters != nil {
in, out := &in.Clusters, &out.Clusters
*out = make([]*RemoteCluster, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(RemoteCluster)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMeshStatus.
func (in *ClusterMeshStatus) DeepCopy() *ClusterMeshStatus {
if in == nil {
return nil
}
out := new(ClusterMeshStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = *in
if in.CiliumHealth != nil {
in, out := &in.CiliumHealth, &out.CiliumHealth
*out = new(Status)
**out = **in
}
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
*out = make([]*NodeElement, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(NodeElement)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
func (in *ClusterStatus) DeepCopy() *ClusterStatus {
if in == nil {
return nil
}
out := new(ClusterStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatus) DeepCopyInto(out *ControllerStatus) {
*out = *in
if in.Configuration != nil {
in, out := &in.Configuration, &out.Configuration
*out = new(ControllerStatusConfiguration)
**out = **in
}
if in.Status != nil {
in, out := &in.Status, &out.Status
*out = new(ControllerStatusStatus)
(*in).DeepCopyInto(*out)
}
in.UUID.DeepCopyInto(&out.UUID)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatus.
func (in *ControllerStatus) DeepCopy() *ControllerStatus {
if in == nil {
return nil
}
out := new(ControllerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatusConfiguration) DeepCopyInto(out *ControllerStatusConfiguration) {
*out = *in
in.ErrorRetryBase.DeepCopyInto(&out.ErrorRetryBase)
in.Interval.DeepCopyInto(&out.Interval)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatusConfiguration.
func (in *ControllerStatusConfiguration) DeepCopy() *ControllerStatusConfiguration {
if in == nil {
return nil
}
out := new(ControllerStatusConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatusStatus) DeepCopyInto(out *ControllerStatusStatus) {
*out = *in
in.LastFailureTimestamp.DeepCopyInto(&out.LastFailureTimestamp)
in.LastSuccessTimestamp.DeepCopyInto(&out.LastSuccessTimestamp)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatusStatus.
func (in *ControllerStatusStatus) DeepCopy() *ControllerStatusStatus {
if in == nil {
return nil
}
out := new(ControllerStatusStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionStatus) DeepCopyInto(out *EncryptionStatus) {
*out = *in
if in.Ipsec != nil {
in, out := &in.Ipsec, &out.Ipsec
*out = new(IPsecStatus)
(*in).DeepCopyInto(*out)
}
if in.Wireguard != nil {
in, out := &in.Wireguard, &out.Wireguard
*out = new(WireguardStatus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionStatus.
func (in *EncryptionStatus) DeepCopy() *EncryptionStatus {
if in == nil {
return nil
}
out := new(EncryptionStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPolicy) DeepCopyInto(out *EndpointPolicy) {
*out = *in
if in.AllowedEgressIdentities != nil {
in, out := &in.AllowedEgressIdentities, &out.AllowedEgressIdentities
*out = make([]int64, len(*in))
copy(*out, *in)
}
if in.AllowedIngressIdentities != nil {
in, out := &in.AllowedIngressIdentities, &out.AllowedIngressIdentities
*out = make([]int64, len(*in))
copy(*out, *in)
}
if in.CidrPolicy != nil {
in, out := &in.CidrPolicy, &out.CidrPolicy
*out = new(CIDRPolicy)
(*in).DeepCopyInto(*out)
}
if in.DeniedEgressIdentities != nil {
in, out := &in.DeniedEgressIdentities, &out.DeniedEgressIdentities
*out = make([]int64, len(*in))
copy(*out, *in)
}
if in.DeniedIngressIdentities != nil {
in, out := &in.DeniedIngressIdentities, &out.DeniedIngressIdentities
*out = make([]int64, len(*in))
copy(*out, *in)
}
if in.L4 != nil {
in, out := &in.L4, &out.L4
*out = new(L4Policy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicy.
func (in *EndpointPolicy) DeepCopy() *EndpointPolicy {
if in == nil {
return nil
}
out := new(EndpointPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostFirewall) DeepCopyInto(out *HostFirewall) {
*out = *in
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirewall.
func (in *HostFirewall) DeepCopy() *HostFirewall {
if in == nil {
return nil
}
out := new(HostFirewall)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HubbleStatus) DeepCopyInto(out *HubbleStatus) {
*out = *in
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = new(HubbleStatusMetrics)
**out = **in
}
if in.Observer != nil {
in, out := &in.Observer, &out.Observer
*out = new(HubbleStatusObserver)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubbleStatus.
func (in *HubbleStatus) DeepCopy() *HubbleStatus {
if in == nil {
return nil
}
out := new(HubbleStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HubbleStatusObserver) DeepCopyInto(out *HubbleStatusObserver) {
*out = *in
in.Uptime.DeepCopyInto(&out.Uptime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubbleStatusObserver.
func (in *HubbleStatusObserver) DeepCopy() *HubbleStatusObserver {
if in == nil {
return nil
}
out := new(HubbleStatusObserver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMStatus) DeepCopyInto(out *IPAMStatus) {
*out = *in
if in.Allocations != nil {
in, out := &in.Allocations, &out.Allocations
*out = make(AllocationMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.IPV4 != nil {
in, out := &in.IPV4, &out.IPV4
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.IPV6 != nil {
in, out := &in.IPV6, &out.IPV6
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMStatus.
func (in *IPAMStatus) DeepCopy() *IPAMStatus {
if in == nil {
return nil
}
out := new(IPAMStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPV4BigTCP) DeepCopyInto(out *IPV4BigTCP) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPV4BigTCP.
func (in *IPV4BigTCP) DeepCopy() *IPV4BigTCP {
if in == nil {
return nil
}
out := new(IPV4BigTCP)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPV6BigTCP) DeepCopyInto(out *IPV6BigTCP) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPV6BigTCP.
func (in *IPV6BigTCP) DeepCopy() *IPV6BigTCP {
if in == nil {
return nil
}
out := new(IPV6BigTCP)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPsecStatus) DeepCopyInto(out *IPsecStatus) {
*out = *in
if in.DecryptInterfaces != nil {
in, out := &in.DecryptInterfaces, &out.DecryptInterfaces
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.XfrmErrors != nil {
in, out := &in.XfrmErrors, &out.XfrmErrors
*out = make(map[string]int64, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecStatus.
func (in *IPsecStatus) DeepCopy() *IPsecStatus {
if in == nil {
return nil
}
out := new(IPsecStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityRange) DeepCopyInto(out *IdentityRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityRange.
func (in *IdentityRange) DeepCopy() *IdentityRange {
if in == nil {
return nil
}
out := new(IdentityRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K8sStatus) DeepCopyInto(out *K8sStatus) {
*out = *in
if in.K8sAPIVersions != nil {
in, out := &in.K8sAPIVersions, &out.K8sAPIVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sStatus.
func (in *K8sStatus) DeepCopy() *K8sStatus {
if in == nil {
return nil
}
out := new(K8sStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacement) DeepCopyInto(out *KubeProxyReplacement) {
*out = *in
if in.DeviceList != nil {
in, out := &in.DeviceList, &out.DeviceList
*out = make([]*KubeProxyReplacementDeviceListItems0, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(KubeProxyReplacementDeviceListItems0)
(*in).DeepCopyInto(*out)
}
}
}
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Features != nil {
in, out := &in.Features, &out.Features
*out = new(KubeProxyReplacementFeatures)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacement.
func (in *KubeProxyReplacement) DeepCopy() *KubeProxyReplacement {
if in == nil {
return nil
}
out := new(KubeProxyReplacement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementDeviceListItems0) DeepCopyInto(out *KubeProxyReplacementDeviceListItems0) {
*out = *in
if in.IP != nil {
in, out := &in.IP, &out.IP
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementDeviceListItems0.
func (in *KubeProxyReplacementDeviceListItems0) DeepCopy() *KubeProxyReplacementDeviceListItems0 {
if in == nil {
return nil
}
out := new(KubeProxyReplacementDeviceListItems0)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeatures) DeepCopyInto(out *KubeProxyReplacementFeatures) {
*out = *in
if in.ExternalIPs != nil {
in, out := &in.ExternalIPs, &out.ExternalIPs
*out = new(KubeProxyReplacementFeaturesExternalIPs)
**out = **in
}
if in.GracefulTermination != nil {
in, out := &in.GracefulTermination, &out.GracefulTermination
*out = new(KubeProxyReplacementFeaturesGracefulTermination)
**out = **in
}
if in.HostPort != nil {
in, out := &in.HostPort, &out.HostPort
*out = new(KubeProxyReplacementFeaturesHostPort)
**out = **in
}
if in.HostReachableServices != nil {
in, out := &in.HostReachableServices, &out.HostReachableServices
*out = new(KubeProxyReplacementFeaturesHostReachableServices)
(*in).DeepCopyInto(*out)
}
if in.Nat46X64 != nil {
in, out := &in.Nat46X64, &out.Nat46X64
*out = new(KubeProxyReplacementFeaturesNat46X64)
(*in).DeepCopyInto(*out)
}
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(KubeProxyReplacementFeaturesNodePort)
**out = **in
}
if in.SessionAffinity != nil {
in, out := &in.SessionAffinity, &out.SessionAffinity
*out = new(KubeProxyReplacementFeaturesSessionAffinity)
**out = **in
}
if in.SocketLB != nil {
in, out := &in.SocketLB, &out.SocketLB
*out = new(KubeProxyReplacementFeaturesSocketLB)
**out = **in
}
if in.SocketLBTracing != nil {
in, out := &in.SocketLBTracing, &out.SocketLBTracing
*out = new(KubeProxyReplacementFeaturesSocketLBTracing)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeatures.
func (in *KubeProxyReplacementFeatures) DeepCopy() *KubeProxyReplacementFeatures {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeatures)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesExternalIPs) DeepCopyInto(out *KubeProxyReplacementFeaturesExternalIPs) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesExternalIPs.
func (in *KubeProxyReplacementFeaturesExternalIPs) DeepCopy() *KubeProxyReplacementFeaturesExternalIPs {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesExternalIPs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesGracefulTermination) DeepCopyInto(out *KubeProxyReplacementFeaturesGracefulTermination) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesGracefulTermination.
func (in *KubeProxyReplacementFeaturesGracefulTermination) DeepCopy() *KubeProxyReplacementFeaturesGracefulTermination {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesGracefulTermination)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesHostPort) DeepCopyInto(out *KubeProxyReplacementFeaturesHostPort) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesHostPort.
func (in *KubeProxyReplacementFeaturesHostPort) DeepCopy() *KubeProxyReplacementFeaturesHostPort {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesHostPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesHostReachableServices) DeepCopyInto(out *KubeProxyReplacementFeaturesHostReachableServices) {
*out = *in
if in.Protocols != nil {
in, out := &in.Protocols, &out.Protocols
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesHostReachableServices.
func (in *KubeProxyReplacementFeaturesHostReachableServices) DeepCopy() *KubeProxyReplacementFeaturesHostReachableServices {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesHostReachableServices)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesNat46X64) DeepCopyInto(out *KubeProxyReplacementFeaturesNat46X64) {
*out = *in
if in.Gateway != nil {
in, out := &in.Gateway, &out.Gateway
*out = new(KubeProxyReplacementFeaturesNat46X64Gateway)
(*in).DeepCopyInto(*out)
}
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(KubeProxyReplacementFeaturesNat46X64Service)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesNat46X64.
func (in *KubeProxyReplacementFeaturesNat46X64) DeepCopy() *KubeProxyReplacementFeaturesNat46X64 {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesNat46X64)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesNat46X64Gateway) DeepCopyInto(out *KubeProxyReplacementFeaturesNat46X64Gateway) {
*out = *in
if in.Prefixes != nil {
in, out := &in.Prefixes, &out.Prefixes
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesNat46X64Gateway.
func (in *KubeProxyReplacementFeaturesNat46X64Gateway) DeepCopy() *KubeProxyReplacementFeaturesNat46X64Gateway {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesNat46X64Gateway)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesNat46X64Service) DeepCopyInto(out *KubeProxyReplacementFeaturesNat46X64Service) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesNat46X64Service.
func (in *KubeProxyReplacementFeaturesNat46X64Service) DeepCopy() *KubeProxyReplacementFeaturesNat46X64Service {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesNat46X64Service)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesNodePort) DeepCopyInto(out *KubeProxyReplacementFeaturesNodePort) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesNodePort.
func (in *KubeProxyReplacementFeaturesNodePort) DeepCopy() *KubeProxyReplacementFeaturesNodePort {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesNodePort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesSessionAffinity) DeepCopyInto(out *KubeProxyReplacementFeaturesSessionAffinity) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesSessionAffinity.
func (in *KubeProxyReplacementFeaturesSessionAffinity) DeepCopy() *KubeProxyReplacementFeaturesSessionAffinity {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesSessionAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesSocketLB) DeepCopyInto(out *KubeProxyReplacementFeaturesSocketLB) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesSocketLB.
func (in *KubeProxyReplacementFeaturesSocketLB) DeepCopy() *KubeProxyReplacementFeaturesSocketLB {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesSocketLB)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesSocketLBTracing) DeepCopyInto(out *KubeProxyReplacementFeaturesSocketLBTracing) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesSocketLBTracing.
func (in *KubeProxyReplacementFeaturesSocketLBTracing) DeepCopy() *KubeProxyReplacementFeaturesSocketLBTracing {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesSocketLBTracing)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *L4Policy) DeepCopyInto(out *L4Policy) {
*out = *in
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = make([]*PolicyRule, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(PolicyRule)
(*in).DeepCopyInto(*out)
}
}
}
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]*PolicyRule, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(PolicyRule)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4Policy.
func (in *L4Policy) DeepCopy() *L4Policy {
if in == nil {
return nil
}
out := new(L4Policy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Masquerading) DeepCopyInto(out *Masquerading) {
*out = *in
if in.EnabledProtocols != nil {
in, out := &in.EnabledProtocols, &out.EnabledProtocols
*out = new(MasqueradingEnabledProtocols)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Masquerading.
func (in *Masquerading) DeepCopy() *Masquerading {
if in == nil {
return nil
}
out := new(Masquerading)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in NamedPorts) DeepCopyInto(out *NamedPorts) {
{
in := &in
*out = make(NamedPorts, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Port)
**out = **in
}
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedPorts.
func (in NamedPorts) DeepCopy() NamedPorts {
if in == nil {
return nil
}
out := new(NamedPorts)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddressing) DeepCopyInto(out *NodeAddressing) {
*out = *in
if in.IPV4 != nil {
in, out := &in.IPV4, &out.IPV4
*out = new(NodeAddressingElement)
**out = **in
}
if in.IPV6 != nil {
in, out := &in.IPV6, &out.IPV6
*out = new(NodeAddressingElement)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddressing.
func (in *NodeAddressing) DeepCopy() *NodeAddressing {
if in == nil {
return nil
}
out := new(NodeAddressing)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeElement) DeepCopyInto(out *NodeElement) {
*out = *in
if in.HealthEndpointAddress != nil {
in, out := &in.HealthEndpointAddress, &out.HealthEndpointAddress
*out = new(NodeAddressing)
(*in).DeepCopyInto(*out)
}
if in.IngressAddress != nil {
in, out := &in.IngressAddress, &out.IngressAddress
*out = new(NodeAddressing)
(*in).DeepCopyInto(*out)
}
if in.PrimaryAddress != nil {
in, out := &in.PrimaryAddress, &out.PrimaryAddress
*out = new(NodeAddressing)
(*in).DeepCopyInto(*out)
}
if in.SecondaryAddresses != nil {
in, out := &in.SecondaryAddresses, &out.SecondaryAddresses
*out = make([]*NodeAddressingElement, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(NodeAddressingElement)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeElement.
func (in *NodeElement) DeepCopy() *NodeElement {
if in == nil {
return nil
}
out := new(NodeElement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
*out = *in
if in.DerivedFromRules != nil {
in, out := &in.DerivedFromRules, &out.DerivedFromRules
*out = make([][]string, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
if in.RulesBySelector != nil {
in, out := &in.RulesBySelector, &out.RulesBySelector
*out = make(map[string][][]string, len(*in))
for key, val := range *in {
var outVal [][]string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([][]string, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
func (in *PolicyRule) DeepCopy() *PolicyRule {
if in == nil {
return nil
}
out := new(PolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyStatistics) DeepCopyInto(out *ProxyStatistics) {
*out = *in
if in.Statistics != nil {
in, out := &in.Statistics, &out.Statistics
*out = new(RequestResponseStatistics)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatistics.
func (in *ProxyStatistics) DeepCopy() *ProxyStatistics {
if in == nil {
return nil
}
out := new(ProxyStatistics)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
*out = *in
if in.Redirects != nil {
in, out := &in.Redirects, &out.Redirects
*out = make([]*ProxyRedirect, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ProxyRedirect)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
func (in *ProxyStatus) DeepCopy() *ProxyStatus {
if in == nil {
return nil
}
out := new(ProxyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoteCluster) DeepCopyInto(out *RemoteCluster) {
*out = *in
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(RemoteClusterConfig)
**out = **in
}
in.LastFailure.DeepCopyInto(&out.LastFailure)
if in.Synced != nil {
in, out := &in.Synced, &out.Synced
*out = new(RemoteClusterSynced)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteCluster.
func (in *RemoteCluster) DeepCopy() *RemoteCluster {
if in == nil {
return nil
}
out := new(RemoteCluster)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoteClusterConfig) DeepCopyInto(out *RemoteClusterConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteClusterConfig.
func (in *RemoteClusterConfig) DeepCopy() *RemoteClusterConfig {
if in == nil {
return nil
}
out := new(RemoteClusterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoteClusterSynced) DeepCopyInto(out *RemoteClusterSynced) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteClusterSynced.
func (in *RemoteClusterSynced) DeepCopy() *RemoteClusterSynced {
if in == nil {
return nil
}
out := new(RemoteClusterSynced)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestResponseStatistics) DeepCopyInto(out *RequestResponseStatistics) {
*out = *in
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = new(MessageForwardingStatistics)
**out = **in
}
if in.Responses != nil {
in, out := &in.Responses, &out.Responses
*out = new(MessageForwardingStatistics)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestResponseStatistics.
func (in *RequestResponseStatistics) DeepCopy() *RequestResponseStatistics {
if in == nil {
return nil
}
out := new(RequestResponseStatistics)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Routing) DeepCopyInto(out *Routing) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Routing.
func (in *Routing) DeepCopy() *Routing {
if in == nil {
return nil
}
out := new(Routing)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Srv6) DeepCopyInto(out *Srv6) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Srv6.
func (in *Srv6) DeepCopy() *Srv6 {
if in == nil {
return nil
}
out := new(Srv6)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatusResponse) DeepCopyInto(out *StatusResponse) {
*out = *in
if in.AuthCertificateProvider != nil {
in, out := &in.AuthCertificateProvider, &out.AuthCertificateProvider
*out = new(Status)
**out = **in
}
if in.BandwidthManager != nil {
in, out := &in.BandwidthManager, &out.BandwidthManager
*out = new(BandwidthManager)
(*in).DeepCopyInto(*out)
}
if in.BpfMaps != nil {
in, out := &in.BpfMaps, &out.BpfMaps
*out = new(BPFMapStatus)
(*in).DeepCopyInto(*out)
}
if in.Cilium != nil {
in, out := &in.Cilium, &out.Cilium
*out = new(Status)
**out = **in
}
if in.ClockSource != nil {
in, out := &in.ClockSource, &out.ClockSource
*out = new(ClockSource)
**out = **in
}
if in.Cluster != nil {
in, out := &in.Cluster, &out.Cluster
*out = new(ClusterStatus)
(*in).DeepCopyInto(*out)
}
if in.ClusterMesh != nil {
in, out := &in.ClusterMesh, &out.ClusterMesh
*out = new(ClusterMeshStatus)
(*in).DeepCopyInto(*out)
}
if in.CniChaining != nil {
in, out := &in.CniChaining, &out.CniChaining
*out = new(CNIChainingStatus)
**out = **in
}
if in.CniFile != nil {
in, out := &in.CniFile, &out.CniFile
*out = new(Status)
**out = **in
}
if in.ContainerRuntime != nil {
in, out := &in.ContainerRuntime, &out.ContainerRuntime
*out = new(Status)
**out = **in
}
if in.Controllers != nil {
in, out := &in.Controllers, &out.Controllers
*out = make(ControllerStatuses, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ControllerStatus)
(*in).DeepCopyInto(*out)
}
}
}
if in.Encryption != nil {
in, out := &in.Encryption, &out.Encryption
*out = new(EncryptionStatus)
(*in).DeepCopyInto(*out)
}
if in.HostFirewall != nil {
in, out := &in.HostFirewall, &out.HostFirewall
*out = new(HostFirewall)
(*in).DeepCopyInto(*out)
}
if in.Hubble != nil {
in, out := &in.Hubble, &out.Hubble
*out = new(HubbleStatus)
(*in).DeepCopyInto(*out)
}
if in.IdentityRange != nil {
in, out := &in.IdentityRange, &out.IdentityRange
*out = new(IdentityRange)
**out = **in
}
if in.Ipam != nil {
in, out := &in.Ipam, &out.Ipam
*out = new(IPAMStatus)
(*in).DeepCopyInto(*out)
}
if in.IPV4BigTCP != nil {
in, out := &in.IPV4BigTCP, &out.IPV4BigTCP
*out = new(IPV4BigTCP)
**out = **in
}
if in.IPV6BigTCP != nil {
in, out := &in.IPV6BigTCP, &out.IPV6BigTCP
*out = new(IPV6BigTCP)
**out = **in
}
if in.KubeProxyReplacement != nil {
in, out := &in.KubeProxyReplacement, &out.KubeProxyReplacement
*out = new(KubeProxyReplacement)
(*in).DeepCopyInto(*out)
}
if in.Kubernetes != nil {
in, out := &in.Kubernetes, &out.Kubernetes
*out = new(K8sStatus)
(*in).DeepCopyInto(*out)
}
if in.Kvstore != nil {
in, out := &in.Kvstore, &out.Kvstore
*out = new(Status)
**out = **in
}
if in.Masquerading != nil {
in, out := &in.Masquerading, &out.Masquerading
*out = new(Masquerading)
(*in).DeepCopyInto(*out)
}
if in.NodeMonitor != nil {
in, out := &in.NodeMonitor, &out.NodeMonitor
*out = new(MonitorStatus)
**out = **in
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyStatus)
(*in).DeepCopyInto(*out)
}
if in.Routing != nil {
in, out := &in.Routing, &out.Routing
*out = new(Routing)
**out = **in
}
if in.Srv6 != nil {
in, out := &in.Srv6, &out.Srv6
*out = new(Srv6)
**out = **in
}
if in.Stale != nil {
in, out := &in.Stale, &out.Stale
*out = make(map[string]strfmt.DateTime, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusResponse.
func (in *StatusResponse) DeepCopy() *StatusResponse {
if in == nil {
return nil
}
out := new(StatusResponse)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardInterface) DeepCopyInto(out *WireguardInterface) {
*out = *in
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]*WireguardPeer, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(WireguardPeer)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardInterface.
func (in *WireguardInterface) DeepCopy() *WireguardInterface {
if in == nil {
return nil
}
out := new(WireguardInterface)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardPeer) DeepCopyInto(out *WireguardPeer) {
*out = *in
if in.AllowedIps != nil {
in, out := &in.AllowedIps, &out.AllowedIps
*out = make([]string, len(*in))
copy(*out, *in)
}
in.LastHandshakeTime.DeepCopyInto(&out.LastHandshakeTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardPeer.
func (in *WireguardPeer) DeepCopy() *WireguardPeer {
if in == nil {
return nil
}
out := new(WireguardPeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardStatus) DeepCopyInto(out *WireguardStatus) {
*out = *in
if in.Interfaces != nil {
in, out := &in.Interfaces, &out.Interfaces
*out = make([]*WireguardInterface, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(WireguardInterface)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardStatus.
func (in *WireguardStatus) DeepCopy() *WireguardStatus {
if in == nil {
return nil
}
out := new(WireguardStatus)
in.DeepCopyInto(out)
return out
}
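// Illustrative sketch (not part of the generated sources): the deepcopy-gen
// helpers above produce fully independent copies, so mutating the original
// after DeepCopy does not leak into the copy. The import path is an assumption
// for demonstration purposes only.
package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/models"
)

func main() {
	orig := &models.WireguardStatus{
		Interfaces: []*models.WireguardInterface{{Name: "cilium_wg0", PeerCount: 2}},
	}
	clone := orig.DeepCopy()

	// Mutate the original; the clone keeps its own copy of the nested data.
	orig.Interfaces[0].Name = "renamed"
	fmt.Println(orig.Interfaces[0].Name, clone.Interfaces[0].Name) // renamed cilium_wg0
}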
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package models
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerStatusConfiguration) DeepEqual(other *ControllerStatusConfiguration) bool {
if other == nil {
return false
}
if in.ErrorRetry != other.ErrorRetry {
return false
}
if in.ErrorRetryBase != other.ErrorRetryBase {
return false
}
if in.Interval != other.Interval {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointHealth) DeepEqual(other *EndpointHealth) bool {
if other == nil {
return false
}
if in.Bpf != other.Bpf {
return false
}
if in.Connected != other.Connected {
return false
}
if in.OverallHealth != other.OverallHealth {
return false
}
if in.Policy != other.Policy {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointIdentifiers) DeepEqual(other *EndpointIdentifiers) bool {
if other == nil {
return false
}
if in.CniAttachmentID != other.CniAttachmentID {
return false
}
if in.ContainerID != other.ContainerID {
return false
}
if in.ContainerName != other.ContainerName {
return false
}
if in.DockerEndpointID != other.DockerEndpointID {
return false
}
if in.DockerNetworkID != other.DockerNetworkID {
return false
}
if in.K8sNamespace != other.K8sNamespace {
return false
}
if in.K8sPodName != other.K8sPodName {
return false
}
if in.PodName != other.PodName {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointStatusChange) DeepEqual(other *EndpointStatusChange) bool {
if other == nil {
return false
}
if in.Code != other.Code {
return false
}
if in.Message != other.Message {
return false
}
if in.State != other.State {
return false
}
if in.Timestamp != other.Timestamp {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NamedPorts) DeepEqual(other *NamedPorts) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Port) DeepEqual(other *Port) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Port != other.Port {
return false
}
if in.Protocol != other.Protocol {
return false
}
return true
}
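// Illustrative sketch (not part of the generated sources): DeepEqual compares
// field by field and treats a nil `other` as unequal. The import path is an
// assumption for demonstration purposes only.
package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/models"
)

func main() {
	a := &models.Port{Name: "http", Port: 80, Protocol: "TCP"}
	b := &models.Port{Name: "http", Port: 80, Protocol: "TCP"}
	c := &models.Port{Name: "metrics", Port: 9090, Protocol: "TCP"}

	fmt.Println(a.DeepEqual(b))   // true: all fields match
	fmt.Println(a.DeepEqual(c))   // false: name and port differ
	fmt.Println(a.DeepEqual(nil)) // false: a nil other is never equal
}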
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package config provides BGP configuration logic.
package config
import (
"fmt"
"io"
metallbcfg "go.universe.tf/metallb/pkg/config"
"github.com/cilium/cilium/pkg/safeio"
)
// Parse parses and validates the BGP configuration for use with MetalLB. It
// expects the input to be in YAML or JSON form.
func Parse(r io.Reader) (*metallbcfg.Config, error) {
buf, err := safeio.ReadAllLimit(r, safeio.MB)
if err != nil {
return nil, fmt.Errorf("failed to read MetalLB config: %w", err)
}
config, err := metallbcfg.Parse(buf)
if err != nil {
return nil, fmt.Errorf("failed to parse MetalLB config: %w", err)
}
return config, nil
}
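// Illustrative sketch (not part of the Cilium sources): feeding a minimal
// MetalLB-style configuration through Parse. The import path and the exact
// YAML schema shown here are assumptions for demonstration purposes only;
// Parse reads at most 1 MB from the reader and defers validation to MetalLB's
// own parser.
package main

import (
	"fmt"
	"strings"

	bgpconfig "github.com/cilium/cilium/pkg/bgp/config"
)

func main() {
	const yaml = `
address-pools:
- name: default
  protocol: bgp
  addresses:
  - 192.168.10.0/24
`
	cfg, err := bgpconfig.Parse(strings.NewReader(yaml))
	if err != nil {
		fmt.Println("invalid BGP configuration:", err)
		return
	}
	fmt.Printf("parsed configuration: %+v\n", cfg)
}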
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package config
import (
"bytes"
)
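// FuzzConfigParse is a go-fuzz style harness: it feeds arbitrary bytes to
// Parse, deliberately discards the result, and returns 1 to signal (per the
// go-fuzz convention) that the input was processed and may be prioritized for
// the corpus. The harness only exercises the parser for panics and crashes.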
func FuzzConfigParse(data []byte) int {
_, _ = Parse(bytes.NewReader(data))
return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cidr
import (
"bytes"
"fmt"
"net"
)
// NewCIDR returns a new CIDR using a net.IPNet
func NewCIDR(ipnet *net.IPNet) *CIDR {
if ipnet == nil {
return nil
}
return &CIDR{ipnet}
}
// CIDR is a network CIDR representation based on net.IPNet
type CIDR struct {
*net.IPNet
}
// DeepEqual is a deepequal function, deeply comparing the receiver with other.
// in must be non-nil.
func (in *CIDR) DeepEqual(other *CIDR) bool {
if other == nil {
return false
}
if (in.IPNet == nil) != (other.IPNet == nil) {
return false
} else if in.IPNet != nil {
if !in.IPNet.IP.Equal(other.IPNet.IP) {
return false
}
inOnes, inBits := in.IPNet.Mask.Size()
otherOnes, otherBits := other.IPNet.Mask.Size()
return inOnes == otherOnes && inBits == otherBits
}
return true
}
// DeepCopy creates a deep copy of a CIDR
func (n *CIDR) DeepCopy() *CIDR {
if n == nil {
return nil
}
out := new(CIDR)
n.DeepCopyInto(out)
return out
}
// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDR) DeepCopyInto(out *CIDR) {
*out = *in
if in.IPNet == nil {
return
}
out.IPNet = new(net.IPNet)
*out.IPNet = *in.IPNet
if in.IPNet.IP != nil {
in, out := &in.IPNet.IP, &out.IPNet.IP
*out = make(net.IP, len(*in))
copy(*out, *in)
}
if in.IPNet.Mask != nil {
in, out := &in.IPNet.Mask, &out.IPNet.Mask
*out = make(net.IPMask, len(*in))
copy(*out, *in)
}
}
// AvailableIPs returns the number of IPs available in a CIDR
func (n *CIDR) AvailableIPs() int {
ones, bits := n.Mask.Size()
return 1 << (bits - ones)
}
// Equal returns true if the receiver's CIDR equals the other CIDR.
func (n *CIDR) Equal(o *CIDR) bool {
if n == nil || o == nil {
return n == o
}
return Equal(n.IPNet, o.IPNet)
}
// Equal returns true if the n and o net.IPNet CIDRs are Equal.
func Equal(n, o *net.IPNet) bool {
if n == nil || o == nil {
return n == o
}
if n == o {
return true
}
return n.IP.Equal(o.IP) &&
bytes.Equal(n.Mask, o.Mask)
}
// ZeroNet generates a zero net.IPNet object for the given address family
func ZeroNet(family int) *net.IPNet {
switch family {
case FAMILY_V4:
return &net.IPNet{
IP: net.IPv4zero,
Mask: net.CIDRMask(0, 8*net.IPv4len),
}
case FAMILY_V6:
return &net.IPNet{
IP: net.IPv6zero,
Mask: net.CIDRMask(0, 8*net.IPv6len),
}
}
return nil
}
// ContainsAll returns true if 'ipNets1' contains all of the net.IPNet entries in 'ipNets2'
func ContainsAll(ipNets1, ipNets2 []*net.IPNet) bool {
for _, n := range ipNets2 {
if !Contains(ipNets1, n) {
return false
}
}
return true
}
// Contains returns true if 'ipNets' contains ipNet.
func Contains(ipNets []*net.IPNet, ipNet *net.IPNet) bool {
for _, n := range ipNets {
if Equal(n, ipNet) {
return true
}
}
return false
}
// RemoveAll removes all cidrs specified in 'toRemove' from 'ipNets'. ipNets
// is clobbered (to ensure removed CIDRs can be garbage collected) and
// must not be used after this function has been called.
// Example usage:
//
// cidrs = cidr.RemoveAll(cidrs, toRemove)
func RemoveAll(ipNets, toRemove []*net.IPNet) []*net.IPNet {
newIPNets := ipNets[:0]
for _, n := range ipNets {
if !Contains(toRemove, n) {
newIPNets = append(newIPNets, n)
}
}
for i := len(newIPNets); i < len(ipNets); i++ {
ipNets[i] = nil // allow the removed entries to be garbage collected
}
return newIPNets
}
// ParseCIDR parses the CIDR string using net.ParseCIDR
func ParseCIDR(str string) (*CIDR, error) {
_, ipnet, err := net.ParseCIDR(str)
if err != nil {
return nil, err
}
return NewCIDR(ipnet), nil
}
// MustParseCIDR parses the CIDR string using net.ParseCIDR and panics if the
// CIDR cannot be parsed
func MustParseCIDR(str string) *CIDR {
c, err := ParseCIDR(str)
if err != nil {
panic(fmt.Sprintf("Unable to parse CIDR '%s': %s", str, err))
}
return c
}
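// exampleAvailableIPs is an illustrative usage sketch added for documentation
// purposes only (not part of the original source): a /24 prefix covers
// 2^(32-24) = 256 addresses, and AvailableIPs counts all of them, including
// the network and broadcast addresses.
func exampleAvailableIPs() int {
	c := MustParseCIDR("10.0.0.0/24")
	return c.AvailableIPs() // 256
}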
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cidr
func createIPNetMap(list []*CIDR) map[string]*CIDR {
m := map[string]*CIDR{}
for _, c := range list {
if c != nil {
m[c.String()] = c
}
}
return m
}
func listMissingIPNets(existing map[string]*CIDR, new []*CIDR) (missing []*CIDR) {
for _, c := range new {
if c != nil {
if _, ok := existing[c.String()]; !ok {
missing = append(missing, c)
}
}
}
return
}
// DiffCIDRLists compares an old and new list of CIDRs and returns the list of
// removed and added CIDRs
func DiffCIDRLists(old, new []*CIDR) (add, remove []*CIDR) {
add = listMissingIPNets(createIPNetMap(old), new)
remove = listMissingIPNets(createIPNetMap(new), old)
return
}
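// exampleDiffCIDRLists is an illustrative usage sketch added for documentation
// purposes only (not part of the original source): comparing the old and new
// lists below reports 10.0.1.0/24 as added and nothing as removed.
func exampleDiffCIDRLists() (add, remove []*CIDR) {
	old := []*CIDR{MustParseCIDR("10.0.0.0/24")}
	new := []*CIDR{MustParseCIDR("10.0.0.0/24"), MustParseCIDR("10.0.1.0/24")}
	return DiffCIDRLists(old, new)
}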
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"bytes"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
"go4.org/netipx"
"github.com/cilium/cilium/pkg/cidr"
ippkg "github.com/cilium/cilium/pkg/ip"
)
//
// In this file, we define types and utilities for cluster-aware
// addressing which identifies network endpoints using IP address
// and an optional ClusterID. With this special addressing scheme,
// we can distinguish network endpoints (e.g. Pods) that have the
// same IP address but belong to different clusters.
//
// A "bare" IP address is still a valid identifier because there
// are cases that endpoints can be identified without ClusterID
// (e.g. network endpoint has a unique IP address). We can consider
// this as a special case that ClusterID "doesn't matter". ClusterID
// 0 is reserved for indicating that.
//
// AddrCluster is a type that holds a pair of IP and ClusterID.
// We should use this type as much as possible when we implement
// IP + Cluster addressing. We should avoid managing IP and ClusterID
// separately. Otherwise, it is very hard for code readers to see
// where we are using cluster-aware addressing.
type AddrCluster struct {
addr netip.Addr
clusterID uint32
}
const AddrClusterLen = 20
var (
errUnmarshalBadAddress = errors.New("AddrCluster.UnmarshalJSON: bad address")
errMarshalInvalidAddress = errors.New("AddrCluster.MarshalJSON: invalid address")
jsonZeroAddress = []byte("\"\"")
)
// MarshalJSON marshals the address as a string in the form
// <addr>@<clusterID>, e.g. "1.2.3.4@1"
func (a *AddrCluster) MarshalJSON() ([]byte, error) {
if !a.addr.IsValid() {
if a.clusterID != 0 {
return nil, errMarshalInvalidAddress
}
// AddrCluster{} is the zero value. Preserve this across the
// marshalling by returning an empty string.
return jsonZeroAddress, nil
}
var b bytes.Buffer
b.WriteByte('"')
b.WriteString(a.String())
b.WriteByte('"')
return b.Bytes(), nil
}
func (a *AddrCluster) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, jsonZeroAddress) {
return nil
}
if len(data) <= 2 || data[0] != '"' || data[len(data)-1] != '"' {
return errUnmarshalBadAddress
}
// Drop the surrounding quotes
data = data[1 : len(data)-1]
a2, err := ParseAddrCluster(string(data))
if err != nil {
return err
}
a.addr = a2.addr
a.clusterID = a2.clusterID
return nil
}
// ParseAddrCluster parses s as an IP + ClusterID and returns AddrCluster.
// The string s can be a bare IP string (any IP address format allowed in
// netip.ParseAddr()) or an IP string + @ + a ClusterID in decimal. A bare
// IP string is treated as IP string + @ + ClusterID = 0.
func ParseAddrCluster(s string) (AddrCluster, error) {
atIndex := strings.LastIndex(s, "@")
var (
addrStr string
clusterIDStr string
)
if atIndex == -1 {
// s may be a bare IP address string, still valid
addrStr = s
clusterIDStr = ""
} else {
// s may be an IP + ClusterID string
addrStr = s[:atIndex]
clusterIDStr = s[atIndex+1:]
}
addr, err := netip.ParseAddr(addrStr)
if err != nil {
return AddrCluster{}, err
}
if clusterIDStr == "" {
if atIndex != len(s)-1 {
return AddrCluster{addr: addr, clusterID: 0}, nil
} else {
// handle the invalid case like "10.0.0.0@"
return AddrCluster{}, fmt.Errorf("empty cluster ID")
}
}
clusterID64, err := strconv.ParseUint(clusterIDStr, 10, 32)
if err != nil {
return AddrCluster{}, err
}
return AddrCluster{addr: addr, clusterID: uint32(clusterID64)}, nil
}
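// exampleParseAddrCluster is an illustrative usage sketch added for
// documentation purposes only (not part of the original source): a bare IP
// string yields ClusterID 0, while the "<ip>@<id>" form carries an explicit
// ClusterID.
func exampleParseAddrCluster() (AddrCluster, AddrCluster, error) {
	bare, err := ParseAddrCluster("10.0.0.1") // ClusterID == 0
	if err != nil {
		return AddrCluster{}, AddrCluster{}, err
	}
	scoped, err := ParseAddrCluster("10.0.0.1@3") // ClusterID == 3
	return bare, scoped, err
}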
// MustParseAddrCluster calls ParseAddrCluster(s) and panics on error. It is
// intended for use in tests with hard-coded strings.
func MustParseAddrCluster(s string) AddrCluster {
addrCluster, err := ParseAddrCluster(s)
if err != nil {
panic(err)
}
return addrCluster
}
// AddrClusterFromIP parses the given net.IP using ip.AddrFromIP and returns
// AddrCluster with ClusterID = 0.
func AddrClusterFromIP(ip net.IP) (AddrCluster, bool) {
addr, ok := ippkg.AddrFromIP(ip)
if !ok {
return AddrCluster{}, false
}
return AddrCluster{addr: addr, clusterID: 0}, true
}
func MustAddrClusterFromIP(ip net.IP) AddrCluster {
addr, ok := AddrClusterFromIP(ip)
if !ok {
panic("cannot convert net.IP to AddrCluster")
}
return addr
}
// AddrClusterFrom creates AddrCluster from netip.Addr and ClusterID
func AddrClusterFrom(addr netip.Addr, clusterID uint32) AddrCluster {
return AddrCluster{addr: addr, clusterID: clusterID}
}
// Addr returns the IP address part of AddrCluster as netip.Addr. This function
// exists to keep backward compatibility with existing components
// which are not aware of cluster-aware addressing. Calling this function
// on an AddrCluster which has a non-zero ClusterID will lose the ClusterID
// information. It should be used with extra care.
func (ac AddrCluster) Addr() netip.Addr {
return ac.addr
}
// ClusterID returns ClusterID part of AddrCluster as uint32. We should avoid
// using this function as much as possible and treat IP address and ClusterID
// together.
func (ac AddrCluster) ClusterID() uint32 {
return ac.clusterID
}
// Equal returns true when the given AddrCluster has the same IP address and ClusterID
func (ac0 AddrCluster) Equal(ac1 AddrCluster) bool {
return ac0.addr == ac1.addr && ac0.clusterID == ac1.clusterID
}
// Less compares ac0 and ac1 and returns true if ac0 is less than ac1
func (ac0 AddrCluster) Less(ac1 AddrCluster) bool {
// First, compare the IP address part
if ret := ac0.addr.Compare(ac1.addr); ret == -1 {
return true
} else if ret == 1 {
return false
} else {
// If IP address is the same, compare ClusterID
return ac0.clusterID < ac1.clusterID
}
}
// DeepEqual is an alias of Equal which only exists to satisfy deepequal-gen.
// ac1 must be non-nil.
func (ac0 *AddrCluster) DeepEqual(ac1 *AddrCluster) bool {
return ac0.Equal(*ac1)
}
// DeepCopyInto copies in to out
func (in *AddrCluster) DeepCopyInto(out *AddrCluster) {
if out == nil {
return
}
out.addr = in.addr
out.clusterID = in.clusterID
}
// DeepCopy returns a new copy of AddrCluster
func (in *AddrCluster) DeepCopy() *AddrCluster {
out := new(AddrCluster)
in.DeepCopyInto(out)
return out
}
// String returns the string representation of the AddrCluster. If
// AddrCluster.clusterID = 0, it returns bare IP address string. Otherwise, it
// returns IP string + "@" + ClusterID (e.g. 10.0.0.1@1)
func (ac AddrCluster) String() string {
if ac.clusterID == 0 {
return ac.addr.String()
}
return ac.addr.String() + "@" + strconv.FormatUint(uint64(ac.clusterID), 10)
}
// Is4 reports whether IP address part of AddrCluster is an IPv4 address.
func (ac AddrCluster) Is4() bool {
return ac.addr.Is4()
}
// Is6 reports whether IP address part of AddrCluster is an IPv6 address.
func (ac AddrCluster) Is6() bool {
return ac.addr.Is6()
}
// IsUnspecified reports whether IP address part of the AddrCluster is an
// unspecified address, either the IPv4 address "0.0.0.0" or the IPv6
// address "::".
func (ac AddrCluster) IsUnspecified() bool {
return ac.addr.IsUnspecified()
}
// As20 returns the AddrCluster in its 20-byte representation which consists
// of 16-byte IP address part from netip.Addr.As16 and 4-byte ClusterID part.
func (ac AddrCluster) As20() (ac20 [20]byte) {
addr16 := ac.addr.As16()
copy(ac20[:16], addr16[:])
ac20[16] = byte(ac.clusterID >> 24)
ac20[17] = byte(ac.clusterID >> 16)
ac20[18] = byte(ac.clusterID >> 8)
ac20[19] = byte(ac.clusterID)
return ac20
}
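// exampleDecodeAs20 is an illustrative sketch added for documentation purposes
// only (not part of the original source): it reverses the As20 layout by
// reading the 16-byte address followed by the 4-byte big-endian ClusterID.
// Note that netip.AddrFrom16 always yields the 16-byte address form, mirroring As16.
func exampleDecodeAs20(ac20 [20]byte) AddrCluster {
	var a16 [16]byte
	copy(a16[:], ac20[:16])
	id := uint32(ac20[16])<<24 | uint32(ac20[17])<<16 | uint32(ac20[18])<<8 | uint32(ac20[19])
	return AddrCluster{addr: netip.AddrFrom16(a16), clusterID: id}
}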
// AsNetIP returns the IP address part of AddrCluster as a net.IP type. This
// function exists to keep backward compatibility with existing
// components which are not aware of cluster-aware addressing. Calling
// this function on an AddrCluster which has a non-zero ClusterID will
// lose the ClusterID information. It should be used with extra care.
func (ac AddrCluster) AsNetIP() net.IP {
return ac.addr.AsSlice()
}
func (ac AddrCluster) AsPrefixCluster() PrefixCluster {
return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), WithClusterID(ac.clusterID))
}
// PrefixCluster is a type that holds a pair of prefix and ClusterID.
// We should use this type as much as possible when we implement
// prefix + Cluster addressing. We should avoid managing prefix and
// ClusterID separately. Otherwise, it is very hard for code readers
// to see where we are using cluster-aware addressing.
type PrefixCluster struct {
prefix netip.Prefix
clusterID uint32
}
// ParsePrefixCluster parses s as a Prefix + ClusterID and returns PrefixCluster.
// The string s can be a bare IP prefix string (any prefix format allowed in
// netip.ParsePrefix()) or a prefix string + @ + a ClusterID in decimal. A bare
// prefix string is treated as prefix string + @ + ClusterID = 0.
func ParsePrefixCluster(s string) (PrefixCluster, error) {
atIndex := strings.LastIndex(s, "@")
var (
prefixStr string
clusterIDStr string
)
if atIndex == -1 {
// s may be a bare IP prefix string, still valid
prefixStr = s
clusterIDStr = ""
} else {
// s may be a prefix + ClusterID string
prefixStr = s[:atIndex]
clusterIDStr = s[atIndex+1:]
}
prefix, err := netip.ParsePrefix(prefixStr)
if err != nil {
return PrefixCluster{}, err
}
if clusterIDStr == "" {
if atIndex != len(s)-1 {
return PrefixCluster{prefix: prefix, clusterID: 0}, nil
} else {
// handle the invalid case like "10.0.0.0/24@"
return PrefixCluster{}, fmt.Errorf("empty cluster ID")
}
}
clusterID64, err := strconv.ParseUint(clusterIDStr, 10, 32)
if err != nil {
return PrefixCluster{}, err
}
return PrefixCluster{prefix: prefix, clusterID: uint32(clusterID64)}, nil
}
// MustParsePrefixCluster calls ParsePrefixCluster(s) and panics on error.
// It is intended for use in tests with hard-coded strings.
func MustParsePrefixCluster(s string) PrefixCluster {
prefixCluster, err := ParsePrefixCluster(s)
if err != nil {
panic(err)
}
return prefixCluster
}
func (pc PrefixCluster) IsSingleIP() bool {
return pc.prefix.IsSingleIP()
}
type PrefixClusterOpts func(*PrefixCluster)
func WithClusterID(id uint32) PrefixClusterOpts {
return func(pc *PrefixCluster) { pc.clusterID = id }
}
func PrefixClusterFrom(addr netip.Addr, bits int, opts ...PrefixClusterOpts) PrefixCluster {
pc := PrefixCluster{prefix: netip.PrefixFrom(addr, bits)}
for _, opt := range opts {
opt(&pc)
}
return pc
}
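// examplePrefixClusterFrom is an illustrative usage sketch added for
// documentation purposes only (not part of the original source): the
// functional option WithClusterID attaches a ClusterID when building a
// PrefixCluster, so the result below renders as "10.0.0.0/24@3".
func examplePrefixClusterFrom() PrefixCluster {
	addr := netip.MustParseAddr("10.0.0.0")
	return PrefixClusterFrom(addr, 24, WithClusterID(3))
}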
func PrefixClusterFromCIDR(c *cidr.CIDR, opts ...PrefixClusterOpts) PrefixCluster {
if c == nil {
return PrefixCluster{}
}
addr, ok := ippkg.AddrFromIP(c.IP)
if !ok {
return PrefixCluster{}
}
ones, _ := c.Mask.Size()
return PrefixClusterFrom(addr, ones, opts...)
}
func (pc0 PrefixCluster) Equal(pc1 PrefixCluster) bool {
return pc0.prefix == pc1.prefix && pc0.clusterID == pc1.clusterID
}
func (pc PrefixCluster) IsValid() bool {
return pc.prefix.IsValid()
}
func (pc PrefixCluster) AddrCluster() AddrCluster {
return AddrClusterFrom(pc.prefix.Addr(), pc.clusterID)
}
func (pc PrefixCluster) ClusterID() uint32 {
return pc.clusterID
}
func (pc PrefixCluster) String() string {
if pc.clusterID == 0 {
return pc.prefix.String()
}
return pc.prefix.String() + "@" + strconv.FormatUint(uint64(pc.clusterID), 10)
}
// AsPrefix returns the IP prefix part of PrefixCluster as a netip.Prefix type.
// This function exists to keep backward compatibility with existing
// components which are not aware of cluster-aware addressing. Calling
// this function on a PrefixCluster which has a non-zero ClusterID will
// lose the ClusterID information. It should be used with extra care.
func (pc PrefixCluster) AsPrefix() netip.Prefix {
return netip.PrefixFrom(pc.prefix.Addr(), pc.prefix.Bits())
}
// AsIPNet returns the IP prefix part of PrefixCluster as a net.IPNet type. This
// function exists to keep backward compatibility with existing
// components which are not aware of cluster-aware addressing. Calling
// this function on a PrefixCluster which has a non-zero ClusterID will
// lose the ClusterID information. It should be used with extra care.
func (pc PrefixCluster) AsIPNet() net.IPNet {
return *netipx.PrefixIPNet(pc.AsPrefix())
}
// AnnotateIPCacheKeyWithClusterID exists solely for annotating IPCache's key string with a ClusterID.
// IPCache's key string is an IP address or prefix string (10.0.0.1 and 10.0.0.0/32 are
// different entries). This function assumes the given string is in one of those formats and
// simply appends the @<ClusterID> suffix; there is no format check, for performance reasons.
// The caller must make sure the input is a valid IP or prefix string.
//
// We should eventually remove this function once we finish refactoring IPCache and
// stop using string as a key. At that point, we should consider using PrefixCluster
// type for IPCache's key.
func AnnotateIPCacheKeyWithClusterID(key string, clusterID uint32) string {
return key + "@" + strconv.FormatUint(uint64(clusterID), 10)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"fmt"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/logging/logfields"
)
const (
// OptClusterName is the name of the OptClusterName option
OptClusterName = "cluster-name"
// OptClusterID is the name of the OptClusterID option
OptClusterID = "cluster-id"
// OptMaxConnectedClusters is the name of the OptMaxConnectedClusters option
OptMaxConnectedClusters = "max-connected-clusters"
)
// ClusterInfo groups together the ClusterID and the ClusterName
type ClusterInfo struct {
ID uint32 `mapstructure:"cluster-id"`
Name string `mapstructure:"cluster-name"`
MaxConnectedClusters uint32 `mapstructure:"max-connected-clusters"`
}
// DefaultClusterInfo represents the default ClusterInfo values.
var DefaultClusterInfo = ClusterInfo{
ID: 0,
Name: defaults.ClusterName,
MaxConnectedClusters: defaults.MaxConnectedClusters,
}
// Flags implements the cell.Flagger interface, to register the given flags.
func (def ClusterInfo) Flags(flags *pflag.FlagSet) {
flags.Uint32(OptClusterID, def.ID, "Unique identifier of the cluster")
flags.String(OptClusterName, def.Name, "Name of the cluster. It must consist of at most 32 lower case alphanumeric characters and '-', start and end with an alphanumeric character.")
flags.Uint32(OptMaxConnectedClusters, def.MaxConnectedClusters, "Maximum number of clusters to be connected in a clustermesh. Increasing this value will reduce the maximum number of identities available. Valid configurations are [255, 511].")
}
// Validate validates that the ClusterID is in the valid range (including ClusterID == 0),
// and that the ClusterName is different from the default value if the ClusterID != 0.
func (c ClusterInfo) Validate(log logrus.FieldLogger) error {
if c.ID < ClusterIDMin || c.ID > ClusterIDMax {
return fmt.Errorf("invalid cluster id %d: must be in range %d..%d",
c.ID, ClusterIDMin, ClusterIDMax)
}
return c.validateName(log)
}
// ValidateStrict validates that the ClusterID is in the valid range, but not 0,
// and that the ClusterName is different from the default value.
func (c ClusterInfo) ValidateStrict(log logrus.FieldLogger) error {
if err := ValidateClusterID(c.ID); err != nil {
return err
}
return c.validateName(log)
}
func (c ClusterInfo) validateName(log logrus.FieldLogger) error {
if err := ValidateClusterName(c.Name); err != nil {
log.WithField(logfields.ClusterName, c.Name).WithError(err).
Error("Invalid cluster name. This may cause degraded functionality, and will be strictly forbidden starting from Cilium v1.17")
}
if c.ID != 0 && c.Name == defaults.ClusterName {
return fmt.Errorf("cannot use default cluster name (%s) with option %s",
defaults.ClusterName, OptClusterID)
}
return nil
}
// ExtendedClusterMeshEnabled returns true if MaxConnectedClusters value has
// been set to a value larger than the default 255.
func (c ClusterInfo) ExtendedClusterMeshEnabled() bool {
return c.MaxConnectedClusters != defaults.MaxConnectedClusters
}
// ValidateRemoteConfig validates the remote CiliumClusterConfig to ensure
// compatibility with this cluster's configuration.
func (c ClusterInfo) ValidateRemoteConfig(config CiliumClusterConfig) error {
if err := ValidateClusterID(config.ID); err != nil {
return err
}
if c.ExtendedClusterMeshEnabled() && (c.MaxConnectedClusters != config.Capabilities.MaxConnectedClusters) {
return fmt.Errorf("mismatched MaxConnectedClusters; local=%d, remote=%d", c.MaxConnectedClusters, config.Capabilities.MaxConnectedClusters)
}
return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"errors"
"fmt"
"regexp"
"github.com/cilium/cilium/pkg/defaults"
)
const (
// ClusterIDMin is the minimum value of the cluster ID
ClusterIDMin = 0
ClusterIDExt511 = 511
ClusterIDUnset = ClusterIDMin
)
// ClusterIDMax is the maximum value of the cluster ID
var ClusterIDMax uint32 = defaults.MaxConnectedClusters
// A cluster name must respect the following constraints:
// * It must contain at most 32 characters;
// * It must begin and end with a lower case alphanumeric character;
// * It may contain lower case alphanumeric characters and dashes between.
const (
// clusterNameMaxLength is the maximum allowed length of a cluster name.
clusterNameMaxLength = 32
// clusterNameRegexStr is the regex to validate a cluster name.
clusterNameRegexStr = `^([a-z0-9][-a-z0-9]*)?[a-z0-9]$`
)
var clusterNameRegex = regexp.MustCompile(clusterNameRegexStr)
// InitClusterIDMax validates and sets the ClusterIDMax package level variable.
func (c ClusterInfo) InitClusterIDMax() error {
switch c.MaxConnectedClusters {
case defaults.MaxConnectedClusters, ClusterIDExt511:
ClusterIDMax = c.MaxConnectedClusters
default:
return fmt.Errorf("--%s=%d is invalid; supported values are [%d, %d]", OptMaxConnectedClusters, c.MaxConnectedClusters, defaults.MaxConnectedClusters, ClusterIDExt511)
}
return nil
}
// ValidateClusterID ensures that the given clusterID is within the configured
// range of the ClusterMesh.
func ValidateClusterID(clusterID uint32) error {
if clusterID == ClusterIDMin {
return fmt.Errorf("ClusterID %d is reserved", ClusterIDMin)
}
if clusterID > ClusterIDMax {
return fmt.Errorf("ClusterID > %d is not supported", ClusterIDMax)
}
return nil
}
// ValidateClusterName validates that the given name matches the cluster name specifications.
func ValidateClusterName(name string) error {
if name == "" {
return errors.New("must not be empty")
}
if len(name) > clusterNameMaxLength {
return fmt.Errorf("must not be more than %d characters", clusterNameMaxLength)
}
if !clusterNameRegex.MatchString(name) {
return errors.New("must consist of lower case alphanumeric characters and '-', and must start and end with an alphanumeric character")
}
return nil
}
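// exampleValidateClusterName is an illustrative usage sketch added for
// documentation purposes only (not part of the original source): "cluster-1"
// satisfies the constraints above, while a name containing upper-case
// characters or an underscore does not.
func exampleValidateClusterName() (validErr, invalidErr error) {
	validErr = ValidateClusterName("cluster-1")      // nil
	invalidErr = ValidateClusterName("Invalid_Name") // non-nil
	return validErr, invalidErr
}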
type CiliumClusterConfig struct {
ID uint32 `json:"id,omitempty"`
Capabilities CiliumClusterConfigCapabilities `json:"capabilities,omitempty"`
}
type CiliumClusterConfigCapabilities struct {
// Supports per-prefix "synced" canaries
SyncedCanaries bool `json:"syncedCanaries,omitempty"`
// The information concerning the given cluster is cached from an external
// kvstore (for instance, by kvstoremesh). This implies that keys are stored
// under the dedicated "cilium/cache" prefix, and all are cluster-scoped.
Cached bool `json:"cached,omitempty"`
// The maximum number of clusters the given cluster can support in a ClusterMesh.
MaxConnectedClusters uint32 `json:"maxConnectedClusters,omitempty"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package command
import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strings"
"unicode"
"github.com/spf13/cast"
"github.com/spf13/viper"
)
const (
comma = ','
equal = '='
)
var keyValueRegex = regexp.MustCompile(`([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*[\w-:;,./@])?[\w-:;,./@]*,)*([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*)?[\w-:;./@]+)$`)
// GetStringMapString contains one enhancement compared to the original
// implementation of the GetStringMapString function: it supports the k1=v1,k2=v2 format.
// Related upstream issue https://github.com/spf13/viper/issues/911
func GetStringMapString(vp *viper.Viper, key string) map[string]string {
v, _ := GetStringMapStringE(vp, key)
return v
}
// GetStringMapStringE is the same as GetStringMapString, but also returns an error
func GetStringMapStringE(vp *viper.Viper, key string) (map[string]string, error) {
return ToStringMapStringE(vp.Get(key))
}
// ToStringMapStringE casts an interface to a map[string]string type. The underlying
// interface type might be a map or a string. In the latter case, a JSON decode is
// attempted first, falling back to the k1=v1,k2=v2 format if the input doesn't look like JSON.
func ToStringMapStringE(data interface{}) (map[string]string, error) {
if data == nil {
return map[string]string{}, nil
}
v, err := cast.ToStringMapStringE(data)
if err != nil {
var syntaxErr *json.SyntaxError
if !errors.As(err, &syntaxErr) {
return v, err
}
switch s := data.(type) {
case string:
if len(s) == 0 {
return map[string]string{}, nil
}
// If the input starts with either '{' or '[', just preserve the original JSON parsing error.
firstIndex := strings.IndexFunc(s, func(r rune) bool {
return !unicode.IsSpace(r)
})
if firstIndex != -1 && (s[firstIndex] == '{' || s[firstIndex] == '[') {
return v, err
}
if !isValidKeyValuePair(s) {
return map[string]string{}, fmt.Errorf("'%s' is not formatted as key=value,key1=value1", s)
}
var v = map[string]string{}
kvs := splitKeyValue(s, comma, equal)
for _, kv := range kvs {
temp := strings.Split(kv, string(equal))
if len(temp) != 2 {
return map[string]string{}, fmt.Errorf("'%s' in '%s' is not formatted as key=value,key1=value1", kv, s)
}
v[temp[0]] = temp[1]
}
return v, nil
}
}
return v, nil
}
// isValidKeyValuePair returns true if the input is following key1=value1,key2=value2,...,keyN=valueN format.
func isValidKeyValuePair(str string) bool {
if len(str) == 0 {
return true
}
return len(keyValueRegex.ReplaceAllString(str, "")) == 0
}
// splitKeyValue is similar to strings.Split, but looks ahead to make sure
// that the sep character is allowed in the value component of a key-value pair.
//
// Example: with the input "c6a.2xlarge=4,15,15,m4.xlarge=2,4,8",
// - strings.Split function will return []string{"c6a.2xlarge=4", "15", "15", "m4.xlarge=2", "4", "8"}.
// - splitKeyValue function will return []string{"c6a.2xlarge=4,15,15", "m4.xlarge=2,4,8"} instead.
func splitKeyValue(str string, sep rune, keyValueSep rune) []string {
var sepIndexes, kvValueSepIndexes []int
// find all indexes of separator character
for i := 0; i < len(str); i++ {
switch int32(str[i]) {
case sep:
sepIndexes = append(sepIndexes, i)
case keyValueSep:
kvValueSepIndexes = append(kvValueSepIndexes, i)
}
}
// there's only a single key-value if there are no separators ("key=value")
// or a single key-value separator ("key=option1:value1,option2:value2")
if len(sepIndexes) == 0 || len(kvValueSepIndexes) == 1 {
return []string{str}
}
if len(sepIndexes) == 1 {
index := sepIndexes[0]
return []string{str[:index], str[index+1:]}
}
var res []string
var start = 0
for i := 0; i < len(sepIndexes); i++ {
last := len(str)
if i < len(sepIndexes)-1 {
last = sepIndexes[i+1]
}
if strings.ContainsRune(str[sepIndexes[i]:last], keyValueSep) {
res = append(res, str[start:sepIndexes[i]])
start = sepIndexes[i] + 1
}
}
// append the remainder after the last separator index
res = append(res, str[start:])
return res
}
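// exampleToStringMapStringE is an illustrative usage sketch added for
// documentation purposes only (not part of the original source): a JSON
// object is decoded directly, while the key=value form falls back to the
// splitKeyValue logic above so that commas inside values are preserved.
func exampleToStringMapStringE() (map[string]string, map[string]string, error) {
	fromJSON, err := ToStringMapStringE(`{"k1":"v1","k2":"v2"}`)
	if err != nil {
		return nil, nil, err
	}
	// Expected result: map["c6a.2xlarge":"4,15,15" "m4.xlarge":"2,4,8"]
	fromKV, err := ToStringMapStringE("c6a.2xlarge=4,15,15,m4.xlarge=2,4,8")
	return fromJSON, fromKV, err
}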
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package command
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"k8s.io/client-go/util/jsonpath"
)
var (
outputOpt string
re = regexp.MustCompile(`^jsonpath\=(.*)`)
)
// OutputOption returns true if an output option was specified.
func OutputOption() bool {
return len(outputOpt) > 0
}
// OutputOptionString returns the output option as a string
func OutputOptionString() string {
if outputOpt == "yaml" {
return "YAML"
}
if outputOpt == "json" || re.MatchString(outputOpt) {
return "JSON"
}
return "unknown"
}
// AddOutputOption adds the -o|--output option to any cmd to export to json or yaml.
func AddOutputOption(cmd *cobra.Command) {
cmd.Flags().StringVarP(&outputOpt, "output", "o", "", "json| yaml| jsonpath='{}'")
}
// ForceJSON sets output mode to JSON (for unit tests)
func ForceJSON() {
outputOpt = "json"
}
// PrintOutput receives an interface and dumps the data using the --output flag.
// Supported output types are json, yaml, and jsonpath.
func PrintOutput(data interface{}) error {
return PrintOutputWithType(data, outputOpt)
}
// PrintOutputWithPatch merges data with patch and dumps the result using the --output flag.
func PrintOutputWithPatch(data interface{}, patch interface{}) error {
mergedInterface, err := mergeInterfaces(data, patch)
if err != nil {
return fmt.Errorf("Unable to merge Interfaces: %w", err)
}
return PrintOutputWithType(mergedInterface, outputOpt)
}
func mergeInterfaces(data, patch interface{}) (interface{}, error) {
var i1, i2 interface{}
data1, err := json.Marshal(data)
if err != nil {
return nil, err
}
data2, err := json.Marshal(patch)
if err != nil {
return nil, err
}
err = json.Unmarshal(data1, &i1)
if err != nil {
return nil, err
}
err = json.Unmarshal(data2, &i2)
if err != nil {
return nil, err
}
return recursiveMerge(i1, i2), nil
}
func recursiveMerge(i1, i2 interface{}) interface{} {
switch i1 := i1.(type) {
case map[string]interface{}:
i2, ok := i2.(map[string]interface{})
if !ok {
return i1
}
for k, v2 := range i2 {
if v1, ok := i1[k]; ok {
i1[k] = recursiveMerge(v1, v2)
} else {
i1[k] = v2
}
}
case nil:
i2, ok := i2.(map[string]interface{})
if ok {
return i2
}
}
return i1
}
// PrintOutputWithType receives an interface and dumps the data using the given
// output type. Supported types are json, yaml, and jsonpath.
func PrintOutputWithType(data interface{}, outputType string) error {
if outputType == "json" {
return dumpJSON(data, "")
}
if outputType == "yaml" {
return dumpYAML(data)
}
if re.MatchString(outputType) {
return dumpJSON(data, re.ReplaceAllString(outputType, "$1"))
}
return fmt.Errorf("couldn't find output printer")
}
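// examplePrintOutputWithType is an illustrative usage sketch added for
// documentation purposes only (not part of the original source): the same
// data can be rendered as JSON, as YAML, or through a jsonpath filter,
// mirroring the values accepted by the -o/--output flag.
func examplePrintOutputWithType() error {
	data := map[string]string{"name": "cilium", "state": "ok"}
	if err := PrintOutputWithType(data, "json"); err != nil {
		return err
	}
	if err := PrintOutputWithType(data, "yaml"); err != nil {
		return err
	}
	return PrintOutputWithType(data, "jsonpath={.name}")
}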
// DumpJSONToString dumps the contents of data into a string. If jsonpath is
// non-empty, will attempt to do jsonpath filtering using said string. Returns a
// string containing the JSON in data, or an error if any JSON marshaling,
// parsing operations fail.
func DumpJSONToString(data interface{}, jsonPath string) (string, error) {
if len(jsonPath) == 0 {
result, err := json.MarshalIndent(data, "", " ")
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't marshal to json: '%s'\n", err)
return "", err
}
fmt.Println(string(result))
return "", nil
}
parser := jsonpath.New("").AllowMissingKeys(true)
if err := parser.Parse(jsonPath); err != nil {
fmt.Fprintf(os.Stderr, "Couldn't parse jsonpath expression: '%s'\n", err)
return "", err
}
var sb strings.Builder
if err := parser.Execute(&sb, data); err != nil {
fmt.Fprintf(os.Stderr, "Couldn't parse jsonpath expression: '%s'\n", err)
return "", err
}
return sb.String(), nil
}
// dumpJSON dumps the data variable to stdout as JSON.
// If jsonPath is passed, it runs the JSON query over the data variable.
// If something fails, it returns an error.
func dumpJSON(data interface{}, jsonPath string) error {
jsonStr, err := DumpJSONToString(data, jsonPath)
if err != nil {
return err
}
fmt.Println(jsonStr)
return nil
}
// dumpYAML dumps the data variable to stdout as YAML.
// If something fails, it returns an error.
func dumpYAML(data interface{}) error {
result, err := yaml.Marshal(data)
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't marshal to yaml: '%s'\n", err)
return err
}
fmt.Println(string(result))
return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Based on code from github.com/miekg/dns which is:
//
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2011 Miek Gieben. All rights reserved.
// Copyright 2014 CloudFlare. All rights reserved.
package dns
import "strings"
// These functions were copied and adapted from github.com/miekg/dns.
// isFQDN reports whether the domain name s is fully qualified.
func isFQDN(s string) bool {
s2 := strings.TrimSuffix(s, ".")
if s == s2 {
return false
}
i := strings.LastIndexFunc(s2, func(r rune) bool {
return r != '\\'
})
// Test whether we have an even number of escape sequences before
// the dot or none.
return (len(s2)-i)%2 != 0
}
// FQDN returns the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func FQDN(s string) string {
if isFQDN(s) {
return strings.ToLower(s)
}
return strings.ToLower(s) + "."
}
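// exampleFQDN is an illustrative usage sketch added for documentation
// purposes only (not part of the original source): a missing trailing dot is
// appended and the result is always lower-cased, so both calls below return
// "example.com.".
func exampleFQDN() (string, string) {
	return FQDN("Example.COM"), FQDN("example.com.")
}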
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package matchpattern
import (
"errors"
"regexp"
"strings"
"github.com/cilium/cilium/pkg/fqdn/dns"
"github.com/cilium/cilium/pkg/fqdn/re"
)
const allowedDNSCharsREGroup = "[-a-zA-Z0-9_]"
// MatchAllAnchoredPattern is the simplest pattern that matches all inputs. The
// resulting parsed regular expression is the same as an empty string regex (""), but this
// value is easier to reason about when serializing to and from JSON.
const MatchAllAnchoredPattern = "(?:)"
// MatchAllUnAnchoredPattern is the same as MatchAllAnchoredPattern, except that
// it can be or-ed (joined with "|") with other rules, and still match all rules.
const MatchAllUnAnchoredPattern = ".*"
// Validate ensures that pattern is a parseable matchPattern. It returns the
// regexp generated when validating.
func Validate(pattern string) (matcher *regexp.Regexp, err error) {
if err := prevalidate(pattern); err != nil {
return nil, err
}
return re.CompileRegex(ToAnchoredRegexp(pattern))
}
// ValidateWithoutCache is the same as Validate() but doesn't consult the regex
// LRU.
func ValidateWithoutCache(pattern string) (matcher *regexp.Regexp, err error) {
if err := prevalidate(pattern); err != nil {
return nil, err
}
return regexp.Compile(ToAnchoredRegexp(pattern))
}
func prevalidate(pattern string) error {
pattern = strings.TrimSpace(pattern)
pattern = strings.ToLower(pattern)
// error check
if strings.ContainsAny(pattern, "[]+{},") {
return errors.New(`Only alphanumeric ASCII characters, the hyphen "-", underscore "_", "." and "*" are allowed in a matchPattern`)
}
return nil
}
// Sanitize canonicalizes the pattern for use by ToAnchoredRegexp
func Sanitize(pattern string) string {
if pattern == "*" {
return pattern
}
return dns.FQDN(pattern)
}
// ToAnchoredRegexp converts a MatchPattern field into a regexp string. It does not
// validate the pattern. It also adds anchors to ensure it matches the whole string.
// It supports:
// * to select 0 or more DNS valid characters
func ToAnchoredRegexp(pattern string) string {
pattern = strings.TrimSpace(pattern)
pattern = strings.ToLower(pattern)
// handle the * match-all case. This will filter down to the end.
if pattern == "*" {
return "(^(" + allowedDNSCharsREGroup + "+[.])+$)|(^[.]$)"
}
pattern = escapeRegexpCharacters(pattern)
// Anchor the match to require the whole string to match this expression
return "^" + pattern + "$"
}
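// exampleToAnchoredRegexp is an illustrative usage sketch added for
// documentation purposes only (not part of the original source): the
// matchPattern "*.cilium.io" is first canonicalized to "*.cilium.io." and
// then becomes "^[-a-zA-Z0-9_]*[.]cilium[.]io[.]$", where "." is escaped as
// "[.]" and "*" expands to the allowed DNS character group.
func exampleToAnchoredRegexp() string {
	return ToAnchoredRegexp(Sanitize("*.cilium.io"))
}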
// ToUnAnchoredRegexp converts a MatchPattern field into a regexp string. It does not
// validate the pattern. It does not add regexp anchors.
// It supports:
// * to select 0 or more DNS valid characters
func ToUnAnchoredRegexp(pattern string) string {
pattern = strings.TrimSpace(pattern)
pattern = strings.ToLower(pattern)
// handle the * match-all case. This will filter down to the end.
if pattern == "*" {
return MatchAllUnAnchoredPattern
}
pattern = escapeRegexpCharacters(pattern)
return pattern
}
func escapeRegexpCharacters(pattern string) string {
// base case. "." becomes a literal .
pattern = strings.Replace(pattern, ".", "[.]", -1)
// base case. * becomes .*, but only for DNS valid characters
// NOTE: this only works because the case above does not leave the *
pattern = strings.Replace(pattern, "*", allowedDNSCharsREGroup+"*", -1)
return pattern
}
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package matchpattern
func FuzzMatchpatternValidate(data []byte) int {
_, _ = Validate(string(data))
return 1
}
func FuzzMatchpatternValidateWithoutCache(data []byte) int {
_, _ = ValidateWithoutCache(string(data))
return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package re provides a simple function to access compile regex objects for
// the FQDN subsystem.
package re
import (
"errors"
"fmt"
"regexp"
"sync/atomic"
lru "github.com/golang/groupcache/lru"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "fqdn/re")
)
// CompileRegex compiles a pattern p into a regex and returns the regex object.
// The regex object will be cached by an LRU. If p has already been compiled
// and cached, this function will return the cached regex object. If not
// already cached, it will compile p into a regex object and cache it in the
// LRU. This function will return an error if the LRU has not already been
// initialized.
func CompileRegex(p string) (*regexp.Regexp, error) {
lru := regexCompileLRU.Load()
if lru == nil {
return nil, errors.New("FQDN regex compilation LRU not yet initialized")
}
lru.Lock()
r, ok := lru.Get(p)
lru.Unlock()
if ok {
return r.(*regexp.Regexp), nil
}
n, err := regexp.Compile(p)
if err != nil {
return nil, fmt.Errorf("failed to compile regex: %w", err)
}
lru.Lock()
lru.Add(p, n)
lru.Unlock()
return n, nil
}
// InitRegexCompileLRU creates a new instance of the regex compilation LRU.
func InitRegexCompileLRU(size int) error {
if size < 0 {
return fmt.Errorf("failed to initialize FQDN regex compilation LRU due to invalid size %d", size)
} else if size == 0 {
log.Warnf(
"FQDN regex compilation LRU size is unlimited, which can grow unbounded potentially consuming too much memory. Consider passing a maximum size via --%s.",
option.FQDNRegexCompileLRUSize)
}
regexCompileLRU.Store(&RegexCompileLRU{
Mutex: &lock.Mutex{},
Cache: lru.New(size),
})
return nil
}
// regexCompileLRU is the singleton instance of the LRU that's shared
// throughout Cilium.
var regexCompileLRU atomic.Pointer[RegexCompileLRU]
// RegexCompileLRU is an LRU cache for storing compiled regex objects of FQDN
// names or patterns, used in CiliumNetworkPolicy or
// ClusterwideCiliumNetworkPolicy.
type RegexCompileLRU struct {
// The lru package doesn't provide any concurrency guarantees so we must
// provide our own locking.
*lock.Mutex
*lru.Cache
}
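// exampleCompileRegex is an illustrative usage sketch added for documentation
// purposes only (not part of the original source): the LRU must be
// initialized once (normally done at agent startup) before CompileRegex can
// serve cached compilations.
func exampleCompileRegex() (*regexp.Regexp, error) {
	if err := InitRegexCompileLRU(128); err != nil {
		return nil, err
	}
	return CompileRegex(`^[-a-zA-Z0-9_]*[.]cilium[.]io[.]$`)
}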
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"net"
"net/netip"
)
// ParseCIDRs fetches all CIDRs referred to by the specified slice and returns
// them as regular golang CIDR objects.
//
// Deprecated. Consider using ParsePrefixes() instead.
func ParseCIDRs(cidrs []string) (valid []*net.IPNet, invalid []string) {
valid = make([]*net.IPNet, 0, len(cidrs))
invalid = make([]string, 0, len(cidrs))
for _, cidr := range cidrs {
_, prefix, err := net.ParseCIDR(cidr)
if err != nil {
// Likely the CIDR is specified in host format.
ip := net.ParseIP(cidr)
if ip == nil {
invalid = append(invalid, cidr)
continue
} else {
prefix = IPToPrefix(ip)
}
}
if prefix != nil {
valid = append(valid, prefix)
}
}
return valid, invalid
}
// ParsePrefixes parses all CIDRs referred to by the specified slice and
// returns them as regular golang netip.Prefix objects.
func ParsePrefixes(cidrs []string) (valid []netip.Prefix, invalid []string, errors []error) {
valid = make([]netip.Prefix, 0, len(cidrs))
invalid = make([]string, 0, len(cidrs))
errors = make([]error, 0, len(cidrs))
for _, cidr := range cidrs {
prefix, err := netip.ParsePrefix(cidr)
if err != nil {
ip, err2 := netip.ParseAddr(cidr)
if err2 != nil {
invalid = append(invalid, cidr)
errors = append(errors, err2)
continue
}
prefix = netip.PrefixFrom(ip, ip.BitLen())
}
valid = append(valid, prefix.Masked())
}
return valid, invalid, errors
}
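// exampleParsePrefixes is an illustrative usage sketch added for
// documentation purposes only (not part of the original source): bare
// addresses are widened to single-address prefixes, while unparseable
// entries are reported in 'invalid' together with their errors.
func exampleParsePrefixes() (valid []netip.Prefix, invalid []string, errs []error) {
	return ParsePrefixes([]string{"10.0.0.0/8", "192.168.1.1", "not-a-cidr"})
}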
// AddrToIPNet is a convenience helper to convert a netip.Addr to a *net.IPNet
// with a mask corresponding to the address's bit length.
func AddrToIPNet(addr netip.Addr) *net.IPNet {
if !addr.IsValid() {
return nil
}
return &net.IPNet{
IP: addr.AsSlice(),
Mask: net.CIDRMask(addr.BitLen(), addr.BitLen()),
}
}
// IPToNetPrefix is a convenience helper for migrating from the older 'net'
// standard library types to the newer 'netip' types. Use this to plug the new
// types in newer code into older types in older code during the migration.
//
// Note: This function assumes given ip is not an IPv4 mapped IPv6 address.
// See the comment of AddrFromIP for more details.
func IPToNetPrefix(ip net.IP) netip.Prefix {
a, ok := AddrFromIP(ip)
if !ok {
return netip.Prefix{}
}
return netip.PrefixFrom(a, a.BitLen())
}
// IPsToNetPrefixes returns all of the ips as a slice of netip.Prefix.
//
// See IPToNetPrefix() for how net.IP types are handled by this function.
func IPsToNetPrefixes(ips []net.IP) []netip.Prefix {
if len(ips) == 0 {
return nil
}
res := make([]netip.Prefix, 0, len(ips))
for _, ip := range ips {
res = append(res, IPToNetPrefix(ip))
}
return res
}
// NetsContainsAny reports whether any subnet in the `a` subnet group *fully*
// contains any of the subnets in the `b` subnet group.
func NetsContainsAny(a, b []*net.IPNet) bool {
for _, an := range a {
aMask, _ := an.Mask.Size()
aIsIPv4 := an.IP.To4() != nil
for _, bn := range b {
bIsIPv4 := bn.IP.To4() != nil
isSameFamily := aIsIPv4 == bIsIPv4
if isSameFamily {
bMask, _ := bn.Mask.Size()
if bMask >= aMask && an.Contains(bn.IP) {
return true
}
}
}
}
return false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"bytes"
"encoding/binary"
"math/big"
"net"
"net/netip"
"sort"
"github.com/cilium/cilium/pkg/slices"
)
const (
ipv4BitLen = 8 * net.IPv4len
ipv6BitLen = 8 * net.IPv6len
)
// CountIPsInCIDR takes an RFC4632/RFC4291-formatted IPv4/IPv6 CIDR and
// determines how many IP addresses reside within that CIDR.
// The first and the last (base and broadcast) IPs are excluded.
//
// Returns 0 for prefixes that leave no usable addresses (e.g. /32 and /31 for IPv4).
func CountIPsInCIDR(ipnet *net.IPNet) *big.Int {
subnet, size := ipnet.Mask.Size()
if subnet == size {
return big.NewInt(0)
}
return big.NewInt(0).
Sub(
big.NewInt(2).Exp(big.NewInt(2),
big.NewInt(int64(size-subnet)), nil),
big.NewInt(2),
)
}
var (
// v4Mappedv6Prefix is the RFC2765 IPv4-mapped address prefix.
v4Mappedv6Prefix = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff}
ipv4LeadingZeroes = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
defaultIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}
defaultIPv6 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
upperIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 255, 255, 255, 255}
upperIPv6 = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
)
// NetsByMask is used to sort a list of IP networks by the size of their masks.
// Implements sort.Interface.
type NetsByMask []*net.IPNet
func (s NetsByMask) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s NetsByMask) Less(i, j int) bool {
iPrefixSize, _ := s[i].Mask.Size()
jPrefixSize, _ := s[j].Mask.Size()
if iPrefixSize == jPrefixSize {
return bytes.Compare(s[i].IP, s[j].IP) < 0
}
return iPrefixSize < jPrefixSize
}
func (s NetsByMask) Len() int {
return len(s)
}
// Assert that NetsByMask implements sort.Interface.
var _ sort.Interface = NetsByMask{}
var _ sort.Interface = NetsByRange{}
// NetsByRange is used to sort a list of ranges, first by their last IPs, then by
// their first IPs.
// Implements sort.Interface.
type NetsByRange []*netWithRange
func (s NetsByRange) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s NetsByRange) Less(i, j int) bool {
// First compare by last IP.
lastComparison := bytes.Compare(*s[i].Last, *s[j].Last)
if lastComparison < 0 {
return true
} else if lastComparison > 0 {
return false
}
// Then compare by first IP.
firstComparison := bytes.Compare(*s[i].First, *s[j].First)
if firstComparison < 0 {
return true
} else if firstComparison > 0 {
return false
}
// First and last IPs are the same, so thus are equal, and s[i]
// is not less than s[j].
return false
}
func (s NetsByRange) Len() int {
return len(s)
}
// removeRedundantCIDRs removes CIDRs which are contained within other given CIDRs.
func removeRedundantCIDRs(CIDRs []*net.IPNet) []*net.IPNet {
redundant := make(map[int]bool)
for j, CIDR := range CIDRs {
if redundant[j] {
continue // Skip redundant CIDRs
}
for i, CIDR2 := range CIDRs {
// Skip checking CIDR against itself or if CIDR has already been deemed redundant.
if i == j || redundant[i] {
continue
}
if CIDR.Contains(CIDR2.IP) {
redundant[i] = true
}
}
}
if len(redundant) == 0 {
return CIDRs
}
if len(redundant) == 1 {
for i := range redundant {
return append(CIDRs[:i], CIDRs[i+1:]...)
}
}
newCIDRs := make([]*net.IPNet, 0, len(CIDRs)-len(redundant))
for i := range CIDRs {
if redundant[i] {
continue
}
newCIDRs = append(newCIDRs, CIDRs[i])
}
return newCIDRs
}
// RemoveCIDRs removes the specified CIDRs from another set of CIDRs. If a CIDR
// to remove is not contained within any of the allowed CIDRs, it is ignored. A
// slice of CIDRs is returned which contains the set of CIDRs provided minus
// the set of CIDRs which were removed. Both input slices may be modified by
// calling this function.
func RemoveCIDRs(allowCIDRs, removeCIDRs []*net.IPNet) []*net.IPNet {
// Ensure that we iterate through the provided CIDRs in order of largest
// subnet first.
sort.Sort(NetsByMask(removeCIDRs))
// Remove CIDRs which are contained within CIDRs that we want to remove;
// such CIDRs are redundant.
removeCIDRs = removeRedundantCIDRs(removeCIDRs)
// Remove redundant allowCIDR so that all allowCIDRs are disjoint
allowCIDRs = removeRedundantCIDRs(allowCIDRs)
for _, remove := range removeCIDRs {
i := 0
for i < len(allowCIDRs) {
allowCIDR := allowCIDRs[i]
// Only remove CIDR if it is contained in the subnet we are allowing.
if allowCIDR.Contains(remove.IP.Mask(remove.Mask)) {
nets := excludeContainedCIDR(allowCIDR, remove)
// Remove CIDR that we have just processed and append new CIDRs
// that we computed from removing the CIDR to remove.
allowCIDRs = append(allowCIDRs[:i], allowCIDRs[i+1:]...)
allowCIDRs = append(allowCIDRs, nets...)
} else if remove.Contains(allowCIDR.IP.Mask(allowCIDR.Mask)) {
// If a CIDR that we want to remove contains a CIDR in the list
// that is allowed, then we can just remove the CIDR to allow.
allowCIDRs = append(allowCIDRs[:i], allowCIDRs[i+1:]...)
} else {
// Advance only if CIDR at index 'i' was not removed
i++
}
}
}
return allowCIDRs
}
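// exampleRemoveCIDRs is an illustrative usage sketch added for documentation
// purposes only (not part of the original source): removing 10.0.0.0/25 from
// 10.0.0.0/24 leaves the other half of the /24, i.e. 10.0.0.128/25.
func exampleRemoveCIDRs() []*net.IPNet {
	_, allow, _ := net.ParseCIDR("10.0.0.0/24")
	_, remove, _ := net.ParseCIDR("10.0.0.0/25")
	return RemoveCIDRs([]*net.IPNet{allow}, []*net.IPNet{remove})
}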
func getNetworkPrefix(ipNet *net.IPNet) *net.IP {
var mask net.IP
if ipNet.IP.To4() == nil {
mask = make(net.IP, net.IPv6len)
for i := 0; i < len(ipNet.Mask); i++ {
mask[net.IPv6len-i-1] = ipNet.IP[net.IPv6len-i-1] & ^ipNet.Mask[i]
}
} else {
mask = make(net.IP, net.IPv4len)
for i := 0; i < net.IPv4len; i++ {
mask[net.IPv4len-i-1] = ipNet.IP[net.IPv6len-i-1] & ^ipNet.Mask[i]
}
}
return &mask
}
// excludeContainedCIDR returns a set of CIDRs that is equivalent to 'allowCIDR'
// except for 'removeCIDR', which must be a subset of 'allowCIDR'.
// Caller is responsible for only passing CIDRs of the same address family.
func excludeContainedCIDR(allowCIDR, removeCIDR *net.IPNet) []*net.IPNet {
// Get size of each CIDR mask.
allowSize, addrSize := allowCIDR.Mask.Size()
removeSize, _ := removeCIDR.Mask.Size()
// Removing a CIDR from itself should result in an empty set
if allowSize == removeSize && allowCIDR.IP.Equal(removeCIDR.IP) {
return nil
}
removeIPMasked := removeCIDR.IP.Mask(removeCIDR.Mask)
// Create CIDR prefixes with mask size of Y+1, Y+2 ... X where Y is the mask
// length of the CIDR prefix of allowCIDR from which we are excluding the CIDR
// prefix removeCIDR with mask length X.
allows := make([]*net.IPNet, 0, removeSize-allowSize)
// Scan bits from high to low, where 0th bit is the highest.
// For example, an allowCIDR of size 16 covers bits 0..15,
// so the new bit in the first new mask is 16th bit, for a mask size 17.
for bit := allowSize; bit < removeSize; bit++ {
newMaskSize := bit + 1 // bit numbering starts from 0, 0th bit needs mask of size 1
// The mask for each CIDR prefix is simply the masked removeCIDR with the lowest bit
// within the new mask size flipped.
newMask := net.CIDRMask(newMaskSize, addrSize)
newIPMasked := removeIPMasked.Mask(newMask)
flipNthHighestBit(newIPMasked, uint(bit))
newIPNet := net.IPNet{IP: newIPMasked, Mask: newMask}
allows = append(allows, &newIPNet)
}
return allows
}
// Flip the 'n'th highest bit in 'ip'. 'ip' is modified in place. 'n' is zero indexed.
func flipNthHighestBit(ip net.IP, n uint) {
i := n / 8
ip[i] = ip[i] ^ 0x80>>(n%8)
}
func ipNetToRange(ipNet net.IPNet) netWithRange {
firstIP := make(net.IP, len(ipNet.IP))
lastIP := make(net.IP, len(ipNet.IP))
copy(firstIP, ipNet.IP)
copy(lastIP, ipNet.IP)
firstIP = firstIP.Mask(ipNet.Mask)
lastIP = lastIP.Mask(ipNet.Mask)
if firstIP.To4() != nil {
firstIP = append(v4Mappedv6Prefix, firstIP...)
lastIP = append(v4Mappedv6Prefix, lastIP...)
}
lastIPMask := make(net.IPMask, len(ipNet.Mask))
copy(lastIPMask, ipNet.Mask)
for i := range lastIPMask {
lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1]
lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1]
}
return netWithRange{First: &firstIP, Last: &lastIP, Network: &ipNet}
}
// PrefixCeil converts the given number of IPs to the minimum number of prefixes needed to host those IPs.
// multiple indicates the number of IPs in a single prefix.
func PrefixCeil(numIPs int, multiple int) int {
if numIPs == 0 {
return 0
}
quotient := numIPs / multiple
rem := numIPs % multiple
if rem > 0 {
return quotient + 1
}
return quotient
}
// PrefixToIps converts the given prefix to an array containing IPs in the provided
// prefix/CIDR block. When maxIPs is set to 0, the returned array will contain all IPs
// in the given prefix. Otherwise, the returned array of IPs will be limited to the
// value of maxIPs starting at the first IP in the provided CIDR. For example, when
// providing 192.168.1.0/28 as a CIDR with 4 maxIPs, 192.168.1.0, 192.168.1.1,
// 192.168.1.2, 192.168.1.3 will be returned.
func PrefixToIps(prefixCidr string, maxIPs int) ([]string, error) {
var prefixIps []string
_, ipNet, err := net.ParseCIDR(prefixCidr)
if err != nil {
return prefixIps, err
}
netWithRange := ipNetToRange(*ipNet)
// Ensure last IP in the prefix is included
for ip := *netWithRange.First; len(prefixIps) < maxIPs || maxIPs == 0; ip = GetNextIP(ip) {
prefixIps = append(prefixIps, ip.String())
if ip.Equal(*netWithRange.Last) {
break
}
}
return prefixIps, nil
}
// GetIPAtIndex gets the IP at the given index within the range of ipNet. The index starts at 0.
func GetIPAtIndex(ipNet net.IPNet, index int64) net.IP {
netRange := ipNetToRange(ipNet)
val := big.NewInt(0)
var ip net.IP
if index >= 0 {
ip = *netRange.First
} else {
ip = *netRange.Last
index++
}
if ip.To4() != nil {
val.SetBytes(ip.To4())
} else {
val.SetBytes(ip)
}
val.Add(val, big.NewInt(index))
if ipNet.Contains(val.Bytes()) {
return val.Bytes()
}
return nil
}
func getPreviousIP(ip net.IP) net.IP {
// Cannot go lower than zero!
if ip.Equal(defaultIPv4) || ip.Equal(defaultIPv6) {
return ip
}
previousIP := make(net.IP, len(ip))
copy(previousIP, ip)
var overflow bool
var lowerByteBound int
if ip.To4() != nil {
lowerByteBound = net.IPv6len - net.IPv4len
} else {
lowerByteBound = 0
}
for i := len(ip) - 1; i >= lowerByteBound; i-- {
if overflow || i == len(ip)-1 {
previousIP[i]--
}
// Track if we have overflowed and thus need to continue subtracting.
if ip[i] == 0 && previousIP[i] == 255 {
overflow = true
} else {
overflow = false
}
}
return previousIP
}
// GetNextIP returns the next IP from the given IP address. If the given IP is
// the last IP of a v4 or v6 range, the same IP is returned.
func GetNextIP(ip net.IP) net.IP {
if ip.Equal(upperIPv4) || ip.Equal(upperIPv6) {
return ip
}
nextIP := make(net.IP, len(ip))
switch len(ip) {
case net.IPv4len:
ipU32 := binary.BigEndian.Uint32(ip)
ipU32++
binary.BigEndian.PutUint32(nextIP, ipU32)
return nextIP
case net.IPv6len:
ipU64 := binary.BigEndian.Uint64(ip[net.IPv6len/2:])
ipU64++
binary.BigEndian.PutUint64(nextIP[net.IPv6len/2:], ipU64)
if ipU64 == 0 {
ipU64 = binary.BigEndian.Uint64(ip[:net.IPv6len/2])
ipU64++
binary.BigEndian.PutUint64(nextIP[:net.IPv6len/2], ipU64)
} else {
copy(nextIP[:net.IPv6len/2], ip[:net.IPv6len/2])
}
return nextIP
default:
return ip
}
}
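// exampleGetNextIP is an illustrative usage sketch added for documentation
// purposes only (not part of the original source): incrementing rolls over
// into the next octet, and the top of the IPv4 range is returned unchanged.
func exampleGetNextIP() (net.IP, net.IP) {
	next := GetNextIP(net.ParseIP("10.0.0.255").To4())     // 10.0.1.0
	top := GetNextIP(net.ParseIP("255.255.255.255").To4()) // unchanged
	return next, top
}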
func createSpanningCIDR(r netWithRange) net.IPNet {
// Don't want to modify the values of the provided range, so make copies.
lowest := *r.First
highest := *r.Last
var isIPv4 bool
var spanningMaskSize, bitLen, byteLen int
if lowest.To4() != nil {
isIPv4 = true
bitLen = ipv4BitLen
byteLen = net.IPv4len
} else {
bitLen = ipv6BitLen
byteLen = net.IPv6len
}
if isIPv4 {
spanningMaskSize = ipv4BitLen
} else {
spanningMaskSize = ipv6BitLen
}
// Convert to big Int so we can easily do bitshifting on the IP addresses,
// since golang only provides up to 64-bit unsigned integers.
lowestBig := big.NewInt(0).SetBytes(lowest)
highestBig := big.NewInt(0).SetBytes(highest)
// Starting from largest mask / smallest range possible, apply a mask one bit
// larger in each iteration to the upper bound in the range until we have
// masked enough to pass the lower bound in the range. This
// gives us the size of the prefix for the spanning CIDR to return as
// well as the IP for the CIDR prefix of the spanning CIDR.
for spanningMaskSize > 0 && lowestBig.Cmp(highestBig) < 0 {
spanningMaskSize--
mask := big.NewInt(1)
mask = mask.Lsh(mask, uint(bitLen-spanningMaskSize))
mask = mask.Mul(mask, big.NewInt(-1))
highestBig = highestBig.And(highestBig, mask)
}
// If IPv4, prepend the leading zero bytes of the v4-mapped form, because big.Int strips leading zeroes.
if isIPv4 {
highest = append(ipv4LeadingZeroes, highestBig.Bytes()...)
} else {
highest = highestBig.Bytes()
}
// Int does not store leading zeroes.
if len(highest) == 0 {
highest = make([]byte, byteLen)
}
newNet := net.IPNet{IP: highest, Mask: net.CIDRMask(spanningMaskSize, bitLen)}
return newNet
}
type netWithRange struct {
First *net.IP
Last *net.IP
Network *net.IPNet
}
func mergeAdjacentCIDRs(ranges []*netWithRange) []*netWithRange {
// Sort the ranges. This sorts first by the last IP, then first IP, then by
// the IP network in the list itself
sort.Sort(NetsByRange(ranges))
// Merge adjacent CIDRs if possible.
for i := len(ranges) - 1; i > 0; i-- {
first1 := getPreviousIP(*ranges[i].First)
// Since the networks are sorted, we know that if a network in the list
// is adjacent to another one in the list, it will be the network next
// to it in the list. If the previous IP of the current network we are
// processing overlaps with the last IP of the previous network in the
// list, then we can merge the two ranges together.
if bytes.Compare(first1, *ranges[i-1].Last) <= 0 {
// Pick the minimum of the first two IPs to represent the start
// of the new range.
var minFirstIP *net.IP
if bytes.Compare(*ranges[i-1].First, *ranges[i].First) < 0 {
minFirstIP = ranges[i-1].First
} else {
minFirstIP = ranges[i].First
}
// Always take the last IP of the ith range.
newRangeLast := make(net.IP, len(*ranges[i].Last))
copy(newRangeLast, *ranges[i].Last)
newRangeFirst := make(net.IP, len(*minFirstIP))
copy(newRangeFirst, *minFirstIP)
// Can't set the network field, because we are combining a
// range of IPs and we don't yet know what CIDR prefix(es) represent
// the new range.
ranges[i-1] = &netWithRange{First: &newRangeFirst, Last: &newRangeLast, Network: nil}
// Since we have combined ranges[i] with the preceding item in the
// ranges list, we can delete ranges[i] from the slice.
ranges = append(ranges[:i], ranges[i+1:]...)
}
}
return ranges
}
// coalesceRanges converts ranges into an equivalent list of net.IPNets.
// All IPs in ranges should be of the same address family (IPv4 or IPv6).
func coalesceRanges(ranges []*netWithRange) []*net.IPNet {
coalescedCIDRs := []*net.IPNet{}
// Create CIDRs from ranges that were combined if needed.
for _, netRange := range ranges {
// If the Network field of netWithRange wasn't modified, then we can
// add it to the list which we will return, as it cannot be joined with
// any other CIDR in the list.
if netRange.Network != nil {
coalescedCIDRs = append(coalescedCIDRs, netRange.Network)
} else {
// We have joined two ranges together, so we need to find the new CIDRs
// that represent this range.
rangeCIDRs := rangeToCIDRs(*netRange.First, *netRange.Last)
coalescedCIDRs = append(coalescedCIDRs, rangeCIDRs...)
}
}
return coalescedCIDRs
}
// CoalesceCIDRs transforms the provided list of CIDRs into the most-minimal
// equivalent set of IPv4 and IPv6 CIDRs.
// It removes CIDRs that are subnets of other CIDRs in the list, and groups
// together CIDRs that have the same mask size into a CIDR of the same mask
// size provided that they share the same number of most significant
// mask-size bits.
//
// Note: this algorithm was ported from the Python library netaddr.
// https://github.com/drkjam/netaddr .
func CoalesceCIDRs(cidrs []*net.IPNet) ([]*net.IPNet, []*net.IPNet) {
ranges4 := []*netWithRange{}
ranges6 := []*netWithRange{}
for _, network := range cidrs {
newNetToRange := ipNetToRange(*network)
if network.IP.To4() != nil {
ranges4 = append(ranges4, &newNetToRange)
} else {
ranges6 = append(ranges6, &newNetToRange)
}
}
return coalesceRanges(mergeAdjacentCIDRs(ranges4)), coalesceRanges(mergeAdjacentCIDRs(ranges6))
}
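// The sketch below is an illustrative, non-upstream usage example of
// CoalesceCIDRs; the function name and the input CIDRs are assumptions made
// for demonstration only.
func exampleCoalesceCIDRs() ([]*net.IPNet, []*net.IPNet) {
// Two adjacent /25 halves plus the covering /24. Error handling is omitted
// for brevity in this sketch.
var cidrs []*net.IPNet
for _, s := range []string{"10.0.0.0/25", "10.0.0.128/25", "10.0.0.0/24"} {
_, n, _ := net.ParseCIDR(s)
cidrs = append(cidrs, n)
}
// The two /25s are adjacent halves of 10.0.0.0/24 and the /24 covers both,
// so the coalesced IPv4 result should collapse to the single CIDR
// 10.0.0.0/24; the IPv6 result is empty here.
return CoalesceCIDRs(cidrs)
}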
// rangeToCIDRs converts the range of IPs covered by firstIP and lastIP to
// a list of CIDRs that contains all of the IPs covered by the range.
func rangeToCIDRs(firstIP, lastIP net.IP) []*net.IPNet {
// First, create a CIDR that spans both IPs.
spanningCIDR := createSpanningCIDR(netWithRange{&firstIP, &lastIP, nil})
spanningRange := ipNetToRange(spanningCIDR)
firstIPSpanning := spanningRange.First
lastIPSpanning := spanningRange.Last
cidrList := []*net.IPNet{}
// If the first IP of the spanning CIDR passes the lower bound (firstIP),
// we need to split the spanning CIDR and only take the IPs that are
// greater than the value which we split on, as we do not want the lesser
// values since they are less than the lower-bound (firstIP).
if bytes.Compare(*firstIPSpanning, firstIP) < 0 {
// Split on the previous IP of the first IP so that the right list of IPs
// of the partition includes the firstIP.
prevFirstRangeIP := getPreviousIP(firstIP)
var bitLen int
if prevFirstRangeIP.To4() != nil {
bitLen = ipv4BitLen
} else {
bitLen = ipv6BitLen
}
_, _, right := PartitionCIDR(spanningCIDR, net.IPNet{IP: prevFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})
// Append all CIDRs but the first, as this CIDR includes the upper
// bound of the spanning CIDR, which we still need to partition on.
cidrList = append(cidrList, right...)
spanningCIDR = *right[0]
cidrList = cidrList[1:]
}
// Conversely, if the last IP of the spanning CIDR passes the upper bound
// (lastIP), we need to split the spanning CIDR and only take the IPs that
// are less than the value which we split on, as we do not want the greater
// values since they are greater than the upper-bound (lastIP).
if bytes.Compare(*lastIPSpanning, lastIP) > 0 {
// Split on the next IP of the last IP so that the left list of IPs
// of the partition includes the lastIP.
nextFirstRangeIP := GetNextIP(lastIP)
var bitLen int
if nextFirstRangeIP.To4() != nil {
bitLen = ipv4BitLen
} else {
bitLen = ipv6BitLen
}
left, _, _ := PartitionCIDR(spanningCIDR, net.IPNet{IP: nextFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})
cidrList = append(cidrList, left...)
} else {
// Otherwise, there is no need to partition; just add the spanning
// CIDR to the list of networks.
cidrList = append(cidrList, &spanningCIDR)
}
return cidrList
}
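// The following is an illustrative, non-upstream sketch of rangeToCIDRs; the
// function name and the example range are assumptions for demonstration only.
func exampleRangeToCIDRs() []*net.IPNet {
// Cover the contiguous range 10.0.0.3 - 10.0.0.10 with CIDRs. The minimal
// cover of this range is expected to be 10.0.0.3/32, 10.0.0.4/30,
// 10.0.0.8/31 and 10.0.0.10/32 (in no particular order).
return rangeToCIDRs(net.ParseIP("10.0.0.3"), net.ParseIP("10.0.0.10"))
}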
// PartitionCIDR returns a list of IP Networks partitioned upon excludeCIDR.
// The first list contains the networks to the left of the excludeCIDR in the
// partition, the second is a list containing the excludeCIDR itself if it is
// contained within the targetCIDR (nil otherwise), and the
// third is a list containing the networks to the right of the excludeCIDR in
// the partition.
func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, []*net.IPNet, []*net.IPNet) {
var targetIsIPv4 bool
if targetCIDR.IP.To4() != nil {
targetIsIPv4 = true
}
targetIPRange := ipNetToRange(targetCIDR)
excludeIPRange := ipNetToRange(excludeCIDR)
targetFirstIP := *targetIPRange.First
targetLastIP := *targetIPRange.Last
excludeFirstIP := *excludeIPRange.First
excludeLastIP := *excludeIPRange.Last
targetMaskSize, _ := targetCIDR.Mask.Size()
excludeMaskSize, _ := excludeCIDR.Mask.Size()
if bytes.Compare(excludeLastIP, targetFirstIP) < 0 {
return nil, nil, []*net.IPNet{&targetCIDR}
} else if bytes.Compare(targetLastIP, excludeFirstIP) < 0 {
return []*net.IPNet{&targetCIDR}, nil, nil
}
if targetMaskSize >= excludeMaskSize {
return nil, []*net.IPNet{&targetCIDR}, nil
}
left := []*net.IPNet{}
right := []*net.IPNet{}
newPrefixLen := targetMaskSize + 1
targetFirstCopy := make(net.IP, len(targetFirstIP))
copy(targetFirstCopy, targetFirstIP)
iLowerOld := make(net.IP, len(targetFirstCopy))
copy(iLowerOld, targetFirstCopy)
// Since golang only supports up to unsigned 64-bit integers, and we need
// to perform addition on addresses, use math/big library, which allows
// for manipulation of large integers.
// Used to track the current lower and upper bounds of the ranges to compare
// to excludeCIDR.
iLower := big.NewInt(0)
iUpper := big.NewInt(0)
iLower = iLower.SetBytes(targetFirstCopy)
var bitLen int
if targetIsIPv4 {
bitLen = ipv4BitLen
} else {
bitLen = ipv6BitLen
}
shiftAmount := (uint)(bitLen - newPrefixLen)
targetIPInt := big.NewInt(0)
targetIPInt.SetBytes(targetFirstIP.To16())
exp := big.NewInt(0)
// Use left shift for exponentiation
exp = exp.Lsh(big.NewInt(1), shiftAmount)
iUpper = iUpper.Add(targetIPInt, exp)
matched := big.NewInt(0)
for excludeMaskSize >= newPrefixLen {
// Prepend leading zeroes to IPv4 addresses, as big.Int does not
// retain them when the IP address is copied from a byte array into
// big.Int. Leading zeroes are required for parsing IPv4 addresses
// for use with net.IP / net.IPNet.
var iUpperBytes, iLowerBytes []byte
if targetIsIPv4 {
iUpperBytes = append(ipv4LeadingZeroes, iUpper.Bytes()...)
iLowerBytes = append(ipv4LeadingZeroes, iLower.Bytes()...)
} else {
iUpperBytesLen := len(iUpper.Bytes())
// Make sure that the number of bytes in the array matches what net
// package expects, as big package doesn't append leading zeroes.
if iUpperBytesLen != net.IPv6len {
numZeroesToAppend := net.IPv6len - iUpperBytesLen
zeroBytes := make([]byte, numZeroesToAppend)
iUpperBytes = append(zeroBytes, iUpper.Bytes()...)
} else {
iUpperBytes = iUpper.Bytes()
}
iLowerBytesLen := len(iLower.Bytes())
if iLowerBytesLen != net.IPv6len {
numZeroesToAppend := net.IPv6len - iLowerBytesLen
zeroBytes := make([]byte, numZeroesToAppend)
iLowerBytes = append(zeroBytes, iLower.Bytes()...)
} else {
iLowerBytes = iLower.Bytes()
}
}
// If the IP we are excluding over is of a higher value than the current
// CIDR prefix we are generating, add the CIDR prefix to the set of IPs
// to the left of the exclude CIDR
if bytes.Compare(excludeFirstIP, iUpperBytes) >= 0 {
left = append(left, &net.IPNet{IP: iLowerBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
matched = matched.Set(iUpper)
} else {
// Same as above, but opposite.
right = append(right, &net.IPNet{IP: iUpperBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
matched = matched.Set(iLower)
}
newPrefixLen++
if newPrefixLen > bitLen {
break
}
iLower = iLower.Set(matched)
iUpper = iUpper.Add(matched, big.NewInt(0).Lsh(big.NewInt(1), uint(bitLen-newPrefixLen)))
}
excludeList := []*net.IPNet{&excludeCIDR}
return left, excludeList, right
}
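// The following is an illustrative, non-upstream sketch of PartitionCIDR; the
// function name and the example CIDRs are assumptions for demonstration only.
func examplePartitionCIDR() ([]*net.IPNet, []*net.IPNet, []*net.IPNet) {
_, target, _ := net.ParseCIDR("10.0.0.0/24")
_, exclude, _ := net.ParseCIDR("10.0.0.128/25")
// Excluding the upper half of the /24 should leave 10.0.0.0/25 on the left,
// the exclude CIDR itself in the middle, and nothing on the right.
return PartitionCIDR(*target, *exclude)
}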
// KeepUniqueAddrs transforms the provided multiset of IP addresses into a
// single set, lexicographically sorted via comparison of the addresses using
// netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6).
// The slice is manipulated in-place destructively; it does not create a new slice.
func KeepUniqueAddrs(addrs []netip.Addr) []netip.Addr {
return slices.SortedUniqueFunc(
addrs,
func(i, j int) bool {
return addrs[i].Compare(addrs[j]) < 0
},
func(a, b netip.Addr) bool {
return a == b
},
)
}
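// Illustrative, non-upstream sketch of KeepUniqueAddrs; the function name and
// inputs are assumptions for demonstration only.
func exampleKeepUniqueAddrs() []netip.Addr {
addrs := []netip.Addr{
netip.MustParseAddr("10.0.0.2"),
netip.MustParseAddr("10.0.0.1"),
netip.MustParseAddr("10.0.0.2"),
netip.MustParseAddr("fd00::1"),
}
// The duplicate 10.0.0.2 is dropped and the result is sorted, so the
// returned slice should be 10.0.0.1, 10.0.0.2, fd00::1.
return KeepUniqueAddrs(addrs)
}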
var privateIPBlocks []*net.IPNet
func initPrivatePrefixes() {
// We only care about global scope prefixes here.
for _, cidr := range []string{
"0.0.0.0/8", // RFC1122 - IPv4 Host on this network
"10.0.0.0/8", // RFC1918 - IPv4 Private-Use Networks
"100.64.0.0/10", // RFC6598 - IPv4 Shared address space
"127.0.0.0/8", // RFC1122 - IPv4 Loopback
"169.254.0.0/16", // RFC3927 - IPv4 Link-Local
"172.16.0.0/12", // RFC1918 - IPv4 Private-Use Networks
"192.0.0.0/24", // RFC6890 - IPv4 IETF Assignments
"192.0.2.0/24", // RFC5737 - IPv4 TEST-NET-1
"192.168.0.0/16", // RFC1918 - IPv4 Private-Use Networks
"198.18.0.0/15", // RFC2544 - IPv4 Interconnect Benchmarks
"198.51.100.0/24", // RFC5737 - IPv4 TEST-NET-2
"203.0.113.0/24", // RFC5737 - IPv4 TEST-NET-3
"224.0.0.0/4", // RFC5771 - IPv4 Multicast
"::/128", // RFC4291 - IPv6 Unspecified
"::1/128", // RFC4291 - IPv6 Loopback
"100::/64", // RFC6666 - IPv6 Discard-Only Prefix
"2001:2::/48", // RFC5180 - IPv6 Benchmarking
"2001:db8::/48", // RFC3849 - IPv6 Documentation
"fc00::/7", // RFC4193 - IPv6 Unique-Local
"fe80::/10", // RFC4291 - IPv6 Link-Local
"ff00::/8", // RFC4291 - IPv6 Multicast
} {
_, block, _ := net.ParseCIDR(cidr)
privateIPBlocks = append(privateIPBlocks, block)
}
}
func init() {
initPrivatePrefixes()
}
// IsPublicAddr returns whether a given global IP is from
// a public range.
func IsPublicAddr(ip net.IP) bool {
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return false
}
}
return true
}
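// Illustrative, non-upstream sketch of IsPublicAddr; the function name and
// inputs are assumptions for demonstration only.
func exampleIsPublicAddr() (bool, bool) {
// 10.0.0.1 falls in the RFC1918 private range, while 8.8.8.8 matches none of
// the special-purpose prefixes above, so the expected results are false and
// true respectively.
return IsPublicAddr(net.ParseIP("10.0.0.1")), IsPublicAddr(net.ParseIP("8.8.8.8"))
}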
// IPToPrefix returns the corresponding IPNet for the given IP.
func IPToPrefix(ip net.IP) *net.IPNet {
bits := net.IPv6len * 8
if ip.To4() != nil {
ip = ip.To4()
bits = net.IPv4len * 8
}
prefix := &net.IPNet{
IP: ip,
Mask: net.CIDRMask(bits, bits),
}
return prefix
}
// IsIPv4 returns true if the given IP is an IPv4 address.
func IsIPv4(ip net.IP) bool {
return ip.To4() != nil
}
// IsIPv6 returns true if the given IP is an IPv6 address.
func IsIPv6(ip net.IP) bool {
return ip != nil && ip.To4() == nil
}
// ListContainsIP returns whether a list of IPs contains a given IP.
func ListContainsIP(ipList []net.IP, ip net.IP) bool {
for _, e := range ipList {
if e.Equal(ip) {
return true
}
}
return false
}
// SortIPList sorts the provided net.IP slice in place.
func SortIPList(ipList []net.IP) {
sort.Slice(ipList, func(i, j int) bool {
return bytes.Compare(ipList[i], ipList[j]) < 0
})
}
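// SortAddrList sorts the provided netip.Addr slice in place.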
func SortAddrList(ipList []netip.Addr) {
sort.Slice(ipList, func(i, j int) bool {
return ipList[i].Compare(ipList[j]) < 0
})
}
// getSortedIPList returns a new net.IP slice in which the IPs are sorted.
func getSortedIPList(ipList []net.IP) []net.IP {
sortedIPList := make([]net.IP, len(ipList))
copy(sortedIPList, ipList)
SortIPList(sortedIPList)
return sortedIPList
}
// UnsortedIPListsAreEqual returns true if the two provided net.IP lists
// contain the same addresses, regardless of the order of the IPs in each
// list. The function sorts copies of both lists and then compares the
// sorted results element by element.
func UnsortedIPListsAreEqual(ipList1, ipList2 []net.IP) bool {
// The IP set is definitely different if the lengths are different.
if len(ipList1) != len(ipList2) {
return false
}
a := getSortedIPList(ipList1)
b := getSortedIPList(ipList2)
// Lengths are equal, so each member in one set must be in the other.
// If any IPs at the same index differ, the sorted IP lists are not equal.
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
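// Illustrative, non-upstream sketch of UnsortedIPListsAreEqual; the function
// name and inputs are assumptions for demonstration only.
func exampleUnsortedIPListsAreEqual() bool {
a := []net.IP{net.ParseIP("10.0.0.1"), net.ParseIP("10.0.0.2")}
b := []net.IP{net.ParseIP("10.0.0.2"), net.ParseIP("10.0.0.1")}
// Same addresses in a different order, so the expected result is true.
return UnsortedIPListsAreEqual(a, b)
}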
// GetIPFromListByFamily returns a single IP address of the provided family
// from a list of IP addresses.
func GetIPFromListByFamily(ipList []net.IP, v4Family bool) net.IP {
for _, ipAddr := range ipList {
if v4Family == IsIPv4(ipAddr) || (!v4Family && IsIPv6(ipAddr)) {
return ipAddr
}
}
return nil
}
// AddrFromIP converts a net.IP to netip.Addr using netip.AddrFromSlice, but preserves
// the original address family. It assumes given net.IP is not an IPv4 mapped IPv6
// address.
//
// The problem behind this is that when we convert the IPv4 net.IP address with
// netip.AddrFromSlice, the address is interpreted as an IPv4 mapped IPv6 address in some
// cases.
//
// For example, when we do netip.AddrFromSlice(net.ParseIP("1.1.1.1")), it is interpreted
// as an IPv6 address "::ffff:1.1.1.1". This is because 1) net.IP created with
// net.ParseIP(IPv4 string) holds IPv4 address as an IPv4 mapped IPv6 address internally
// and 2) netip.AddrFromSlice recognizes address family with length of the slice (4-byte =
// IPv4 and 16-byte = IPv6).
//
// By using AddrFromIP, we can preserve the address family, but since we cannot distinguish
// IPv4 and IPv4 mapped IPv6 address only from net.IP value (see #37921 on golang/go) we
// need an assumption that given net.IP is not an IPv4 mapped IPv6 address.
func AddrFromIP(ip net.IP) (netip.Addr, bool) {
addr, ok := netip.AddrFromSlice(ip)
if !ok {
return addr, ok
}
return addr.Unmap(), ok
}
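// Illustrative, non-upstream sketch contrasting netip.AddrFromSlice with
// AddrFromIP; the function name is an assumption for demonstration only.
func exampleAddrFromIP() (netip.Addr, netip.Addr) {
ip := net.ParseIP("1.1.1.1") // stored internally as a 16-byte slice
mapped, _ := netip.AddrFromSlice(ip) // ::ffff:1.1.1.1 (IPv4-mapped IPv6)
unmapped, _ := AddrFromIP(ip) // 1.1.1.1 (plain IPv4 after Unmap)
return mapped, unmapped
}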
// MustAddrFromIP is the same as AddrFromIP except that it assumes the input is
// a valid IP address and always returns a valid netip.Addr.
func MustAddrFromIP(ip net.IP) netip.Addr {
addr, ok := AddrFromIP(ip)
if !ok {
panic("addr is not a valid IP address")
}
return addr
}
// MustAddrsFromIPs converts a slice of net.IP to a slice of netip.Addr. It assumes
// the input slice contains only valid IP addresses and always returns a slice
// containing valid netip.Addr.
func MustAddrsFromIPs(ips []net.IP) []netip.Addr {
addrs := make([]netip.Addr, 0, len(ips))
for _, ip := range ips {
addrs = append(addrs, MustAddrFromIP(ip))
}
return addrs
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"strconv"
"github.com/vishvananda/netlink"
)
func ParseScope(scope string) (int, error) {
switch scope {
case "global":
return int(netlink.SCOPE_UNIVERSE), nil
case "nowhere":
return int(netlink.SCOPE_NOWHERE), nil
case "host":
return int(netlink.SCOPE_HOST), nil
case "link":
return int(netlink.SCOPE_LINK), nil
case "site":
return int(netlink.SCOPE_SITE), nil
default:
return strconv.Atoi(scope)
}
}
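// Illustrative, non-upstream sketch of ParseScope; the function name and
// inputs are assumptions for demonstration only.
func exampleParseScope() (int, int, error) {
// Known scope names map to netlink constants; anything else falls through
// to strconv.Atoi.
linkScope, _ := ParseScope("link") // int(netlink.SCOPE_LINK)
numeric, err := ParseScope("200") // parsed as the number 200
return linkScope, numeric, err
}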
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
"reflect"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen:private-method=true
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumclusterwidenetworkpolicy",path="ciliumclusterwidenetworkpolicies",scope="Cluster",shortName={ccnp}
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumClusterwideNetworkPolicy is a Kubernetes third-party resource with a
// modified version of CiliumNetworkPolicy that is cluster scoped rather than
// namespace scoped.
type CiliumClusterwideNetworkPolicy struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec is the desired Cilium specific rule specification.
Spec *api.Rule `json:"spec,omitempty"`
// Specs is a list of desired Cilium specific rule specification.
Specs api.Rules `json:"specs,omitempty"`
// Status is the status of the Cilium policy rule.
//
// The reason this field exists in this structure is due to a bug in the k8s
// code-generator that doesn't create an `UpdateStatus` method because the
// field does not exist in the structure.
//
// +kubebuilder:validation:Optional
Status CiliumNetworkPolicyStatus `json:"status"`
}
// DeepEqual compares 2 CCNPs while ignoring the LastAppliedConfigAnnotation
// and ignoring the Status field of the CCNP.
func (in *CiliumClusterwideNetworkPolicy) DeepEqual(other *CiliumClusterwideNetworkPolicy) bool {
return objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) && in.deepEqual(other)
}
// SetDerivedPolicyStatus set the derivative policy status for the given
// derivative policy name.
func (r *CiliumClusterwideNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
if r.Status.DerivativePolicies == nil {
r.Status.DerivativePolicies = map[string]CiliumNetworkPolicyNodeStatus{}
}
r.Status.DerivativePolicies[derivativePolicyName] = status
}
// AnnotationsEquals returns true if ObjectMeta.Annotations of each
// CiliumClusterwideNetworkPolicy are equivalent (i.e., they contain equivalent key-value
// pairs).
func (r *CiliumClusterwideNetworkPolicy) AnnotationsEquals(o *CiliumClusterwideNetworkPolicy) bool {
if o == nil {
return r == nil
}
return reflect.DeepEqual(r.ObjectMeta.Annotations, o.ObjectMeta.Annotations)
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumClusterwideNetworkPolicyList is a list of
// CiliumClusterwideNetworkPolicy objects.
type CiliumClusterwideNetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumClusterwideNetworkPolicies.
Items []CiliumClusterwideNetworkPolicy `json:"items"`
}
// Parse parses a CiliumClusterwideNetworkPolicy and returns a list of cilium
// policy rules.
func (r *CiliumClusterwideNetworkPolicy) Parse() (api.Rules, error) {
if r.ObjectMeta.Name == "" {
return nil, NewErrParse("CiliumClusterwideNetworkPolicy must have name")
}
name := r.ObjectMeta.Name
uid := r.ObjectMeta.UID
retRules := api.Rules{}
if r.Spec == nil && r.Specs == nil {
return nil, ErrEmptyCCNP
}
if r.Spec != nil {
if err := r.Spec.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy spec: %s", err))
}
cr := k8sCiliumUtils.ParseToCiliumRule("", name, uid, r.Spec)
retRules = append(retRules, cr)
}
if r.Specs != nil {
for _, rule := range r.Specs {
if err := rule.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy specs: %s", err))
}
cr := k8sCiliumUtils.ParseToCiliumRule("", name, uid, rule)
retRules = append(retRules, cr)
}
}
return retRules, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"bytes"
"encoding/json"
"fmt"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/option"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumenvoyconfig",path="ciliumenvoyconfigs",scope="Namespaced",shortName={cec}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
// +kubebuilder:storageversion
type CiliumEnvoyConfig struct {
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// +k8s:openapi-gen=false
Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumEnvoyConfigList is a list of CiliumEnvoyConfig objects.
type CiliumEnvoyConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumEnvoyConfig.
Items []CiliumEnvoyConfig `json:"items"`
}
type CiliumEnvoyConfigSpec struct {
// Services specifies Kubernetes services for which traffic is
// forwarded to an Envoy listener for L7 load balancing. Backends
// of these services are automatically synced to Envoy using EDS.
//
// +kubebuilder:validation:Optional
Services []*ServiceListener `json:"services,omitempty"`
// BackendServices specifies Kubernetes services whose backends
// are automatically synced to Envoy using EDS. Traffic for these
// services is not forwarded to an Envoy listener. This allows an
// Envoy listener to load balance traffic to these backends while
// normal Cilium service load balancing takes care of balancing
// traffic for these services at the same time.
//
// +kubebuilder:validation:Optional
BackendServices []*Service `json:"backendServices,omitempty"`
// Envoy xDS resources, a list of the following Envoy resource types:
// type.googleapis.com/envoy.config.listener.v3.Listener,
// type.googleapis.com/envoy.config.route.v3.RouteConfiguration,
// type.googleapis.com/envoy.config.cluster.v3.Cluster,
// type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment, and
// type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret.
//
// +kubebuilder:validation:Required
Resources []XDSResource `json:"resources,omitempty"`
// NodeSelector is a label selector that determines to which nodes
// this configuration applies.
// If nil, then this config applies to all nodes.
//
// +kubebuilder:validation:Optional
NodeSelector *slim_metav1.LabelSelector `json:"nodeSelector,omitempty"`
}
type Service struct {
// Name is the name of a destination Kubernetes service that identifies traffic
// to be redirected.
//
// +kubebuilder:validation:Required
Name string `json:"name"`
// Namespace is the Kubernetes service namespace.
// In CiliumEnvoyConfig, Namespace defaults to the namespace of the CEC;
// in CiliumClusterwideEnvoyConfig, it defaults to "default".
// +kubebuilder:validation:Optional
Namespace string `json:"namespace"`
// Ports is a set of port numbers, which can be used for filtering in case
// the underlying service exposes multiple port numbers.
//
// +kubebuilder:validation:Optional
Ports []string `json:"number,omitempty"`
}
type ServiceListener struct {
// Name is the name of a destination Kubernetes service that identifies traffic
// to be redirected.
//
// +kubebuilder:validation:Required
Name string `json:"name"`
// Namespace is the Kubernetes service namespace.
// In CiliumEnvoyConfig, this is overridden to the namespace of the CEC;
// in CiliumClusterwideEnvoyConfig, it defaults to "default".
// +kubebuilder:validation:Optional
Namespace string `json:"namespace"`
// Ports is a set of service's frontend ports that should be redirected to the Envoy
// listener. By default all frontend ports of the service are redirected.
//
// +kubebuilder:validation:Optional
Ports []uint16 `json:"ports,omitempty"`
// Listener specifies the name of the Envoy listener the
// service traffic is redirected to. The listener must be
// specified in the Envoy 'resources' of the same
// CiliumEnvoyConfig.
//
// If omitted, the first listener specified in 'resources' is
// used.
//
// +kubebuilder:validation:Optional
Listener string `json:"listener"`
}
// +kubebuilder:pruning:PreserveUnknownFields
type XDSResource struct {
*anypb.Any `json:"-"`
}
// DeepCopyInto deep copies 'in' into 'out'.
func (in *XDSResource) DeepCopyInto(out *XDSResource) {
out.Any, _ = proto.Clone(in.Any).(*anypb.Any)
}
// DeepEqual returns 'true' if 'a' and 'b' are equal.
func (a *XDSResource) DeepEqual(b *XDSResource) bool {
return proto.Equal(a.Any, b.Any)
}
// MarshalJSON ensures that the unstructured object produces proper
// JSON when passed to Go's standard JSON library.
func (u *XDSResource) MarshalJSON() ([]byte, error) {
return protojson.Marshal(u.Any)
}
// UnmarshalJSON ensures that the unstructured object properly decodes
// JSON when passed to Go's standard JSON library.
func (u *XDSResource) UnmarshalJSON(b []byte) (err error) {
// xDS resources are not validated in K8s, recover from possible panics
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("CEC JSON decoding paniced: %v", r)
}
}()
u.Any = &anypb.Any{}
err = protojson.Unmarshal(b, u.Any)
if err != nil {
var buf bytes.Buffer
json.Indent(&buf, b, "", "\t")
log.Warningf("Ignoring invalid CiliumEnvoyConfig JSON (%s): %s",
err, buf.String())
} else if option.Config.Debug {
log.Debugf("CEC unmarshaled XDS Resource: %v", prototext.Format(u.Any))
}
return nil
}
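// Illustrative, non-upstream sketch of an XDSResource protojson round trip.
// It assumes "google.golang.org/protobuf/types/known/wrapperspb" is imported;
// google.protobuf.StringValue stands in for a real Envoy resource type such as
// envoy.config.listener.v3.Listener only because it is always resolvable by
// protojson. The function name is an assumption for demonstration only.
func exampleXDSResourceRoundTrip() (*XDSResource, error) {
a, err := anypb.New(wrapperspb.String("hello"))
if err != nil {
return nil, err
}
in := &XDSResource{Any: a}
// Produces {"@type":"type.googleapis.com/google.protobuf.StringValue","value":"hello"}
data, err := in.MarshalJSON()
if err != nil {
return nil, err
}
out := &XDSResource{}
// Note that invalid JSON is logged and ignored rather than returned as an error.
if err := out.UnmarshalJSON(data); err != nil {
return nil, err
}
return out, nil
}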
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
"strconv"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/pkg/iana"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
lb "github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumlocalredirectpolicy",path="ciliumlocalredirectpolicies",scope="Namespaced",shortName={clrp}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// CiliumLocalRedirectPolicy is a Kubernetes Custom Resource that contains a
// specification to redirect traffic locally within a node.
type CiliumLocalRedirectPolicy struct {
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec is the desired behavior of the local redirect policy.
Spec CiliumLocalRedirectPolicySpec `json:"spec,omitempty"`
// Status is the most recent status of the local redirect policy.
// It is a read-only field.
//
// +deepequal-gen=false
// +kubebuilder:validation:Optional
Status CiliumLocalRedirectPolicyStatus `json:"status"`
}
type Frontend struct {
// IP is a destination ip address for traffic to be redirected.
//
// Example:
// When it is set to "169.254.169.254", traffic destined to
// "169.254.169.254" is redirected.
//
// +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))`
// +kubebuilder:validation:Required
IP string `json:"ip"`
// ToPorts is a list of destination L4 ports with protocol for traffic
// to be redirected.
// When multiple ports are specified, the ports must be named.
//
// Example:
// When set to Port: "53" and Protocol: UDP, traffic destined to port '53'
// with UDP protocol is redirected.
//
// +kubebuilder:validation:Required
ToPorts []PortInfo `json:"toPorts"`
}
// RedirectFrontend is a frontend configuration that matches traffic that needs to be redirected.
// The configuration must be specified using an IP/port tuple or a Kubernetes service.
type RedirectFrontend struct {
// AddressMatcher is a tuple {IP, port, protocol} that matches traffic to be
// redirected.
//
// +kubebuilder:validation:OneOf
AddressMatcher *Frontend `json:"addressMatcher,omitempty"`
// ServiceMatcher specifies Kubernetes service and port that matches
// traffic to be redirected.
//
// +kubebuilder:validation:OneOf
ServiceMatcher *ServiceInfo `json:"serviceMatcher,omitempty"`
}
// PortInfo specifies L4 port number and name along with the transport protocol
type PortInfo struct {
// Port is an L4 port number. The string will be strictly parsed as a single uint16.
//
// +kubebuilder:validation:Pattern=`^()([1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5])$`
// +kubebuilder:validation:Required
Port string `json:"port"`
// Protocol is the L4 protocol.
// Accepted values: "TCP", "UDP"
//
// +kubebuilder:validation:Enum=TCP;UDP
// +kubebuilder:validation:Required
Protocol api.L4Proto `json:"protocol"`
// Name is a port name, which must contain at least one [a-z], and may also
// contain [0-9] and '-' anywhere except adjacent to another '-' or at the
// beginning or the end.
//
// +kubebuilder:validation:Pattern=`^([0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`
// +kubebuilder:validation:Optional
Name string `json:"name"`
}
type ServiceInfo struct {
// Name is the name of a destination Kubernetes service that identifies traffic
// to be redirected.
// The service type needs to be ClusterIP.
//
// Example:
// When this field is populated with 'serviceName:myService', all the traffic
// destined to the cluster IP of this service at the (specified)
// service port(s) will be redirected.
//
// +kubebuilder:validation:Required
Name string `json:"serviceName"`
// Namespace is the Kubernetes service namespace.
// The service namespace must match the namespace of the parent Local
// Redirect Policy. For Cluster-wide Local Redirect Policy, this
// can be any namespace.
// +kubebuilder:validation:Required
Namespace string `json:"namespace"`
// ToPorts is a list of destination service L4 ports with protocol for
// traffic to be redirected. If not specified, traffic for all the service
// ports will be redirected.
// When multiple ports are specified, the ports must be named.
//
// +kubebuilder:validation:Optional
ToPorts []PortInfo `json:"toPorts,omitempty"`
}
// RedirectBackend is a backend configuration that determines where traffic needs to be redirected to.
type RedirectBackend struct {
// LocalEndpointSelector selects node local pod(s) where traffic is redirected to.
//
// +kubebuilder:validation:Required
LocalEndpointSelector slim_metav1.LabelSelector `json:"localEndpointSelector"`
// ToPorts is a list of L4 ports with protocol of node local pod(s) where traffic
// is redirected to.
// When multiple ports are specified, the ports must be named.
//
// +kubebuilder:validation:Required
ToPorts []PortInfo `json:"toPorts"`
}
// CiliumLocalRedirectPolicySpec specifies the configurations for redirecting traffic
// within a node.
type CiliumLocalRedirectPolicySpec struct {
// RedirectFrontend specifies frontend configuration to redirect traffic from.
// It can not be empty.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectFrontend is immutable"
RedirectFrontend RedirectFrontend `json:"redirectFrontend"`
// RedirectBackend specifies backend configuration to redirect traffic to.
// It can not be empty.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectBackend is immutable"
RedirectBackend RedirectBackend `json:"redirectBackend"`
// SkipRedirectFromBackend indicates whether traffic matching RedirectFrontend
// from RedirectBackend should skip redirection, and hence the traffic will
// be forwarded as-is.
//
// The default is false which means traffic matching RedirectFrontend will
// get redirected from all pods, including the RedirectBackend(s).
//
// Example: If RedirectFrontend is configured with "169.254.169.254:80" as the
// traffic to be redirected to the backends selected by RedirectBackend, and
// SkipRedirectFromBackend is set to true, traffic going to "169.254.169.254:80"
// from those backends will not be redirected back to the backends. Instead,
// the matched traffic from the backends will be forwarded to the original
// destination "169.254.169.254:80".
//
// +kubebuilder:validation:Optional
// +kubebuilder:default=false
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="skipRedirectFromBackend is immutable"
SkipRedirectFromBackend bool `json:"skipRedirectFromBackend"`
// Description can be used by the creator of the policy to describe the
// purpose of this policy.
//
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
}
// CiliumLocalRedirectPolicyStatus is the status of a Local Redirect Policy.
type CiliumLocalRedirectPolicyStatus struct {
// TODO Define status(aditi)
OK bool `json:"ok,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumLocalRedirectPolicyList is a list of CiliumLocalRedirectPolicy objects.
type CiliumLocalRedirectPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumLocalRedirectPolicy
Items []CiliumLocalRedirectPolicy `json:"items"`
}
// SanitizePortInfo sanitizes all the fields in the PortInfo.
// It returns the port number, name, and protocol derived from the given
// input, along with an error in failure cases.
func (pInfo *PortInfo) SanitizePortInfo(checkNamedPort bool) (uint16, string, lb.L4Type, error) {
var (
pInt uint16
pName string
protocol lb.L4Type
)
// Sanitize port
if pInfo.Port == "" {
return pInt, pName, protocol, fmt.Errorf("port must be specified")
} else {
p, err := strconv.ParseUint(pInfo.Port, 0, 16)
if err != nil {
return pInt, pName, protocol, fmt.Errorf("unable to parse port: %w", err)
}
if p == 0 {
return pInt, pName, protocol, fmt.Errorf("port cannot be 0")
}
pInt = uint16(p)
}
// Sanitize name
if checkNamedPort {
if pInfo.Name == "" {
return pInt, pName, protocol, fmt.Errorf("port %s in the local "+
"redirect policy spec must have a valid IANA_SVC_NAME, as there are multiple ports", pInfo.Port)
}
if !iana.IsSvcName(pInfo.Name) {
return pInt, pName, protocol, fmt.Errorf("port name %s isn't a "+
"valid IANA_SVC_NAME", pInfo.Name)
}
}
pName = strings.ToLower(pInfo.Name) // Normalize for case insensitive comparison
// Sanitize protocol
var err error
protocol, err = lb.NewL4Type(string(pInfo.Protocol))
if err != nil {
return pInt, pName, protocol, err
}
return pInt, pName, protocol, nil
}
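// Illustrative, non-upstream sketch of SanitizePortInfo; the function name and
// inputs are assumptions for demonstration only.
func exampleSanitizePortInfo() (uint16, string, lb.L4Type, error) {
p := PortInfo{Port: "53", Protocol: "UDP", Name: "dns"}
// With checkNamedPort=true the name must be a valid IANA_SVC_NAME, so this
// is expected to return 53, "dns", the UDP L4 type and a nil error.
return p.SanitizePortInfo(true)
}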
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
"reflect"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/pkg/comparator"
k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen:private-method=true
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumnetworkpolicy",path="ciliumnetworkpolicies",scope="Namespaced",shortName={cnp,ciliumnp}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumNetworkPolicy is a Kubernetes third-party resource with an extended
// version of NetworkPolicy.
type CiliumNetworkPolicy struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec is the desired Cilium specific rule specification.
Spec *api.Rule `json:"spec,omitempty"`
// Specs is a list of desired Cilium specific rule specification.
Specs api.Rules `json:"specs,omitempty"`
// Status is the status of the Cilium policy rule
//
// +deepequal-gen=false
// +kubebuilder:validation:Optional
Status CiliumNetworkPolicyStatus `json:"status"`
}
// DeepEqual compares 2 CNPs.
func (in *CiliumNetworkPolicy) DeepEqual(other *CiliumNetworkPolicy) bool {
return objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) && in.deepEqual(other)
}
// objectMetaDeepEqual performs an equality check for metav1.ObjectMeta that
// ignores the LastAppliedConfigAnnotation. This function's usage is shared
// among CNP and CCNP as they have the same structure.
func objectMetaDeepEqual(in, other metav1.ObjectMeta) bool {
if !(in.Name == other.Name && in.Namespace == other.Namespace) {
return false
}
return comparator.MapStringEqualsIgnoreKeys(
in.GetAnnotations(),
other.GetAnnotations(),
// Ignore v1.LastAppliedConfigAnnotation annotation
[]string{v1.LastAppliedConfigAnnotation})
}
// +deepequal-gen=true
// CiliumNetworkPolicyStatus is the status of a Cilium policy rule.
type CiliumNetworkPolicyStatus struct {
// DerivativePolicies is the status of all policies derived from the Cilium
// policy
DerivativePolicies map[string]CiliumNetworkPolicyNodeStatus `json:"derivativePolicies,omitempty"`
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
Conditions []NetworkPolicyCondition `json:"conditions,omitempty"`
}
// +deepequal-gen=true
// CiliumNetworkPolicyNodeStatus is the status of a Cilium policy rule for a
// specific node.
type CiliumNetworkPolicyNodeStatus struct {
// OK is true when the policy has been parsed and imported successfully
// into the in-memory policy repository on the node.
OK bool `json:"ok,omitempty"`
// Error describes any error that occurred when parsing or importing the
// policy, or realizing the policy for the endpoints to which it applies
// on the node.
Error string `json:"error,omitempty"`
// LastUpdated contains the last time this status was updated
LastUpdated slimv1.Time `json:"lastUpdated,omitempty"`
// Revision is the policy revision of the repository which first implemented
// this policy.
Revision uint64 `json:"localPolicyRevision,omitempty"`
// Enforcing is set to true once all endpoints present at the time the
// policy has been imported are enforcing this policy.
Enforcing bool `json:"enforcing,omitempty"`
// Annotations corresponds to the Annotations in the ObjectMeta of the CNP
// that have been realized on the node for CNP. That is, if a CNP has been
// imported and has been assigned annotation X=Y by the user,
// Annotations in CiliumNetworkPolicyNodeStatus will be X=Y once the
// CNP that was imported corresponding to Annotation X=Y has been realized on
// the node.
Annotations map[string]string `json:"annotations,omitempty"`
}
// CreateCNPNodeStatus returns a CiliumNetworkPolicyNodeStatus created from the
// provided fields.
func CreateCNPNodeStatus(enforcing, ok bool, cnpError error, rev uint64, annotations map[string]string) CiliumNetworkPolicyNodeStatus {
cnpns := CiliumNetworkPolicyNodeStatus{
Enforcing: enforcing,
Revision: rev,
OK: ok,
LastUpdated: slimv1.Now(),
Annotations: annotations,
}
if cnpError != nil {
cnpns.Error = cnpError.Error()
}
return cnpns
}
func (r *CiliumNetworkPolicy) String() string {
result := ""
result += fmt.Sprintf("TypeMeta: %s, ", r.TypeMeta.String())
result += fmt.Sprintf("ObjectMeta: %s, ", r.ObjectMeta.String())
if r.Spec != nil {
result += fmt.Sprintf("Spec: %v", *(r.Spec))
}
if r.Specs != nil {
result += fmt.Sprintf("Specs: %v", r.Specs)
}
result += fmt.Sprintf("Status: %v", r.Status)
return result
}
// SetDerivedPolicyStatus set the derivative policy status for the given
// derivative policy name.
func (r *CiliumNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
if r.Status.DerivativePolicies == nil {
r.Status.DerivativePolicies = map[string]CiliumNetworkPolicyNodeStatus{}
}
r.Status.DerivativePolicies[derivativePolicyName] = status
}
// AnnotationsEquals returns true if ObjectMeta.Annotations of each
// CiliumNetworkPolicy are equivalent (i.e., they contain equivalent key-value
// pairs).
func (r *CiliumNetworkPolicy) AnnotationsEquals(o *CiliumNetworkPolicy) bool {
if o == nil {
return r == nil
}
return reflect.DeepEqual(r.ObjectMeta.Annotations, o.ObjectMeta.Annotations)
}
// Parse parses a CiliumNetworkPolicy and returns a list of cilium policy
// rules.
func (r *CiliumNetworkPolicy) Parse() (api.Rules, error) {
if r.ObjectMeta.Name == "" {
return nil, NewErrParse("CiliumNetworkPolicy must have name")
}
namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
// Temporary fix for CCNPs. See #12834.
// TL;DR. CCNPs are converted into SlimCNPs and end up here so we need to
// convert them back to CCNPs to allow proper parsing.
if namespace == "" {
ccnp := CiliumClusterwideNetworkPolicy{
TypeMeta: r.TypeMeta,
ObjectMeta: r.ObjectMeta,
Spec: r.Spec,
Specs: r.Specs,
Status: r.Status,
}
return ccnp.Parse()
}
name := r.ObjectMeta.Name
uid := r.ObjectMeta.UID
retRules := api.Rules{}
if r.Spec == nil && r.Specs == nil {
return nil, ErrEmptyCNP
}
if r.Spec != nil {
if err := r.Spec.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy spec: %s", err))
}
if r.Spec.NodeSelector.LabelSelector != nil {
return nil, NewErrParse("Invalid CiliumNetworkPolicy spec: rule cannot have NodeSelector")
}
cr := k8sCiliumUtils.ParseToCiliumRule(namespace, name, uid, r.Spec)
retRules = append(retRules, cr)
}
if r.Specs != nil {
for _, rule := range r.Specs {
if err := rule.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy specs: %s", err))
}
cr := k8sCiliumUtils.ParseToCiliumRule(namespace, name, uid, rule)
retRules = append(retRules, cr)
}
}
return retRules, nil
}
// GetIdentityLabels returns all rule labels in the CiliumNetworkPolicy.
func (r *CiliumNetworkPolicy) GetIdentityLabels() labels.LabelArray {
namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
name := r.ObjectMeta.Name
uid := r.ObjectMeta.UID
// Even though the struct represents CiliumNetworkPolicy, we use it both for
// CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, so here we check for namespace
// to send correct derivedFrom label to get the correct policy labels.
derivedFrom := k8sCiliumUtils.ResourceTypeCiliumNetworkPolicy
if namespace == "" {
derivedFrom = k8sCiliumUtils.ResourceTypeCiliumClusterwideNetworkPolicy
}
return k8sCiliumUtils.GetPolicyLabels(namespace, name, uid, derivedFrom)
}
// RequiresDerivative returns true if the CNP has any rule that will create a
// new derivative rule.
func (r *CiliumNetworkPolicy) RequiresDerivative() bool {
if r.Spec != nil {
if r.Spec.RequiresDerivative() {
return true
}
}
if r.Specs != nil {
for _, rule := range r.Specs {
if rule.RequiresDerivative() {
return true
}
}
}
return false
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumNetworkPolicyList is a list of CiliumNetworkPolicy objects.
type CiliumNetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumNetworkPolicy
Items []CiliumNetworkPolicy `json:"items"`
}
type PolicyConditionType string
const (
PolicyConditionValid PolicyConditionType = "Valid"
)
type NetworkPolicyCondition struct {
// The type of the policy condition
Type PolicyConditionType `json:"type"`
// The status of the condition, one of True, False, or Unknown
Status v1.ConditionStatus `json:"status"`
// The last time the condition transitioned from one status to another.
// +optional
LastTransitionTime slimv1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message string `json:"message,omitempty"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
var (
// ErrEmptyCNP is an error representing a CNP that is empty, which means it is
// missing both a `spec` and `specs` (both are nil).
ErrEmptyCNP = NewErrParse("Invalid CiliumNetworkPolicy spec(s): empty policy")
// ErrEmptyCCNP is an error representing a CCNP that is empty, which means it is
// missing both a `spec` and `specs` (both are nil).
ErrEmptyCCNP = NewErrParse("Invalid CiliumClusterwideNetworkPolicy spec(s): empty policy")
// ParsingErr is for comparison when checking error types.
ParsingErr = NewErrParse("")
)
// ErrParse is an error to describe where a policy fails to parse due to any
// invalid rule.
//
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type ErrParse struct {
msg string
}
// NewErrParse returns a new ErrParse.
func NewErrParse(msg string) ErrParse {
return ErrParse{
msg: msg,
}
}
// Error returns the error message for parsing
func (e ErrParse) Error() string {
return e.msg
}
// Is returns true if the given error is of type 'ErrParse'.
func (_ ErrParse) Is(e error) bool {
_, ok := e.(ErrParse)
return ok
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
)
const (
// CustomResourceDefinitionGroup is the name of the third party resource group
CustomResourceDefinitionGroup = k8sconst.CustomResourceDefinitionGroup
// CustomResourceDefinitionVersion is the current version of the resource
CustomResourceDefinitionVersion = "v2"
// Cilium Network Policy (CNP)
// CNPPluralName is the plural name of Cilium Network Policy
CNPPluralName = "ciliumnetworkpolicies"
// CNPKindDefinition is the kind name for Cilium Network Policy
CNPKindDefinition = "CiliumNetworkPolicy"
// CNPName is the full name of Cilium Network Policy
CNPName = CNPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Cluster wide Network Policy (CCNP)
// CCNPPluralName is the plural name of Cilium Cluster wide Network Policy
CCNPPluralName = "ciliumclusterwidenetworkpolicies"
// CCNPKindDefinition is the kind name for Cilium Cluster wide Network Policy
CCNPKindDefinition = "CiliumClusterwideNetworkPolicy"
// CCNPName is the full name of Cilium Cluster wide Network Policy
CCNPName = CCNPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Egress Gateway Policy (CEGP)
// CEGPPluralName is the plural name of Cilium Egress Gateway Policy
CEGPPluralName = "ciliumegressgatewaypolicies"
// CEGPKindDefinition is the kind name of Cilium Egress Gateway Policy
CEGPKindDefinition = "CiliumEgressGatewayPolicy"
// CEGPName is the full name of Cilium Egress Gateway Policy
CEGPName = CEGPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Endpoint (CEP)
// CEPPluralName is the plural name of Cilium Endpoint
CEPPluralName = "ciliumendpoints"
// CEPKindDefinition is the kind name for Cilium Endpoint
CEPKindDefinition = "CiliumEndpoint"
// CEPName is the full name of Cilium Endpoint
CEPName = CEPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Node (CN)
// CNPluralName is the plural name of Cilium Node
CNPluralName = "ciliumnodes"
// CNKindDefinition is the kind name for Cilium Node
CNKindDefinition = "CiliumNode"
// CNName is the full name of Cilium Node
CNName = CNPluralName + "." + CustomResourceDefinitionGroup
// Cilium Identity
// CIDPluralName is the plural name of Cilium Identity
CIDPluralName = "ciliumidentities"
// CIDKindDefinition is the kind name for Cilium Identity
CIDKindDefinition = "CiliumIdentity"
// CIDName is the full name of Cilium Identity
CIDName = CIDPluralName + "." + CustomResourceDefinitionGroup
// Cilium Local Redirect Policy (CLRP)
// CLRPPluralName is the plural name of Local Redirect Policy
CLRPPluralName = "ciliumlocalredirectpolicies"
// CLRPKindDefinition is the kind name for Local Redirect Policy
CLRPKindDefinition = "CiliumLocalRedirectPolicy"
// CLRPName is the full name of Local Redirect Policy
CLRPName = CLRPPluralName + "." + CustomResourceDefinitionGroup
// Cilium External Workload (CEW)
// CEWPluralName is the plural name of Cilium External Workload
CEWPluralName = "ciliumexternalworkloads"
// CEWKindDefinition is the kind name for Cilium External Workload
CEWKindDefinition = "CiliumExternalWorkload"
// CEWName is the full name of Cilium External Workload
CEWName = CEWPluralName + "." + CustomResourceDefinitionGroup
// Cilium Cluster Envoy Config (CCEC)
// CCECPluralName is the plural name of Cilium Clusterwide Envoy Config
CCECPluralName = "ciliumclusterwideenvoyconfigs"
// CCECKindDefinition is the kind name of Cilium Clusterwide Envoy Config
CCECKindDefinition = "CiliumClusterwideEnvoyConfig"
// CCECName is the full name of Cilium Clusterwide Envoy Config
CCECName = CCECPluralName + "." + CustomResourceDefinitionGroup
// Cilium Envoy Config (CEC)
// CECPluralName is the plural name of Cilium Envoy Config
CECPluralName = "ciliumenvoyconfigs"
// CECKindDefinition is the kind name of Cilium Envoy Config
CECKindDefinition = "CiliumEnvoyConfig"
// CECName is the full name of Cilium Envoy Config
CECName = CECPluralName + "." + CustomResourceDefinitionGroup
// CiliumNodeConfig (CNC)
// CNCPluralName is the plural name of Cilium Node Config
CNCPluralName = "ciliumnodeconfigs"
// CNCKindDefinition is the kind name of Cilium Node Config
CNCKindDefinition = "CiliumNodeConfig"
// CNCName is the full name of Cilium Node Config
CNCName = CNCPluralName + "." + CustomResourceDefinitionGroup
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{
Group: CustomResourceDefinitionGroup,
Version: CustomResourceDefinitionVersion,
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is needed by DeepCopy generator.
SchemeBuilder runtime.SchemeBuilder
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
localSchemeBuilder = &SchemeBuilder
// AddToScheme adds all types of this clientset into the given scheme.
// This allows composition of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes)
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&CiliumNetworkPolicy{},
&CiliumNetworkPolicyList{},
&CiliumClusterwideNetworkPolicy{},
&CiliumClusterwideNetworkPolicyList{},
&CiliumEgressGatewayPolicy{},
&CiliumEgressGatewayPolicyList{},
&CiliumEndpoint{},
&CiliumEndpointList{},
&CiliumNode{},
&CiliumNodeList{},
&CiliumNodeConfig{},
&CiliumNodeConfigList{},
&CiliumExternalWorkload{},
&CiliumExternalWorkloadList{},
&CiliumIdentity{},
&CiliumIdentityList{},
&CiliumLocalRedirectPolicy{},
&CiliumLocalRedirectPolicyList{},
&CiliumEnvoyConfig{},
&CiliumEnvoyConfigList{},
&CiliumClusterwideEnvoyConfig{},
&CiliumClusterwideEnvoyConfigList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"net"
"sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/api/v1/models"
alibabaCloudTypes "github.com/cilium/cilium/pkg/alibabacloud/eni/types"
eniTypes "github.com/cilium/cilium/pkg/aws/eni/types"
azureTypes "github.com/cilium/cilium/pkg/azure/types"
ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
"github.com/cilium/cilium/pkg/node/addressing"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +kubebuilder:resource:categories={cilium},singular="ciliumendpoint",path="ciliumendpoints",scope="Namespaced",shortName={cep,ciliumep}
// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Security Identity",name="Security Identity",type=integer
// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string,priority=1
// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string,priority=1
// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string,priority=1
// +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string
// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string
// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string
// +kubebuilder:storageversion
// CiliumEndpoint is a CRD that represents a Cilium endpoint and its status.
type CiliumEndpoint struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// +kubebuilder:validation:Optional
Status EndpointStatus `json:"status"`
}
// EndpointPolicyState defines the state of the Policy mode: "enforcing", "non-enforcing", "disabled"
type EndpointPolicyState string
// EndpointStatus is the status of a Cilium endpoint.
type EndpointStatus struct {
// ID is the cilium-agent-local ID of the endpoint.
ID int64 `json:"id,omitempty"`
// Controllers is the list of failing controllers for this endpoint.
Controllers ControllerList `json:"controllers,omitempty"`
// ExternalIdentifiers is a set of identifiers to identify the endpoint
// apart from the pod name. This includes container runtime IDs.
ExternalIdentifiers *models.EndpointIdentifiers `json:"external-identifiers,omitempty"`
// Health is the overall endpoint & subcomponent health.
Health *models.EndpointHealth `json:"health,omitempty"`
// Identity is the security identity associated with the endpoint
Identity *EndpointIdentity `json:"identity,omitempty"`
// Log is the list of the last few warning and error log entries
Log []*models.EndpointStatusChange `json:"log,omitempty"`
// Networking is the networking properties of the endpoint.
//
// +kubebuilder:validation:Optional
Networking *EndpointNetworking `json:"networking,omitempty"`
// Encryption is the encryption configuration of the node
//
// +kubebuilder:validation:Optional
Encryption EncryptionSpec `json:"encryption,omitempty"`
Policy *EndpointPolicy `json:"policy,omitempty"`
VisibilityPolicyStatus *string `json:"visibility-policy-status,omitempty"`
// State is the state of the endpoint.
//
// +kubebuilder:validation:Enum=creating;waiting-for-identity;not-ready;waiting-to-regenerate;regenerating;restoring;ready;disconnecting;disconnected;invalid
State string `json:"state,omitempty"`
NamedPorts models.NamedPorts `json:"named-ports,omitempty"`
}
// +k8s:deepcopy-gen=false
// ControllerList is a list of ControllerStatus.
type ControllerList []ControllerStatus
// Sort sorts the ControllerList by controller name
func (c ControllerList) Sort() {
sort.Slice(c, func(i, j int) bool { return c[i].Name < c[j].Name })
}
// ControllerStatus is the status of a failing controller.
type ControllerStatus struct {
// Name is the name of the controller
Name string `json:"name,omitempty"`
// Configuration is the controller configuration
Configuration *models.ControllerStatusConfiguration `json:"configuration,omitempty"`
// Status is the status of the controller
Status ControllerStatusStatus `json:"status,omitempty"`
// UUID is the UUID of the controller
UUID string `json:"uuid,omitempty"`
}
// +k8s:deepcopy-gen=false
// ControllerStatusStatus is the detailed status section of a controller.
type ControllerStatusStatus struct {
ConsecutiveFailureCount int64 `json:"consecutive-failure-count,omitempty"`
FailureCount int64 `json:"failure-count,omitempty"`
LastFailureMsg string `json:"last-failure-msg,omitempty"`
LastFailureTimestamp string `json:"last-failure-timestamp,omitempty"`
LastSuccessTimestamp string `json:"last-success-timestamp,omitempty"`
SuccessCount int64 `json:"success-count,omitempty"`
}
// EndpointPolicy represents the endpoint's policy by listing all allowed
// ingress and egress identities in combination with L4 port and protocol.
type EndpointPolicy struct {
Ingress *EndpointPolicyDirection `json:"ingress,omitempty"`
Egress *EndpointPolicyDirection `json:"egress,omitempty"`
}
// EndpointPolicyDirection is the list of allowed identities per direction.
type EndpointPolicyDirection struct {
Enforcing bool `json:"enforcing"`
Allowed AllowedIdentityList `json:"allowed,omitempty"`
Denied DenyIdentityList `json:"denied,omitempty"`
// Deprecated
Removing AllowedIdentityList `json:"removing,omitempty"`
// Deprecated
Adding AllowedIdentityList `json:"adding,omitempty"`
State EndpointPolicyState `json:"state,omitempty"`
}
// IdentityTuple specifies a peer by identity, destination port and protocol.
type IdentityTuple struct {
Identity uint64 `json:"identity,omitempty"`
IdentityLabels map[string]string `json:"identity-labels,omitempty"`
DestPort uint16 `json:"dest-port,omitempty"`
Protocol uint8 `json:"protocol,omitempty"`
}
// +k8s:deepcopy-gen=false
// IdentityList is a list of IdentityTuple.
type IdentityList []IdentityTuple
// Sort sorts an IdentityList by numeric identity, port, and protocol.
func (a IdentityList) Sort() {
sort.Slice(a, func(i, j int) bool {
if a[i].Identity < a[j].Identity {
return true
} else if a[i].Identity == a[j].Identity {
if a[i].DestPort < a[j].DestPort {
return true
} else if a[i].DestPort == a[j].DestPort {
return a[i].Protocol < a[j].Protocol
}
}
return false
})
}
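// Illustrative sketch (not part of the generated code): Sort orders an
// IdentityList first by numeric identity, then by destination port, then by
// protocol. The values below are invented for demonstration only.
//
//	peers := IdentityList{
//		{Identity: 42, DestPort: 80, Protocol: 6},
//		{Identity: 7, DestPort: 53, Protocol: 17},
//		{Identity: 7, DestPort: 53, Protocol: 6},
//	}
//	peers.Sort()
//	// Resulting order: {7, 53, 6}, {7, 53, 17}, {42, 80, 6}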
// +k8s:deepcopy-gen=false
// AllowedIdentityList is a list of IdentityTuples that specifies peers that
// are allowed.
type AllowedIdentityList IdentityList
// Sort sorts an AllowedIdentityList by numeric identity, port and protocol.
func (a AllowedIdentityList) Sort() {
IdentityList(a).Sort()
}
// +k8s:deepcopy-gen=false
// DenyIdentityList is a list of IdentityTuples that specifies peers that are
// denied.
type DenyIdentityList IdentityList
// Sort sorts a DenyIdentityList by numeric identity, port and protocol.
func (d DenyIdentityList) Sort() {
IdentityList(d).Sort()
}
// EndpointIdentity is the identity information of an endpoint.
type EndpointIdentity struct {
// ID is the numeric identity of the endpoint
ID int64 `json:"id,omitempty"`
// Labels is the list of labels associated with the identity
Labels []string `json:"labels,omitempty"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumidentity",path="ciliumidentities",scope="Cluster",shortName={ciliumid}
// +kubebuilder:printcolumn:JSONPath=".metadata.labels.io\\.kubernetes\\.pod\\.namespace",description="The namespace of the entity",name="Namespace",type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumIdentity is a CRD that represents an identity managed by Cilium.
// It is intended as a backing store for identity allocation, acting as the
// global coordination backend, and can be used in place of a KVStore (such as
// etcd).
// The name of the CRD is the numeric identity and the labels on the CRD object
// are the Kubernetes-sourced labels seen by Cilium. This is currently the
// only label source possible when running under Kubernetes. Non-Kubernetes
// labels are filtered out, but all labels, from all sources, are placed in
// the SecurityLabels field. These also include the source and are used to
// define the identity.
// The labels under metav1.ObjectMeta can be used when searching for
// CiliumIdentity instances that include particular labels. This can be done
// with invocations such as:
//
// kubectl get ciliumid -l 'foo=bar'
type CiliumIdentity struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// SecurityLabels is the source-of-truth set of labels for this identity.
SecurityLabels map[string]string `json:"security-labels"`
}
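// Illustrative sketch (an assumption, not generated code): a CiliumIdentity
// whose object name encodes the numeric identity and whose SecurityLabels
// carry the source-prefixed labels described above. The identity number and
// label values below are invented.
//
//	id := CiliumIdentity{
//		ObjectMeta: metav1.ObjectMeta{
//			Name: "105010", // numeric identity used as the CRD name
//			Labels: map[string]string{
//				"io.kubernetes.pod.namespace": "default",
//				"app":                         "frontend",
//			},
//		},
//		SecurityLabels: map[string]string{
//			"k8s:io.kubernetes.pod.namespace": "default",
//			"k8s:app":                         "frontend",
//		},
//	}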
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumIdentityList is a list of CiliumIdentity objects.
type CiliumIdentityList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumIdentity
Items []CiliumIdentity `json:"items"`
}
// +k8s:deepcopy-gen=false
// AddressPair is a pair of IPv4 and/or IPv6 addresses.
type AddressPair struct {
IPV4 string `json:"ipv4,omitempty"`
IPV6 string `json:"ipv6,omitempty"`
}
// +k8s:deepcopy-gen=false
// AddressPairList is a list of address pairs.
type AddressPairList []*AddressPair
// Sort sorts an AddressPairList by IPv4 and IPv6 address.
func (a AddressPairList) Sort() {
sort.Slice(a, func(i, j int) bool {
if a[i].IPV4 < a[j].IPV4 {
return true
} else if a[i].IPV4 == a[j].IPV4 {
return a[i].IPV6 < a[j].IPV6
}
return false
})
}
// EndpointNetworking is the addressing information of an endpoint.
type EndpointNetworking struct {
// IPv4/IPv6 addresses assigned to this Endpoint
Addressing AddressPairList `json:"addressing"`
// NodeIP is the IP of the node the endpoint is running on. The IP must
// be reachable between nodes.
NodeIP string `json:"node,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumEndpointList is a list of CiliumEndpoint objects.
type CiliumEndpointList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumEndpoint
Items []CiliumEndpoint `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumnode",path="ciliumnodes",scope="Cluster",shortName={cn,ciliumn}
// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"CiliumInternalIP\")].ip",description="Cilium internal IP for this node",name="CiliumInternalIP",type=string
// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"InternalIP\")].ip",description="IP of the node",name="InternalIP",type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="Time duration since creation of Ciliumnode",name="Age",type=date
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// CiliumNode represents a node managed by Cilium. It contains a specification
// to control various node specific configuration aspects and a status section
// to represent the status of the node.
type CiliumNode struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec defines the desired specification/configuration of the node.
Spec NodeSpec `json:"spec"`
// Status defines the realized specification/configuration and status
// of the node.
//
// +kubebuilder:validation:Optional
Status NodeStatus `json:"status,omitempty"`
}
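// Illustrative sketch (an assumption, not generated code): a minimal
// CiliumNode with a couple of addresses and an encryption key index. All
// values are hypothetical, and the addressing constants are assumed to come
// from the addressing package imported above.
//
//	node := CiliumNode{
//		ObjectMeta: metav1.ObjectMeta{Name: "worker-1"},
//		Spec: NodeSpec{
//			InstanceID: "i-0123456789abcdef0",
//			Addresses: []NodeAddress{
//				{Type: addressing.NodeInternalIP, IP: "10.0.0.5"},
//				{Type: addressing.NodeCiliumInternalIP, IP: "10.244.0.1"},
//			},
//			Encryption: EncryptionSpec{Key: 3},
//		},
//	}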
// NodeAddress is a node address.
type NodeAddress struct {
// Type is the type of the node address
Type addressing.AddressType `json:"type,omitempty"`
// IP is an IP of a node
IP string `json:"ip,omitempty"`
}
// NodeSpec is the configuration specific to a node.
type NodeSpec struct {
// InstanceID is the identifier of the node. This is different from the
// node name which is typically the FQDN of the node. The InstanceID
// typically refers to the identifier used by the cloud provider or
// some other means of identification.
InstanceID string `json:"instance-id,omitempty"`
// BootID is a unique node identifier generated on boot
//
// +kubebuilder:validation:Optional
BootID string `json:"bootid,omitempty"`
// Addresses is the list of all node addresses.
//
// +kubebuilder:validation:Optional
Addresses []NodeAddress `json:"addresses,omitempty"`
// HealthAddressing is the addressing information for health connectivity
// checking.
//
// +kubebuilder:validation:Optional
HealthAddressing HealthAddressingSpec `json:"health,omitempty"`
// IngressAddressing is the addressing information for Ingress listener.
//
// +kubebuilder:validation:Optional
IngressAddressing AddressPair `json:"ingress,omitempty"`
// Encryption is the encryption configuration of the node.
//
// +kubebuilder:validation:Optional
Encryption EncryptionSpec `json:"encryption,omitempty"`
// ENI is the AWS ENI specific configuration.
//
// +kubebuilder:validation:Optional
ENI eniTypes.ENISpec `json:"eni,omitempty"`
// Azure is the Azure IPAM specific configuration.
//
// +kubebuilder:validation:Optional
Azure azureTypes.AzureSpec `json:"azure,omitempty"`
// AlibabaCloud is the AlibabaCloud IPAM specific configuration.
//
// +kubebuilder:validation:Optional
AlibabaCloud alibabaCloudTypes.Spec `json:"alibaba-cloud,omitempty"`
// IPAM is the address management specification. This section can be
// populated by a user or it can be automatically populated by an IPAM
// operator.
//
// +kubebuilder:validation:Optional
IPAM ipamTypes.IPAMSpec `json:"ipam,omitempty"`
// NodeIdentity is the Cilium numeric identity allocated for the node, if any.
//
// +kubebuilder:validation:Optional
NodeIdentity uint64 `json:"nodeidentity,omitempty"`
}
// HealthAddressingSpec is the addressing information required to do
// connectivity health checking.
type HealthAddressingSpec struct {
// IPv4 is the IPv4 address of the IPv4 health endpoint.
//
// +kubebuilder:validation:Optional
IPv4 string `json:"ipv4,omitempty"`
// IPv6 is the IPv6 address of the IPv6 health endpoint.
//
// +kubebuilder:validation:Optional
IPv6 string `json:"ipv6,omitempty"`
}
// EncryptionSpec defines the encryption relevant configuration of a node.
type EncryptionSpec struct {
// Key is the index to the key to use for encryption or 0 if encryption is
// disabled.
//
// +kubebuilder:validation:Optional
Key int `json:"key,omitempty"`
}
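// Illustrative sketch: per the comment on Key, a zero index means encryption
// is disabled, so callers can gate on it directly. "spec" is a hypothetical
// NodeSpec value.
//
//	if spec.Encryption.Key == 0 {
//		// encryption disabled for this node
//	}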
// NodeStatus is the status of a node.
type NodeStatus struct {
// ENI is the AWS ENI specific status of the node.
//
// +kubebuilder:validation:Optional
ENI eniTypes.ENIStatus `json:"eni,omitempty"`
// Azure is the Azure specific status of the node.
//
// +kubebuilder:validation:Optional
Azure azureTypes.AzureStatus `json:"azure,omitempty"`
// IPAM is the IPAM status of the node.
//
// +kubebuilder:validation:Optional
IPAM ipamTypes.IPAMStatus `json:"ipam,omitempty"`
// AlibabaCloud is the AlibabaCloud specific status of the node.
//
// +kubebuilder:validation:Optional
AlibabaCloud alibabaCloudTypes.ENIStatus `json:"alibaba-cloud,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumNodeList is a list of CiliumNode objects.
type CiliumNodeList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumNode
Items []CiliumNode `json:"items"`
}
// InstanceID returns the InstanceID of a CiliumNode.
func (n *CiliumNode) InstanceID() (instanceID string) {
if n != nil {
instanceID = n.Spec.InstanceID
// OBSOLETE: This fallback can be removed in Cilium 1.9
if instanceID == "" {
instanceID = n.Spec.ENI.InstanceID
}
}
return
}
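// Illustrative usage (a sketch, assuming eniTypes.ENISpec carries an
// InstanceID field as referenced above): InstanceID is nil-safe and falls
// back to the obsolete ENI instance ID when Spec.InstanceID is empty.
//
//	var n *CiliumNode
//	_ = n.InstanceID() // nil receiver yields ""
//	n = &CiliumNode{Spec: NodeSpec{ENI: eniTypes.ENISpec{InstanceID: "i-abc"}}}
//	_ = n.InstanceID() // "i-abc" via the ENI fallback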
func (n NodeAddress) ToString() string {
return n.IP
}
func (n NodeAddress) AddrType() addressing.AddressType {
return n.Type
}
// GetIP returns one of the CiliumNode's IP addresses available with the
// following priority:
// - NodeInternalIP
// - NodeExternalIP
// - other IP address type
// Nil is returned if GetIP fails to extract an IP from the CiliumNode
// based on the provided address family.
func (n *CiliumNode) GetIP(ipv6 bool) net.IP {
return addressing.ExtractNodeIP[NodeAddress](n.Spec.Addresses, ipv6)
}
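// Illustrative usage (sketch): GetIP(false) selects an IPv4 address and
// GetIP(true) an IPv6 address, preferring NodeInternalIP over NodeExternalIP;
// it yields nil when no matching address exists. "node" is a hypothetical
// *CiliumNode.
//
//	if ip4 := node.GetIP(false); ip4 != nil {
//		// use the node's preferred IPv4 address
//	}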
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v2
import (
models "github.com/cilium/cilium/api/v1/models"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
api "github.com/cilium/cilium/pkg/policy/api"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfig) DeepCopyInto(out *CiliumClusterwideEnvoyConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfig.
func (in *CiliumClusterwideEnvoyConfig) DeepCopy() *CiliumClusterwideEnvoyConfig {
if in == nil {
return nil
}
out := new(CiliumClusterwideEnvoyConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideEnvoyConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
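// Illustrative usage (a sketch, not generated code): objects read from a
// shared informer cache must not be mutated in place, so DeepCopy is used to
// obtain an independent copy that is safe to modify. "cached" is a
// hypothetical *CiliumClusterwideEnvoyConfig taken from such a cache.
//
//	cfg := cached.DeepCopy()
//	cfg.Labels = map[string]string{"managed-by": "example"}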
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopyInto(out *CiliumClusterwideEnvoyConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumClusterwideEnvoyConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfigList.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopy() *CiliumClusterwideEnvoyConfigList {
if in == nil {
return nil
}
out := new(CiliumClusterwideEnvoyConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicy) DeepCopyInto(out *CiliumClusterwideNetworkPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
if in.Specs != nil {
in, out := &in.Specs, &out.Specs
*out = make(api.Rules, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
}
}
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicy.
func (in *CiliumClusterwideNetworkPolicy) DeepCopy() *CiliumClusterwideNetworkPolicy {
if in == nil {
return nil
}
out := new(CiliumClusterwideNetworkPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideNetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopyInto(out *CiliumClusterwideNetworkPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumClusterwideNetworkPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicyList.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopy() *CiliumClusterwideNetworkPolicyList {
if in == nil {
return nil
}
out := new(CiliumClusterwideNetworkPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicy) DeepCopyInto(out *CiliumEgressGatewayPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicy.
func (in *CiliumEgressGatewayPolicy) DeepCopy() *CiliumEgressGatewayPolicy {
if in == nil {
return nil
}
out := new(CiliumEgressGatewayPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEgressGatewayPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicyList) DeepCopyInto(out *CiliumEgressGatewayPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumEgressGatewayPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicyList.
func (in *CiliumEgressGatewayPolicyList) DeepCopy() *CiliumEgressGatewayPolicyList {
if in == nil {
return nil
}
out := new(CiliumEgressGatewayPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEgressGatewayPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicySpec) DeepCopyInto(out *CiliumEgressGatewayPolicySpec) {
*out = *in
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]EgressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.DestinationCIDRs != nil {
in, out := &in.DestinationCIDRs, &out.DestinationCIDRs
*out = make([]IPv4CIDR, len(*in))
copy(*out, *in)
}
if in.ExcludedCIDRs != nil {
in, out := &in.ExcludedCIDRs, &out.ExcludedCIDRs
*out = make([]IPv4CIDR, len(*in))
copy(*out, *in)
}
if in.EgressGateway != nil {
in, out := &in.EgressGateway, &out.EgressGateway
*out = new(EgressGateway)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicySpec.
func (in *CiliumEgressGatewayPolicySpec) DeepCopy() *CiliumEgressGatewayPolicySpec {
if in == nil {
return nil
}
out := new(CiliumEgressGatewayPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpoint) DeepCopyInto(out *CiliumEndpoint) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpoint.
func (in *CiliumEndpoint) DeepCopy() *CiliumEndpoint {
if in == nil {
return nil
}
out := new(CiliumEndpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpoint) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpointList) DeepCopyInto(out *CiliumEndpointList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumEndpoint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointList.
func (in *CiliumEndpointList) DeepCopy() *CiliumEndpointList {
if in == nil {
return nil
}
out := new(CiliumEndpointList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpointList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfig) DeepCopyInto(out *CiliumEnvoyConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfig.
func (in *CiliumEnvoyConfig) DeepCopy() *CiliumEnvoyConfig {
if in == nil {
return nil
}
out := new(CiliumEnvoyConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEnvoyConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfigList) DeepCopyInto(out *CiliumEnvoyConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumEnvoyConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigList.
func (in *CiliumEnvoyConfigList) DeepCopy() *CiliumEnvoyConfigList {
if in == nil {
return nil
}
out := new(CiliumEnvoyConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEnvoyConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfigSpec) DeepCopyInto(out *CiliumEnvoyConfigSpec) {
*out = *in
if in.Services != nil {
in, out := &in.Services, &out.Services
*out = make([]*ServiceListener, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ServiceListener)
(*in).DeepCopyInto(*out)
}
}
}
if in.BackendServices != nil {
in, out := &in.BackendServices, &out.BackendServices
*out = make([]*Service, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Service)
(*in).DeepCopyInto(*out)
}
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]XDSResource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigSpec.
func (in *CiliumEnvoyConfigSpec) DeepCopy() *CiliumEnvoyConfigSpec {
if in == nil {
return nil
}
out := new(CiliumEnvoyConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkload) DeepCopyInto(out *CiliumExternalWorkload) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkload.
func (in *CiliumExternalWorkload) DeepCopy() *CiliumExternalWorkload {
if in == nil {
return nil
}
out := new(CiliumExternalWorkload)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumExternalWorkload) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkloadList) DeepCopyInto(out *CiliumExternalWorkloadList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumExternalWorkload, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadList.
func (in *CiliumExternalWorkloadList) DeepCopy() *CiliumExternalWorkloadList {
if in == nil {
return nil
}
out := new(CiliumExternalWorkloadList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumExternalWorkloadList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkloadSpec) DeepCopyInto(out *CiliumExternalWorkloadSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadSpec.
func (in *CiliumExternalWorkloadSpec) DeepCopy() *CiliumExternalWorkloadSpec {
if in == nil {
return nil
}
out := new(CiliumExternalWorkloadSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkloadStatus) DeepCopyInto(out *CiliumExternalWorkloadStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadStatus.
func (in *CiliumExternalWorkloadStatus) DeepCopy() *CiliumExternalWorkloadStatus {
if in == nil {
return nil
}
out := new(CiliumExternalWorkloadStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumIdentity) DeepCopyInto(out *CiliumIdentity) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.SecurityLabels != nil {
in, out := &in.SecurityLabels, &out.SecurityLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentity.
func (in *CiliumIdentity) DeepCopy() *CiliumIdentity {
if in == nil {
return nil
}
out := new(CiliumIdentity)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumIdentity) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumIdentityList) DeepCopyInto(out *CiliumIdentityList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumIdentity, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentityList.
func (in *CiliumIdentityList) DeepCopy() *CiliumIdentityList {
if in == nil {
return nil
}
out := new(CiliumIdentityList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumIdentityList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicy) DeepCopyInto(out *CiliumLocalRedirectPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicy.
func (in *CiliumLocalRedirectPolicy) DeepCopy() *CiliumLocalRedirectPolicy {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLocalRedirectPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicyList) DeepCopyInto(out *CiliumLocalRedirectPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumLocalRedirectPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyList.
func (in *CiliumLocalRedirectPolicyList) DeepCopy() *CiliumLocalRedirectPolicyList {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLocalRedirectPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicySpec) DeepCopyInto(out *CiliumLocalRedirectPolicySpec) {
*out = *in
in.RedirectFrontend.DeepCopyInto(&out.RedirectFrontend)
in.RedirectBackend.DeepCopyInto(&out.RedirectBackend)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicySpec.
func (in *CiliumLocalRedirectPolicySpec) DeepCopy() *CiliumLocalRedirectPolicySpec {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicyStatus) DeepCopyInto(out *CiliumLocalRedirectPolicyStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyStatus.
func (in *CiliumLocalRedirectPolicyStatus) DeepCopy() *CiliumLocalRedirectPolicyStatus {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicy) DeepCopyInto(out *CiliumNetworkPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
if in.Specs != nil {
in, out := &in.Specs, &out.Specs
*out = make(api.Rules, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
}
}
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicy.
func (in *CiliumNetworkPolicy) DeepCopy() *CiliumNetworkPolicy {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyList) DeepCopyInto(out *CiliumNetworkPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumNetworkPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyList.
func (in *CiliumNetworkPolicyList) DeepCopy() *CiliumNetworkPolicyList {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyNodeStatus) DeepCopyInto(out *CiliumNetworkPolicyNodeStatus) {
*out = *in
in.LastUpdated.DeepCopyInto(&out.LastUpdated)
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyNodeStatus.
func (in *CiliumNetworkPolicyNodeStatus) DeepCopy() *CiliumNetworkPolicyNodeStatus {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicyNodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyStatus) DeepCopyInto(out *CiliumNetworkPolicyStatus) {
*out = *in
if in.DerivativePolicies != nil {
in, out := &in.DerivativePolicies, &out.DerivativePolicies
*out = make(map[string]CiliumNetworkPolicyNodeStatus, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]NetworkPolicyCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyStatus.
func (in *CiliumNetworkPolicyStatus) DeepCopy() *CiliumNetworkPolicyStatus {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNode) DeepCopyInto(out *CiliumNode) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNode.
func (in *CiliumNode) DeepCopy() *CiliumNode {
if in == nil {
return nil
}
out := new(CiliumNode)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNode) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfig) DeepCopyInto(out *CiliumNodeConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfig.
func (in *CiliumNodeConfig) DeepCopy() *CiliumNodeConfig {
if in == nil {
return nil
}
out := new(CiliumNodeConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigList) DeepCopyInto(out *CiliumNodeConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumNodeConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigList.
func (in *CiliumNodeConfigList) DeepCopy() *CiliumNodeConfigList {
if in == nil {
return nil
}
out := new(CiliumNodeConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigSpec) DeepCopyInto(out *CiliumNodeConfigSpec) {
*out = *in
if in.Defaults != nil {
in, out := &in.Defaults, &out.Defaults
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigSpec.
func (in *CiliumNodeConfigSpec) DeepCopy() *CiliumNodeConfigSpec {
if in == nil {
return nil
}
out := new(CiliumNodeConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeList) DeepCopyInto(out *CiliumNodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumNode, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeList.
func (in *CiliumNodeList) DeepCopy() *CiliumNodeList {
if in == nil {
return nil
}
out := new(CiliumNodeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatus) DeepCopyInto(out *ControllerStatus) {
*out = *in
if in.Configuration != nil {
in, out := &in.Configuration, &out.Configuration
*out = new(models.ControllerStatusConfiguration)
**out = **in
}
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatus.
func (in *ControllerStatus) DeepCopy() *ControllerStatus {
if in == nil {
return nil
}
out := new(ControllerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressGateway) DeepCopyInto(out *EgressGateway) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressGateway.
func (in *EgressGateway) DeepCopy() *EgressGateway {
if in == nil {
return nil
}
out := new(EgressGateway)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressRule) DeepCopyInto(out *EgressRule) {
*out = *in
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.PodSelector != nil {
in, out := &in.PodSelector, &out.PodSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
func (in *EgressRule) DeepCopy() *EgressRule {
if in == nil {
return nil
}
out := new(EgressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionSpec) DeepCopyInto(out *EncryptionSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpec.
func (in *EncryptionSpec) DeepCopy() *EncryptionSpec {
if in == nil {
return nil
}
out := new(EncryptionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointIdentity) DeepCopyInto(out *EndpointIdentity) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointIdentity.
func (in *EndpointIdentity) DeepCopy() *EndpointIdentity {
if in == nil {
return nil
}
out := new(EndpointIdentity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointNetworking) DeepCopyInto(out *EndpointNetworking) {
*out = *in
if in.Addressing != nil {
in, out := &in.Addressing, &out.Addressing
*out = make(AddressPairList, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(AddressPair)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointNetworking.
func (in *EndpointNetworking) DeepCopy() *EndpointNetworking {
if in == nil {
return nil
}
out := new(EndpointNetworking)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPolicy) DeepCopyInto(out *EndpointPolicy) {
*out = *in
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = new(EndpointPolicyDirection)
(*in).DeepCopyInto(*out)
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = new(EndpointPolicyDirection)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicy.
func (in *EndpointPolicy) DeepCopy() *EndpointPolicy {
if in == nil {
return nil
}
out := new(EndpointPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPolicyDirection) DeepCopyInto(out *EndpointPolicyDirection) {
*out = *in
if in.Allowed != nil {
in, out := &in.Allowed, &out.Allowed
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Denied != nil {
in, out := &in.Denied, &out.Denied
*out = make(DenyIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Removing != nil {
in, out := &in.Removing, &out.Removing
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Adding != nil {
in, out := &in.Adding, &out.Adding
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicyDirection.
func (in *EndpointPolicyDirection) DeepCopy() *EndpointPolicyDirection {
if in == nil {
return nil
}
out := new(EndpointPolicyDirection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) {
*out = *in
if in.Controllers != nil {
in, out := &in.Controllers, &out.Controllers
*out = make(ControllerList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExternalIdentifiers != nil {
in, out := &in.ExternalIdentifiers, &out.ExternalIdentifiers
*out = new(models.EndpointIdentifiers)
**out = **in
}
if in.Health != nil {
in, out := &in.Health, &out.Health
*out = new(models.EndpointHealth)
**out = **in
}
if in.Identity != nil {
in, out := &in.Identity, &out.Identity
*out = new(EndpointIdentity)
(*in).DeepCopyInto(*out)
}
if in.Log != nil {
in, out := &in.Log, &out.Log
*out = make([]*models.EndpointStatusChange, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(models.EndpointStatusChange)
**out = **in
}
}
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(EndpointNetworking)
(*in).DeepCopyInto(*out)
}
out.Encryption = in.Encryption
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(EndpointPolicy)
(*in).DeepCopyInto(*out)
}
if in.VisibilityPolicyStatus != nil {
in, out := &in.VisibilityPolicyStatus, &out.VisibilityPolicyStatus
*out = new(string)
**out = **in
}
if in.NamedPorts != nil {
in, out := &in.NamedPorts, &out.NamedPorts
*out = make(models.NamedPorts, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(models.Port)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus.
func (in *EndpointStatus) DeepCopy() *EndpointStatus {
if in == nil {
return nil
}
out := new(EndpointStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Frontend) DeepCopyInto(out *Frontend) {
*out = *in
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Frontend.
func (in *Frontend) DeepCopy() *Frontend {
if in == nil {
return nil
}
out := new(Frontend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthAddressingSpec) DeepCopyInto(out *HealthAddressingSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthAddressingSpec.
func (in *HealthAddressingSpec) DeepCopy() *HealthAddressingSpec {
if in == nil {
return nil
}
out := new(HealthAddressingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityTuple) DeepCopyInto(out *IdentityTuple) {
*out = *in
if in.IdentityLabels != nil {
in, out := &in.IdentityLabels, &out.IdentityLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityTuple.
func (in *IdentityTuple) DeepCopy() *IdentityTuple {
if in == nil {
return nil
}
out := new(IdentityTuple)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyCondition) DeepCopyInto(out *NetworkPolicyCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyCondition.
func (in *NetworkPolicyCondition) DeepCopy() *NetworkPolicyCondition {
if in == nil {
return nil
}
out := new(NetworkPolicyCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
func (in *NodeAddress) DeepCopy() *NodeAddress {
if in == nil {
return nil
}
out := new(NodeAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]NodeAddress, len(*in))
copy(*out, *in)
}
out.HealthAddressing = in.HealthAddressing
out.IngressAddressing = in.IngressAddressing
out.Encryption = in.Encryption
in.ENI.DeepCopyInto(&out.ENI)
out.Azure = in.Azure
in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
in.IPAM.DeepCopyInto(&out.IPAM)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
func (in *NodeSpec) DeepCopy() *NodeSpec {
if in == nil {
return nil
}
out := new(NodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = *in
in.ENI.DeepCopyInto(&out.ENI)
in.Azure.DeepCopyInto(&out.Azure)
in.IPAM.DeepCopyInto(&out.IPAM)
in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
func (in *NodeStatus) DeepCopy() *NodeStatus {
if in == nil {
return nil
}
out := new(NodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortInfo) DeepCopyInto(out *PortInfo) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortInfo.
func (in *PortInfo) DeepCopy() *PortInfo {
if in == nil {
return nil
}
out := new(PortInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectBackend) DeepCopyInto(out *RedirectBackend) {
*out = *in
in.LocalEndpointSelector.DeepCopyInto(&out.LocalEndpointSelector)
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectBackend.
func (in *RedirectBackend) DeepCopy() *RedirectBackend {
if in == nil {
return nil
}
out := new(RedirectBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectFrontend) DeepCopyInto(out *RedirectFrontend) {
*out = *in
if in.AddressMatcher != nil {
in, out := &in.AddressMatcher, &out.AddressMatcher
*out = new(Frontend)
(*in).DeepCopyInto(*out)
}
if in.ServiceMatcher != nil {
in, out := &in.ServiceMatcher, &out.ServiceMatcher
*out = new(ServiceInfo)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectFrontend.
func (in *RedirectFrontend) DeepCopy() *RedirectFrontend {
if in == nil {
return nil
}
out := new(RedirectFrontend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
func (in *Service) DeepCopy() *Service {
if in == nil {
return nil
}
out := new(Service)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceInfo) DeepCopyInto(out *ServiceInfo) {
*out = *in
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInfo.
func (in *ServiceInfo) DeepCopy() *ServiceInfo {
if in == nil {
return nil
}
out := new(ServiceInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceListener) DeepCopyInto(out *ServiceListener) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]uint16, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceListener.
func (in *ServiceListener) DeepCopy() *ServiceListener {
if in == nil {
return nil
}
out := new(ServiceListener)
in.DeepCopyInto(out)
return out
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XDSResource.
func (in *XDSResource) DeepCopy() *XDSResource {
if in == nil {
return nil
}
out := new(XDSResource)
in.DeepCopyInto(out)
return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v2
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AddressPair) DeepEqual(other *AddressPair) bool {
if other == nil {
return false
}
if in.IPV4 != other.IPV4 {
return false
}
if in.IPV6 != other.IPV6 {
return false
}
return true
}
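// Illustrative usage (sketch): DeepEqual compares field by field, and a nil
// other always compares unequal. The addresses below are invented.
//
//	a := &AddressPair{IPV4: "10.0.0.1"}
//	b := &AddressPair{IPV4: "10.0.0.1"}
//	_ = a.DeepEqual(b)   // true
//	_ = a.DeepEqual(nil) // false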
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AddressPairList) DeepEqual(other *AddressPairList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AllowedIdentityList) DeepEqual(other *AllowedIdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfig) DeepEqual(other *CiliumClusterwideEnvoyConfig) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicy) deepEqual(other *CiliumClusterwideNetworkPolicy) bool {
if other == nil {
return false
}
if (in.Spec == nil) != (other.Spec == nil) {
return false
} else if in.Spec != nil {
if !in.Spec.DeepEqual(other.Spec) {
return false
}
}
if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
in, other := &in.Specs, &other.Specs
if other == nil || !in.DeepEqual(other) {
return false
}
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEgressGatewayPolicy) DeepEqual(other *CiliumEgressGatewayPolicy) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEgressGatewayPolicySpec) DeepEqual(other *CiliumEgressGatewayPolicySpec) bool {
if other == nil {
return false
}
if ((in.Selectors != nil) && (other.Selectors != nil)) || ((in.Selectors == nil) != (other.Selectors == nil)) {
in, other := &in.Selectors, &other.Selectors
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if ((in.DestinationCIDRs != nil) && (other.DestinationCIDRs != nil)) || ((in.DestinationCIDRs == nil) != (other.DestinationCIDRs == nil)) {
in, other := &in.DestinationCIDRs, &other.DestinationCIDRs
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if ((in.ExcludedCIDRs != nil) && (other.ExcludedCIDRs != nil)) || ((in.ExcludedCIDRs == nil) != (other.ExcludedCIDRs == nil)) {
in, other := &in.ExcludedCIDRs, &other.ExcludedCIDRs
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if (in.EgressGateway == nil) != (other.EgressGateway == nil) {
return false
} else if in.EgressGateway != nil {
if !in.EgressGateway.DeepEqual(other.EgressGateway) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEndpoint) DeepEqual(other *CiliumEndpoint) bool {
if other == nil {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEnvoyConfig) DeepEqual(other *CiliumEnvoyConfig) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEnvoyConfigSpec) DeepEqual(other *CiliumEnvoyConfigSpec) bool {
if other == nil {
return false
}
if ((in.Services != nil) && (other.Services != nil)) || ((in.Services == nil) != (other.Services == nil)) {
in, other := &in.Services, &other.Services
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if ((in.BackendServices != nil) && (other.BackendServices != nil)) || ((in.BackendServices == nil) != (other.BackendServices == nil)) {
in, other := &in.BackendServices, &other.BackendServices
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if ((in.Resources != nil) && (other.Resources != nil)) || ((in.Resources == nil) != (other.Resources == nil)) {
in, other := &in.Resources, &other.Resources
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumExternalWorkload) DeepEqual(other *CiliumExternalWorkload) bool {
if other == nil {
return false
}
if in.Spec != other.Spec {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumExternalWorkloadSpec) DeepEqual(other *CiliumExternalWorkloadSpec) bool {
if other == nil {
return false
}
if in.IPv4AllocCIDR != other.IPv4AllocCIDR {
return false
}
if in.IPv6AllocCIDR != other.IPv6AllocCIDR {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumExternalWorkloadStatus) DeepEqual(other *CiliumExternalWorkloadStatus) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if in.IP != other.IP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumIdentity) DeepEqual(other *CiliumIdentity) bool {
if other == nil {
return false
}
if ((in.SecurityLabels != nil) && (other.SecurityLabels != nil)) || ((in.SecurityLabels == nil) != (other.SecurityLabels == nil)) {
in, other := &in.SecurityLabels, &other.SecurityLabels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLocalRedirectPolicy) DeepEqual(other *CiliumLocalRedirectPolicy) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLocalRedirectPolicySpec) DeepEqual(other *CiliumLocalRedirectPolicySpec) bool {
if other == nil {
return false
}
if !in.RedirectFrontend.DeepEqual(&other.RedirectFrontend) {
return false
}
if !in.RedirectBackend.DeepEqual(&other.RedirectBackend) {
return false
}
if in.SkipRedirectFromBackend != other.SkipRedirectFromBackend {
return false
}
if in.Description != other.Description {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLocalRedirectPolicyStatus) DeepEqual(other *CiliumLocalRedirectPolicyStatus) bool {
if other == nil {
return false
}
if in.OK != other.OK {
return false
}
return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNetworkPolicy) deepEqual(other *CiliumNetworkPolicy) bool {
if other == nil {
return false
}
if (in.Spec == nil) != (other.Spec == nil) {
return false
} else if in.Spec != nil {
if !in.Spec.DeepEqual(other.Spec) {
return false
}
}
if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
in, other := &in.Specs, &other.Specs
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNetworkPolicyNodeStatus) DeepEqual(other *CiliumNetworkPolicyNodeStatus) bool {
if other == nil {
return false
}
if in.OK != other.OK {
return false
}
if in.Error != other.Error {
return false
}
if !in.LastUpdated.DeepEqual(&other.LastUpdated) {
return false
}
if in.Revision != other.Revision {
return false
}
if in.Enforcing != other.Enforcing {
return false
}
if ((in.Annotations != nil) && (other.Annotations != nil)) || ((in.Annotations == nil) != (other.Annotations == nil)) {
in, other := &in.Annotations, &other.Annotations
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNetworkPolicyStatus) DeepEqual(other *CiliumNetworkPolicyStatus) bool {
if other == nil {
return false
}
if ((in.DerivativePolicies != nil) && (other.DerivativePolicies != nil)) || ((in.DerivativePolicies == nil) != (other.DerivativePolicies == nil)) {
in, other := &in.DerivativePolicies, &other.DerivativePolicies
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if !inValue.DeepEqual(&otherValue) {
return false
}
}
}
}
}
if ((in.Conditions != nil) && (other.Conditions != nil)) || ((in.Conditions == nil) != (other.Conditions == nil)) {
in, other := &in.Conditions, &other.Conditions
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNode) DeepEqual(other *CiliumNode) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerList) DeepEqual(other *ControllerList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerStatus) DeepEqual(other *ControllerStatus) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if (in.Configuration == nil) != (other.Configuration == nil) {
return false
} else if in.Configuration != nil {
if !in.Configuration.DeepEqual(other.Configuration) {
return false
}
}
if in.Status != other.Status {
return false
}
if in.UUID != other.UUID {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerStatusStatus) DeepEqual(other *ControllerStatusStatus) bool {
if other == nil {
return false
}
if in.ConsecutiveFailureCount != other.ConsecutiveFailureCount {
return false
}
if in.FailureCount != other.FailureCount {
return false
}
if in.LastFailureMsg != other.LastFailureMsg {
return false
}
if in.LastFailureTimestamp != other.LastFailureTimestamp {
return false
}
if in.LastSuccessTimestamp != other.LastSuccessTimestamp {
return false
}
if in.SuccessCount != other.SuccessCount {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *DenyIdentityList) DeepEqual(other *DenyIdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EgressGateway) DeepEqual(other *EgressGateway) bool {
if other == nil {
return false
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
if in.Interface != other.Interface {
return false
}
if in.EgressIP != other.EgressIP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EgressRule) DeepEqual(other *EgressRule) bool {
if other == nil {
return false
}
if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
return false
} else if in.NamespaceSelector != nil {
if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
return false
}
}
if (in.PodSelector == nil) != (other.PodSelector == nil) {
return false
} else if in.PodSelector != nil {
if !in.PodSelector.DeepEqual(other.PodSelector) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EncryptionSpec) DeepEqual(other *EncryptionSpec) bool {
if other == nil {
return false
}
if in.Key != other.Key {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointIdentity) DeepEqual(other *EndpointIdentity) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
in, other := &in.Labels, &other.Labels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointNetworking) DeepEqual(other *EndpointNetworking) bool {
if other == nil {
return false
}
if ((in.Addressing != nil) && (other.Addressing != nil)) || ((in.Addressing == nil) != (other.Addressing == nil)) {
in, other := &in.Addressing, &other.Addressing
if other == nil || !in.DeepEqual(other) {
return false
}
}
if in.NodeIP != other.NodeIP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointPolicy) DeepEqual(other *EndpointPolicy) bool {
if other == nil {
return false
}
if (in.Ingress == nil) != (other.Ingress == nil) {
return false
} else if in.Ingress != nil {
if !in.Ingress.DeepEqual(other.Ingress) {
return false
}
}
if (in.Egress == nil) != (other.Egress == nil) {
return false
} else if in.Egress != nil {
if !in.Egress.DeepEqual(other.Egress) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointPolicyDirection) DeepEqual(other *EndpointPolicyDirection) bool {
if other == nil {
return false
}
if in.Enforcing != other.Enforcing {
return false
}
if ((in.Allowed != nil) && (other.Allowed != nil)) || ((in.Allowed == nil) != (other.Allowed == nil)) {
in, other := &in.Allowed, &other.Allowed
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Denied != nil) && (other.Denied != nil)) || ((in.Denied == nil) != (other.Denied == nil)) {
in, other := &in.Denied, &other.Denied
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Removing != nil) && (other.Removing != nil)) || ((in.Removing == nil) != (other.Removing == nil)) {
in, other := &in.Removing, &other.Removing
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Adding != nil) && (other.Adding != nil)) || ((in.Adding == nil) != (other.Adding == nil)) {
in, other := &in.Adding, &other.Adding
if other == nil || !in.DeepEqual(other) {
return false
}
}
if in.State != other.State {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointStatus) DeepEqual(other *EndpointStatus) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if ((in.Controllers != nil) && (other.Controllers != nil)) || ((in.Controllers == nil) != (other.Controllers == nil)) {
in, other := &in.Controllers, &other.Controllers
if other == nil || !in.DeepEqual(other) {
return false
}
}
if (in.ExternalIdentifiers == nil) != (other.ExternalIdentifiers == nil) {
return false
} else if in.ExternalIdentifiers != nil {
if !in.ExternalIdentifiers.DeepEqual(other.ExternalIdentifiers) {
return false
}
}
if (in.Health == nil) != (other.Health == nil) {
return false
} else if in.Health != nil {
if !in.Health.DeepEqual(other.Health) {
return false
}
}
if (in.Identity == nil) != (other.Identity == nil) {
return false
} else if in.Identity != nil {
if !in.Identity.DeepEqual(other.Identity) {
return false
}
}
if ((in.Log != nil) && (other.Log != nil)) || ((in.Log == nil) != (other.Log == nil)) {
in, other := &in.Log, &other.Log
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if (in.Networking == nil) != (other.Networking == nil) {
return false
} else if in.Networking != nil {
if !in.Networking.DeepEqual(other.Networking) {
return false
}
}
if in.Encryption != other.Encryption {
return false
}
if (in.Policy == nil) != (other.Policy == nil) {
return false
} else if in.Policy != nil {
if !in.Policy.DeepEqual(other.Policy) {
return false
}
}
if (in.VisibilityPolicyStatus == nil) != (other.VisibilityPolicyStatus == nil) {
return false
} else if in.VisibilityPolicyStatus != nil {
if *in.VisibilityPolicyStatus != *other.VisibilityPolicyStatus {
return false
}
}
if in.State != other.State {
return false
}
if ((in.NamedPorts != nil) && (other.NamedPorts != nil)) || ((in.NamedPorts == nil) != (other.NamedPorts == nil)) {
in, other := &in.NamedPorts, &other.NamedPorts
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Frontend) DeepEqual(other *Frontend) bool {
if other == nil {
return false
}
if in.IP != other.IP {
return false
}
if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
in, other := &in.ToPorts, &other.ToPorts
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *HealthAddressingSpec) DeepEqual(other *HealthAddressingSpec) bool {
if other == nil {
return false
}
if in.IPv4 != other.IPv4 {
return false
}
if in.IPv6 != other.IPv6 {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IdentityList) DeepEqual(other *IdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IdentityTuple) DeepEqual(other *IdentityTuple) bool {
if other == nil {
return false
}
if in.Identity != other.Identity {
return false
}
if ((in.IdentityLabels != nil) && (other.IdentityLabels != nil)) || ((in.IdentityLabels == nil) != (other.IdentityLabels == nil)) {
in, other := &in.IdentityLabels, &other.IdentityLabels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
if in.DestPort != other.DestPort {
return false
}
if in.Protocol != other.Protocol {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NetworkPolicyCondition) DeepEqual(other *NetworkPolicyCondition) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.Status != other.Status {
return false
}
if !in.LastTransitionTime.DeepEqual(&other.LastTransitionTime) {
return false
}
if in.Reason != other.Reason {
return false
}
if in.Message != other.Message {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeAddress) DeepEqual(other *NodeAddress) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.IP != other.IP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeSpec) DeepEqual(other *NodeSpec) bool {
if other == nil {
return false
}
if in.InstanceID != other.InstanceID {
return false
}
if in.BootID != other.BootID {
return false
}
if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
in, other := &in.Addresses, &other.Addresses
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if in.HealthAddressing != other.HealthAddressing {
return false
}
if in.IngressAddressing != other.IngressAddressing {
return false
}
if in.Encryption != other.Encryption {
return false
}
if !in.ENI.DeepEqual(&other.ENI) {
return false
}
if in.Azure != other.Azure {
return false
}
if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
return false
}
if !in.IPAM.DeepEqual(&other.IPAM) {
return false
}
if in.NodeIdentity != other.NodeIdentity {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeStatus) DeepEqual(other *NodeStatus) bool {
if other == nil {
return false
}
if !in.ENI.DeepEqual(&other.ENI) {
return false
}
if !in.Azure.DeepEqual(&other.Azure) {
return false
}
if !in.IPAM.DeepEqual(&other.IPAM) {
return false
}
if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortInfo) DeepEqual(other *PortInfo) bool {
if other == nil {
return false
}
if in.Port != other.Port {
return false
}
if in.Protocol != other.Protocol {
return false
}
if in.Name != other.Name {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RedirectBackend) DeepEqual(other *RedirectBackend) bool {
if other == nil {
return false
}
if !in.LocalEndpointSelector.DeepEqual(&other.LocalEndpointSelector) {
return false
}
if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
in, other := &in.ToPorts, &other.ToPorts
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RedirectFrontend) DeepEqual(other *RedirectFrontend) bool {
if other == nil {
return false
}
if (in.AddressMatcher == nil) != (other.AddressMatcher == nil) {
return false
} else if in.AddressMatcher != nil {
if !in.AddressMatcher.DeepEqual(other.AddressMatcher) {
return false
}
}
if (in.ServiceMatcher == nil) != (other.ServiceMatcher == nil) {
return false
} else if in.ServiceMatcher != nil {
if !in.ServiceMatcher.DeepEqual(other.ServiceMatcher) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Service) DeepEqual(other *Service) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Namespace != other.Namespace {
return false
}
if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
in, other := &in.Ports, &other.Ports
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceInfo) DeepEqual(other *ServiceInfo) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Namespace != other.Namespace {
return false
}
if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
in, other := &in.ToPorts, &other.ToPorts
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceListener) DeepEqual(other *ServiceListener) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Namespace != other.Namespace {
return false
}
if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
in, other := &in.Ports, &other.Ports
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if in.Listener != other.Listener {
return false
}
return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package labels
import (
"fmt"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// Labels allows you to present labels independently from their storage.
type Labels interface {
// Has returns whether the provided label exists.
Has(label string) (exists bool)
// Get returns the value for the provided label.
Get(label string) (value string)
}
// Set is a map of label:value. It implements Labels.
type Set map[string]string
// String returns all labels listed as a human readable string.
// Conveniently, exactly the format that ParseSelector takes.
func (ls Set) String() string {
selector := make([]string, 0, len(ls))
for key, value := range ls {
selector = append(selector, key+"="+value)
}
// Sort for determinism.
sort.StringSlice(selector).Sort()
return strings.Join(selector, ",")
}
// Has returns whether the provided label exists in the map.
func (ls Set) Has(label string) bool {
_, exists := ls[label]
return exists
}
// Get returns the value in the map for the provided label.
func (ls Set) Get(label string) string {
return ls[label]
}
// AsSelector converts labels into a selector. It does not
// perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func (ls Set) AsSelector() Selector {
return SelectorFromSet(ls)
}
// AsValidatedSelector converts labels into a selector.
// The Set is validated client-side, which allows catching errors early.
func (ls Set) AsValidatedSelector() (Selector, error) {
return ValidatedSelectorFromSet(ls)
}
// AsSelectorPreValidated converts labels into a selector, but
// assumes that labels are already validated and thus doesn't
// perform any validation.
// According to our measurements this is significantly faster
// in codepaths that matter at high scale.
// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector
// instead, which does not copy.
func (ls Set) AsSelectorPreValidated() Selector {
return SelectorFromValidatedSet(ls)
}
// FormatLabels converts a label map into a plain string.
func FormatLabels(labelMap map[string]string) string {
l := Set(labelMap).String()
if l == "" {
l = "<none>"
}
return l
}
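// Illustrative sketch, not part of the upstream file: Set.String yields a
// deterministic, sorted "key=value" list, and FormatLabels falls back to
// "<none>" for an empty map. The label names below are made up.
func formatLabelsExample() (nonEmpty, empty string) {
ls := Set{"app": "hubble", "tier": "observability"}
nonEmpty = ls.String() // "app=hubble,tier=observability"
empty = FormatLabels(nil) // "<none>"
return nonEmpty, empty
}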
// Conflicts takes 2 maps and returns true if there is a key match between
// the maps but the values don't match, and returns false in other cases.
func Conflicts(labels1, labels2 Set) bool {
small := labels1
big := labels2
if len(labels2) < len(labels1) {
small = labels2
big = labels1
}
for k, v := range small {
if val, match := big[k]; match {
if val != v {
return true
}
}
}
return false
}
// Merge combines the given maps, and does not check for any conflicts
// between the maps. In case of conflicts, the second map (labels2) wins.
func Merge(labels1, labels2 Set) Set {
mergedMap := Set{}
for k, v := range labels1 {
mergedMap[k] = v
}
for k, v := range labels2 {
mergedMap[k] = v
}
return mergedMap
}
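// Illustrative sketch, not part of the upstream file: combining Conflicts and
// Merge so a merge only happens when the two Sets agree on every shared key.
// The helper name is hypothetical.
func mergeWithoutConflicts(labels1, labels2 Set) (Set, bool) {
if Conflicts(labels1, labels2) {
// Some key exists in both maps with different values; refuse to merge.
return nil, false
}
return Merge(labels1, labels2), true
}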
// Equals returns true if the given maps are equal
func Equals(labels1, labels2 Set) bool {
if len(labels1) != len(labels2) {
return false
}
for k, v := range labels1 {
value, ok := labels2[k]
if !ok {
return false
}
if value != v {
return false
}
}
return true
}
// ConvertSelectorToLabelsMap converts a selector string to a labels map
// and validates the keys and values.
func ConvertSelectorToLabelsMap(selector string, opts ...field.PathOption) (Set, error) {
labelsMap := Set{}
if len(selector) == 0 {
return labelsMap, nil
}
labels := strings.Split(selector, ",")
for _, label := range labels {
l := strings.Split(label, "=")
if len(l) != 2 {
return labelsMap, fmt.Errorf("invalid selector: %s", l)
}
key := strings.TrimSpace(l[0])
if err := validateLabelKey(key, field.ToPath(opts...)); err != nil {
return labelsMap, err
}
value := strings.TrimSpace(l[1])
if err := validateLabelValue(key, value, field.ToPath(opts...)); err != nil {
return labelsMap, err
}
labelsMap[key] = value
}
return labelsMap, nil
}
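// Illustrative sketch, not part of the upstream file: round-tripping a simple
// equality-based selector string through ConvertSelectorToLabelsMap and back
// through Set.String. The selector string is made up.
func selectorRoundTripExample() (Set, string, error) {
ls, err := ConvertSelectorToLabelsMap("app=hubble,tier=observability")
if err != nil {
return nil, "", err
}
// Set.String sorts keys, so the round trip is stable:
// "app=hubble,tier=observability"
return ls, ls.String(), nil
}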
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package labels
func FuzzLabelsParse(data []byte) int {
_, _ = Parse(string(data))
return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package labels
import (
"fmt"
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
stringslices "k8s.io/utils/strings/slices"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection"
)
var (
unaryOperators = []string{
string(selection.Exists), string(selection.DoesNotExist),
}
binaryOperators = []string{
string(selection.In), string(selection.NotIn),
string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals),
string(selection.GreaterThan), string(selection.LessThan),
}
validRequirementOperators = append(binaryOperators, unaryOperators...)
)
// Requirements is AND of all requirements.
type Requirements []Requirement
// Selector represents a label selector.
type Selector interface {
// Matches returns true if this selector matches the given set of labels.
Matches(Labels) bool
// Empty returns true if this selector does not restrict the selection space.
Empty() bool
// String returns a human readable string that represents this selector.
String() string
// Add adds requirements to the Selector
Add(r ...Requirement) Selector
// Requirements converts this interface into Requirements to expose
// more detailed selection information.
// If there are querying parameters, it will return converted requirements and selectable=true.
// If this selector doesn't want to select anything, it will return selectable=false.
Requirements() (requirements Requirements, selectable bool)
// Make a deep copy of the selector.
DeepCopySelector() Selector
// RequiresExactMatch allows a caller to introspect whether a given selector
// requires a single specific label to be set, and if so returns the value it
// requires.
RequiresExactMatch(label string) (value string, found bool)
}
// Sharing this saves 1 alloc per use; this is safe because it's immutable.
var sharedEverythingSelector Selector = internalSelector{}
// Everything returns a selector that matches all labels.
func Everything() Selector {
return sharedEverythingSelector
}
type nothingSelector struct{}
func (n nothingSelector) Matches(_ Labels) bool { return false }
func (n nothingSelector) Empty() bool { return false }
func (n nothingSelector) String() string { return "" }
func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
func (n nothingSelector) DeepCopySelector() Selector { return n }
func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) {
return "", false
}
// Sharing this saves 1 alloc per use; this is safe because it's immutable.
var sharedNothingSelector Selector = nothingSelector{}
// Nothing returns a selector that matches no labels
func Nothing() Selector {
return sharedNothingSelector
}
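// Illustrative sketch, not part of the upstream file: the two shared selectors
// at the extremes of the selection space. Everything matches any label Set,
// Nothing matches none.
func extremeSelectorsExample(ls Set) (matchesAll, matchesNone bool) {
matchesAll = Everything().Matches(ls) // always true
matchesNone = Nothing().Matches(ls) // always false
return matchesAll, matchesNone
}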
// NewSelector returns a nil selector
func NewSelector() Selector {
return internalSelector(nil)
}
type internalSelector []Requirement
func (s internalSelector) DeepCopy() internalSelector {
if s == nil {
return nil
}
result := make([]Requirement, len(s))
for i := range s {
s[i].DeepCopyInto(&result[i])
}
return result
}
func (s internalSelector) DeepCopySelector() Selector {
return s.DeepCopy()
}
// ByKey sorts requirements by key to obtain deterministic parser output.
type ByKey []Requirement
func (a ByKey) Len() int { return len(a) }
func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key }
// Requirement contains values, a key, and an operator that relates the key and values.
// The zero value of Requirement is invalid.
// Requirement implements both set-based match and exact match.
// Requirement should be initialized via the NewRequirement constructor for creating a valid Requirement.
// +k8s:deepcopy-gen=true
type Requirement struct {
key string
operator selection.Operator
// In the huge majority of cases we have at most one value here.
// It is generally faster to operate on a single-element slice
// than on a single-element map, so we have a slice here.
strValues []string
}
// NewRequirement is the constructor for a Requirement.
// If any of these rules is violated, an error is returned:
// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist.
// 2. If the operator is In or NotIn, the values set must be non-empty.
// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
// 4. If the operator is Exists or DoesNotExist, the value set must be empty.
// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details.
//
// The empty string is a valid value in the input values set.
// Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList
func NewRequirement(key string, op selection.Operator, vals []string, opts ...field.PathOption) (*Requirement, error) {
var allErrs field.ErrorList
path := field.ToPath(opts...)
if err := validateLabelKey(key, path.Child("key")); err != nil {
allErrs = append(allErrs, err)
}
valuePath := path.Child("values")
switch op {
case selection.In, selection.NotIn:
if len(vals) == 0 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'in', 'notin' operators, values set can't be empty"))
}
case selection.Equals, selection.DoubleEquals, selection.NotEquals:
if len(vals) != 1 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "exact-match compatibility requires one single value"))
}
case selection.Exists, selection.DoesNotExist:
if len(vals) != 0 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "values set must be empty for exists and does not exist"))
}
case selection.GreaterThan, selection.LessThan:
if len(vals) != 1 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'Gt', 'Lt' operators, exactly one value is required"))
}
for i := range vals {
if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil {
allErrs = append(allErrs, field.Invalid(valuePath.Index(i), vals[i], "for 'Gt', 'Lt' operators, the value must be an integer"))
}
}
default:
allErrs = append(allErrs, field.NotSupported(path.Child("operator"), op, validRequirementOperators))
}
for i := range vals {
if err := validateLabelValue(key, vals[i], valuePath.Index(i)); err != nil {
allErrs = append(allErrs, err)
}
}
return &Requirement{key: key, operator: op, strValues: vals}, allErrs.ToAggregate()
}
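// Illustrative sketch, not part of the upstream file: building a Requirement
// with NewRequirement and matching it against a label Set, per the rules listed
// above. The key and values are made up.
func requirementExample() (bool, error) {
req, err := NewRequirement("env", selection.In, []string{"prod", "staging"})
if err != nil {
return false, err
}
return req.Matches(Set{"env": "prod"}), nil // true
}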
func (r *Requirement) hasValue(value string) bool {
for i := range r.strValues {
if r.strValues[i] == value {
return true
}
}
return false
}
// Matches returns true if the Requirement matches the input Labels.
// There is a match in the following cases:
// 1. The operator is Exists and Labels has the Requirement's key.
// 2. The operator is In, Labels has the Requirement's key and Labels'
// value for that key is in Requirement's value set.
// 3. The operator is NotIn, Labels has the Requirement's key and
// Labels' value for that key is not in Requirement's value set.
// 4. The operator is DoesNotExist or NotIn and Labels does not have the
// Requirement's key.
// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has
// the Requirement's key and the corresponding value satisfies mathematical inequality.
func (r *Requirement) Matches(ls Labels) bool {
switch r.operator {
case selection.In, selection.Equals, selection.DoubleEquals:
if !ls.Has(r.key) {
return false
}
return r.hasValue(ls.Get(r.key))
case selection.NotIn, selection.NotEquals:
if !ls.Has(r.key) {
return true
}
return !r.hasValue(ls.Get(r.key))
case selection.Exists:
return ls.Has(r.key)
case selection.DoesNotExist:
return !ls.Has(r.key)
case selection.GreaterThan, selection.LessThan:
if !ls.Has(r.key) {
return false
}
lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
if err != nil {
klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
return false
}
// There should be only one strValue in r.strValues, and can be converted to an integer.
if len(r.strValues) != 1 {
klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
return false
}
var rValue int64
for i := range r.strValues {
rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
if err != nil {
klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
return false
}
}
return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue)
default:
return false
}
}
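// Illustrative sketch, not part of the upstream file: Gt/Lt requirements compare
// the label value and the requirement value as base-10 integers, as described in
// Matches above. The key and values are made up.
func numericRequirementExample() (bool, error) {
req, err := NewRequirement("replicas", selection.GreaterThan, []string{"3"})
if err != nil {
return false, err
}
return req.Matches(Set{"replicas": "5"}), nil // true, since 5 > 3
}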
// Key returns requirement key
func (r *Requirement) Key() string {
return r.key
}
// Operator returns requirement operator
func (r *Requirement) Operator() selection.Operator {
return r.operator
}
// Values returns requirement values
func (r *Requirement) Values() sets.Set[string] {
ret := sets.New[string]()
for i := range r.strValues {
ret.Insert(r.strValues[i])
}
return ret
}
// Equal checks the equality of requirement.
func (r Requirement) Equal(x Requirement) bool {
if r.key != x.key {
return false
}
if r.operator != x.operator {
return false
}
return stringslices.Equal(r.strValues, x.strValues)
}
// Empty returns true if the internalSelector doesn't restrict selection space
func (s internalSelector) Empty() bool {
if s == nil {
return true
}
return len(s) == 0
}
// String returns a human-readable string that represents this
// Requirement. The output is only meaningful for a valid Requirement;
// see NewRequirement for creating one.
func (r *Requirement) String() string {
var sb strings.Builder
sb.Grow(
// length of r.key
len(r.key) +
// length of 'r.operator' + 2 spaces for the worst case ('in' and 'notin')
len(r.operator) + 2 +
// length of the 'r.strValues' slice: heuristically 5 chars per value
+5*len(r.strValues))
if r.operator == selection.DoesNotExist {
sb.WriteString("!")
}
sb.WriteString(r.key)
switch r.operator {
case selection.Equals:
sb.WriteString("=")
case selection.DoubleEquals:
sb.WriteString("==")
case selection.NotEquals:
sb.WriteString("!=")
case selection.In:
sb.WriteString(" in ")
case selection.NotIn:
sb.WriteString(" notin ")
case selection.GreaterThan:
sb.WriteString(">")
case selection.LessThan:
sb.WriteString("<")
case selection.Exists, selection.DoesNotExist:
return sb.String()
}
switch r.operator {
case selection.In, selection.NotIn:
sb.WriteString("(")
}
if len(r.strValues) == 1 {
sb.WriteString(r.strValues[0])
} else { // only > 1 since == 0 prohibited by NewRequirement
// normalizes value order on output, without mutating the in-memory selector representation
// also avoids normalization when it is not required, and ensures we do not mutate shared data
sb.WriteString(strings.Join(safeSort(r.strValues), ","))
}
switch r.operator {
case selection.In, selection.NotIn:
sb.WriteString(")")
}
return sb.String()
}
// safeSort returns a sorted copy of the input strings, leaving the input unmodified.
func safeSort(in []string) []string {
if sort.StringsAreSorted(in) {
return in
}
out := make([]string, len(in))
copy(out, in)
sort.Strings(out)
return out
}
// Add adds requirements to the selector. It copies the current selector returning a new one
func (s internalSelector) Add(reqs ...Requirement) Selector {
ret := make(internalSelector, 0, len(s)+len(reqs))
ret = append(ret, s...)
ret = append(ret, reqs...)
sort.Sort(ByKey(ret))
return ret
}
// Matches for an internalSelector returns true if all
// its Requirements match the input Labels. If any
// Requirement does not match, false is returned.
func (s internalSelector) Matches(l Labels) bool {
for ix := range s {
if matches := s[ix].Matches(l); !matches {
return false
}
}
return true
}
func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true }
// String returns a comma-separated string of all
// the internalSelector Requirements' human-readable strings.
func (s internalSelector) String() string {
var reqs []string
for ix := range s {
reqs = append(reqs, s[ix].String())
}
return strings.Join(reqs, ",")
}
// RequiresExactMatch introspects whether a given selector requires a single specific label
// to be set, and if so returns the value it requires.
func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) {
for ix := range s {
if s[ix].key == label {
switch s[ix].operator {
case selection.Equals, selection.DoubleEquals, selection.In:
if len(s[ix].strValues) == 1 {
return s[ix].strValues[0], true
}
}
return "", false
}
}
return "", false
}
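// Illustrative sketch, not part of the upstream file: assembling a selector from
// individual Requirements via NewSelector/Add, then querying it with Matches and
// RequiresExactMatch. The labels are made up.
func buildSelectorExample() (matched bool, exact string, err error) {
appReq, err := NewRequirement("app", selection.Equals, []string{"hubble"})
if err != nil {
return false, "", err
}
envReq, err := NewRequirement("env", selection.NotIn, []string{"dev"})
if err != nil {
return false, "", err
}
sel := NewSelector().Add(*appReq, *envReq)
matched = sel.Matches(Set{"app": "hubble", "env": "prod"}) // true
exact, _ = sel.RequiresExactMatch("app") // "hubble"
return matched, exact, nil
}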
// Token represents a constant definition for a lexer token.
type Token int
const (
// ErrorToken represents scan error
ErrorToken Token = iota
// EndOfStringToken represents end of string
EndOfStringToken
// ClosedParToken represents close parenthesis
ClosedParToken
// CommaToken represents the comma
CommaToken
// DoesNotExistToken represents logic not
DoesNotExistToken
// DoubleEqualsToken represents double equals
DoubleEqualsToken
// EqualsToken represents equal
EqualsToken
// GreaterThanToken represents greater than
GreaterThanToken
// IdentifierToken represents identifier, e.g. keys and values
IdentifierToken
// InToken represents in
InToken
// LessThanToken represents less than
LessThanToken
// NotEqualsToken represents not equal
NotEqualsToken
// NotInToken represents not in
NotInToken
// OpenParToken represents open parenthesis
OpenParToken
)
// string2token contains the mapping between lexer Token and token literal
// (except IdentifierToken, EndOfStringToken and ErrorToken, which have no fixed literal)
var string2token = map[string]Token{
")": ClosedParToken,
",": CommaToken,
"!": DoesNotExistToken,
"==": DoubleEqualsToken,
"=": EqualsToken,
">": GreaterThanToken,
"in": InToken,
"<": LessThanToken,
"!=": NotEqualsToken,
"notin": NotInToken,
"(": OpenParToken,
}
// ScannedItem contains the Token and the literal produced by the lexer.
type ScannedItem struct {
tok Token
literal string
}
// isWhitespace returns true if the byte is a space, tab, carriage return, or newline.
func isWhitespace(ch byte) bool {
return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'
}
// isSpecialSymbol detects if the character ch can be an operator
func isSpecialSymbol(ch byte) bool {
switch ch {
case '=', '!', '(', ')', ',', '>', '<':
return true
}
return false
}
// Lexer represents the lexer for label selectors.
// It contains the necessary information to tokenize the input string.
type Lexer struct {
// s stores the string to be tokenized
s string
// pos is the position currently tokenized
pos int
}
// read returns the character currently lexed,
// increments the position, and checks for buffer overflow.
func (l *Lexer) read() (b byte) {
b = 0
if l.pos < len(l.s) {
b = l.s[l.pos]
l.pos++
}
return b
}
// unread 'undoes' the last read character
func (l *Lexer) unread() {
l.pos--
}
// scanIDOrKeyword scans the string to recognize a literal token (for example 'in') or an identifier.
func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) {
var buffer []byte
IdentifierLoop:
for {
switch ch := l.read(); {
case ch == 0:
break IdentifierLoop
case isSpecialSymbol(ch) || isWhitespace(ch):
l.unread()
break IdentifierLoop
default:
buffer = append(buffer, ch)
}
}
s := string(buffer)
if val, ok := string2token[s]; ok { // is a literal token?
return val, s
}
return IdentifierToken, s // otherwise is an identifier
}
// scanSpecialSymbol scans a string starting with a special symbol.
// Special symbols identify non-literal operators such as "!=", "==", and "=".
func (l *Lexer) scanSpecialSymbol() (Token, string) {
lastScannedItem := ScannedItem{}
var buffer []byte
SpecialSymbolLoop:
for {
switch ch := l.read(); {
case ch == 0:
break SpecialSymbolLoop
case isSpecialSymbol(ch):
buffer = append(buffer, ch)
if token, ok := string2token[string(buffer)]; ok {
lastScannedItem = ScannedItem{tok: token, literal: string(buffer)}
} else if lastScannedItem.tok != 0 {
l.unread()
break SpecialSymbolLoop
}
default:
l.unread()
break SpecialSymbolLoop
}
}
if lastScannedItem.tok == 0 {
return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer)
}
return lastScannedItem.tok, lastScannedItem.literal
}
// skipWhiteSpaces consumes all blank characters,
// returning the first non-blank character.
func (l *Lexer) skipWhiteSpaces(ch byte) byte {
for {
if !isWhitespace(ch) {
return ch
}
ch = l.read()
}
}
// Lex returns a pair of Token and literal; the
// literal is meaningful only for the IdentifierToken token.
func (l *Lexer) Lex() (tok Token, lit string) {
switch ch := l.skipWhiteSpaces(l.read()); {
case ch == 0:
return EndOfStringToken, ""
case isSpecialSymbol(ch):
l.unread()
return l.scanSpecialSymbol()
default:
l.unread()
return l.scanIDOrKeyword()
}
}
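// Illustrative sketch, not part of the upstream file: driving the Lexer directly
// to tokenize a selector string. The input string is made up.
func lexExample() []ScannedItem {
l := &Lexer{s: "env notin (dev)", pos: 0}
var items []ScannedItem
for {
tok, lit := l.Lex()
items = append(items, ScannedItem{tok: tok, literal: lit})
if tok == EndOfStringToken || tok == ErrorToken {
return items
}
}
}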
// Parser is the data structure used to parse label selectors.
type Parser struct {
l *Lexer
scannedItems []ScannedItem
position int
}
// ParserContext represents the context during parsing:
// some literals, for example 'in' and 'notin', can be
// recognized as operators (for example 'x in (a)') but
// also as values (for example 'value in (in)').
type ParserContext int
const (
// KeyAndOperator represents key and operator
KeyAndOperator ParserContext = iota
// Values represents values
Values
)
// lookahead returns the current token and literal without incrementing the current position.
func (p *Parser) lookahead(context ParserContext) (Token, string) {
tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal
if context == Values {
switch tok {
case InToken, NotInToken:
tok = IdentifierToken
}
}
return tok, lit
}
// consume returns the current token and literal, and increments the position.
func (p *Parser) consume(context ParserContext) (Token, string) {
p.position++
tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal
if context == Values {
switch tok {
case InToken, NotInToken:
tok = IdentifierToken
}
}
return tok, lit
}
// scan runs through the input string and stores the ScannedItems in a slice
// so the Parser can then lookahead and consume the tokens.
func (p *Parser) scan() {
for {
token, literal := p.l.Lex()
p.scannedItems = append(p.scannedItems, ScannedItem{token, literal})
if token == EndOfStringToken {
break
}
}
}
// parse runs the recursive descent algorithm
// on the input string. It returns a list of Requirement objects.
func (p *Parser) parse() (internalSelector, error) {
p.scan() // init scannedItems
var requirements internalSelector
for {
tok, lit := p.lookahead(Values)
switch tok {
case IdentifierToken, DoesNotExistToken:
r, err := p.parseRequirement()
if err != nil {
return nil, fmt.Errorf("unable to parse requirement: %w", err)
}
requirements = append(requirements, *r)
t, l := p.consume(Values)
switch t {
case EndOfStringToken:
return requirements, nil
case CommaToken:
t2, l2 := p.lookahead(Values)
if t2 != IdentifierToken && t2 != DoesNotExistToken {
return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2)
}
default:
return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l)
}
case EndOfStringToken:
return requirements, nil
default:
return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit)
}
}
}
func (p *Parser) parseRequirement() (*Requirement, error) {
key, operator, err := p.parseKeyAndInferOperator()
if err != nil {
return nil, err
}
if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked
return NewRequirement(key, operator, []string{})
}
operator, err = p.parseOperator()
if err != nil {
return nil, err
}
var values sets.Set[string]
switch operator {
case selection.In, selection.NotIn:
values, err = p.parseValues()
case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
values, err = p.parseExactValue()
}
if err != nil {
return nil, err
}
return NewRequirement(key, operator, sets.List(values))
}
// parseKeyAndInferOperator parses the key literal.
// If no operator among '!, in, notin, ==, =, !=' is found,
// the 'exists' operator is inferred.
func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) {
var operator selection.Operator
tok, literal := p.consume(Values)
if tok == DoesNotExistToken {
operator = selection.DoesNotExist
tok, literal = p.consume(Values)
}
if tok != IdentifierToken {
err := fmt.Errorf("found '%s', expected: identifier", literal)
return "", "", err
}
if err := validateLabelKey(literal, nil); err != nil {
return "", "", err
}
if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken {
if operator != selection.DoesNotExist {
operator = selection.Exists
}
}
return literal, operator, nil
}
// parseOperator parses and returns the binary operator that follows the key.
func (p *Parser) parseOperator() (op selection.Operator, err error) {
tok, lit := p.consume(KeyAndOperator)
switch tok {
// DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator
case InToken:
op = selection.In
case EqualsToken:
op = selection.Equals
case DoubleEqualsToken:
op = selection.DoubleEquals
case GreaterThanToken:
op = selection.GreaterThan
case LessThanToken:
op = selection.LessThan
case NotInToken:
op = selection.NotIn
case NotEqualsToken:
op = selection.NotEquals
default:
return "", fmt.Errorf("found '%s', expected: %v", lit, strings.Join(binaryOperators, ", "))
}
return op, nil
}
// parseValues parses the values for set-based matching (x,y,z)
func (p *Parser) parseValues() (sets.Set[string], error) {
tok, lit := p.consume(Values)
if tok != OpenParToken {
return nil, fmt.Errorf("found '%s' expected: '('", lit)
}
tok, lit = p.lookahead(Values)
switch tok {
case IdentifierToken, CommaToken:
s, err := p.parseIdentifiersList() // handles general cases
if err != nil {
return s, err
}
if tok, _ = p.consume(Values); tok != ClosedParToken {
return nil, fmt.Errorf("found '%s', expected: ')'", lit)
}
return s, nil
case ClosedParToken: // handles "()"
p.consume(Values)
return sets.New[string](""), nil
default:
return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
}
}
// parseIdentifiersList parses a (possibly empty) list
// of comma-separated (possibly empty) identifiers.
func (p *Parser) parseIdentifiersList() (sets.Set[string], error) {
s := sets.New[string]()
for {
tok, lit := p.consume(Values)
switch tok {
case IdentifierToken:
s.Insert(lit)
tok2, lit2 := p.lookahead(Values)
switch tok2 {
case CommaToken:
continue
case ClosedParToken:
return s, nil
default:
return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
}
case CommaToken: // handled here since we can have "(,"
if s.Len() == 0 {
s.Insert("") // to handle (,
}
tok2, _ := p.lookahead(Values)
if tok2 == ClosedParToken {
s.Insert("") // to handle ,) Double "" removed by StringSet
return s, nil
}
if tok2 == CommaToken {
p.consume(Values)
s.Insert("") // to handle ,, Double "" removed by StringSet
}
default: // it can be operator
return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
}
}
}
// parseExactValue parses the only value for exact match style
func (p *Parser) parseExactValue() (sets.Set[string], error) {
s := sets.New[string]()
tok, _ := p.lookahead(Values)
if tok == EndOfStringToken || tok == CommaToken {
s.Insert("")
return s, nil
}
tok, lit := p.consume(Values)
if tok == IdentifierToken {
s.Insert(lit)
return s, nil
}
return nil, fmt.Errorf("found '%s', expected: identifier", lit)
}
// Parse takes a string representing a selector and returns a selector
// object, or an error. This parsing function differs from ParseSelector
// as they parse different selectors with different syntaxes.
// The input will cause an error if it does not follow this form:
//
// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax>
// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set>
// <inclusion-exclusion> ::= <inclusion> | <exclusion>
// <exclusion> ::= "notin"
// <inclusion> ::= "in"
// <value-set> ::= "(" <values> ")"
// <values> ::= VALUE | VALUE "," <values>
// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
//
// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
// Delimiter is white space: (' ', '\t')
// Example of valid syntax:
//
// "x in (foo,,baz),y,z notin ()"
//
// Note:
// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the
// VALUEs in its requirement
// 2. Exclusion - " notin " - denotes that the KEY is not equal to any
// of the VALUEs in its requirement or does not exist
// 3. The empty string is a valid VALUE
// 4. A requirement with just a KEY - as in "y" above - denotes that
// the KEY exists and can be any VALUE.
// 5. A requirement with just !KEY requires that the KEY not exist.
func Parse(selector string, opts ...field.PathOption) (Selector, error) {
parsedSelector, err := parse(selector, field.ToPath(opts...))
if err == nil {
return parsedSelector, nil
}
return nil, err
}
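// Illustrative sketch, not part of the upstream file: parsing the example
// selector from the Parse documentation above and matching it against a label
// Set. The label values are made up.
func parseExample() (bool, error) {
sel, err := Parse("x in (foo,,baz),y,z notin ()")
if err != nil {
return false, err
}
// "y" only requires the key to exist; "z notin ()" excludes the empty value.
return sel.Matches(Set{"x": "foo", "y": "anything", "z": "bar"}), nil // true
}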
// parse parses the string representation of the selector and returns the internalSelector struct.
// The callers of this method can then decide how to return the internalSelector struct to their
// callers. This function has two callers now, one returns a Selector interface and the other
// returns a list of requirements.
func parse(selector string, _ *field.Path) (internalSelector, error) {
p := &Parser{l: &Lexer{s: selector, pos: 0}}
items, err := p.parse()
if err != nil {
return nil, err
}
sort.Sort(ByKey(items)) // sort to guarantee deterministic parsing
return internalSelector(items), err
}
func validateLabelKey(k string, path *field.Path) *field.Error {
if errs := validation.IsQualifiedName(k); len(errs) != 0 {
return field.Invalid(path, k, strings.Join(errs, "; "))
}
return nil
}
func validateLabelValue(k, v string, path *field.Path) *field.Error {
if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
return field.Invalid(path.Key(k), v, strings.Join(errs, "; "))
}
return nil
}
// SelectorFromSet returns a Selector which will match exactly the given Set.
// Nil and empty Sets are considered equivalent to Everything().
// It does not perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func SelectorFromSet(ls Set) Selector {
return SelectorFromValidatedSet(ls)
}
// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set.
// Nil and empty Sets are considered equivalent to Everything().
// The Set is validated client-side, which allows catching errors early.
func ValidatedSelectorFromSet(ls Set) (Selector, error) {
if len(ls) == 0 {
return internalSelector{}, nil
}
requirements := make([]Requirement, 0, len(ls))
for label, value := range ls {
r, err := NewRequirement(label, selection.Equals, []string{value})
if err != nil {
return nil, err
}
requirements = append(requirements, *r)
}
// sort to have deterministic string representation
sort.Sort(ByKey(requirements))
return internalSelector(requirements), nil
}
// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
// Nil and empty Sets are considered equivalent to Everything().
// It assumes that Set is already validated and doesn't do any validation.
// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector
// instead, which does not copy.
func SelectorFromValidatedSet(ls Set) Selector {
if len(ls) == 0 {
return internalSelector{}
}
requirements := make([]Requirement, 0, len(ls))
for label, value := range ls {
requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}})
}
// sort to have deterministic string representation
sort.Sort(ByKey(requirements))
return internalSelector(requirements)
}
// ParseToRequirements takes a string representing a selector and returns a list of
// requirements. This function is suitable for those callers that perform additional
// processing on selector requirements.
// See the documentation for Parse() function for more details.
// TODO: Consider exporting the internalSelector type instead.
func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) {
return parse(selector, field.ToPath(opts...))
}
// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike
// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying
// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered
// equivalent to Everything().
//
// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these
// constraints are not met, Set.AsValidatedSelector should be preferred.
//
// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to
// the less optimized version.
type ValidatedSetSelector Set
func (s ValidatedSetSelector) Matches(labels Labels) bool {
for k, v := range s {
if !labels.Has(k) || v != labels.Get(k) {
return false
}
}
return true
}
func (s ValidatedSetSelector) Empty() bool {
return len(s) == 0
}
func (s ValidatedSetSelector) String() string {
keys := make([]string, 0, len(s))
for k := range s {
keys = append(keys, k)
}
// Ensure deterministic output
sort.Strings(keys)
b := strings.Builder{}
for i, key := range keys {
v := s[key]
b.Grow(len(key) + 2 + len(v))
if i != 0 {
b.WriteString(",")
}
b.WriteString(key)
b.WriteString("=")
b.WriteString(v)
}
return b.String()
}
func (s ValidatedSetSelector) Add(r ...Requirement) Selector {
return s.toFullSelector().Add(r...)
}
func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) {
return s.toFullSelector().Requirements()
}
func (s ValidatedSetSelector) DeepCopySelector() Selector {
res := make(ValidatedSetSelector, len(s))
for k, v := range s {
res[k] = v
}
return res
}
func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) {
v, f := s[label]
return v, f
}
func (s ValidatedSetSelector) toFullSelector() Selector {
return SelectorFromValidatedSet(Set(s))
}
var _ Selector = ValidatedSetSelector{}
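
// exampleValidatedSetSelector is an illustrative sketch (hypothetical, not
// part of the upstream sources): wrapping an already-validated Set avoids the
// copy made by SelectorFromValidatedSet while still satisfying Selector.
func exampleValidatedSetSelector() {
	sel := ValidatedSetSelector(Set{"app": "web"})
	_ = sel.Matches(Set{"app": "web", "env": "prod"}) // true
	_ = sel.Matches(Set{"app": "api"})                // false
	_ = sel.String()                                  // "app=web"
}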
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package labels
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Requirement) DeepCopyInto(out *Requirement) {
*out = *in
if in.strValues != nil {
in, out := &in.strValues, &out.strValues
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement.
func (in *Requirement) DeepCopy() *Requirement {
if in == nil {
return nil
}
out := new(Requirement)
in.DeepCopyInto(out)
return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package labels
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ByKey) DeepEqual(other *ByKey) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Lexer) DeepEqual(other *Lexer) bool {
if other == nil {
return false
}
if in.s != other.s {
return false
}
if in.pos != other.pos {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Parser) DeepEqual(other *Parser) bool {
if other == nil {
return false
}
if (in.l == nil) != (other.l == nil) {
return false
} else if in.l != nil {
if !in.l.DeepEqual(other.l) {
return false
}
}
if ((in.scannedItems != nil) && (other.scannedItems != nil)) || ((in.scannedItems == nil) != (other.scannedItems == nil)) {
in, other := &in.scannedItems, &other.scannedItems
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if in.position != other.position {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Requirement) DeepEqual(other *Requirement) bool {
if other == nil {
return false
}
if in.key != other.key {
return false
}
if in.operator != other.operator {
return false
}
if ((in.strValues != nil) && (other.strValues != nil)) || ((in.strValues == nil) != (other.strValues == nil)) {
in, other := &in.strValues, &other.strValues
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Requirements) DeepEqual(other *Requirements) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ScannedItem) DeepEqual(other *ScannedItem) bool {
if other == nil {
return false
}
if in.tok != other.tok {
return false
}
if in.literal != other.literal {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Set) DeepEqual(other *Set) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ValidatedSetSelector) DeepEqual(other *ValidatedSetSelector) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"sort"
"strings"
)
// LabelArray is an array of labels forming a set
type LabelArray []Label
// Sort sorts the LabelArray by label key, for use when the source material
// may be unsorted. 'ls' is sorted in-place, but the sorted array is also
// returned for convenience.
func (ls LabelArray) Sort() LabelArray {
sort.Slice(ls, func(i, j int) bool {
return ls[i].Key < ls[j].Key
})
return ls
}
// ParseLabelArray parses a list of labels and returns a LabelArray
func ParseLabelArray(labels ...string) LabelArray {
array := make(LabelArray, len(labels))
for i := range labels {
array[i] = ParseLabel(labels[i])
}
return array.Sort()
}
// ParseSelectLabelArray parses a list of select labels and returns a LabelArray
func ParseSelectLabelArray(labels ...string) LabelArray {
array := make(LabelArray, len(labels))
for i := range labels {
array[i] = ParseSelectLabel(labels[i])
}
return array.Sort()
}
// ParseLabelArrayFromArray converts an array of strings as labels and returns a LabelArray
func ParseLabelArrayFromArray(base []string) LabelArray {
array := make(LabelArray, len(base))
for i := range base {
array[i] = ParseLabel(base[i])
}
return array.Sort()
}
// NewLabelArrayFromSortedList returns labels based on the output of SortedList()
// Trailing ';' will result in an empty key that must be filtered out.
func NewLabelArrayFromSortedList(list string) LabelArray {
base := strings.Split(list, ";")
array := make(LabelArray, 0, len(base))
for _, v := range base {
if lbl := ParseLabel(v); lbl.Key != "" {
array = append(array, lbl)
}
}
return array
}
// ParseSelectLabelArrayFromArray converts an array of strings as select labels and returns a LabelArray
func ParseSelectLabelArrayFromArray(base []string) LabelArray {
array := make(LabelArray, len(base))
for i := range base {
array[i] = ParseSelectLabel(base[i])
}
return array.Sort()
}
// Labels returns the LabelArray as Labels
func (ls LabelArray) Labels() Labels {
lbls := Labels{}
for _, l := range ls {
lbls[l.Key] = l
}
return lbls
}
// Contains returns true if ls contains all the labels in needed. If
// needed contains no labels, Contains() will always return true.
func (ls LabelArray) Contains(needed LabelArray) bool {
nextLabel:
for i := range needed {
for l := range ls {
if ls[l].Has(&needed[i]) {
continue nextLabel
}
}
return false
}
return true
}
// Intersects returns true if ls contains at least one label in needed.
//
// This has the same matching semantics as Has, namely,
// ["k8s:foo=bar"].Intersects(["any:foo=bar"]) == true
// ["any:foo=bar"].Intersects(["k8s:foo=bar"]) == false
func (ls LabelArray) Intersects(needed LabelArray) bool {
for _, l := range ls {
for _, n := range needed {
if l.Has(&n) {
return true
}
}
}
return false
}
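
// exampleLabelArrayMatching is an illustrative sketch (hypothetical, not part
// of the upstream sources) of the Contains/Intersects semantics documented
// above: a "k8s:" label satisfies an "any:" requirement, but not vice versa.
func exampleLabelArrayMatching() {
	ls := ParseLabelArray("k8s:app=web", "k8s:tier=frontend")
	_ = ls.Contains(ParseSelectLabelArray("app=web"))   // true: matched as "any:app=web"
	_ = ls.Contains(ParseLabelArray("k8s:env=prod"))    // false: "env" is missing
	_ = ls.Intersects(ParseSelectLabelArray("app=web")) // true

	anyLs := ParseSelectLabelArray("app=web")             // source "any"
	_ = anyLs.Intersects(ParseLabelArray("k8s:app=web")) // false: "any" does not satisfy a "k8s" requirement
}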
// Lacks is identical to Contains but returns all missing labels
func (ls LabelArray) Lacks(needed LabelArray) LabelArray {
missing := LabelArray{}
nextLabel:
for i := range needed {
for l := range ls {
if ls[l].Has(&needed[i]) {
continue nextLabel
}
}
missing = append(missing, needed[i])
}
return missing
}
// Has returns whether the provided key exists in the label array.
// Implementation of the
// github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface.
//
// The key can be of source "any", in which case the source is
// ignored. The inverse, however, is not true.
// ["k8s.foo=bar"].Has("any.foo") => true
// ["any.foo=bar"].Has("k8s.foo") => false
//
// If the key is of source "cidr", this will also match
// broader keys.
// ["cidr:1.1.1.1/32"].Has("cidr.1.0.0.0/8") => true
// ["cidr:1.0.0.0/8"].Has("cidr.1.1.1.1/32") => false
func (ls LabelArray) Has(key string) bool {
// The key is submitted in the form of `source.key=value`
keyLabel := parseSelectLabel(key, '.')
for _, l := range ls {
if l.HasKey(&keyLabel) {
return true
}
}
return false
}
// Get returns the value for the provided key.
// Implementation of the
// github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface.
//
// The key can be of source "any", in which case the source is
// ignored. The inverse, however, is not true.
// ["k8s.foo=bar"].Get("any.foo") => "bar"
// ["any.foo=bar"].Get("k8s.foo") => ""
//
// If the key is of source "cidr", broader keys will also match, with the
// same matching semantics as in Has() above.
func (ls LabelArray) Get(key string) string {
keyLabel := parseSelectLabel(key, '.')
for _, l := range ls {
if l.HasKey(&keyLabel) {
return l.Value
}
}
return ""
}
// DeepCopy returns a deep copy of the labels.
func (ls LabelArray) DeepCopy() LabelArray {
if ls == nil {
return nil
}
o := make(LabelArray, len(ls))
copy(o, ls)
return o
}
// GetModel returns the LabelArray as a string array with fully-qualified labels.
// The output is parseable by ParseLabelArrayFromArray
func (ls LabelArray) GetModel() []string {
res := make([]string, 0, len(ls))
for l := range ls {
res = append(res, ls[l].String())
}
return res
}
func (ls LabelArray) String() string {
var sb strings.Builder
sb.WriteString("[")
for l := range ls {
if l > 0 {
sb.WriteString(" ")
}
sb.WriteString(ls[l].String())
}
sb.WriteString("]")
return sb.String()
}
// StringMap converts LabelArray into map[string]string
// Note: The source is included in the keys with a ':' separator.
// Note: LabelArray does not deduplicate entries, as it is an array. If the
// same source and key appear more than once in a LabelArray, the output map
// may contain fewer entries, since source+key forms the map key. This
// scenario is not expected.
func (ls LabelArray) StringMap() map[string]string {
o := map[string]string{}
for _, v := range ls {
o[v.Source+":"+v.Key] = v.Value
}
return o
}
// Equals returns true if the label arrays are the same, i.e., have the same labels in the same order.
func (ls LabelArray) Equals(b LabelArray) bool {
if len(ls) != len(b) {
return false
}
for l := range ls {
if !ls[l].Equals(&b[l]) {
return false
}
}
return true
}
// Less returns true if ls comes before b in the lexicographical order.
// Assumes both ls and b are already sorted.
func (ls LabelArray) Less(b LabelArray) bool {
lsLen, bLen := len(ls), len(b)
minLen := lsLen
if bLen < minLen {
minLen = bLen
}
for i := 0; i < minLen; i++ {
switch {
// Key
case ls[i].Key < b[i].Key:
return true
case ls[i].Key > b[i].Key:
return false
// Value
case ls[i].Value < b[i].Value:
return true
case ls[i].Value > b[i].Value:
return false
// Source
case ls[i].Source < b[i].Source:
return true
case ls[i].Source > b[i].Source:
return false
}
}
return lsLen < bLen
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import "sort"
// LabelArrayList is an array of LabelArrays. It is primarily intended as a
// simple collection
type LabelArrayList []LabelArray
// DeepCopy returns a deep copy of the LabelArray, with each element also copied.
func (ls LabelArrayList) DeepCopy() LabelArrayList {
if ls == nil {
return nil
}
o := make(LabelArrayList, 0, len(ls))
for _, v := range ls {
o = append(o, v.DeepCopy())
}
return o
}
// GetModel returns the LabelArrayList as a [][]string. Each member LabelArray
// becomes a []string.
func (ls LabelArrayList) GetModel() [][]string {
res := make([][]string, 0, len(ls))
for _, v := range ls {
res = append(res, v.GetModel())
}
return res
}
// Equals returns true if the label arrays lists have the same label arrays in the same order.
func (ls LabelArrayList) Equals(b LabelArrayList) bool {
if len(ls) != len(b) {
return false
}
for l := range ls {
if !ls[l].Equals(b[l]) {
return false
}
}
return true
}
// Sort sorts the LabelArrayList in-place, but also returns the sorted list
// for convenience. The LabelArrays themselves must already be sorted. This is
// true for all constructors of LabelArray.
func (ls LabelArrayList) Sort() LabelArrayList {
sort.Slice(ls, func(i, j int) bool {
return ls[i].Less(ls[j])
})
return ls
}
// Merge incorporates new LabelArrays into an existing LabelArrayList, without
// introducing duplicates, returning the result for convenience. Existing
// duplication in either list is not removed.
func (lsp *LabelArrayList) Merge(include ...LabelArray) LabelArrayList {
lsp.Sort()
incl := LabelArrayList(include).Sort()
return lsp.MergeSorted(incl)
}
// MergeSorted incorporates new labels from 'include' to the receiver,
// both of which must be already sorted.
// LabelArrays are inserted from 'include' to the receiver as needed.
func (lsp *LabelArrayList) MergeSorted(include LabelArrayList) LabelArrayList {
merged := *lsp
i := 0
for j := 0; i < len(include) && j < len(merged); j++ {
if include[i].Less(merged[j]) {
merged = append(merged[:j+1], merged[j:]...) // make space at merged[j]
merged[j] = include[i]
i++
} else if include[i].Equals(merged[j]) {
i++
}
}
// 'include' may have more entries after original labels have been exhausted
if i < len(include) {
merged = append(merged, include[i:]...)
}
*lsp = merged
return *lsp
}
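
// exampleLabelArrayListMerge is an illustrative sketch (hypothetical, not
// part of the upstream sources): Merge keeps the list sorted and does not add
// a LabelArray that is already present.
func exampleLabelArrayListMerge() {
	list := LabelArrayList{ParseLabelArray("k8s:app=web")}
	list.Merge(
		ParseLabelArray("k8s:app=web"),  // duplicate, skipped
		ParseLabelArray("k8s:env=prod"), // new, inserted in sorted position
	)
	_ = len(list) // 2
}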
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"fmt"
"net/netip"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/option"
)
var (
worldLabelNonDualStack = Label{Key: IDNameWorld, Source: LabelSourceReserved}
worldLabelV4 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv4}
worldLabelV6 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv6}
)
// maskedIPToLabel is the base method for serializing an IP + prefix into
// a Label that can be used for creating Labels and EndpointSelector objects.
//
// For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't
// support colons inside the name section of a label.
func maskedIPToLabel(ipStr string, prefix int) Label {
var str strings.Builder
str.Grow(
1 /* preZero */ +
len(ipStr) +
1 /* postZero */ +
2 /*len of prefix*/ +
1, /* '/' */
)
for i := 0; i < len(ipStr); i++ {
if ipStr[i] == ':' {
// EndpointSelector keys can't start or end with a "-", so insert a
// zero at the start or end if it would otherwise have a "-" at that
// position.
if i == 0 {
str.WriteByte('0')
str.WriteByte('-')
continue
}
if i == len(ipStr)-1 {
str.WriteByte('-')
str.WriteByte('0')
continue
}
str.WriteByte('-')
} else {
str.WriteByte(ipStr[i])
}
}
str.WriteRune('/')
str.WriteString(strconv.Itoa(prefix))
return Label{Key: str.String(), Source: LabelSourceCIDR}
}
// IPStringToLabel parses a string and returns it as a CIDR label.
//
// If ip is not a valid IP address or CIDR Prefix, returns an error.
func IPStringToLabel(ip string) (Label, error) {
// factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's
// an IP and not a CIDR.
i := strings.LastIndexByte(ip, '/')
if i < 0 {
parsedIP, err := netip.ParseAddr(ip)
if err != nil {
return Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err)
}
return maskedIPToLabel(ip, parsedIP.BitLen()), nil
} else {
parsedPrefix, err := netip.ParsePrefix(ip)
if err != nil {
return Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err)
}
return maskedIPToLabel(parsedPrefix.Masked().Addr().String(), parsedPrefix.Bits()), nil
}
}
// GetCIDRLabels turns a CIDR in to a specially formatted label, and returns
// a Labels including the CIDR-specific label and the appropriate world label.
// e.g. "10.0.0.0/8" => ["cidr:10.0.0.0/8", "reserved:world-ipv4"]
//
// IPv6 requires some special treatment, since ":" is special in the label selector
// grammar. For example, "::/0" becomes "cidr:0--0/0".
func GetCIDRLabels(prefix netip.Prefix) Labels {
lbls := make(Labels, 2)
if prefix.Bits() > 0 {
l := maskedIPToLabel(prefix.Addr().String(), prefix.Bits())
l.cidr = &prefix
lbls[l.Key] = l
}
AddWorldLabel(prefix.Addr(), lbls)
return lbls
}
func AddWorldLabel(addr netip.Addr, lbls Labels) {
switch {
case !option.Config.IsDualStack():
lbls[worldLabelNonDualStack.Key] = worldLabelNonDualStack
case addr.Is4():
lbls[worldLabelV4.Key] = worldLabelV4
default:
lbls[worldLabelV6.Key] = worldLabelV6
}
}
func LabelToPrefix(key string) (netip.Prefix, error) {
prefixStr := strings.Replace(key, "-", ":", -1)
pfx, err := netip.ParsePrefix(prefixStr)
if err != nil {
return netip.Prefix{}, fmt.Errorf("failed to parse label prefix %s: %w", key, err)
}
return pfx, nil
}
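
// exampleCIDRLabels is an illustrative sketch (hypothetical, not part of the
// upstream sources) of the CIDR label round-trip: ":" in IPv6 addresses is
// rewritten to "-" in the label key, and LabelToPrefix reverses the mapping.
func exampleCIDRLabels() {
	l4, _ := IPStringToLabel("10.0.0.0/8")
	_ = l4.Key // "10.0.0.0/8", with Source == LabelSourceCIDR

	l6, _ := IPStringToLabel("::/0")
	_ = l6.Key // "0--0/0"

	pfx, _ := LabelToPrefix(l6.Key)
	_ = pfx.String() // "::/0"
}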
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"bytes"
"encoding/json"
"fmt"
"net/netip"
"slices"
"sort"
"strings"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/logging/logfields"
)
const (
// PathDelimiter is the delimiter used in the labels paths.
PathDelimiter = "."
// IDNameHost is the label used for the hostname ID.
IDNameHost = "host"
// IDNameRemoteNode is the label used to describe the
// ReservedIdentityRemoteNode
IDNameRemoteNode = "remote-node"
// IDNameWorld is the label used for the world ID.
IDNameWorld = "world"
// IDNameWorldIPv4 is the label used for the world-ipv4 ID, to distinguish
// it from world-ipv6 in dual-stack mode.
IDNameWorldIPv4 = "world-ipv4"
// IDNameWorldIPv6 is the label used for the world-ipv6 ID, to distinguish
// it from world-ipv4 in dual-stack mode.
IDNameWorldIPv6 = "world-ipv6"
// IDNameCluster is the label used to identify an unspecified endpoint
// inside the cluster
IDNameCluster = "cluster"
// IDNameHealth is the label used for the local cilium-health endpoint
IDNameHealth = "health"
// IDNameInit is the label used to identify any endpoint that has not
// received any labels yet.
IDNameInit = "init"
// IDNameKubeAPIServer is the label used to identify the kube-apiserver. It
// is part of the reserved identity 7 and it is also used in conjunction
// with IDNameHost if the kube-apiserver is running on the local host.
IDNameKubeAPIServer = "kube-apiserver"
// IDNameEncryptedOverlay is the label used to identify encrypted overlay
// traffic.
//
// It is part of the reserved identity 11 and signals that overlay traffic
// with this identity must be IPSec encrypted before leaving the host.
//
// This identity should never be seen on the wire and is used only on the
// local host.
IDNameEncryptedOverlay = "overlay-to-encrypt"
// IDNameIngress is the label used to identify Ingress proxies. It is part
// of the reserved identity 8.
IDNameIngress = "ingress"
// IDNameNone is the label used to identify no endpoint or other L3 entity.
// It will never be assigned and this "label" is here for consistency with
// other Entities.
IDNameNone = "none"
// IDNameUnmanaged is the label used to identify unmanaged endpoints
IDNameUnmanaged = "unmanaged"
// IDNameUnknown is the label used to identify an endpoint with an
// unknown identity.
IDNameUnknown = "unknown"
)
var (
// LabelHealth is the label used for health.
LabelHealth = Labels{IDNameHealth: NewLabel(IDNameHealth, "", LabelSourceReserved)}
// LabelHost is the label used for the host endpoint.
LabelHost = Labels{IDNameHost: NewLabel(IDNameHost, "", LabelSourceReserved)}
// LabelWorld is the label used for world.
LabelWorld = Labels{IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved)}
// LabelWorldIPv4 is the label used for world-ipv4.
LabelWorldIPv4 = Labels{IDNameWorldIPv4: NewLabel(IDNameWorldIPv4, "", LabelSourceReserved)}
// LabelWorldIPv6 is the label used for world-ipv6.
LabelWorldIPv6 = Labels{IDNameWorldIPv6: NewLabel(IDNameWorldIPv6, "", LabelSourceReserved)}
// LabelRemoteNode is the label used for remote nodes.
LabelRemoteNode = Labels{IDNameRemoteNode: NewLabel(IDNameRemoteNode, "", LabelSourceReserved)}
// LabelKubeAPIServer is the label used for the kube-apiserver. See comment
// on IDNameKubeAPIServer.
LabelKubeAPIServer = Labels{IDNameKubeAPIServer: NewLabel(IDNameKubeAPIServer, "", LabelSourceReserved)}
// LabelIngress is the label used for Ingress proxies. See comment
// on IDNameIngress.
LabelIngress = Labels{IDNameIngress: NewLabel(IDNameIngress, "", LabelSourceReserved)}
)
const (
// LabelSourceUnspec is a label with unspecified source
LabelSourceUnspec = "unspec"
// LabelSourceAny is a label that matches any source
LabelSourceAny = "any"
// LabelSourceAnyKeyPrefix is prefix of a "any" label
LabelSourceAnyKeyPrefix = LabelSourceAny + "."
// LabelSourceK8s is a label imported from Kubernetes
LabelSourceK8s = "k8s"
// LabelSourceK8sKeyPrefix is prefix of a Kubernetes label
LabelSourceK8sKeyPrefix = LabelSourceK8s + "."
// LabelSourceContainer is a label imported from the container runtime
LabelSourceContainer = "container"
// LabelSourceCNI is a label imported from the CNI plugin
LabelSourceCNI = "cni"
// LabelSourceReserved is the label source for reserved types.
LabelSourceReserved = "reserved"
// LabelSourceCIDR is the label source for generated CIDRs.
LabelSourceCIDR = "cidr"
// LabelSourceNode is the label source for remote-nodes.
LabelSourceNode = "node"
// LabelSourceFQDN is the label source for IPs resolved by fqdn lookups
LabelSourceFQDN = "fqdn"
// LabelSourceReservedKeyPrefix is the prefix of a reserved label
LabelSourceReservedKeyPrefix = LabelSourceReserved + "."
// LabelSourceDirectory is the label source for policies read from files
LabelSourceDirectory = "directory"
// LabelKeyFixedIdentity is the label that can be used to define a fixed
// identity.
LabelKeyFixedIdentity = "io.cilium.fixed-identity"
)
// Label is Cilium's representation of a container label.
type Label struct {
Key string `json:"key"`
Value string `json:"value,omitempty"`
// Source can be one of the above values (e.g.: LabelSourceContainer).
//
// +kubebuilder:validation:Optional
Source string `json:"source"`
// optimization for CIDR prefixes
// +deepequal-gen=false
cidr *netip.Prefix `json:"-"`
}
// Labels is a map of labels where the map's key is the same as the label's key.
type Labels map[string]Label
// GetPrintableModel turns the Labels into a sorted list of strings
// representing the labels.
func (l Labels) GetPrintableModel() (res []string) {
res = make([]string, 0, len(l))
for _, v := range l {
if v.Source == LabelSourceCIDR {
prefix, err := LabelToPrefix(v.Key)
if err != nil {
res = append(res, v.String())
} else {
res = append(res, LabelSourceCIDR+":"+prefix.String())
}
} else {
// not a CIDR label, no magic needed
res = append(res, v.String())
}
}
sort.Strings(res)
return res
}
// String returns the map of labels as human readable string
func (l Labels) String() string {
return strings.Join(l.GetPrintableModel(), ",")
}
// Equals returns true if the two Labels contain the same set of labels.
func (l Labels) Equals(other Labels) bool {
if len(l) != len(other) {
return false
}
for k, lbl1 := range l {
if lbl2, ok := other[k]; ok {
if lbl1.Source == lbl2.Source && lbl1.Key == lbl2.Key && lbl1.Value == lbl2.Value {
continue
}
}
return false
}
return true
}
// GetFromSource returns all labels that are from the given source.
func (l Labels) GetFromSource(source string) Labels {
lbls := Labels{}
for k, v := range l {
if v.Source == source {
lbls[k] = v
}
}
return lbls
}
// NewLabel returns a new label from the given key, value and source. If source is empty,
// the default value will be LabelSourceUnspec. If key starts with '$', the source
// will be overwritten with LabelSourceReserved. If key contains ':', the part before
// ':' will be used as the source if the given source is empty; otherwise that prefix
// is stripped and discarded.
func NewLabel(key string, value string, source string) Label {
var src string
src, key = parseSource(key, ':')
if source == "" {
if src == "" {
source = LabelSourceUnspec
} else {
source = src
}
}
if src == LabelSourceReserved && key == "" {
key = value
value = ""
}
l := Label{
Key: key,
Value: value,
Source: source,
}
if l.Source == LabelSourceCIDR {
c, err := LabelToPrefix(l.Key)
if err != nil {
logrus.WithField("key", l.Key).WithError(err).Error("Failed to parse CIDR label: invalid prefix.")
} else {
l.cidr = &c
}
}
return l
}
// Equals returns true if source, Key and Value are equal and false otherwise.
func (l *Label) Equals(b *Label) bool {
if !l.IsAnySource() && l.Source != b.Source {
return false
}
return l.Key == b.Key && l.Value == b.Value
}
// IsAnySource returns true if the label was set with source "any".
func (l *Label) IsAnySource() bool {
return l.Source == LabelSourceAny
}
// IsReservedSource returns true if the label was set with source "reserved".
func (l *Label) IsReservedSource() bool {
return l.Source == LabelSourceReserved
}
// Has returns true if label l contains target.
// target may be "looser" w.r.t source or cidr, i.e.
// "k8s:foo=bar".Has("any:foo=bar") is true
// "any:foo=bar".Has("k8s:foo=bar") is false
// "cidr:10.0.0.1/32".Has("cidr:10.0.0.0/24") is true
func (l *Label) Has(target *Label) bool {
return l.HasKey(target) && l.Value == target.Value
}
// HasKey returns true if l has target's key.
// target may be "looser" w.r.t source or cidr, i.e.
// "k8s:foo=bar".HasKey("any:foo") is true
// "any:foo=bar".HasKey("k8s:foo") is false
// "cidr:10.0.0.1/32".HasKey("cidr:10.0.0.0/24") is true
// "cidr:10.0.0.0/24".HasKey("cidr:10.0.0.1/32") is false
func (l *Label) HasKey(target *Label) bool {
if !target.IsAnySource() && l.Source != target.Source {
return false
}
// Do cidr-aware matching if both sources are "cidr".
if target.Source == LabelSourceCIDR && l.Source == LabelSourceCIDR {
tc := target.cidr
if tc == nil {
v, err := LabelToPrefix(target.Key)
if err == nil {
tc = &v
}
}
lc := l.cidr
if lc == nil {
v, err := LabelToPrefix(l.Key)
if err == nil {
lc = &v
}
}
if tc != nil && lc != nil && tc.Bits() <= lc.Bits() && tc.Contains(lc.Addr()) {
return true
}
}
return l.Key == target.Key
}
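
// exampleCIDRHasKey is an illustrative sketch (hypothetical, not part of the
// upstream sources) of the cidr-aware matching above: a broader target prefix
// matches a narrower label, but not the other way around.
func exampleCIDRHasKey() {
	narrow := ParseLabel("cidr:10.0.0.1/32")
	broad := ParseLabel("cidr:10.0.0.0/8")
	_ = narrow.HasKey(&broad) // true: 10.0.0.0/8 contains 10.0.0.1
	_ = broad.HasKey(&narrow) // false
}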
// String returns the string representation of Label in the form of Source:Key=Value or
// Source:Key if Value is empty.
func (l *Label) String() string {
if len(l.Value) != 0 {
return l.Source + ":" + l.Key + "=" + l.Value
}
return l.Source + ":" + l.Key
}
// IsValid returns true if Key != "".
func (l *Label) IsValid() bool {
return l.Key != ""
}
// UnmarshalJSON decodes a Label either from its full JSON object
// representation or from the short string form "[SOURCE:]KEY[=VALUE]".
func (l *Label) UnmarshalJSON(data []byte) error {
if l == nil {
return fmt.Errorf("cannot unmarshal to nil pointer")
}
if len(data) == 0 {
return fmt.Errorf("invalid Label: empty data")
}
var aux struct {
Source string `json:"source"`
Key string `json:"key"`
Value string `json:"value,omitempty"`
}
err := json.Unmarshal(data, &aux)
if err != nil {
// If parsing of the full representation failed then try the short
// form in the format:
//
// [SOURCE:]KEY[=VALUE]
var aux string
if err := json.Unmarshal(data, &aux); err != nil {
return fmt.Errorf("decode of Label as string failed: %w", err)
}
if aux == "" {
return fmt.Errorf("invalid Label: Failed to parse %s as a string", data)
}
*l = ParseLabel(aux)
} else {
if aux.Key == "" {
return fmt.Errorf("invalid Label: '%s' does not contain label key", data)
}
l.Source = aux.Source
l.Key = aux.Key
l.Value = aux.Value
}
if l.Source == LabelSourceCIDR {
c, err := LabelToPrefix(l.Key)
if err == nil {
l.cidr = &c
} else {
logrus.WithField("key", l.Key).WithError(err).Error("Failed to parse CIDR label: invalid prefix.")
}
}
return nil
}
// GetExtendedKey returns the key of a label with the source encoded.
func (l *Label) GetExtendedKey() string {
return l.Source + PathDelimiter + l.Key
}
// GetCiliumKeyFrom returns the label's source and key from an extended key
// in the format SOURCE:KEY.
func GetCiliumKeyFrom(extKey string) string {
i := strings.IndexByte(extKey, PathDelimiter[0])
if i >= 0 {
return extKey[:i] + ":" + extKey[i+1:]
}
return LabelSourceAny + ":" + extKey
}
// GetExtendedKeyFrom returns the extended key of a label string.
// For example:
// `k8s:foo=bar` returns `k8s.foo`
// `container:foo=bar` returns `container.foo`
// `foo=bar` returns `any.foo`
func GetExtendedKeyFrom(str string) string {
src, next := parseSource(str, ':')
if src == "" {
src = LabelSourceAny
}
// Remove the value, if any
i := strings.IndexByte(next, '=')
if i >= 0 {
return src + PathDelimiter + next[:i]
}
return src + PathDelimiter + next
}
// Map2Labels transforms a map of the form map[key(string)]value(string) into Labels. The
// source argument will overwrite any source encoded in the keys of the given map.
// Example:
// l := Map2Labels(map[string]string{"k8s:foo": "bar"}, "cilium")
// fmt.Printf("%+v\n", l)
//
// map[string]Label{"foo":Label{Key:"foo", Value:"bar", Source:"cilium"}}
func Map2Labels(m map[string]string, source string) Labels {
o := make(Labels, len(m))
for k, v := range m {
l := NewLabel(k, v, source)
o[l.Key] = l
}
return o
}
// StringMap converts Labels into map[string]string
func (l Labels) StringMap() map[string]string {
o := make(map[string]string, len(l))
for _, v := range l {
o[v.Source+":"+v.Key] = v.Value
}
return o
}
// K8sStringMap converts Labels into a map[string]string with Kubernetes-style
// keys: labels from the k8s, any and unspec sources use the bare key, while
// other sources are prefixed as "source.key".
func (l Labels) K8sStringMap() map[string]string {
o := make(map[string]string, len(l))
for _, v := range l {
if v.Source == LabelSourceK8s || v.Source == LabelSourceAny || v.Source == LabelSourceUnspec {
o[v.Key] = v.Value
} else {
o[v.Source+"."+v.Key] = v.Value
}
}
return o
}
// NewLabelsFromModel creates labels from string array.
func NewLabelsFromModel(base []string) Labels {
lbls := make(Labels, len(base))
for _, v := range base {
if lbl := ParseLabel(v); lbl.Key != "" {
lbls[lbl.Key] = lbl
}
}
return lbls
}
// FromSlice creates labels from a slice of labels.
func FromSlice(labels []Label) Labels {
lbls := make(Labels, len(labels))
for _, lbl := range labels {
lbls[lbl.Key] = lbl
}
return lbls
}
// NewLabelsFromSortedList returns labels based on the output of SortedList()
func NewLabelsFromSortedList(list string) Labels {
return NewLabelsFromModel(strings.Split(list, ";"))
}
// NewSelectLabelArrayFromModel parses a slice of strings and converts them
// into an array of selecting labels, sorted by the key.
func NewSelectLabelArrayFromModel(base []string) LabelArray {
lbls := make(LabelArray, 0, len(base))
for i := range base {
lbls = append(lbls, ParseSelectLabel(base[i]))
}
return lbls.Sort()
}
// NewFrom creates a new Labels from the given labels by creating a copy.
func NewFrom(l Labels) Labels {
nl := make(Labels, len(l))
nl.MergeLabels(l)
return nl
}
// GetModel returns model with all the values of the labels.
func (l Labels) GetModel() []string {
res := make([]string, 0, len(l))
for _, v := range l {
res = append(res, v.String())
}
return res
}
// MergeLabels merges the labels in 'from' into 'l', overwriting any labels in
// 'l' that have the same Key.
// Example:
// to := Labels{Label{key1, value1, source1}, Label{key2, value3, source4}}
// from := Labels{Label{key1, value3, source4}}
// to.MergeLabels(from)
// fmt.Printf("%+v\n", to)
//
// Labels{Label{key1, value3, source4}, Label{key2, value3, source4}}
func (l Labels) MergeLabels(from Labels) {
for k, v := range from {
l[k] = v
}
}
// Remove is similar to MergeLabels, but returns a new Labels object with the
// specified Labels removed. The received Labels is not modified.
func (l Labels) Remove(from Labels) Labels {
result := make(Labels, len(l))
for k, v := range l {
if _, exists := from[k]; !exists {
result[k] = v
}
}
return result
}
// FormatForKVStore returns the label as formatted bytes, ending in
// a semicolon.
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS
// PART OF THE KEY IN THE KEY-VALUE STORE.
//
// Non-pointer receiver allows this to be called on a value in a map.
func (l Label) FormatForKVStore() []byte {
// We don't care if the values already have a '='.
//
// We absolutely care that the final character is a semi-colon.
// Identity allocation in the kvstore depends on this (see
// kvstore.prefixMatchesKey())
b := make([]byte, 0, len(l.Source)+len(l.Key)+len(l.Value)+3)
buf := bytes.NewBuffer(b)
l.formatForKVStoreInto(buf)
return buf.Bytes()
}
// formatForKVStoreInto writes the label as a formatted string, ending in
// a semicolon into buf.
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS
// PART OF THE KEY IN THE KEY-VALUE STORE.
//
// Non-pointer receiver allows this to be called on a value in a map.
func (l Label) formatForKVStoreInto(buf *bytes.Buffer) {
buf.WriteString(l.Source)
buf.WriteRune(':')
buf.WriteString(l.Key)
buf.WriteRune('=')
buf.WriteString(l.Value)
buf.WriteRune(';')
}
// SortedList returns the labels as a sorted list, separated by semicolon
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS KEY IN
// THE KEY-VALUE STORE.
func (l Labels) SortedList() []byte {
keys := make([]string, 0, len(l))
for k := range l {
keys = append(keys, k)
}
slices.Sort(keys)
// Labels can have arbitrary size. However, when many CIDR identities are in
// the system, for example due to a FQDN policy matching S3, CIDR labels
// dominate in number. IPv4 CIDR labels in serialized form are at most 25 bytes
// long. Allocate slightly more to avoid a realloc if some other labels are
// longer, since allocating a few extra bytes up front is much cheaper than a
// second allocation, especially since these allocations are short-lived.
//
// cidr:123.123.123.123/32=;
// 0 1 2
// 1234567890123456789012345
b := make([]byte, 0, len(keys)*30)
buf := bytes.NewBuffer(b)
for _, k := range keys {
l[k].formatForKVStoreInto(buf)
}
return buf.Bytes()
}
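
// exampleKVStoreFormat is an illustrative sketch (hypothetical, not part of
// the upstream sources) of the kvstore serialization: each label is rendered
// as "source:key=value;" and SortedList concatenates them sorted by map key.
func exampleKVStoreFormat() {
	lbls := Labels{
		"app": NewLabel("app", "web", LabelSourceK8s),
		"env": NewLabel("env", "prod", LabelSourceK8s),
	}
	_ = lbls["app"].FormatForKVStore() // []byte("k8s:app=web;")
	_ = lbls.SortedList()              // []byte("k8s:app=web;k8s:env=prod;")
}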
// ToSlice returns a slice of label with the values of the given
// Labels' map, sorted by the key.
func (l Labels) ToSlice() []Label {
return l.LabelArray()
}
// LabelArray returns the labels as label array, sorted by the key.
func (l Labels) LabelArray() LabelArray {
labels := make(LabelArray, 0, len(l))
for _, v := range l {
labels = append(labels, v)
}
return labels.Sort()
}
// FindReserved locates all labels with reserved source in the labels and
// returns a copy of them. If there are no reserved labels, returns nil.
// TODO: return LabelArray as it is likely faster
func (l Labels) FindReserved() Labels {
lbls := Labels{}
for k, lbl := range l {
if lbl.Source == LabelSourceReserved {
lbls[k] = lbl
}
}
if len(lbls) > 0 {
return lbls
}
return nil
}
// IsReserved returns true if any of the labels has a reserved source.
func (l Labels) IsReserved() bool {
return l.HasSource(LabelSourceReserved)
}
// Has returns true if l contains the given label.
func (l Labels) Has(label Label) bool {
for _, lbl := range l {
if lbl.Has(&label) {
return true
}
}
return false
}
// HasSource returns true if l contains the given label source.
func (l Labels) HasSource(source string) bool {
for _, lbl := range l {
if lbl.Source == source {
return true
}
}
return false
}
// CollectSources returns all distinct label sources found in l
func (l Labels) CollectSources() map[string]struct{} {
sources := make(map[string]struct{})
for _, lbl := range l {
sources[lbl.Source] = struct{}{}
}
return sources
}
// parseSource returns the parsed source of the given str. It also returns the next piece
// of text that is after the source.
// Example:
//
// src, next := parseSource("foo:bar==value", ':')
//
// Println(src) // foo
// Println(next) // bar==value
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseSource(str string, delim byte) (src, next string) {
if str == "" {
return "", ""
}
if str[0] == '$' {
return LabelSourceReserved, str[1:]
}
i := strings.IndexByte(str, delim)
if i < 0 {
if delim != '.' && strings.HasPrefix(str, LabelSourceReservedKeyPrefix) {
return LabelSourceReserved, strings.TrimPrefix(str, LabelSourceReservedKeyPrefix)
}
return "", str
}
return str[:i], str[i+1:]
}
// ParseLabel returns the label representation of the given string. The str should be
// in the form of Source:Key=Value or Source:Key if Value is empty. It also parses short
// forms, for example: $host will be Label{Key: "host", Source: "reserved", Value: ""}.
func ParseLabel(str string) Label {
return parseLabel(str, ':')
}
// parseLabel returns the label representation of the given string by value.
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseLabel(str string, delim byte) (lbl Label) {
src, next := parseSource(str, delim)
if src != "" {
lbl.Source = src
} else {
lbl.Source = LabelSourceUnspec
}
i := strings.IndexByte(next, '=')
if i < 0 {
lbl.Key = next
} else {
if i == 0 && src == LabelSourceReserved {
lbl.Key = next[i+1:]
} else {
lbl.Key = next[:i]
lbl.Value = next[i+1:]
}
}
if lbl.Source == LabelSourceCIDR {
if lbl.Value != "" {
logrus.WithField(logfields.Label, lbl.String()).Error("Invalid CIDR label: labels with source cidr cannot have values.")
}
c, err := LabelToPrefix(lbl.Key)
if err != nil {
logrus.WithField(logfields.Label, str).WithError(err).Error("Failed to parse CIDR label: invalid prefix.")
} else {
lbl.cidr = &c
}
}
return lbl
}
// ParseSelectLabel returns a selecting label representation of the given
// string. Unlike ParseLabel, if source is unspecified, the source defaults to
// LabelSourceAny
func ParseSelectLabel(str string) Label {
return parseSelectLabel(str, ':')
}
// parseSelectLabel returns a selecting label representation of the given
// string by value.
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseSelectLabel(str string, delim byte) Label {
lbl := parseLabel(str, delim)
if lbl.Source == LabelSourceUnspec {
lbl.Source = LabelSourceAny
}
return lbl
}
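
// exampleParseLabelForms is an illustrative sketch (hypothetical, not part of
// the upstream sources) of the parsing rules: an explicit source is kept, '$'
// maps to the reserved source, and ParseSelectLabel defaults the source to
// "any" instead of "unspec".
func exampleParseLabelForms() {
	_ = ParseLabel("k8s:app=web")   // Label{Source: "k8s", Key: "app", Value: "web"}
	_ = ParseLabel("$host")         // Label{Source: "reserved", Key: "host"}
	_ = ParseLabel("app=web")       // Label{Source: "unspec", Key: "app", Value: "web"}
	_ = ParseSelectLabel("app=web") // Label{Source: "any", Key: "app", Value: "web"}
}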
// generateLabelString generates the string representation of a label with
// the provided source, key, and value in the format "source:key=value".
func generateLabelString(source, key, value string) string {
return source + ":" + key + "=" + value
}
// GenerateK8sLabelString generates the string representation of a label with
// the provided source, key, and value in the format "LabelSourceK8s:key=value".
func GenerateK8sLabelString(k, v string) string {
return generateLabelString(LabelSourceK8s, k, v)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"fmt"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/logging/logfields"
)
type keepMarks map[string]struct{}
// set marks the label with 'key' to not be deleted.
func (k keepMarks) set(key string) {
k[key] = struct{}{} // marked for keeping
}
// OpLabels represents the possible types.
type OpLabels struct {
// Active labels that are enabled and disabled but not deleted
Custom Labels
// Labels derived from orchestration system
OrchestrationIdentity Labels
// orchestrationIdentity labels which have been disabled
Disabled Labels
// orchestrationInfo - labels from orchestration which are not used in determining a security identity
OrchestrationInfo Labels
}
// NewOpLabels creates new initialized OpLabels
func NewOpLabels() OpLabels {
return OpLabels{
Custom: Labels{},
Disabled: Labels{},
OrchestrationIdentity: Labels{},
OrchestrationInfo: Labels{},
}
}
// SplitUserLabelChanges returns labels to 'add' and 'del'ete to make
// the custom labels match 'lbls'
// FIXME: Somewhere in the code we crash if the returned maps are non-nil
// but length 0. We retain this behaviour here because it's easier.
func (o *OpLabels) SplitUserLabelChanges(lbls Labels) (add, del Labels) {
for key, lbl := range lbls {
if _, found := o.Custom[key]; !found {
if add == nil {
add = Labels{}
}
add[key] = lbl
}
}
for key, lbl := range o.Custom {
if _, found := lbls[key]; !found {
if del == nil {
del = Labels{}
}
del[key] = lbl
}
}
return add, del
}
// IdentityLabels returns map of labels that are used when determining a
// security identity.
func (o *OpLabels) IdentityLabels() Labels {
enabled := make(Labels, len(o.Custom)+len(o.OrchestrationIdentity))
for k, v := range o.Custom {
enabled[k] = v
}
for k, v := range o.OrchestrationIdentity {
enabled[k] = v
}
return enabled
}
// GetIdentityLabel returns the value of the given Key from all IdentityLabels.
func (o *OpLabels) GetIdentityLabel(key string) (l Label, found bool) {
l, found = o.OrchestrationIdentity[key]
if !found {
l, found = o.Custom[key]
}
return l, found
}
// AllLabels returns all Labels within the provided OpLabels.
func (o *OpLabels) AllLabels() Labels {
all := make(Labels, len(o.Custom)+len(o.OrchestrationInfo)+len(o.OrchestrationIdentity)+len(o.Disabled))
for k, v := range o.Custom {
all[k] = v
}
for k, v := range o.Disabled {
all[k] = v
}
for k, v := range o.OrchestrationIdentity {
all[k] = v
}
for k, v := range o.OrchestrationInfo {
all[k] = v
}
return all
}
func (o *OpLabels) ReplaceInformationLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool {
changed := false
keepers := make(keepMarks)
for _, v := range l {
keepers.set(v.Key)
if o.OrchestrationInfo.upsertLabel(sourceFilter, v) {
changed = true
logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning information label")
}
}
o.OrchestrationInfo.deleteUnMarked(sourceFilter, keepers)
return changed
}
func (o *OpLabels) ReplaceIdentityLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool {
changed := false
keepers := make(keepMarks)
disabledKeepers := make(keepMarks)
for k, v := range l {
// A disabled identity label stays disabled without value updates
if _, found := o.Disabled[k]; found {
disabledKeepers.set(k)
} else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(sourceFilter, v) {
logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning security relevant label")
changed = true
}
}
if o.OrchestrationIdentity.deleteUnMarked(sourceFilter, keepers) || o.Disabled.deleteUnMarked(sourceFilter, disabledKeepers) {
changed = true
}
return changed
}
func (o *OpLabels) ModifyIdentityLabels(addLabels, delLabels Labels) (changed bool, err error) {
for k := range delLabels {
// The change request is accepted if the label is on
// any of the lists. If the label is already disabled,
// we will simply ignore that change.
if _, found := o.Custom[k]; !found {
if _, found := o.OrchestrationIdentity[k]; !found {
if _, found := o.Disabled[k]; !found {
return false, fmt.Errorf("label %s not found", k)
}
}
}
}
// Will not fail after this point
for k := range delLabels {
if v, found := o.OrchestrationIdentity[k]; found {
delete(o.OrchestrationIdentity, k)
o.Disabled[k] = v
changed = true
}
if _, found := o.Custom[k]; found {
delete(o.Custom, k)
changed = true
}
}
for k, v := range addLabels {
if _, found := o.Disabled[k]; found { // Restore label.
delete(o.Disabled, k)
o.OrchestrationIdentity[k] = v
changed = true
} else if _, found := o.OrchestrationIdentity[k]; found { // Replace label's source and value.
o.OrchestrationIdentity[k] = v
changed = true
} else {
o.Custom[k] = v
changed = true
}
}
return changed, nil
}
// upsertLabel updates or inserts 'label' in 'l', but only if exactly the same label
// was not already in 'l'. Returns 'true' if a label was added, or an old label was
// updated, 'false' otherwise.
// The label is only updated if its source matches the provided 'sourceFilter'
// or in case the provided sourceFilter is 'LabelSourceAny'. The new label must
// also match the old label 'source' in order for it to be replaced.
func (l Labels) upsertLabel(sourceFilter string, label Label) bool {
oldLabel, found := l[label.Key]
if found {
if sourceFilter != LabelSourceAny && sourceFilter != oldLabel.Source {
return false
}
// Key is the same, check if Value and Source are also the same
if label.Value == oldLabel.Value && label.Source == oldLabel.Source {
return false // No change
}
// If the label is not from the same source, then don't replace it.
if oldLabel.Source != label.Source {
return false
}
}
// Insert or replace old label
l[label.Key] = label
return true
}
// deleteUnMarked deletes the labels which have not been marked for keeping.
// The labels are only deleted if their source matches the provided sourceFilter
// or in case the provided sourceFilter is 'LabelSourceAny'.
// Returns true if any of them were deleted.
func (l Labels) deleteUnMarked(sourceFilter string, marks keepMarks) bool {
deleted := false
for k, v := range l {
if _, keep := marks[k]; !keep && (sourceFilter == LabelSourceAny || sourceFilter == v.Source) {
delete(l, k)
deleted = true
}
}
return deleted
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package labels
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Label) DeepEqual(other *Label) bool {
if other == nil {
return false
}
if in.Key != other.Key {
return false
}
if in.Value != other.Value {
return false
}
if in.Source != other.Source {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LabelArray) DeepEqual(other *LabelArray) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LabelArrayList) DeepEqual(other *LabelArrayList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Labels) DeepEqual(other *Labels) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if !inValue.DeepEqual(&otherValue) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *OpLabels) DeepEqual(other *OpLabels) bool {
if other == nil {
return false
}
if ((in.Custom != nil) && (other.Custom != nil)) || ((in.Custom == nil) != (other.Custom == nil)) {
in, other := &in.Custom, &other.Custom
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.OrchestrationIdentity != nil) && (other.OrchestrationIdentity != nil)) || ((in.OrchestrationIdentity == nil) != (other.OrchestrationIdentity == nil)) {
in, other := &in.OrchestrationIdentity, &other.OrchestrationIdentity
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Disabled != nil) && (other.Disabled != nil)) || ((in.Disabled == nil) != (other.Disabled == nil)) {
in, other := &in.Disabled, &other.Disabled
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.OrchestrationInfo != nil) && (other.OrchestrationInfo != nil)) || ((in.OrchestrationInfo == nil) != (other.OrchestrationInfo == nil)) {
in, other := &in.OrchestrationInfo, &other.OrchestrationInfo
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !lockdebug
package lock
import (
"sync"
)
type internalRWMutex struct {
sync.RWMutex
}
func (i *internalRWMutex) UnlockIgnoreTime() {
i.RWMutex.Unlock()
}
type internalMutex struct {
sync.Mutex
}
func (i *internalMutex) UnlockIgnoreTime() {
i.Mutex.Unlock()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import "sync"
// Map is a thin generic wrapper around sync.Map. The sync.Map description from
// the standard library follows (and is also propagated to the corresponding
// methods) for users' convenience:
//
// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map[K comparable, V any] sync.Map
// MapCmpValues is an extension of Map, which additionally wraps the two extra
// methods requiring values to be also of comparable type.
type MapCmpValues[K, V comparable] Map[K, V]
// Load returns the value stored in the map for a key, or the zero value if no
// value is present. The ok result indicates whether value was found in the map.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
val, ok := (*sync.Map)(m).Load(key)
return m.convert(val, ok)
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
val, loaded := (*sync.Map)(m).LoadOrStore(key, value)
return val.(V), loaded
}
// LoadAndDelete deletes the value for a key, returning the previous value if any
// (zero value otherwise). The loaded result reports whether the key was present.
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
val, loaded := (*sync.Map)(m).LoadAndDelete(key)
return m.convert(val, loaded)
}
// Store sets the value for a key.
func (m *Map[K, V]) Store(key K, value V) {
(*sync.Map)(m).Store(key, value)
}
// Swap swaps the value for a key and returns the previous value if any (zero
// value otherwise). The loaded result reports whether the key was present.
func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) {
val, loaded := (*sync.Map)(m).Swap(key, value)
return m.convert(val, loaded)
}
// Delete deletes the value for a key.
func (m *Map[K, V]) Delete(key K) {
(*sync.Map)(m).Delete(key)
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
(*sync.Map)(m).Range(func(key, value any) bool {
return f(key.(K), value.(V))
})
}
// CompareAndDelete deletes the entry for key if its value is equal to old.
// If there is no current value for key in the map, CompareAndDelete returns false
// (even if the old value is the nil interface value).
func (m *MapCmpValues[K, V]) CompareAndDelete(key K, old V) (deleted bool) {
return (*sync.Map)(m).CompareAndDelete(key, old)
}
// CompareAndSwap swaps the old and new values for key if the value stored in
// the map is equal to old.
func (m *MapCmpValues[K, V]) CompareAndSwap(key K, old, new V) bool {
return (*sync.Map)(m).CompareAndSwap(key, old, new)
}
func (m *Map[K, V]) convert(value any, ok bool) (V, bool) {
if !ok {
return *new(V), false
}
return value.(V), true
}
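
// exampleMapUsage is an illustrative sketch (hypothetical, not part of the
// upstream sources) of the typed wrapper around sync.Map.
func exampleMapUsage() {
	var m Map[string, int]
	m.Store("a", 1)
	if v, ok := m.Load("a"); ok {
		_ = v // 1
	}
	m.Range(func(key string, value int) bool {
		return true // keep iterating
	})
	m.Delete("a")
}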
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"context"
"golang.org/x/sync/semaphore"
)
// SemaphoredMutex is a semaphored mutex that provides a RWLocker interface.
type SemaphoredMutex struct {
semaphore *semaphore.Weighted
}
// using the same value set in `go/src/sync/rwmutex.go#rwmutexMaxReaders`
const maxReaders = 1 << 30
// NewSemaphoredMutex returns a new SemaphoredMutex.
func NewSemaphoredMutex() SemaphoredMutex {
return SemaphoredMutex{
semaphore: semaphore.NewWeighted(maxReaders),
}
}
func (i *SemaphoredMutex) Lock() {
// It's fine ignoring error since the error is only caused by passing a
// context with a deadline.
i.semaphore.Acquire(context.Background(), maxReaders)
}
// UnlockToRLock releases the current lock for writing but it still keeps it
// for reading purposes.
func (i *SemaphoredMutex) UnlockToRLock() {
i.semaphore.Release(maxReaders - 1)
}
func (i *SemaphoredMutex) Unlock() {
i.semaphore.Release(maxReaders)
}
func (i *SemaphoredMutex) RLock() {
// It's fine ignoring error since the error is only caused by passing a
// context with a deadline.
i.semaphore.Acquire(context.Background(), 1)
}
func (i *SemaphoredMutex) RUnlock() {
i.semaphore.Release(1)
}
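
// exampleSemaphoredMutex is an illustrative sketch (hypothetical, not part of
// the upstream sources): a writer can downgrade to a read lock without fully
// releasing the mutex in between.
func exampleSemaphoredMutex() {
	m := NewSemaphoredMutex()
	m.Lock()          // exclusive: acquires all reader slots
	m.UnlockToRLock() // downgrade: keeps a single reader slot
	m.RUnlock()       // fully released

	m.RLock() // shared
	m.RUnlock()
}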
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"sort"
"sync"
"sync/atomic"
"time"
)
// sortableMutexSeq is a global sequence counter for the creation of new
// SortableMutex's with unique sequence numbers.
var sortableMutexSeq atomic.Uint64
// sortableMutex implements SortableMutex. Not exported as the only way to
// initialize it is via NewSortableMutex().
type sortableMutex struct {
sync.Mutex
seq uint64
acquireDuration time.Duration
}
func (s *sortableMutex) Lock() {
start := time.Now()
s.Mutex.Lock()
s.acquireDuration += time.Since(start)
}
func (s *sortableMutex) Seq() uint64 { return s.seq }
func (s *sortableMutex) AcquireDuration() time.Duration { return s.acquireDuration }
// SortableMutex provides a Mutex that can be globally sorted with other
// sortable mutexes. This allows deadlock-safe locking of a set of mutexes
// as it guarantees consistent lock ordering.
type SortableMutex interface {
sync.Locker
Seq() uint64
AcquireDuration() time.Duration // The amount of time it took to acquire the lock
}
// SortableMutexes is a set of mutexes that can be locked in a safe order.
// Once Lock() is called it must not be mutated!
type SortableMutexes []SortableMutex
// Len implements sort.Interface.
func (s SortableMutexes) Len() int {
return len(s)
}
// Less implements sort.Interface.
func (s SortableMutexes) Less(i int, j int) bool {
return s[i].Seq() < s[j].Seq()
}
// Swap implements sort.Interface.
func (s SortableMutexes) Swap(i int, j int) {
s[i], s[j] = s[j], s[i]
}
// Lock sorts the mutexes, and then locks them in order. If any lock cannot be acquired,
// this will block while holding the locks with a lower sequence number.
func (s SortableMutexes) Lock() {
sort.Sort(s)
for _, mu := range s {
mu.Lock()
}
}
// Unlock unlocks the sorted set of mutexes locked by a prior call to Lock().
func (s SortableMutexes) Unlock() {
for _, mu := range s {
mu.Unlock()
}
}
var _ sort.Interface = SortableMutexes{}
func NewSortableMutex() SortableMutex {
seq := sortableMutexSeq.Add(1)
return &sortableMutex{
seq: seq,
}
}
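// Illustrative usage sketch (not part of the original file): two goroutines
// locking the same mutexes declared in different orders cannot deadlock,
// because SortableMutexes.Lock() sorts by sequence number before acquiring.
func exampleSortableMutexesUsage() {
	a := NewSortableMutex()
	b := NewSortableMutex()
	set1 := SortableMutexes{a, b}
	set2 := SortableMutexes{b, a} // different declaration order, same lock order
	set1.Lock()
	set1.Unlock()
	set2.Lock()
	set2.Unlock()
}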
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"sync"
"sync/atomic"
)
// A StoppableWaitGroup waits for a collection of goroutines to finish.
type StoppableWaitGroup struct {
noopDone chan struct{}
noopAdd chan struct{}
// i is the internal counter, which can hold negative values, unlike the
// counter of the standard library's sync.WaitGroup.
i atomic.Int64
doneOnce, stopOnce sync.Once
}
// NewStoppableWaitGroup returns a new StoppableWaitGroup. Once 'Stop' is
// executed, subsequent 'Add()' calls have no effect.
func NewStoppableWaitGroup() *StoppableWaitGroup {
return &StoppableWaitGroup{
noopDone: make(chan struct{}),
noopAdd: make(chan struct{}),
doneOnce: sync.Once{},
stopOnce: sync.Once{},
}
}
// Stop makes any following 'Add()' call a no-op.
// If all goroutines that have called Add also called Done, 'Wait()' will
// be immediately unblocked.
func (l *StoppableWaitGroup) Stop() {
l.stopOnce.Do(func() {
// We will do an Add here so we can perform a Done after we close
// the l.noopAdd channel.
l.Add()
close(l.noopAdd)
// Call Done() here so that, if 'l.i' drops to zero, the l.noopDone
// channel gets closed.
l.Done()
})
}
// Wait will return once all goroutines that have called Add also called
// Done and StoppableWaitGroup was stopped.
// Internally, Wait() returns once the internal counter becomes negative.
func (l *StoppableWaitGroup) Wait() {
<-l.noopDone
}
// WaitChannel will return a channel that will be closed once all goroutines
// that have called Add also called Done and StoppableWaitGroup was stopped.
func (l *StoppableWaitGroup) WaitChannel() <-chan struct{} {
return l.noopDone
}
// Add adds a goroutine to the set of routines that Wait() will wait for
// before it returns.
// If the StoppableWaitGroup was stopped this will be a no-op.
func (l *StoppableWaitGroup) Add() {
select {
case <-l.noopAdd:
default:
l.i.Add(1)
}
}
// Done will decrement the number of goroutines the Wait() will have to wait
// before it returns.
// This function is a no-op once all goroutines that have called 'Add()' have
// also called 'Done()' and the StoppableWaitGroup was stopped.
func (l *StoppableWaitGroup) Done() {
select {
case <-l.noopDone:
return
default:
select {
case <-l.noopAdd:
a := l.i.Add(-1)
if a <= 0 {
l.doneOnce.Do(func() {
close(l.noopDone)
})
}
default:
a := l.i.Add(-1)
select {
// in case the channel was closed while we were in this default case, we
// need to check whether 'a' is zero or less and close the l.noopDone
// channel.
case <-l.noopAdd:
if a <= 0 {
l.doneOnce.Do(func() {
close(l.noopDone)
})
}
default:
}
}
}
}
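// Illustrative usage sketch (not part of the original file): goroutines are
// registered with Add()/Done(); Wait() only unblocks after Stop() has been
// called and every registered goroutine has finished.
func exampleStoppableWaitGroupUsage(work []func()) {
	swg := NewStoppableWaitGroup()
	for _, fn := range work {
		swg.Add()
		go func(fn func()) {
			defer swg.Done()
			fn()
		}(fn)
	}
	swg.Stop() // any further Add() calls become no-ops
	swg.Wait() // returns once all Done() calls above have happened
}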
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"time"
"golang.org/x/time/rate"
)
// Limiter is a wrapper around rate.Limiter that does not panic when
// the limiter is uninitialized. The wrapping also allows more logging
// specific functionality to be added later without changing all the call
// sites.
type Limiter struct {
bucket *rate.Limiter
}
// NewLimiter returns a new Limiter allowing log messages to be
// emitted on average once every 'interval' and up to 'burst' messages
// during any 'interval'.
func NewLimiter(interval time.Duration, burst int) Limiter {
return Limiter{
bucket: rate.NewLimiter(rate.Every(interval), burst),
}
}
// Allow returns true if the log message is allowed under the
// configured rate limit.
func (ll Limiter) Allow() bool {
if ll.bucket == nil {
return true // limiter not initialized => no limit
}
return ll.bucket.Allow()
}
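// Illustrative usage sketch (not part of the original file): emit at most one
// message per second on average, with bursts of up to 5, silently dropping
// the rest.
func exampleLimiterUsage(emit func(string)) {
	limiter := NewLimiter(time.Second, 5)
	for i := 0; i < 100; i++ {
		if limiter.Allow() {
			emit("rate-limited message")
		}
	}
}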
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logfields
import (
"fmt"
)
// Repr formats an object with the Printf %+v formatter
func Repr(s interface{}) string {
return fmt.Sprintf("%+v", s)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"bufio"
"bytes"
"flag"
"fmt"
"os"
"regexp"
"strings"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"k8s.io/klog/v2"
"github.com/cilium/cilium/pkg/logging/logfields"
)
type LogFormat string
const (
Syslog = "syslog"
LevelOpt = "level"
FormatOpt = "format"
LogFormatText LogFormat = "text"
LogFormatTextTimestamp LogFormat = "text-ts"
LogFormatJSON LogFormat = "json"
LogFormatJSONTimestamp LogFormat = "json-ts"
// DefaultLogFormat is the string representation of the default logrus.Formatter
// we want to use (possible values: text or json)
DefaultLogFormat LogFormat = LogFormatText
// DefaultLogFormatTimestamp is the string representation of the default logrus.Formatter
// including timestamps.
// We don't use this for general runtime logs since kubernetes log capture handles those.
// This is only used for applications such as the CNI plugin, whose logs are written to
// disk, so we have no other way to correlate them with other logs.
DefaultLogFormatTimestamp LogFormat = LogFormatTextTimestamp
// DefaultLogLevel is the default log level we want to use for our logrus.Formatter
DefaultLogLevel logrus.Level = logrus.InfoLevel
)
// DefaultLogger is the base logrus logger. It is distinct from the logrus
// default logger to prevent external dependencies from unexpectedly writing
// to our output.
var DefaultLogger = initializeDefaultLogger()
func initializeKLog() {
log := DefaultLogger.WithField(logfields.LogSubsys, "klog")
// Create a new flag set and set the error handler
klogFlags := flag.NewFlagSet("cilium", flag.ExitOnError)
// Make sure that klog logging variables are initialized so that we can
// update them from this file.
klog.InitFlags(klogFlags)
// Make sure klog does not log to stderr, as we want to control klog's output
// ourselves and route each severity to the corresponding logrus writer below.
klogFlags.Set("logtostderr", "false")
// We don't need all headers because logrus will already print them if
// necessary.
klogFlags.Set("skip_headers", "true")
klog.SetOutputBySeverity("INFO", log.WriterLevel(logrus.PanicLevel))
klog.SetOutputBySeverity("WARNING", log.WriterLevel(logrus.WarnLevel))
klog.SetOutputBySeverity("ERROR", log.WriterLevel(logrus.ErrorLevel))
klog.SetOutputBySeverity("FATAL", log.WriterLevel(logrus.FatalLevel))
// Do not repeat log messages on all severities in klog
klogFlags.Set("one_output", "true")
}
// LogOptions maps configuration key-value pairs related to logging.
type LogOptions map[string]string
// initializeDefaultLogger returns a logrus Logger with the default logging
// settings.
func initializeDefaultLogger() (logger *logrus.Logger) {
logger = logrus.New()
logger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp))
logger.SetLevel(DefaultLogLevel)
return
}
// GetLogLevel returns the log level specified in the provided LogOptions. If
// it is not set in the options, it will return the default level.
func (o LogOptions) GetLogLevel() (level logrus.Level) {
levelOpt, ok := o[LevelOpt]
if !ok {
return DefaultLogLevel
}
var err error
if level, err = logrus.ParseLevel(levelOpt); err != nil {
logrus.WithError(err).Warning("Ignoring user-configured log level")
return DefaultLogLevel
}
return
}
// GetLogFormat returns the log format specified in the provided LogOptions. If
// it is not set in the options or is invalid, it will return the default format.
func (o LogOptions) GetLogFormat() LogFormat {
formatOpt, ok := o[FormatOpt]
if !ok {
return DefaultLogFormatTimestamp
}
formatOpt = strings.ToLower(formatOpt)
re := regexp.MustCompile(`^(text|text-ts|json|json-ts)$`)
if !re.MatchString(formatOpt) {
logrus.WithError(
fmt.Errorf("incorrect log format configured '%s', expected 'text', 'text-ts', 'json' or 'json-ts'", formatOpt),
).Warning("Ignoring user-configured log format")
return DefaultLogFormatTimestamp
}
return LogFormat(formatOpt)
}
// SetLogLevel updates the DefaultLogger with a new logrus.Level
func SetLogLevel(logLevel logrus.Level) {
DefaultLogger.SetLevel(logLevel)
}
// SetDefaultLogLevel updates the DefaultLogger with the DefaultLogLevel
func SetDefaultLogLevel() {
DefaultLogger.SetLevel(DefaultLogLevel)
}
// SetLogLevelToDebug updates the DefaultLogger with the logrus.DebugLevel
func SetLogLevelToDebug() {
DefaultLogger.SetLevel(logrus.DebugLevel)
}
// SetLogFormat updates the DefaultLogger with a new LogFormat
func SetLogFormat(logFormat LogFormat) {
DefaultLogger.SetFormatter(GetFormatter(logFormat))
}
// SetDefaultLogFormat updates the DefaultLogger with the DefaultLogFormat
func SetDefaultLogFormat() {
DefaultLogger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp))
}
// AddHooks adds additional logrus hook to default logger
func AddHooks(hooks ...logrus.Hook) {
for _, hook := range hooks {
DefaultLogger.AddHook(hook)
}
}
// SetupLogging sets up each logging service provided in loggers and configures
// each logger with the provided logOpts.
func SetupLogging(loggers []string, logOpts LogOptions, tag string, debug bool) error {
// Bridge klog to logrus. Note that this will open multiple pipes and fork
// background goroutines that are not cleaned up.
initializeKLog()
if debug {
logOpts[LevelOpt] = "debug"
}
initializeSlog(logOpts, len(loggers) == 0)
// Updating the default log format
SetLogFormat(logOpts.GetLogFormat())
// Set default logger to output to stdout if no loggers are provided.
if len(loggers) == 0 {
// TODO: switch to a per-logger version when we upgrade to logrus >1.0.3
logrus.SetOutput(os.Stdout)
}
// Updating the default log level, overriding the log options if the debug arg is being set
if debug {
SetLogLevelToDebug()
} else {
SetLogLevel(logOpts.GetLogLevel())
}
// always suppress the default logger so libraries don't print things
logrus.SetLevel(logrus.PanicLevel)
// Iterate through all provided loggers and configure them according
// to user-provided settings.
for _, logger := range loggers {
switch logger {
case Syslog:
if err := setupSyslog(logOpts, tag, debug); err != nil {
return fmt.Errorf("failed to set up syslog: %w", err)
}
default:
return fmt.Errorf("provided log driver %q is not a supported log driver", logger)
}
}
return nil
}
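// Illustrative usage sketch (not part of the original file): configure the
// default loggers for JSON output at info level without any extra log driver,
// so messages go to stdout via the default logrus logger.
func exampleSetupLoggingUsage() error {
	opts := LogOptions{
		LevelOpt:  "info",
		FormatOpt: string(LogFormatJSON),
	}
	return SetupLogging(nil, opts, "cilium-example", false)
}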
// GetFormatter returns a logrus.Formatter configured with the specific values
// we want for the given LogFormat.
func GetFormatter(format LogFormat) logrus.Formatter {
switch format {
case LogFormatText:
return &logrus.TextFormatter{
DisableTimestamp: true,
DisableColors: true,
}
case LogFormatTextTimestamp:
return &logrus.TextFormatter{
DisableTimestamp: false,
DisableColors: true,
}
case LogFormatJSON:
return &logrus.JSONFormatter{
DisableTimestamp: true,
}
case LogFormatJSONTimestamp:
return &logrus.JSONFormatter{
DisableTimestamp: false,
TimestampFormat: time.RFC3339Nano,
}
}
return nil
}
// validateOpts iterates through all keys and values in logOpts and returns an error if
// a key in logOpts is not a key in supportedOpts, or if the value of the corresponding
// key is not listed among that key's valid values in validKVs.
func (o LogOptions) validateOpts(logDriver string, supportedOpts map[string]bool, validKVs map[string][]string) error {
for k, v := range o {
if !supportedOpts[k] {
return fmt.Errorf("provided configuration key %q is not supported as a logging option for log driver %s", k, logDriver)
}
if validValues, ok := validKVs[k]; ok {
valid := false
for _, vv := range validValues {
if v == vv {
valid = true
break
}
}
if !valid {
return fmt.Errorf("provided configuration value %q is not a valid value for %q in log driver %s, valid values: %v", v, k, logDriver, validValues)
}
}
}
return nil
}
// getLogDriverConfig returns a map containing the key-value pairs from
// logOpts whose keys start with the logDriver prefix.
func getLogDriverConfig(logDriver string, logOpts LogOptions) LogOptions {
keysToValidate := make(LogOptions)
for k, v := range logOpts {
ok, err := regexp.MatchString(logDriver+".*", k)
if err != nil {
DefaultLogger.Fatal(err)
}
if ok {
keysToValidate[k] = v
}
}
return keysToValidate
}
// MultiLine breaks multi-line text into individual log entries and calls the
// logging function to log each entry.
func MultiLine(logFn func(args ...interface{}), output string) {
scanner := bufio.NewScanner(bytes.NewReader([]byte(output)))
for scanner.Scan() {
logFn(scanner.Text())
}
}
// CanLogAt returns whether a log message at the given level would be
// logged by the given logger.
func CanLogAt(logger *logrus.Logger, level logrus.Level) bool {
return GetLevel(logger) >= level
}
// GetLevel returns the log level of the given logger.
func GetLevel(logger *logrus.Logger) logrus.Level {
return logrus.Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !windows
package logging
import (
"log/syslog"
"github.com/sirupsen/logrus"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
)
const (
SLevel = "syslog.level"
SNetwork = "syslog.network"
SAddress = "syslog.address"
SSeverity = "syslog.severity"
SFacility = "syslog.facility"
STag = "syslog.tag"
)
var (
// syslogOpts is the set of supported options for syslog configuration.
syslogOpts = map[string]bool{
SLevel: true,
SNetwork: true,
SAddress: true,
SSeverity: true,
SFacility: true,
STag: true,
}
// From /usr/include/sys/syslog.h.
syslogSeverityMap = map[string]syslog.Priority{
"emerg": syslog.LOG_EMERG,
"panic": syslog.LOG_EMERG,
"alert": syslog.LOG_ALERT,
"crit": syslog.LOG_CRIT,
"err": syslog.LOG_ERR,
"error": syslog.LOG_ERR,
"warn": syslog.LOG_WARNING,
"warning": syslog.LOG_WARNING,
"notice": syslog.LOG_NOTICE,
"info": syslog.LOG_INFO,
"debug": syslog.LOG_DEBUG,
}
// From /usr/include/sys/syslog.h.
syslogFacilityMap = map[string]syslog.Priority{
"kern": syslog.LOG_KERN,
"user": syslog.LOG_USER,
"mail": syslog.LOG_MAIL,
"daemon": syslog.LOG_DAEMON,
"auth": syslog.LOG_AUTH,
"syslog": syslog.LOG_SYSLOG,
"lpr": syslog.LOG_LPR,
"news": syslog.LOG_NEWS,
"uucp": syslog.LOG_UUCP,
"cron": syslog.LOG_CRON,
"authpriv": syslog.LOG_AUTHPRIV,
"ftp": syslog.LOG_FTP,
"local0": syslog.LOG_LOCAL0,
"local1": syslog.LOG_LOCAL1,
"local2": syslog.LOG_LOCAL2,
"local3": syslog.LOG_LOCAL3,
"local4": syslog.LOG_LOCAL4,
"local5": syslog.LOG_LOCAL5,
"local6": syslog.LOG_LOCAL6,
"local7": syslog.LOG_LOCAL7,
}
// syslogLevelMap maps logrus.Level values to syslog.Priority levels.
syslogLevelMap = map[logrus.Level]syslog.Priority{
logrus.PanicLevel: syslog.LOG_ALERT,
logrus.FatalLevel: syslog.LOG_CRIT,
logrus.ErrorLevel: syslog.LOG_ERR,
logrus.WarnLevel: syslog.LOG_WARNING,
logrus.InfoLevel: syslog.LOG_INFO,
logrus.DebugLevel: syslog.LOG_DEBUG,
logrus.TraceLevel: syslog.LOG_DEBUG,
}
)
func mapStringPriorityToSlice(m map[string]syslog.Priority) []string {
s := make([]string, 0, len(m))
for k := range m {
s = append(s, k)
}
return s
}
// setupSyslog sets up and configures syslog with the provided options in
// logOpts. If some options are not provided, sensible defaults are used.
func setupSyslog(logOpts LogOptions, tag string, debug bool) error {
opts := getLogDriverConfig(Syslog, logOpts)
syslogOptValues := make(map[string][]string)
syslogOptValues[SSeverity] = mapStringPriorityToSlice(syslogSeverityMap)
syslogOptValues[SFacility] = mapStringPriorityToSlice(syslogFacilityMap)
if err := opts.validateOpts(Syslog, syslogOpts, syslogOptValues); err != nil {
return err
}
if stag, ok := opts[STag]; ok {
tag = stag
}
logLevel, ok := opts[SLevel]
if !ok {
if debug {
logLevel = "debug"
} else {
logLevel = "info"
}
}
// Validate provided log level.
level, err := logrus.ParseLevel(logLevel)
if err != nil {
DefaultLogger.Fatal(err)
}
SetLogLevel(level)
network := ""
address := ""
// Inherit severity from log level if syslog.severity is not specified explicitly
severity := syslogLevelMap[level]
// Default values for facility if not specified
facility := syslog.LOG_KERN
if networkStr, ok := opts[SNetwork]; ok {
network = networkStr
}
if addressStr, ok := opts[SAddress]; ok {
address = addressStr
}
if severityStr, ok := opts[SSeverity]; ok {
severity = syslogSeverityMap[severityStr]
}
if facilityStr, ok := opts[SFacility]; ok {
facility = syslogFacilityMap[facilityStr]
}
// Create syslog hook.
h, err := logrus_syslog.NewSyslogHook(network, address, severity|facility, tag)
if err != nil {
DefaultLogger.Fatal(err)
}
// TODO: switch to a per-logger version when we upgrade to logrus >1.0.3
logrus.AddHook(h)
DefaultLogger.AddHook(h)
return nil
}
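// Illustrative usage sketch (not part of the original file): enable the syslog
// driver through SetupLogging with an explicit severity and facility; options
// not set here fall back to the defaults chosen in setupSyslog.
func exampleSyslogSetup() error {
	opts := LogOptions{
		SLevel:    "info",
		SSeverity: "info",
		SFacility: "local5",
	}
	return SetupLogging([]string{Syslog}, opts, "cilium-example", false)
}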
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"context"
"log/slog"
"os"
"strings"
"time"
"github.com/sirupsen/logrus"
)
// SlogNopHandler discards all logs.
var SlogNopHandler slog.Handler = nopHandler{}
type nopHandler struct{}
func (nopHandler) Enabled(context.Context, slog.Level) bool { return false }
func (nopHandler) Handle(context.Context, slog.Record) error { return nil }
func (n nopHandler) WithAttrs([]slog.Attr) slog.Handler { return n }
func (n nopHandler) WithGroup(string) slog.Handler { return n }
var slogHandlerOpts = &slog.HandlerOptions{
AddSource: false,
Level: slog.LevelInfo,
ReplaceAttr: replaceLevelAndDropTime,
}
// DefaultSlogLogger is the default slog logger. It is overwritten once initializeSlog is called.
var DefaultSlogLogger *slog.Logger = slog.New(slog.NewTextHandler(
os.Stderr,
slogHandlerOpts,
))
func slogLevel(l logrus.Level) slog.Level {
switch l {
case logrus.DebugLevel, logrus.TraceLevel:
return slog.LevelDebug
case logrus.InfoLevel:
return slog.LevelInfo
case logrus.WarnLevel:
return slog.LevelWarn
case logrus.ErrorLevel, logrus.PanicLevel, logrus.FatalLevel:
return slog.LevelError
default:
return slog.LevelInfo
}
}
// Approximates the logrus output via slog for job groups during the transition
// phase.
func initializeSlog(logOpts LogOptions, useStdout bool) {
opts := *slogHandlerOpts
opts.Level = slogLevel(logOpts.GetLogLevel())
logFormat := logOpts.GetLogFormat()
switch logFormat {
case LogFormatJSON, LogFormatText:
opts.ReplaceAttr = replaceLevelAndDropTime
case LogFormatJSONTimestamp, LogFormatTextTimestamp:
opts.ReplaceAttr = replaceLevel
}
writer := os.Stderr
if useStdout {
writer = os.Stdout
}
switch logFormat {
case LogFormatJSON, LogFormatJSONTimestamp:
DefaultSlogLogger = slog.New(slog.NewJSONHandler(
writer,
&opts,
))
case LogFormatText, LogFormatTextTimestamp:
DefaultSlogLogger = slog.New(slog.NewTextHandler(
writer,
&opts,
))
}
}
func replaceLevel(groups []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.TimeKey:
// Adjust to timestamp format that logrus uses; except that we can't
// force slog to quote the value like logrus does...
return slog.String(slog.TimeKey, a.Value.Time().Format(time.RFC3339))
case slog.LevelKey:
// Lower-case the log level
return slog.Attr{
Key: a.Key,
Value: slog.StringValue(strings.ToLower(a.Value.String())),
}
}
return a
}
func replaceLevelAndDropTime(groups []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.TimeKey:
// Drop timestamps
return slog.Attr{}
case slog.LevelKey:
// Lower-case the log level
return slog.Attr{
Key: a.Key,
Value: slog.StringValue(strings.ToLower(a.Value.String())),
}
}
return a
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mac
import (
"bytes"
"crypto/rand"
"encoding/hex"
"fmt"
"net"
)
// EthHdrLen is the length in bytes of an untagged Ethernet (IEEE 802.3) frame header.
const EthHdrLen = 14
// Uint64MAC is the __u64 representation of a MAC address.
// It corresponds to the C mac_t type used in bpf/.
type Uint64MAC uint64
func (m Uint64MAC) String() string {
return fmt.Sprintf("%02X:%02X:%02X:%02X:%02X:%02X",
uint64((m & 0x0000000000FF)),
uint64((m&0x00000000FF00)>>8),
uint64((m&0x000000FF0000)>>16),
uint64((m&0x0000FF000000)>>24),
uint64((m&0x00FF00000000)>>32),
uint64((m&0xFF0000000000)>>40),
)
}
// MAC is a net.HardwareAddr wrapper that forces Cilium to only use MAC-48 addresses.
type MAC net.HardwareAddr
// String returns the string representation of m.
func (m MAC) String() string {
return net.HardwareAddr(m).String()
}
// ParseMAC parses s only as an IEEE 802 MAC-48.
func ParseMAC(s string) (MAC, error) {
ha, err := net.ParseMAC(s)
if err != nil {
return nil, err
}
if len(ha) != 6 {
return nil, fmt.Errorf("invalid MAC address %s", s)
}
return MAC(ha), nil
}
// Uint64 returns the MAC in uint64 format. The MAC is represented as little-endian in
// the returned value.
// Example:
//
// m := MAC([]byte{0x11, 0x12, 0x23, 0x34, 0x45, 0x56})
// v, err := m.Uint64()
// fmt.Printf("0x%X", v) // 0x564534231211
func (m MAC) Uint64() (Uint64MAC, error) {
if len(m) != 6 {
return 0, fmt.Errorf("invalid MAC address %s", m.String())
}
res := uint64(m[5])<<40 | uint64(m[4])<<32 | uint64(m[3])<<24 |
uint64(m[2])<<16 | uint64(m[1])<<8 | uint64(m[0])
return Uint64MAC(res), nil
}
func (m MAC) MarshalJSON() ([]byte, error) {
if len(m) == 0 {
return []byte(`""`), nil
}
if len(m) != 6 {
return nil, fmt.Errorf("invalid MAC address length %s", string(m))
}
return []byte(fmt.Sprintf("\"%02x:%02x:%02x:%02x:%02x:%02x\"", m[0], m[1], m[2], m[3], m[4], m[5])), nil
}
func (m MAC) MarshalIndentJSON(prefix, indent string) ([]byte, error) {
return m.MarshalJSON()
}
func (m *MAC) UnmarshalJSON(data []byte) error {
if len(data) == len([]byte(`""`)) {
if m == nil {
m = new(MAC)
}
*m = MAC{}
return nil
}
if len(data) != 19 {
return fmt.Errorf("invalid MAC address length %s", string(data))
}
data = data[1 : len(data)-1]
macStr := bytes.Replace(data, []byte(`:`), []byte(``), -1)
if len(macStr) != 12 {
return fmt.Errorf("invalid MAC address format")
}
macByte := make([]byte, len(macStr))
hex.Decode(macByte, macStr)
*m = MAC{macByte[0], macByte[1], macByte[2], macByte[3], macByte[4], macByte[5]}
return nil
}
// GenerateRandMAC generates a random unicast and locally administered MAC address.
func GenerateRandMAC() (MAC, error) {
buf := make([]byte, 6)
if _, err := rand.Read(buf); err != nil {
return nil, fmt.Errorf("Unable to retrieve 6 rnd bytes: %w", err)
}
// Set locally administered addresses bit and reset multicast bit
buf[0] = (buf[0] | 0x02) & 0xfe
return buf, nil
}
// HaveMACAddrs returns true if all given network interfaces have L2 addr.
func HaveMACAddrs(ifaces []string) bool {
for _, iface := range ifaces {
if !HasMacAddr(iface) {
return false
}
}
return true
}
// CArrayString returns a string which can be used for assigning the given
// MAC addr to "union macaddr" in C.
func CArrayString(m net.HardwareAddr) string {
if m == nil || len(m) != 6 {
return "{0x0,0x0,0x0,0x0,0x0,0x0}"
}
return fmt.Sprintf("{0x%x,0x%x,0x%x,0x%x,0x%x,0x%x}",
m[0], m[1], m[2], m[3], m[4], m[5])
}
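// Illustrative usage sketch (not part of the original file): generate a random
// locally administered unicast MAC and render it as a C "union macaddr"
// initializer for the BPF datapath headers.
func exampleMACUsage() (string, error) {
	m, err := GenerateRandMAC()
	if err != nil {
		return "", err
	}
	return CArrayString(net.HardwareAddr(m)), nil
}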
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mac
import (
"errors"
"net"
"github.com/vishvananda/netlink"
)
// HasMacAddr returns true if the given network interface has L2 addr.
func HasMacAddr(iface string) bool {
link, err := netlink.LinkByName(iface)
if err != nil {
return false
}
return LinkHasMacAddr(link)
}
// LinkHasMacAddr returns true if the given network interface has L2 addr.
func LinkHasMacAddr(link netlink.Link) bool {
return len(link.Attrs().HardwareAddr) != 0
}
// ReplaceMacAddressWithLinkName sets the MAC address of the interface with the given
// name. It is a no-op if the link does not exist.
func ReplaceMacAddressWithLinkName(ifName, macAddress string) error {
l, err := netlink.LinkByName(ifName)
if err != nil {
if errors.As(err, &netlink.LinkNotFoundError{}) {
return nil
}
return err
}
hw, err := net.ParseMAC(macAddress)
if err != nil {
return err
}
return netlink.LinkSetHardwareAddr(l, hw)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package format
import (
"strconv"
"strings"
"github.com/spf13/pflag"
)
// Uint16Flags is a slice of unsigned 16-bit ints with some convenience methods.
type Uint16Flags []uint16
var _ pflag.Value = &Uint16Flags{}
// String provides a human-readable string format of the received variable.
func (i *Uint16Flags) String() string {
pieces := make([]string, 0, len(*i))
for _, v := range *i {
pieces = append(pieces, strconv.Itoa(int(v)))
}
return strings.Join(pieces, ", ")
}
// Set converts the specified value into an integer and appends it to the flags.
// Returns an error if the value cannot be converted to a 16-bit unsigned value.
func (i *Uint16Flags) Set(value string) error {
vUint64, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
*i = append(*i, uint16(vUint64))
return nil
}
// Type returns a human-readable string representing the type of the receiver.
func (i *Uint16Flags) Type() string {
return "[]uint16"
}
// Has returns true if the given value exists in the flags.
func (i *Uint16Flags) Has(value uint16) bool {
for _, v := range *i {
if v == value {
return true
}
}
return false
}
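// Illustrative usage sketch (not part of the original file): register the
// slice as a repeatable pflag flag and query it after parsing. The flag name
// "from" is made up for this example.
func exampleUint16FlagsUsage() bool {
	var fromSource Uint16Flags
	fs := pflag.NewFlagSet("monitor-example", pflag.ContinueOnError)
	fs.Var(&fromSource, "from", "Filter by source endpoint id")
	_ = fs.Parse([]string{"--from", "42", "--from", "1024"})
	return fromSource.Has(42) // true once parsed
}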
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package format
import (
"bytes"
"encoding/binary"
"encoding/gob"
"fmt"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/monitor"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/monitor/payload"
)
// Verbosity levels for formatting output.
type Verbosity uint8
const (
msgSeparator = "------------------------------------------------------------------------------"
// INFO is the level of verbosity in which summaries of Drop and Capture
// messages are printed out when the monitor is invoked
INFO Verbosity = iota + 1
// DEBUG is the level of verbosity in which more information about packets
// is printed than in INFO mode. Debug, Drop, and Capture messages are printed.
DEBUG
// VERBOSE is the level of verbosity in which the most information possible
// about packets is printed out. It is currently not utilized.
VERBOSE
// JSON is the level of verbosity in which event information is printed out in json format
JSON
)
// MonitorFormatter filters and formats monitor messages from a buffer.
type MonitorFormatter struct {
EventTypes monitorAPI.MessageTypeFilter
FromSource Uint16Flags
ToDst Uint16Flags
Related Uint16Flags
Hex bool
JSONOutput bool
Verbosity Verbosity
Numeric bool
linkMonitor getters.LinkGetter
}
// NewMonitorFormatter returns a new formatter with default configuration.
func NewMonitorFormatter(verbosity Verbosity, linkMonitor getters.LinkGetter) *MonitorFormatter {
return &MonitorFormatter{
Hex: false,
EventTypes: monitorAPI.MessageTypeFilter{},
FromSource: Uint16Flags{},
ToDst: Uint16Flags{},
Related: Uint16Flags{},
JSONOutput: false,
Verbosity: verbosity,
Numeric: bool(monitor.DisplayLabel),
linkMonitor: linkMonitor,
}
}
// match checks whether the event type, source endpoint and/or destination
// endpoint match the configured filters when those filters are supplied. The
// 'related' filter matches if either the source or the destination endpoint
// matches. A filter that was not supplied (i.e. is empty) is ignored.
func (m *MonitorFormatter) match(messageType int, src uint16, dst uint16) bool {
if len(m.EventTypes) > 0 && !m.EventTypes.Contains(messageType) {
return false
} else if len(m.FromSource) > 0 && !m.FromSource.Has(src) {
return false
} else if len(m.ToDst) > 0 && !m.ToDst.Has(dst) {
return false
} else if len(m.Related) > 0 && !m.Related.Has(src) && !m.Related.Has(dst) {
return false
}
return true
}
// dropEvents prints out all the received drop notifications.
func (m *MonitorFormatter) dropEvents(prefix string, data []byte) {
dn := monitor.DropNotify{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &dn); err != nil {
fmt.Printf("Error while parsing drop notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeDrop, dn.Source, uint16(dn.DstID)) {
switch m.Verbosity {
case INFO, DEBUG:
dn.DumpInfo(data, monitor.DisplayFormat(m.Numeric))
case JSON:
dn.DumpJSON(data, prefix)
default:
fmt.Println(msgSeparator)
dn.DumpVerbose(!m.Hex, data, prefix, monitor.DisplayFormat(m.Numeric))
}
}
}
// traceEvents prints out all the received trace notifications.
func (m *MonitorFormatter) traceEvents(prefix string, data []byte) {
tn := monitor.TraceNotify{}
if err := monitor.DecodeTraceNotify(data, &tn); err != nil {
fmt.Printf("Error while parsing trace notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeTrace, tn.Source, tn.DstID) {
switch m.Verbosity {
case INFO, DEBUG:
tn.DumpInfo(data, monitor.DisplayFormat(m.Numeric), m.linkMonitor)
case JSON:
tn.DumpJSON(data, prefix, m.linkMonitor)
default:
fmt.Println(msgSeparator)
tn.DumpVerbose(!m.Hex, data, prefix, monitor.DisplayFormat(m.Numeric), m.linkMonitor)
}
}
}
func (m *MonitorFormatter) traceSockEvents(prefix string, data []byte) {
tn := monitor.TraceSockNotify{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &tn); err != nil {
fmt.Printf("Error while parsing socket trace notification message: %s\n", err)
}
// Currently only printed with the debug option. Extend it to info and json.
// GH issue: https://github.com/cilium/cilium/issues/21510
if m.Verbosity == DEBUG {
tn.DumpDebug(prefix)
}
}
func (m *MonitorFormatter) policyVerdictEvents(prefix string, data []byte) {
pn := monitor.PolicyVerdictNotify{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &pn); err != nil {
fmt.Printf("Error while parsing policy notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypePolicyVerdict, pn.Source, uint16(pn.RemoteLabel)) {
pn.DumpInfo(data, monitor.DisplayFormat(m.Numeric))
}
}
func (m *MonitorFormatter) recorderCaptureEvents(prefix string, data []byte) {
rc := monitor.RecorderCapture{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &rc); err != nil {
fmt.Printf("Error while parsing capture record: %s\n", err)
}
if m.match(monitorAPI.MessageTypeRecCapture, 0, 0) {
rc.DumpInfo(data)
}
}
// debugEvents prints out all the debug messages.
func (m *MonitorFormatter) debugEvents(prefix string, data []byte) {
dm := monitor.DebugMsg{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &dm); err != nil {
fmt.Printf("Error while parsing debug message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeDebug, dm.Source, 0) {
switch m.Verbosity {
case INFO:
dm.DumpInfo(data)
case JSON:
dm.DumpJSON(prefix, m.linkMonitor)
default:
dm.Dump(prefix, m.linkMonitor)
}
}
}
// captureEvents prints out all the capture messages.
func (m *MonitorFormatter) captureEvents(prefix string, data []byte) {
dc := monitor.DebugCapture{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &dc); err != nil {
fmt.Printf("Error while parsing debug capture message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeCapture, dc.Source, 0) {
switch m.Verbosity {
case INFO, DEBUG:
dc.DumpInfo(data, m.linkMonitor)
case JSON:
dc.DumpJSON(data, prefix, m.linkMonitor)
default:
fmt.Println(msgSeparator)
dc.DumpVerbose(!m.Hex, data, prefix)
}
}
}
// logRecordEvents prints out LogRecord events
func (m *MonitorFormatter) logRecordEvents(prefix string, data []byte) {
buf := bytes.NewBuffer(data[1:])
dec := gob.NewDecoder(buf)
lr := monitor.LogRecordNotify{}
if err := dec.Decode(&lr); err != nil {
fmt.Printf("Error while decoding LogRecord notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeAccessLog, uint16(lr.SourceEndpoint.ID), uint16(lr.DestinationEndpoint.ID)) {
if m.Verbosity == JSON {
lr.DumpJSON()
} else {
lr.DumpInfo()
}
}
}
// agentEvents prints out agent events
func (m *MonitorFormatter) agentEvents(prefix string, data []byte) {
buf := bytes.NewBuffer(data[1:])
dec := gob.NewDecoder(buf)
an := monitorAPI.AgentNotify{}
if err := dec.Decode(&an); err != nil {
fmt.Printf("Error while decoding agent notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeAgent, 0, 0) {
if m.Verbosity == JSON {
an.DumpJSON()
} else {
an.DumpInfo()
}
}
}
// FormatSample prints an event from the provided raw data slice to stdout.
//
// For most monitor event types, 'data' corresponds to the 'data' field in
// bpf.PerfEventSample. Exceptions are MessageTypeAccessLog and
// MessageTypeAgent.
func (m *MonitorFormatter) FormatSample(data []byte, cpu int) {
prefix := fmt.Sprintf("CPU %02d:", cpu)
messageType := data[0]
switch messageType {
case monitorAPI.MessageTypeDrop:
m.dropEvents(prefix, data)
case monitorAPI.MessageTypeDebug:
m.debugEvents(prefix, data)
case monitorAPI.MessageTypeCapture:
m.captureEvents(prefix, data)
case monitorAPI.MessageTypeTrace:
m.traceEvents(prefix, data)
case monitorAPI.MessageTypeAccessLog:
m.logRecordEvents(prefix, data)
case monitorAPI.MessageTypeAgent:
m.agentEvents(prefix, data)
case monitorAPI.MessageTypePolicyVerdict:
m.policyVerdictEvents(prefix, data)
case monitorAPI.MessageTypeRecCapture:
m.recorderCaptureEvents(prefix, data)
case monitorAPI.MessageTypeTraceSock:
m.traceSockEvents(prefix, data)
default:
fmt.Printf("%s Unknown event: %+v\n", prefix, data)
}
}
// LostEvent formats a lost event using the specified payload parameters.
func LostEvent(lost uint64, cpu int) {
fmt.Printf("CPU %02d: Lost %d events\n", cpu, lost)
}
// FormatEvent formats an event from the specified payload to stdout.
//
// Returns true if the event was successfully printed, false otherwise.
func (m *MonitorFormatter) FormatEvent(pl *payload.Payload) bool {
switch pl.Type {
case payload.EventSample:
m.FormatSample(pl.Data, pl.CPU)
case payload.RecordLost:
LostEvent(pl.Lost, pl.CPU)
default:
return false
}
return true
}
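// Illustrative usage sketch (not part of the original file): feed decoded
// perf-ring payloads through a formatter that prints events as JSON and has
// no source/destination filters configured.
func exampleMonitorFormatterUsage(pl *payload.Payload, linkMonitor getters.LinkGetter) bool {
	f := NewMonitorFormatter(JSON, linkMonitor)
	return f.FormatEvent(pl)
}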
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"net"
"net/netip"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/mackerelio/go-osstat/memory"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/protobuf/types/known/fieldmaskpb"
k8sLabels "k8s.io/apimachinery/pkg/labels"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/cidr"
clustermeshTypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/command"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/ip"
ipamOption "github.com/cilium/cilium/pkg/ipam/option"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/mac"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/version"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "config")
)
const (
// AgentHealthPort is the TCP port for agent health status API
AgentHealthPort = "agent-health-port"
// ClusterHealthPort is the TCP port for cluster-wide network connectivity health API
ClusterHealthPort = "cluster-health-port"
// ClusterMeshHealthPort is the TCP port for ClusterMesh apiserver health API
ClusterMeshHealthPort = "clustermesh-health-port"
// AgentLabels are additional labels to identify this agent
AgentLabels = "agent-labels"
// AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in policy.
AllowICMPFragNeeded = "allow-icmp-frag-needed"
// AllowLocalhost is the policy for when to allow the local stack to reach local endpoints { auto | always | policy }
AllowLocalhost = "allow-localhost"
// AllowLocalhostAuto defaults to policy except when running in
// Kubernetes where it then defaults to "always"
AllowLocalhostAuto = "auto"
// AllowLocalhostAlways always allows the local stack to reach local
// endpoints
AllowLocalhostAlways = "always"
// AllowLocalhostPolicy requires a policy rule to allow the local stack
// to reach particular endpoints or policy enforcement must be
// disabled.
AllowLocalhostPolicy = "policy"
// AnnotateK8sNode enables annotating a kubernetes node while bootstrapping
// the daemon, which can also be disabled using this option.
AnnotateK8sNode = "annotate-k8s-node"
// ARPPingRefreshPeriod is the ARP entries refresher period
ARPPingRefreshPeriod = "arping-refresh-period"
// EnableL2NeighDiscovery determines if cilium should perform L2 neighbor
// discovery.
EnableL2NeighDiscovery = "enable-l2-neigh-discovery"
// BPFRoot is the Path to BPF filesystem
BPFRoot = "bpf-root"
// CGroupRoot is the path to Cgroup2 filesystem
CGroupRoot = "cgroup-root"
// CompilerFlags allows specifying extra compiler flags for advanced debugging
CompilerFlags = "cflags"
// ConfigFile is the Configuration file (default "$HOME/ciliumd.yaml")
ConfigFile = "config"
// ConfigDir is the directory that contains a file for each option where
// the filename represents the option name and the content of that file
// represents the value of that option.
ConfigDir = "config-dir"
// ConntrackGCInterval is the name of the ConntrackGCInterval option
ConntrackGCInterval = "conntrack-gc-interval"
// ConntrackGCMaxInterval is the name of the ConntrackGCMaxInterval option
ConntrackGCMaxInterval = "conntrack-gc-max-interval"
// DebugArg is the argument that enables debugging mode
DebugArg = "debug"
// DebugVerbose is the argument that enables verbose log messages for particular subsystems
DebugVerbose = "debug-verbose"
// Devices facing cluster/external network for attaching bpf_host
Devices = "devices"
// Forces the auto-detection of devices, even if specific devices are explicitly listed
ForceDeviceDetection = "force-device-detection"
// DirectRoutingDevice is the name of a device used to connect nodes in
// direct routing mode (only required by BPF NodePort)
DirectRoutingDevice = "direct-routing-device"
// EnablePolicy enables policy enforcement in the agent.
EnablePolicy = "enable-policy"
// EnableExternalIPs enables implementation of k8s services with externalIPs in datapath
EnableExternalIPs = "enable-external-ips"
// EnableL7Proxy is the name of the option to enable L7 proxy
EnableL7Proxy = "enable-l7-proxy"
// EnableTracing enables tracing mode in the agent.
EnableTracing = "enable-tracing"
// EnableIPIPTermination is the name of the option to enable IPIP termination
EnableIPIPTermination = "enable-ipip-termination"
// Add unreachable routes on pod deletion
EnableUnreachableRoutes = "enable-unreachable-routes"
// EncryptInterface enables encryption on specified interface
EncryptInterface = "encrypt-interface"
// EncryptNode enables node IP encryption
EncryptNode = "encrypt-node"
// GopsPort is the TCP port for the gops server.
GopsPort = "gops-port"
// FixedIdentityMapping is the key-value for the fixed identity mapping
// which allows using a reserved label for fixed identities
FixedIdentityMapping = "fixed-identity-mapping"
// FixedZoneMapping is the key-value for the fixed zone mapping which
// is used to map zone value (string) from EndpointSlice to ID (uint8)
// in lb{4,6}_backend in BPF map.
FixedZoneMapping = "fixed-zone-mapping"
// IPv4Range is the per-node IPv4 endpoint prefix, e.g. 10.16.0.0/16
IPv4Range = "ipv4-range"
// IPv6Range is the per-node IPv6 endpoint prefix, must be /96, e.g. fd02:1:1::/96
IPv6Range = "ipv6-range"
// IPv4ServiceRange is the Kubernetes IPv4 services CIDR if not inside cluster prefix
IPv4ServiceRange = "ipv4-service-range"
// IPv6ServiceRange is the Kubernetes IPv6 services CIDR if not inside cluster prefix
IPv6ServiceRange = "ipv6-service-range"
// IPv6ClusterAllocCIDRName is the name of the IPv6ClusterAllocCIDR option
IPv6ClusterAllocCIDRName = "ipv6-cluster-alloc-cidr"
// K8sRequireIPv4PodCIDRName is the name of the K8sRequireIPv4PodCIDR option
K8sRequireIPv4PodCIDRName = "k8s-require-ipv4-pod-cidr"
// K8sRequireIPv6PodCIDRName is the name of the K8sRequireIPv6PodCIDR option
K8sRequireIPv6PodCIDRName = "k8s-require-ipv6-pod-cidr"
// K8sWatcherEndpointSelector specifies the k8s endpoints that Cilium
// should watch for.
K8sWatcherEndpointSelector = "k8s-watcher-endpoint-selector"
// EnableK8s enables operation of Kubernetes-related services/controllers.
// Intended for operating cilium with CNI-compatible orchestrators other than Kubernetes. (default is true)
EnableK8s = "enable-k8s"
// K8sAPIServer is the kubernetes api address server (for https use --k8s-kubeconfig-path instead)
K8sAPIServer = "k8s-api-server"
// K8sKubeConfigPath is the absolute path of the kubernetes kubeconfig file
K8sKubeConfigPath = "k8s-kubeconfig-path"
// K8sServiceCacheSize is service cache size for cilium k8s package.
K8sServiceCacheSize = "k8s-service-cache-size"
// K8sSyncTimeout is the timeout since last event was received to synchronize all resources with k8s.
K8sSyncTimeoutName = "k8s-sync-timeout"
// AllocatorListTimeout is the timeout to list initial allocator state.
AllocatorListTimeoutName = "allocator-list-timeout"
// KeepConfig when restoring state, keeps containers' configuration in place
KeepConfig = "keep-config"
// KVStore key-value store type
KVStore = "kvstore"
// KVStoreOpt key-value store options
KVStoreOpt = "kvstore-opt"
// Labels is the list of label prefixes used to determine identity of an endpoint
Labels = "labels"
// LabelPrefixFile is the valid label prefixes file path
LabelPrefixFile = "label-prefix-file"
// EnableHostFirewall enables network policies for the host
EnableHostFirewall = "enable-host-firewall"
// EnableHostPort enables HostPort forwarding implemented by Cilium in BPF
EnableHostPort = "enable-host-port"
// EnableHostLegacyRouting enables the old routing path via stack.
EnableHostLegacyRouting = "enable-host-legacy-routing"
// EnableNodePort enables NodePort services implemented by Cilium in BPF
EnableNodePort = "enable-node-port"
// EnableSVCSourceRangeCheck enables checking of service source ranges
EnableSVCSourceRangeCheck = "enable-svc-source-range-check"
// NodePortMode indicates in which mode NodePort implementation should run
// ("snat", "dsr" or "hybrid")
NodePortMode = "node-port-mode"
// NodePortAlg indicates which algorithm is used for backend selection
// ("random" or "maglev")
NodePortAlg = "node-port-algorithm"
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic", "native", or "best-effort")
NodePortAcceleration = "node-port-acceleration"
// Alias to NodePortMode
LoadBalancerMode = "bpf-lb-mode"
// Alias to DSR dispatch method
LoadBalancerDSRDispatch = "bpf-lb-dsr-dispatch"
// Alias to DSR L4 translation method
LoadBalancerDSRL4Xlate = "bpf-lb-dsr-l4-xlate"
// Alias to DSR/IPIP IPv4 source CIDR
LoadBalancerRSSv4CIDR = "bpf-lb-rss-ipv4-src-cidr"
// Alias to DSR/IPIP IPv6 source CIDR
LoadBalancerRSSv6CIDR = "bpf-lb-rss-ipv6-src-cidr"
// Alias to NodePortAlg
LoadBalancerAlg = "bpf-lb-algorithm"
// Alias to NodePortAcceleration
LoadBalancerAcceleration = "bpf-lb-acceleration"
// LoadBalancerExternalControlPlane switch skips connectivity to kube-apiserver
// which is relevant in lb-only mode
LoadBalancerExternalControlPlane = "bpf-lb-external-control-plane"
// MaglevTableSize determines the size of the backend table per service
MaglevTableSize = "bpf-lb-maglev-table-size"
// MaglevHashSeed contains the cluster-wide seed for the hash
MaglevHashSeed = "bpf-lb-maglev-hash-seed"
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection = "node-port-bind-protection"
// NodePortRange defines a custom range where to look up NodePort services
NodePortRange = "node-port-range"
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange = "enable-auto-protect-node-port-range"
// KubeProxyReplacement controls how to enable kube-proxy replacement
// features in BPF datapath
KubeProxyReplacement = "kube-proxy-replacement"
// EnableSessionAffinity enables a support for service sessionAffinity
EnableSessionAffinity = "enable-session-affinity"
// EnableIdentityMark enables setting the mark field with the identity for
// local traffic. This may be disabled if chaining modes and Cilium use
// conflicting marks.
EnableIdentityMark = "enable-identity-mark"
// EnableHighScaleIPcache enables the special ipcache mode for high scale
// clusters. The ipcache content will be reduced to the strict minimum and
// traffic will be encapsulated to carry security identities.
EnableHighScaleIPcache = "enable-high-scale-ipcache"
// AddressScopeMax controls the maximum address scope for addresses to be
// considered local ones with HOST_ID in the ipcache
AddressScopeMax = "local-max-addr-scope"
// EnableRecorder enables the datapath pcap recorder
EnableRecorder = "enable-recorder"
// EnableLocalRedirectPolicy enables support for local redirect policy
EnableLocalRedirectPolicy = "enable-local-redirect-policy"
// EnableMKE enables MKE specific 'chaining' for kube-proxy replacement
EnableMKE = "enable-mke"
// CgroupPathMKE points to the cgroupv1 net_cls mount instance
CgroupPathMKE = "mke-cgroup-mount"
// LibDir enables the directory path to store runtime build environment
LibDir = "lib-dir"
// LogDriver sets logging endpoints to use for example syslog, fluentd
LogDriver = "log-driver"
// LogOpt sets log driver options for cilium
LogOpt = "log-opt"
// Logstash enables logstash integration
Logstash = "logstash"
// EnableIPv4Masquerade masquerades IPv4 packets from endpoints leaving the host.
EnableIPv4Masquerade = "enable-ipv4-masquerade"
// EnableIPv6Masquerade masquerades IPv6 packets from endpoints leaving the host.
EnableIPv6Masquerade = "enable-ipv6-masquerade"
// EnableBPFClockProbe selects a more efficient source clock (jiffies vs ktime)
EnableBPFClockProbe = "enable-bpf-clock-probe"
// EnableBPFMasquerade masquerades packets from endpoints leaving the host with BPF instead of iptables
EnableBPFMasquerade = "enable-bpf-masquerade"
// EnableMasqueradeRouteSource masquerades to the source route IP address instead of the interface one
EnableMasqueradeRouteSource = "enable-masquerade-to-route-source"
// EnableIPMasqAgent enables BPF ip-masq-agent
EnableIPMasqAgent = "enable-ip-masq-agent"
// EnableIPv4EgressGateway enables the IPv4 egress gateway
EnableIPv4EgressGateway = "enable-ipv4-egress-gateway"
// EnableEnvoyConfig enables processing of CiliumClusterwideEnvoyConfig and CiliumEnvoyConfig CRDs
EnableEnvoyConfig = "enable-envoy-config"
// IPMasqAgentConfigPath is the configuration file path
IPMasqAgentConfigPath = "ip-masq-agent-config-path"
// InstallIptRules sets whether Cilium should install any iptables in general
InstallIptRules = "install-iptables-rules"
// InstallNoConntrackIptRules instructs Cilium to install Iptables rules
// to skip netfilter connection tracking on all pod traffic.
InstallNoConntrackIptRules = "install-no-conntrack-iptables-rules"
// ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve
// the provided comma-separated list of ports in the container network namespace
ContainerIPLocalReservedPorts = "container-ip-local-reserved-ports"
// IPv6NodeAddr is the IPv6 address of node
IPv6NodeAddr = "ipv6-node"
// IPv4NodeAddr is the IPv4 address of node
IPv4NodeAddr = "ipv4-node"
// Restore restores state, if possible, from previous daemon
Restore = "restore"
// SocketPath sets daemon's socket path to listen for connections
SocketPath = "socket-path"
// StateDir is the directory path to store runtime state
StateDir = "state-dir"
// TracePayloadlen length of payload to capture when tracing
TracePayloadlen = "trace-payloadlen"
// Version prints the version information
Version = "version"
// EnableXDPPrefilter enables XDP-based prefiltering
EnableXDPPrefilter = "enable-xdp-prefilter"
// EnableTCX enables attaching endpoint programs using tcx if the kernel supports it
EnableTCX = "enable-tcx"
ProcFs = "procfs"
// PrometheusServeAddr IP:Port on which to serve prometheus metrics (pass ":Port" to bind on all interfaces, "" is off)
PrometheusServeAddr = "prometheus-serve-addr"
// ExternalEnvoyProxy defines whether the Envoy is deployed externally in form of a DaemonSet or not.
ExternalEnvoyProxy = "external-envoy-proxy"
// CMDRef is the path to cmdref output directory
CMDRef = "cmdref"
// DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain
// for each FQDN selector in endpoint's restored DNS rules
DNSMaxIPsPerRestoredRule = "dns-max-ips-per-restored-rule"
// DNSPolicyUnloadOnShutdown is the name of the dns-policy-unload-on-shutdown option.
DNSPolicyUnloadOnShutdown = "dns-policy-unload-on-shutdown"
// ToFQDNsMinTTL is the minimum time, in seconds, to use DNS data for toFQDNs policies.
ToFQDNsMinTTL = "tofqdns-min-ttl"
// ToFQDNsProxyPort is the global port on which the in-agent DNS proxy should listen. Default 0 is a OS-assigned port.
ToFQDNsProxyPort = "tofqdns-proxy-port"
// ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain
// for each FQDN name in an endpoint's FQDN cache
ToFQDNsMaxIPsPerHost = "tofqdns-endpoint-max-ip-per-hostname"
// ToFQDNsMaxDeferredConnectionDeletes defines the maximum number of IPs to
// retain for expired DNS lookups with still-active connections
ToFQDNsMaxDeferredConnectionDeletes = "tofqdns-max-deferred-connection-deletes"
// ToFQDNsIdleConnectionGracePeriod defines the connection idle time during which
// previously active connections with expired DNS lookups are still considered alive
ToFQDNsIdleConnectionGracePeriod = "tofqdns-idle-connection-grace-period"
// ToFQDNsPreCache is a path to a file with DNS cache data to insert into the
// global cache on startup.
// The file is not re-read after agent start.
ToFQDNsPreCache = "tofqdns-pre-cache"
// ToFQDNsEnableDNSCompression allows the DNS proxy to compress responses to
// endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
ToFQDNsEnableDNSCompression = "tofqdns-enable-dns-compression"
// DNSProxyConcurrencyLimit limits parallel processing of DNS messages in
// DNS proxy at any given point in time.
DNSProxyConcurrencyLimit = "dnsproxy-concurrency-limit"
// DNSProxyConcurrencyProcessingGracePeriod is the amount of grace time to
// wait while processing DNS messages when the DNSProxyConcurrencyLimit has
// been reached.
DNSProxyConcurrencyProcessingGracePeriod = "dnsproxy-concurrency-processing-grace-period"
// DNSProxyLockCount is the array size containing mutexes which protect
// against parallel handling of DNS response IPs.
DNSProxyLockCount = "dnsproxy-lock-count"
// DNSProxyLockTimeout is timeout when acquiring the locks controlled by
// DNSProxyLockCount.
DNSProxyLockTimeout = "dnsproxy-lock-timeout"
// DNSProxySocketLingerTimeout defines how many seconds we wait for the connection
// between the DNS proxy and the upstream server to be closed.
DNSProxySocketLingerTimeout = "dnsproxy-socket-linger-timeout"
// DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy.
DNSProxyEnableTransparentMode = "dnsproxy-enable-transparent-mode"
// DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users
// to disable transparent mode even if IPSec is enabled
DNSProxyInsecureSkipTransparentModeCheck = "dnsproxy-insecure-skip-transparent-mode-check"
// MTUName is the name of the MTU option
MTUName = "mtu"
// RouteMetric is the name of the route-metric option
RouteMetric = "route-metric"
// DatapathMode is the name of the DatapathMode option
DatapathMode = "datapath-mode"
// EnableSocketLB is the name for the option to enable the socket LB
EnableSocketLB = "bpf-lb-sock"
// EnableSocketLBTracing is the name for the option to enable the socket LB tracing
EnableSocketLBTracing = "trace-sock"
// BPFSocketLBHostnsOnly is the name of the BPFSocketLBHostnsOnly option
BPFSocketLBHostnsOnly = "bpf-lb-sock-hostns-only"
// EnableSocketLBPodConnectionTermination enables termination of pod connections
// to deleted service backends when socket-LB is enabled.
EnableSocketLBPodConnectionTermination = "bpf-lb-sock-terminate-pod-connections"
// RoutingMode is the name of the option to choose between native routing and tunneling mode
RoutingMode = "routing-mode"
// ServiceNoBackendResponse is the name of the option to pick how to handle traffic for services
// without any backends
ServiceNoBackendResponse = "service-no-backend-response"
// ServiceNoBackendResponseReject is the name of the option to reject traffic for services
// without any backends
ServiceNoBackendResponseReject = "reject"
// ServiceNoBackendResponseDrop is the name of the option to drop traffic for services
// without any backends
ServiceNoBackendResponseDrop = "drop"
// MaxInternalTimerDelay sets a maximum on all periodic timers in
// the agent in order to flush out timer-related bugs in the agent.
MaxInternalTimerDelay = "max-internal-timer-delay"
// MonitorAggregationName specifies the MonitorAggregationLevel on the
// command line.
MonitorAggregationName = "monitor-aggregation"
// MonitorAggregationInterval configures interval for monitor-aggregation
MonitorAggregationInterval = "monitor-aggregation-interval"
// MonitorAggregationFlags configures TCP flags used by monitor aggregation.
MonitorAggregationFlags = "monitor-aggregation-flags"
// ciliumEnvPrefix is the prefix used for environment variables
ciliumEnvPrefix = "CILIUM_"
// CNIChainingMode configures which CNI plugin Cilium is chained with.
CNIChainingMode = "cni-chaining-mode"
// CNIChainingTarget is the name of a CNI network in to which we should
// insert our plugin configuration
CNIChainingTarget = "cni-chaining-target"
// AuthMapEntriesMin defines the minimum auth map limit.
AuthMapEntriesMin = 1 << 8
// AuthMapEntriesMax defines the maximum auth map limit.
AuthMapEntriesMax = 1 << 24
// AuthMapEntriesDefault defines the default auth map limit.
AuthMapEntriesDefault = 1 << 19
// AuthMapEntriesName configures max entries for BPF auth map.
AuthMapEntriesName = "bpf-auth-map-max"
// CTMapEntriesGlobalTCPDefault is the default maximum number of entries
// in the TCP CT table.
CTMapEntriesGlobalTCPDefault = 2 << 18 // 512Ki
// CTMapEntriesGlobalAnyDefault is the default maximum number of entries
// in the non-TCP CT table.
CTMapEntriesGlobalAnyDefault = 2 << 17 // 256Ki
// CTMapEntriesGlobalTCPName configures max entries for the TCP CT
// table.
CTMapEntriesGlobalTCPName = "bpf-ct-global-tcp-max"
// CTMapEntriesGlobalAnyName configures max entries for the non-TCP CT
// table.
CTMapEntriesGlobalAnyName = "bpf-ct-global-any-max"
// CTMapEntriesTimeout* name option and default value mappings
CTMapEntriesTimeoutSYNName = "bpf-ct-timeout-regular-tcp-syn"
CTMapEntriesTimeoutFINName = "bpf-ct-timeout-regular-tcp-fin"
CTMapEntriesTimeoutTCPName = "bpf-ct-timeout-regular-tcp"
CTMapEntriesTimeoutAnyName = "bpf-ct-timeout-regular-any"
CTMapEntriesTimeoutSVCTCPName = "bpf-ct-timeout-service-tcp"
CTMapEntriesTimeoutSVCTCPGraceName = "bpf-ct-timeout-service-tcp-grace"
CTMapEntriesTimeoutSVCAnyName = "bpf-ct-timeout-service-any"
// NATMapEntriesGlobalDefault holds the default size of the NAT map
// and is 2/3 of the full CT size as a heuristic
NATMapEntriesGlobalDefault = int((CTMapEntriesGlobalTCPDefault + CTMapEntriesGlobalAnyDefault) * 2 / 3)
// SockRevNATMapEntriesDefault holds the default size of the SockRev NAT map
// and is the same size of CTMapEntriesGlobalAnyDefault as a heuristic given
// that sock rev NAT is mostly used for UDP and getpeername only.
SockRevNATMapEntriesDefault = CTMapEntriesGlobalAnyDefault
// MapEntriesGlobalDynamicSizeRatioName is the name of the option to
// set the ratio of total system memory to use for dynamic sizing of the
// CT, NAT, Neighbor and SockRevNAT BPF maps.
MapEntriesGlobalDynamicSizeRatioName = "bpf-map-dynamic-size-ratio"
// LimitTableAutoGlobalTCPMin defines the minimum TCP CT table limit for
// dynamic size ratio calculation.
LimitTableAutoGlobalTCPMin = 1 << 17 // 128Ki entries
// LimitTableAutoGlobalAnyMin defines the minimum UDP CT table limit for
// dynamic size ratio calculation.
LimitTableAutoGlobalAnyMin = 1 << 16 // 64Ki entries
// LimitTableAutoNatGlobalMin defines the minimum NAT limit for dynamic size
// ration calculation.
LimitTableAutoNatGlobalMin = 1 << 17 // 128Ki entries
// LimitTableAutoSockRevNatMin defines the minimum SockRevNAT limit for
// dynamic size ratio calculation.
LimitTableAutoSockRevNatMin = 1 << 16 // 64Ki entries
// LimitTableMin defines the minimum CT or NAT table limit
LimitTableMin = 1 << 10 // 1Ki entries
// LimitTableMax defines the maximum CT or NAT table limit
LimitTableMax = 1 << 24 // 16Mi entries (~1GiB of entries per map)
// PolicyMapMin defines the minimum policy map limit.
PolicyMapMin = 1 << 8
// PolicyMapMax defines the maximum policy map limit.
PolicyMapMax = 1 << 16
// FragmentsMapMin defines the minimum fragments map limit.
FragmentsMapMin = 1 << 8
// FragmentsMapMax defines the maximum fragments map limit.
FragmentsMapMax = 1 << 16
// NATMapEntriesGlobalName configures max entries for BPF NAT table
NATMapEntriesGlobalName = "bpf-nat-global-max"
// NeighMapEntriesGlobalName configures max entries for BPF neighbor table
NeighMapEntriesGlobalName = "bpf-neigh-global-max"
// PolicyMapEntriesName configures max entries for BPF policymap.
PolicyMapEntriesName = "bpf-policy-map-max"
// PolicyMapFullReconciliationIntervalName configures the interval for performing the full
// reconciliation of the endpoint policy map.
PolicyMapFullReconciliationIntervalName = "bpf-policy-map-full-reconciliation-interval"
// SockRevNatEntriesName configures max entries for BPF sock reverse nat
// entries.
SockRevNatEntriesName = "bpf-sock-rev-map-max"
// EgressGatewayPolicyMapEntriesName configures max entries for egress gateway's policy
// map.
EgressGatewayPolicyMapEntriesName = "egress-gateway-policy-map-max"
// LogSystemLoadConfigName is the name of the option to enable system
// load logging
LogSystemLoadConfigName = "log-system-load"
// DisableCiliumEndpointCRDName is the name of the option to disable
// use of the CEP CRD
DisableCiliumEndpointCRDName = "disable-endpoint-crd"
// MaxCtrlIntervalName and MaxCtrlIntervalNameEnv allow configuration
// of MaxControllerInterval.
MaxCtrlIntervalName = "max-controller-interval"
// K8sNamespaceName is the name of the K8sNamespace option
K8sNamespaceName = "k8s-namespace"
// AgentNotReadyNodeTaintKeyName is the name of the option to set
// AgentNotReadyNodeTaintKey
AgentNotReadyNodeTaintKeyName = "agent-not-ready-taint-key"
// JoinClusterName is the name of the JoinCluster Option
JoinClusterName = "join-cluster"
// EnableIPv4Name is the name of the option to enable IPv4 support
EnableIPv4Name = "enable-ipv4"
// EnableIPv6Name is the name of the option to enable IPv6 support
EnableIPv6Name = "enable-ipv6"
// EnableIPv6NDPName is the name of the option to enable IPv6 NDP support
EnableIPv6NDPName = "enable-ipv6-ndp"
// EnableSRv6 is the name of the option to enable SRv6 encapsulation support
EnableSRv6 = "enable-srv6"
// SRv6EncapModeName is the name of the option to specify the SRv6 encapsulation mode
SRv6EncapModeName = "srv6-encap-mode"
// EnableSCTPName is the name of the option to enable SCTP support
EnableSCTPName = "enable-sctp"
// EnableNat46X64Gateway enables L3 based NAT46 and NAT64 gateway
EnableNat46X64Gateway = "enable-nat46x64-gateway"
// IPv6MCastDevice is the name of the option to select IPv6 multicast device
IPv6MCastDevice = "ipv6-mcast-device"
// BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to
// BPF events map. This limit is defined for all types of events except dbg and pcap.
// The number of messages is averaged, meaning that if no messages were written
// to the map over 5 seconds, it's possible to write more events than the value of rate limit
// in the 6th second.
//
// If BPFEventsDefaultRateLimit > 0, a non-zero value for BPFEventsDefaultBurstLimit must also be
// provided, otherwise the configuration is considered invalid.
// If both rate and burst limit are 0 or not specified, no limit is imposed.
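// For example (illustrative numbers only): with a rate limit of 100 and a burst
// limit of 500, at most 500 events can be written in any single second, while the
// long-term average stays capped at 100 events per second.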
BPFEventsDefaultRateLimit = "bpf-events-default-rate-limit"
// BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written
// to BPF events map in 1 second. This limit is defined for all types of events except dbg and pcap.
//
// If BPFEventsDefaultBurstLimit > 0, a non-zero value for BPFEventsDefaultRateLimit must also be
// provided, otherwise the configuration is considered invalid.
// If both burst and rate limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultBurstLimit = "bpf-events-default-burst-limit"
// FQDNRejectResponseCode is the name for the option for dns-proxy reject response code
FQDNRejectResponseCode = "tofqdns-dns-reject-response-code"
// FQDNProxyDenyWithNameError is useful when stub resolvers, like the one
// in Alpine Linux's libc (musl), treat a REFUSED as a resolution error.
// This happens when trying a DNS search list, as in Kubernetes, and breaks
// even whitelisted DNS names.
FQDNProxyDenyWithNameError = "nameError"
// FQDNProxyDenyWithRefused is the response code for Domain refused. It is
// the default for denied DNS requests.
FQDNProxyDenyWithRefused = "refused"
// FQDNProxyResponseMaxDelay is the maximum time the proxy holds back a response
FQDNProxyResponseMaxDelay = "tofqdns-proxy-response-max-delay"
// FQDNRegexCompileLRUSize is the size of the FQDN regex compilation LRU.
// Useful for heavy but repeated FQDN MatchName or MatchPattern use.
FQDNRegexCompileLRUSize = "fqdn-regex-compile-lru-size"
// PreAllocateMapsName is the name of the option PreAllocateMaps
PreAllocateMapsName = "preallocate-bpf-maps"
// EnableBPFTProxy option supports enabling or disabling BPF TProxy.
EnableBPFTProxy = "enable-bpf-tproxy"
// EnableXTSocketFallbackName is the name of the EnableXTSocketFallback option
EnableXTSocketFallbackName = "enable-xt-socket-fallback"
// EnableAutoDirectRoutingName is the name for the EnableAutoDirectRouting option
EnableAutoDirectRoutingName = "auto-direct-node-routes"
// DirectRoutingSkipUnreachableName is the name for the DirectRoutingSkipUnreachable option
DirectRoutingSkipUnreachableName = "direct-routing-skip-unreachable"
// EnableIPSecName is the name of the option to enable IPSec
EnableIPSecName = "enable-ipsec"
// Duration of the IPsec key rotation. After that time, we will clean the
// previous IPsec key from the node.
IPsecKeyRotationDuration = "ipsec-key-rotation-duration"
// Enable watcher for IPsec key. If disabled, a restart of the agent will
// be necessary on key rotations.
EnableIPsecKeyWatcher = "enable-ipsec-key-watcher"
// Enable caching for XfrmState for IPSec. Significantly reduces CPU usage
// in large clusters.
EnableIPSecXfrmStateCaching = "enable-ipsec-xfrm-state-caching"
// IPSecKeyFileName is the name of the option for ipsec key file
IPSecKeyFileName = "ipsec-key-file"
// EnableIPSecEncryptedOverlay is the name of the option which enables
// the EncryptedOverlay feature.
//
// This feature will encrypt overlay traffic before it leaves the cluster.
EnableIPSecEncryptedOverlay = "enable-ipsec-encrypted-overlay"
// EnableWireguard is the name of the option to enable WireGuard
EnableWireguard = "enable-wireguard"
// EnableL2Announcements is the name of the option to enable l2 announcements
EnableL2Announcements = "enable-l2-announcements"
// L2AnnouncerLeaseDuration, if a lease has not been renewed for X amount of time, a new leader can be chosen.
L2AnnouncerLeaseDuration = "l2-announcements-lease-duration"
// L2AnnouncerRenewDeadline, the leader will renew the lease every X amount of time.
L2AnnouncerRenewDeadline = "l2-announcements-renew-deadline"
// L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time.
L2AnnouncerRetryPeriod = "l2-announcements-retry-period"
// EnableEncryptionStrictMode is the name of the option to enable strict encryption mode.
EnableEncryptionStrictMode = "enable-encryption-strict-mode"
// EncryptionStrictModeCIDR is the CIDR in which the strict encryption mode should be enforced.
EncryptionStrictModeCIDR = "encryption-strict-mode-cidr"
// EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of remote node identities.
// This is required when tunneling is used
// or direct routing is used and the node CIDR and pod CIDR overlap.
EncryptionStrictModeAllowRemoteNodeIdentities = "encryption-strict-mode-allow-remote-node-identities"
// EnableWireguardUserspaceFallback is the name of the option that enables the fallback to WireGuard userspace mode
EnableWireguardUserspaceFallback = "enable-wireguard-userspace-fallback"
// WireguardPersistentKeepalive controls the WireGuard PersistentKeepalive option. Set 0 to disable.
WireguardPersistentKeepalive = "wireguard-persistent-keepalive"
// NodeEncryptionOptOutLabels is the name of the option for the node-to-node encryption opt-out labels
NodeEncryptionOptOutLabels = "node-encryption-opt-out-labels"
// KVstoreLeaseTTL is the time-to-live for lease in kvstore.
KVstoreLeaseTTL = "kvstore-lease-ttl"
// KVstoreMaxConsecutiveQuorumErrorsName is the maximum number of acceptable
// kvstore consecutive quorum errors before the agent assumes permanent failure
KVstoreMaxConsecutiveQuorumErrorsName = "kvstore-max-consecutive-quorum-errors"
// KVstorePeriodicSync is the time interval in which periodic
// synchronization with the kvstore occurs
KVstorePeriodicSync = "kvstore-periodic-sync"
// KVstoreConnectivityTimeout is the timeout when performing kvstore operations
KVstoreConnectivityTimeout = "kvstore-connectivity-timeout"
// IdentityChangeGracePeriod is the name of the
// IdentityChangeGracePeriod option
IdentityChangeGracePeriod = "identity-change-grace-period"
// IdentityRestoreGracePeriod is the name of the
// IdentityRestoreGracePeriod option
IdentityRestoreGracePeriod = "identity-restore-grace-period"
// EnableHealthChecking is the name of the EnableHealthChecking option
EnableHealthChecking = "enable-health-checking"
// EnableEndpointHealthChecking is the name of the EnableEndpointHealthChecking option
EnableEndpointHealthChecking = "enable-endpoint-health-checking"
// EnableHealthCheckNodePort is the name of the EnableHealthCheckNodePort option
EnableHealthCheckNodePort = "enable-health-check-nodeport"
// EnableHealthCheckLoadBalancerIP is the name of the EnableHealthCheckLoadBalancerIP option
EnableHealthCheckLoadBalancerIP = "enable-health-check-loadbalancer-ip"
// PolicyQueueSize is the size of the queues utilized by the policy
// repository.
PolicyQueueSize = "policy-queue-size"
// EndpointQueueSize is the size of the EventQueue per-endpoint.
EndpointQueueSize = "endpoint-queue-size"
// EndpointGCInterval is the interval to attempt garbage collection of
// endpoints that are no longer alive and healthy.
EndpointGCInterval = "endpoint-gc-interval"
// LoopbackIPv4 is the address to use for service loopback SNAT
LoopbackIPv4 = "ipv4-service-loopback-address"
// LocalRouterIPv4 is the link-local IPv4 address to use for Cilium router device
LocalRouterIPv4 = "local-router-ipv4"
// LocalRouterIPv6 is the link-local IPv6 address to use for Cilium router device
LocalRouterIPv6 = "local-router-ipv6"
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes = "enable-endpoint-routes"
// ExcludeLocalAddress excludes certain addresses from being recognized as
// local addresses.
ExcludeLocalAddress = "exclude-local-address"
// IPv4PodSubnets is a list of IPv4 subnets that pods may be
// assigned from. Used with CNI chaining where IPs are not directly managed
// by Cilium.
IPv4PodSubnets = "ipv4-pod-subnets"
// IPv6PodSubnets is a list of IPv6 subnets that pods may be
// assigned from. Used with CNI chaining where IPs are not directly managed
// by Cilium.
IPv6PodSubnets = "ipv6-pod-subnets"
// IPAM is the IPAM method to use
IPAM = "ipam"
// IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool
IPAMMultiPoolPreAllocation = "ipam-multi-pool-pre-allocation"
// IPAMDefaultIPPool defines the default IP Pool when using multi-pool
IPAMDefaultIPPool = "ipam-default-ip-pool"
// XDPModeNative for loading progs with XDPModeLinkDriver
XDPModeNative = "native"
// XDPModeBestEffort for loading progs with XDPModeLinkDriver
XDPModeBestEffort = "best-effort"
// XDPModeGeneric for loading progs with XDPModeLinkGeneric
XDPModeGeneric = "testing-only"
// XDPModeDisabled for not having XDP enabled
XDPModeDisabled = "disabled"
// XDPModeLinkDriver is the tc selector for native XDP
XDPModeLinkDriver = "xdpdrv"
// XDPModeLinkGeneric is the tc selector for generic XDP
XDPModeLinkGeneric = "xdpgeneric"
// XDPModeLinkNone for not having XDP enabled
XDPModeLinkNone = XDPModeDisabled
// K8sClientQPSLimit is the queries per second limit for the K8s client. Defaults to k8s client defaults.
K8sClientQPSLimit = "k8s-client-qps"
// K8sClientBurst is the burst value allowed for the K8s client. Defaults to k8s client defaults.
K8sClientBurst = "k8s-client-burst"
// AutoCreateCiliumNodeResource enables automatic creation of a
// CiliumNode resource for the local node
AutoCreateCiliumNodeResource = "auto-create-cilium-node-resource"
// ExcludeNodeLabelPatterns allows for excluding unnecessary labels from being propagated from k8s node to cilium
// node object. This allows for avoiding unnecessary events being broadcast to all nodes in the cluster.
ExcludeNodeLabelPatterns = "exclude-node-label-patterns"
// IPv4NativeRoutingCIDR describes a v4 CIDR in which pod IPs are routable
IPv4NativeRoutingCIDR = "ipv4-native-routing-cidr"
// IPv6NativeRoutingCIDR describes a v6 CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR = "ipv6-native-routing-cidr"
// MasqueradeInterfaces is the selector used to select interfaces subject to
// egress masquerading
MasqueradeInterfaces = "egress-masquerade-interfaces"
// PolicyTriggerInterval is the amount of time between when policy updates
// are triggered.
PolicyTriggerInterval = "policy-trigger-interval"
// IdentityAllocationMode specifies what mode to use for identity
// allocation
IdentityAllocationMode = "identity-allocation-mode"
// IdentityAllocationModeKVstore enables use of a key-value store such
// as etcd for identity allocation
IdentityAllocationModeKVstore = "kvstore"
// IdentityAllocationModeCRD enables use of Kubernetes CRDs for
// identity allocation
IdentityAllocationModeCRD = "crd"
// EnableLocalNodeRoute controls installation of the route which points to
// the allocation prefix of the local node.
EnableLocalNodeRoute = "enable-local-node-route"
// EnableWellKnownIdentities enables the use of well-known identities.
// This is required if identity resolution is required to bring up the
// control plane, e.g. when using the managed etcd feature
EnableWellKnownIdentities = "enable-well-known-identities"
// PolicyAuditModeArg argument enables policy audit mode.
PolicyAuditModeArg = "policy-audit-mode"
// PolicyAccountingArg argument enables policy accounting.
PolicyAccountingArg = "policy-accounting"
// EnableHubble enables hubble in the agent.
EnableHubble = "enable-hubble"
// HubbleSocketPath specifies the UNIX domain socket for Hubble server to listen to.
HubbleSocketPath = "hubble-socket-path"
// HubbleListenAddress specifies address for Hubble server to listen to.
HubbleListenAddress = "hubble-listen-address"
// HubblePreferIpv6 controls whether IPv6 or IPv4 addresses should be preferred for
// communication to agents, if both are available.
HubblePreferIpv6 = "hubble-prefer-ipv6"
// HubbleTLSDisabled allows the Hubble server to run on the given listen
// address without TLS.
HubbleTLSDisabled = "hubble-disable-tls"
// HubbleTLSCertFile specifies the path to the public key file for the
// Hubble server. The file must contain PEM encoded data.
HubbleTLSCertFile = "hubble-tls-cert-file"
// HubbleTLSKeyFile specifies the path to the private key file for the
// Hubble server. The file must contain PEM encoded data.
HubbleTLSKeyFile = "hubble-tls-key-file"
// HubbleTLSClientCAFiles specifies the path to one or more client CA
// certificates to use for TLS with mutual authentication (mTLS). The files
// must contain PEM encoded data.
HubbleTLSClientCAFiles = "hubble-tls-client-ca-files"
// HubbleEventBufferCapacity specifies the capacity of Hubble events buffer.
HubbleEventBufferCapacity = "hubble-event-buffer-capacity"
// HubbleEventQueueSize specifies the buffer size of the channel to receive monitor events.
HubbleEventQueueSize = "hubble-event-queue-size"
// HubbleMetricsServer specifies the addresses to serve Hubble metrics on.
HubbleMetricsServer = "hubble-metrics-server"
// HubbleMetricsTLSEnabled allows the Hubble metrics server to run on the given listen
// address with TLS.
HubbleMetricsTLSEnabled = "hubble-metrics-server-enable-tls"
// HubbleMetricsTLSCertFile specifies the path to the public key file for the
// Hubble metrics server. The file must contain PEM encoded data.
HubbleMetricsTLSCertFile = "hubble-metrics-server-tls-cert-file"
// HubbleMetricsTLSKeyFile specifies the path to the private key file for the
// Hubble metrics server. The file must contain PEM encoded data.
HubbleMetricsTLSKeyFile = "hubble-metrics-server-tls-key-file"
// HubbleMetricsTLSClientCAFiles specifies the path to one or more client CA
// certificates to use for TLS with mutual authentication (mTLS) on the Hubble metrics server.
// The files must contain PEM encoded data.
HubbleMetricsTLSClientCAFiles = "hubble-metrics-server-tls-client-ca-files"
// HubbleMetrics specifies enabled metrics and their configuration options.
HubbleMetrics = "hubble-metrics"
// HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs.
// e.g. "/etc/cilium/flowlog.yaml"
HubbleFlowlogsConfigFilePath = "hubble-flowlogs-config-path"
// HubbleExportFilePath specifies the filepath to write Hubble events to.
// e.g. "/var/run/cilium/hubble/events.log"
HubbleExportFilePath = "hubble-export-file-path"
// HubbleExportFileMaxSizeMB specifies the file size in MB at which to rotate
// the Hubble export file.
HubbleExportFileMaxSizeMB = "hubble-export-file-max-size-mb"
// HubbleExportFileMaxBackups specifies the number of rotated files to keep.
HubbleExportFileMaxBackups = "hubble-export-file-max-backups"
// HubbleExportFileCompress specifies whether rotated files are compressed.
HubbleExportFileCompress = "hubble-export-file-compress"
// HubbleExportAllowlist specifies the allow list filter used by the exporter.
HubbleExportAllowlist = "hubble-export-allowlist"
// HubbleExportDenylist specifies the deny list filter used by the exporter.
HubbleExportDenylist = "hubble-export-denylist"
// HubbleExportFieldmask specifies the list of fields to log in the exporter.
HubbleExportFieldmask = "hubble-export-fieldmask"
// EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served
EnableHubbleRecorderAPI = "enable-hubble-recorder-api"
// EnableHubbleOpenMetrics enables exporting hubble metrics in OpenMetrics format.
EnableHubbleOpenMetrics = "enable-hubble-open-metrics"
// HubbleRecorderStoragePath specifies the directory in which pcap files
// created via the Hubble Recorder API are stored
HubbleRecorderStoragePath = "hubble-recorder-storage-path"
// HubbleRecorderSinkQueueSize is the queue size for each recorder sink
HubbleRecorderSinkQueueSize = "hubble-recorder-sink-queue-size"
// HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped
HubbleSkipUnknownCGroupIDs = "hubble-skip-unknown-cgroup-ids"
// HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe.
// By default, Hubble observes all monitor events.
HubbleMonitorEvents = "hubble-monitor-events"
// HubbleRedactEnabled controls if sensitive information will be redacted from L7 flows
HubbleRedactEnabled = "hubble-redact-enabled"
// HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows
HubbleRedactHttpURLQuery = "hubble-redact-http-urlquery"
// HubbleRedactHttpUserInfo controls if the user info will be redacted from flows
HubbleRedactHttpUserInfo = "hubble-redact-http-userinfo"
// HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows
HubbleRedactKafkaApiKey = "hubble-redact-kafka-apikey"
// HubbleRedactHttpHeadersAllow controls which http headers will not be redacted from flows
HubbleRedactHttpHeadersAllow = "hubble-redact-http-headers-allow"
// HubbleRedactHttpHeadersDeny controls which http headers will be redacted from flows
HubbleRedactHttpHeadersDeny = "hubble-redact-http-headers-deny"
// HubbleDropEvents controls whether Hubble should create v1.Events
// for packet drops related to pods
HubbleDropEvents = "hubble-drop-events"
// HubbleDropEventsInterval controls the minimum time between emitting events
// with the same source and destination IP
HubbleDropEventsInterval = "hubble-drop-events-interval"
// HubbleDropEventsReasons controls which drop reasons to emit events for
HubbleDropEventsReasons = "hubble-drop-events-reasons"
// K8sClientConnectionTimeout configures the timeout for K8s client connections.
K8sClientConnectionTimeout = "k8s-client-connection-timeout"
// K8sClientConnectionKeepAlive configures the keep alive duration for K8s client connections.
K8sClientConnectionKeepAlive = "k8s-client-connection-keep-alive"
// K8sHeartbeatTimeout configures the timeout for apiserver heartbeat
K8sHeartbeatTimeout = "k8s-heartbeat-timeout"
// EnableIPv4FragmentsTrackingName is the name of the option to enable
// IPv4 fragments tracking for L4-based lookups. Needs LRU map support.
EnableIPv4FragmentsTrackingName = "enable-ipv4-fragment-tracking"
// FragmentsMapEntriesName configures max entries for BPF fragments
// tracking map.
FragmentsMapEntriesName = "bpf-fragments-map-max"
// K8sEnableAPIDiscovery enables Kubernetes API discovery
K8sEnableAPIDiscovery = "enable-k8s-api-discovery"
// LBMapEntriesName configures max entries for BPF lbmap.
LBMapEntriesName = "bpf-lb-map-max"
// LBServiceMapMaxEntries configures max entries of bpf map for services.
LBServiceMapMaxEntries = "bpf-lb-service-map-max"
// LBBackendMapMaxEntries configures max entries of bpf map for service backends.
LBBackendMapMaxEntries = "bpf-lb-service-backend-map-max"
// LBRevNatMapMaxEntries configures max entries of bpf map for reverse NAT.
LBRevNatMapMaxEntries = "bpf-lb-rev-nat-map-max"
// LBAffinityMapMaxEntries configures max entries of bpf map for session affinity.
LBAffinityMapMaxEntries = "bpf-lb-affinity-map-max"
// LBSourceRangeMapMaxEntries configures max entries of bpf map for service source ranges.
LBSourceRangeMapMaxEntries = "bpf-lb-source-range-map-max"
// LBMaglevMapMaxEntries configures max entries of bpf map for Maglev.
LBMaglevMapMaxEntries = "bpf-lb-maglev-map-max"
// CRDWaitTimeout is the timeout after which Cilium will exit if CRDs are not
// available.
CRDWaitTimeout = "crd-wait-timeout"
// EgressMultiHomeIPRuleCompat instructs Cilium to use a new scheme to
// store rules and routes under ENI and Azure IPAM modes, if false.
// Otherwise, it will use the old scheme.
EgressMultiHomeIPRuleCompat = "egress-multi-home-ip-rule-compat"
// EnableCustomCallsName is the name of the option to enable tail calls
// for user-defined custom eBPF programs.
EnableCustomCallsName = "enable-custom-calls"
// BGPAnnounceLBIP announces service IPs of type LoadBalancer via BGP
BGPAnnounceLBIP = "bgp-announce-lb-ip"
// BGPAnnouncePodCIDR announces the node's pod CIDR via BGP
BGPAnnouncePodCIDR = "bgp-announce-pod-cidr"
// BGPConfigPath is the file path to the BGP configuration. It is
// compatible with MetalLB's configuration.
BGPConfigPath = "bgp-config-path"
// BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
BGPSecretsNamespace = "bgp-secrets-namespace"
// ExternalClusterIPName is the name of the option to enable
// cluster external access to ClusterIP services.
ExternalClusterIPName = "bpf-lb-external-clusterip"
// VLANBPFBypass instructs Cilium to bypass bpf logic for vlan tagged packets
VLANBPFBypass = "vlan-bpf-bypass"
// DisableExternalIPMitigation disables ExternalIP mitigation (CVE-2020-8554)
DisableExternalIPMitigation = "disable-external-ip-mitigation"
// EnableICMPRules enables ICMP-based rule support for Cilium Network Policies.
EnableICMPRules = "enable-icmp-rules"
// Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation.
UseCiliumInternalIPForIPsec = "use-cilium-internal-ip-for-ipsec"
// BypassIPAvailabilityUponRestore bypasses the IP availability error
// within IPAM upon endpoint restore and allows the use of the restored IP
// regardless of whether it's available in the pool.
BypassIPAvailabilityUponRestore = "bypass-ip-availability-upon-restore"
// EnableK8sTerminatingEndpoint enables the option to auto detect terminating
// state for endpoints in order to support graceful termination.
EnableK8sTerminatingEndpoint = "enable-k8s-terminating-endpoint"
// EnableVTEP enables cilium VXLAN VTEP integration
EnableVTEP = "enable-vtep"
// VTEP endpoint IPs
VtepEndpoint = "vtep-endpoint"
// VTEP CIDRs
VtepCIDR = "vtep-cidr"
// VTEP CIDR Mask applies to all VtepCIDR
VtepMask = "vtep-mask"
// VTEP MACs
VtepMAC = "vtep-mac"
// TCFilterPriority sets the priority of the cilium tc filter, enabling other
// filters to be inserted prior to the cilium filter.
TCFilterPriority = "bpf-filter-priority"
// Flag to enable BGP control plane features
EnableBGPControlPlane = "enable-bgp-control-plane"
// EnableRuntimeDeviceDetection is the name of the option to enable detection
// of new and removed datapath devices during the agent runtime.
EnableRuntimeDeviceDetection = "enable-runtime-device-detection"
// EnablePMTUDiscovery enables path MTU discovery to send ICMP
// fragmentation-needed replies to the client (when needed).
EnablePMTUDiscovery = "enable-pmtu-discovery"
// BPFMapEventBuffers specifies which maps should have event buffers enabled,
// and the maximum size and TTL of events in those buffers.
BPFMapEventBuffers = "bpf-map-event-buffers"
// IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom
// resource is updated.
IPAMCiliumNodeUpdateRate = "ipam-cilium-node-update-rate"
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy = "enable-k8s-networkpolicy"
// PolicyCIDRMatchMode defines the entities that CIDR selectors can reach
PolicyCIDRMatchMode = "policy-cidr-match-mode"
// EnableNodeSelectorLabels enables use of the node label based identity
EnableNodeSelectorLabels = "enable-node-selector-labels"
// NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of
// EnableNodeSelectorLabels)
NodeLabels = "node-labels"
// BPFEventsDropEnabled defines the DropNotification setting for any endpoint
BPFEventsDropEnabled = "bpf-events-drop-enabled"
// BPFEventsPolicyVerdictEnabled defines the PolicyVerdictNotification setting for any endpoint
BPFEventsPolicyVerdictEnabled = "bpf-events-policy-verdict-enabled"
// BPFEventsTraceEnabled defines the TraceNotification setting for any endpoint
BPFEventsTraceEnabled = "bpf-events-trace-enabled"
)
// Default string arguments
var (
FQDNRejectOptions = []string{FQDNProxyDenyWithNameError, FQDNProxyDenyWithRefused}
// MonitorAggregationFlagsDefault ensures that all TCP flags trigger
// monitor notifications even under medium monitor aggregation.
MonitorAggregationFlagsDefault = []string{"syn", "fin", "rst"}
)
// Available options for DaemonConfig.RoutingMode
const (
// RoutingModeNative specifies native routing mode
RoutingModeNative = "native"
// RoutingModeTunnel specifies tunneling mode
RoutingModeTunnel = "tunnel"
)
const (
// HTTP403Message specifies the response body for 403 responses, defaults to "Access denied"
HTTP403Message = "http-403-msg"
// ReadCNIConfiguration reads the CNI configuration file and extracts
// Cilium relevant information. This can be used to pass per node
// configuration to Cilium.
ReadCNIConfiguration = "read-cni-conf"
// WriteCNIConfigurationWhenReady writes the CNI configuration to the
// specified location once the agent is ready to serve requests. This
// allows keeping a Kubernetes node NotReady until Cilium is up and
// running and able to schedule endpoints.
WriteCNIConfigurationWhenReady = "write-cni-conf-when-ready"
// CNIExclusive tells the agent to remove other CNI configuration files
CNIExclusive = "cni-exclusive"
// CNIExternalRouting delegates endpoint routing to the chained CNI plugin.
CNIExternalRouting = "cni-external-routing"
// CNILogFile is the path to a log file (on the host) for the CNI plugin
// binary to use for logging.
CNILogFile = "cni-log-file"
// EnableCiliumEndpointSlice enables the cilium endpoint slicing feature.
EnableCiliumEndpointSlice = "enable-cilium-endpoint-slice"
// EnableExternalWorkloads enables the support for external workloads.
EnableExternalWorkloads = "enable-external-workloads"
)
const (
// NodePortMinDefault is the minimal port to listen for NodePort requests
NodePortMinDefault = 30000
// NodePortMaxDefault is the maximum port to listen for NodePort requests
NodePortMaxDefault = 32767
// NodePortModeSNAT is for SNATing requests to remote nodes
NodePortModeSNAT = "snat"
// NodePortModeDSR is for performing DSR for requests to remote nodes
NodePortModeDSR = "dsr"
// NodePortModeHybrid is a dual mode of the above, that is, DSR for TCP and SNAT for UDP
NodePortModeHybrid = "hybrid"
// NodePortAlgRandom is for randomly selecting a backend
NodePortAlgRandom = "random"
// NodePortAlgMaglev is for using maglev consistent hashing for backend selection
NodePortAlgMaglev = "maglev"
// DSR dispatch mode to encode service into IP option or extension header
DSRDispatchOption = "opt"
// DSR dispatch mode to encapsulate to IPIP
DSRDispatchIPIP = "ipip"
// DSR dispatch mode to encapsulate to Geneve
DSRDispatchGeneve = "geneve"
// DSR L4 translation to frontend port
DSRL4XlateFrontend = "frontend"
// DSR L4 translation to backend port
DSRL4XlateBackend = "backend"
// NodePortAccelerationDisabled means we do not accelerate NodePort via XDP
NodePortAccelerationDisabled = XDPModeDisabled
// NodePortAccelerationGeneric means we accelerate NodePort via generic XDP
NodePortAccelerationGeneric = XDPModeGeneric
// NodePortAccelerationNative means we accelerate NodePort via native XDP in the driver (preferred)
NodePortAccelerationNative = XDPModeNative
// NodePortAccelerationBestEffort means we accelerate NodePort via native XDP in the driver (preferred), but will skip devices without driver support
NodePortAccelerationBestEffort = XDPModeBestEffort
// KubeProxyReplacementTrue specifies to enable all kube-proxy replacement
// features (might panic).
KubeProxyReplacementTrue = "true"
// KubeProxyReplacementFalse specifies to enable only selected kube-proxy
// replacement features (might panic).
KubeProxyReplacementFalse = "false"
// KubeProxyReplacement healthz server bind address
KubeProxyReplacementHealthzBindAddr = "kube-proxy-replacement-healthz-bind-address"
// PprofAddressAgent is the default value for pprof in the agent
PprofAddressAgent = "localhost"
// PprofPortAgent is the default value for pprof in the agent
PprofPortAgent = 6060
)
// getEnvName returns the environment variable to be used for the given option name.
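// For example, getEnvName("monitor-aggregation") returns "CILIUM_MONITOR_AGGREGATION".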
func getEnvName(option string) string {
under := strings.Replace(option, "-", "_", -1)
upper := strings.ToUpper(under)
return ciliumEnvPrefix + upper
}
// BindEnv binds the option name with a deterministic generated environment
// variable which is based on the given optName. If the same optName is bound
// more than once, this function panics.
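// For example, BindEnv(vp, MonitorAggregationName) binds the "monitor-aggregation"
// option to the CILIUM_MONITOR_AGGREGATION environment variable.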
func BindEnv(vp *viper.Viper, optName string) {
vp.BindEnv(optName, getEnvName(optName))
}
// BindEnvWithLegacyEnvFallback binds the given option name with either the same
// environment variable as BindEnv, if it's set, or with the given legacyEnvName.
//
// The function is used to work around the viper.BindEnv limitation that only
// one environment variable can be bound for an option, and we need multiple
// environment variables due to backward compatibility reasons.
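// For example (the legacy variable name below is purely illustrative):
// BindEnvWithLegacyEnvFallback(vp, MonitorAggregationName, "CILIUM_MONITOR_AGGREGATION_LEVEL")
// binds "monitor-aggregation" to CILIUM_MONITOR_AGGREGATION when that variable is set,
// and to CILIUM_MONITOR_AGGREGATION_LEVEL otherwise.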
func BindEnvWithLegacyEnvFallback(vp *viper.Viper, optName, legacyEnvName string) {
envName := getEnvName(optName)
if os.Getenv(envName) == "" {
envName = legacyEnvName
}
vp.BindEnv(optName, envName)
}
// LogRegisteredOptions logs all options that were bound to viper.
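// Each option is logged in the form " --<option>='<value>'", for example
// " --monitor-aggregation='medium'" (the value shown is illustrative).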
func LogRegisteredOptions(vp *viper.Viper, entry *logrus.Entry) {
keys := vp.AllKeys()
sort.Strings(keys)
for _, k := range keys {
ss := vp.GetStringSlice(k)
if len(ss) == 0 {
sm := vp.GetStringMap(k)
for k, v := range sm {
ss = append(ss, fmt.Sprintf("%s=%s", k, v))
}
}
if len(ss) > 0 {
entry.Infof(" --%s='%s'", k, strings.Join(ss, ","))
} else {
entry.Infof(" --%s='%s'", k, vp.GetString(k))
}
}
}
// DaemonConfig is the configuration used by Daemon.
type DaemonConfig struct {
// Private sum of the config written to file. Used to check that the config is not changed
// after.
shaSum [32]byte
CreationTime time.Time
BpfDir string // BPF template files directory
LibDir string // Cilium library files directory
RunDir string // Cilium runtime directory
ExternalEnvoyProxy bool // Whether Envoy is deployed as external DaemonSet or not
DirectRoutingDevice string // Direct routing device (used by BPF NodePort and BPF Host Routing)
LBDevInheritIPAddr string // Device whose IP address is used by bpf_host devices
EnableXDPPrefilter bool // Enable XDP-based prefiltering
XDPMode string // XDP mode, values: { xdpdrv | xdpgeneric | none }
EnableTCX bool // Enable attaching endpoint programs using tcx if the kernel supports it
HostV4Addr net.IP // Host v4 address of the snooping device
HostV6Addr net.IP // Host v6 address of the snooping device
EncryptInterface []string // Set of network-facing interfaces to encrypt over
EncryptNode bool // Set to true for encrypting node IP traffic
// If set to true the daemon will detect new and deleted datapath devices
// at runtime and reconfigure the datapath to load programs onto the new
// devices.
EnableRuntimeDeviceDetection bool
DatapathMode string // Datapath mode
RoutingMode string // Routing mode
DryMode bool // Do not create BPF maps, devices, ..
// RestoreState enables restoring the state from previous running daemons.
RestoreState bool
KeepConfig bool // Keep configuration of existing endpoints when starting up.
// AllowLocalhost defines when to allow the local stack to reach local endpoints
// values: { auto | always | policy }
AllowLocalhost string
// StateDir is the directory where runtime state of endpoints is stored
StateDir string
// Options changeable at runtime
Opts *IntOptions
// Mutex for serializing configuration updates to the daemon.
ConfigPatchMutex *lock.RWMutex
// Monitor contains the configuration for the node monitor.
Monitor *models.MonitorStatus
// AgentHealthPort is the TCP port for agent health status API
AgentHealthPort int
// ClusterHealthPort is the TCP port for cluster-wide network connectivity health API
ClusterHealthPort int
// ClusterMeshHealthPort is the TCP port for ClusterMesh apiserver health API
ClusterMeshHealthPort int
// AgentLabels contains additional labels to identify this agent in monitor events.
AgentLabels []string
// IPv6ClusterAllocCIDR is the base CIDR used to allocate IPv6 node
// CIDRs if allocation is not performed by an orchestration system
IPv6ClusterAllocCIDR string
// IPv6ClusterAllocCIDRBase is derived from IPv6ClusterAllocCIDR and
// contains the CIDR without the mask, e.g. "fdfd::1/64" -> "fdfd::"
//
// This variable should never be written to, it is initialized via
// DaemonConfig.Validate()
IPv6ClusterAllocCIDRBase string
// IPv6NAT46x64CIDR is the private base CIDR for the NAT46x64 gateway
IPv6NAT46x64CIDR string
// IPv6NAT46x64CIDRBase is derived from IPv6NAT46x64CIDR and contains
// the IPv6 prefix with the masked bits zeroed out
IPv6NAT46x64CIDRBase netip.Addr
// K8sRequireIPv4PodCIDR requires the k8s node resource to specify the
// IPv4 PodCIDR. Cilium will block bootstrapping until the information
// is available.
K8sRequireIPv4PodCIDR bool
// K8sRequireIPv6PodCIDR requires the k8s node resource to specify the
// IPv6 PodCIDR. Cilium will block bootstrapping until the information
// is available.
K8sRequireIPv6PodCIDR bool
// K8sServiceCacheSize is the service cache size for cilium k8s package.
K8sServiceCacheSize uint
// MTU is the maximum transmission unit of the underlying network
MTU int
// RouteMetric is the metric used for the routes added to the cilium_host device
RouteMetric int
// ClusterName is the name of the cluster
ClusterName string
// ClusterID is the unique identifier of the cluster
ClusterID uint32
// CTMapEntriesGlobalTCP is the maximum number of conntrack entries
// allowed in each TCP CT table for IPv4/IPv6.
CTMapEntriesGlobalTCP int
// CTMapEntriesGlobalAny is the maximum number of conntrack entries
// allowed in each non-TCP CT table for IPv4/IPv6.
CTMapEntriesGlobalAny int
// CTMapEntriesTimeout* values configured by the user.
CTMapEntriesTimeoutTCP time.Duration
CTMapEntriesTimeoutAny time.Duration
CTMapEntriesTimeoutSVCTCP time.Duration
CTMapEntriesTimeoutSVCTCPGrace time.Duration
CTMapEntriesTimeoutSVCAny time.Duration
CTMapEntriesTimeoutSYN time.Duration
CTMapEntriesTimeoutFIN time.Duration
// MaxInternalTimerDelay sets a maximum on all periodic timers in
// the agent in order to flush out timer-related bugs in the agent.
MaxInternalTimerDelay time.Duration
// MonitorAggregationInterval configures the interval between monitor
// messages when monitor aggregation is enabled.
MonitorAggregationInterval time.Duration
// MonitorAggregationFlags determines which TCP flags that the monitor
// aggregation ensures reports are generated for when monitor-aggregation
// is enabled. Network byte-order.
MonitorAggregationFlags uint16
// BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to
// BPF events map. This limit is defined for all types of events except dbg and pcap.
// The number of messages is averaged, meaning that if no messages were written
// to the map over 5 seconds, it's possible to write more events than the value of rate limit
// in the 6th second.
//
// If BPFEventsDefaultRateLimit > 0, a non-zero value for BPFEventsDefaultBurstLimit must also be
// provided, otherwise the configuration is considered invalid.
BPFEventsDefaultRateLimit uint32
// BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written
// to BPF events map in 1 second. This limit is defined for all types of events except dbg and pcap.
//
// If BPFEventsDefaultBurstLimit > 0, a non-zero value for BPFEventsDefaultRateLimit must also be
// provided, otherwise the configuration is considered invalid.
// If both burst and rate limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultBurstLimit uint32
// BPFMapsDynamicSizeRatio is ratio of total system memory to use for
// dynamic sizing of the CT, NAT, Neighbor and SockRevNAT BPF maps.
BPFMapsDynamicSizeRatio float64
// NATMapEntriesGlobal is the maximum number of NAT mappings allowed
// in the BPF NAT table
NATMapEntriesGlobal int
// NeighMapEntriesGlobal is the maximum number of neighbor mappings
// allowed in the BPF neigh table
NeighMapEntriesGlobal int
// AuthMapEntries is the maximum number of entries in the auth map.
AuthMapEntries int
// PolicyMapEntries is the maximum number of peer identities that an
// endpoint may allow traffic to be exchanged with.
PolicyMapEntries int
// PolicyMapFullReconciliationInterval is the interval at which to perform
// the full reconciliation of the endpoint policy map.
PolicyMapFullReconciliationInterval time.Duration
// SockRevNatEntries is the maximum number of sock rev nat mappings
// allowed in the BPF rev nat table
SockRevNatEntries int
// DisableCiliumEndpointCRD disables the use of CiliumEndpoint CRD
DisableCiliumEndpointCRD bool
// MaxControllerInterval is the maximum value for a controller's
// RunInterval. Zero means unlimited.
MaxControllerInterval int
// HTTP403Message is the error message to return when a HTTP 403 is returned
// by the proxy, if L7 policy is configured.
HTTP403Message string
ProcFs string
// K8sNamespace is the name of the namespace in which Cilium is
// deployed in when running in Kubernetes mode
K8sNamespace string
// AgentNotReadyNodeTaint is a node taint which prevents pods from being
// scheduled. Once Cilium is set up, it is removed from the node. Mostly
// used in cloud providers to prevent existing CNI plugins from managing
// pods.
AgentNotReadyNodeTaintKey string
// JoinCluster is 'true' if the agent should join a Cilium cluster via kvstore
// registration
JoinCluster bool
// EnableIPv4 is true when IPv4 is enabled
EnableIPv4 bool
// EnableIPv6 is true when IPv6 is enabled
EnableIPv6 bool
// EnableNat46X64Gateway is true when L3 based NAT46 and NAT64 translation is enabled
EnableNat46X64Gateway bool
// EnableIPv6NDP is true when NDP is enabled for IPv6
EnableIPv6NDP bool
// EnableSRv6 is true when SRv6 encapsulation support is enabled
EnableSRv6 bool
// SRv6EncapMode is the encapsulation mode for SRv6
SRv6EncapMode string
// EnableSCTP is true when SCTP support is enabled.
EnableSCTP bool
// IPv6MCastDevice is the name of device that joins IPv6's solicitation multicast group
IPv6MCastDevice string
// EnableL7Proxy is the option to enable L7 proxy
EnableL7Proxy bool
// EnableIPSec is true when IPSec is enabled
EnableIPSec bool
// IPSec key file for stored keys
IPSecKeyFile string
// Duration of the IPsec key rotation. After that time, we will clean the
// previous IPsec key from the node.
IPsecKeyRotationDuration time.Duration
// Enable watcher for IPsec key. If disabled, a restart of the agent will
// be necessary on key rotations.
EnableIPsecKeyWatcher bool
// EnableIPSecXfrmStateCaching enables IPSec XfrmState caching.
EnableIPSecXfrmStateCaching bool
// EnableIPSecEncryptedOverlay enables IPSec encryption for overlay traffic.
EnableIPSecEncryptedOverlay bool
// EnableWireguard enables Wireguard encryption
EnableWireguard bool
// EnableEncryptionStrictMode enables strict mode for encryption
EnableEncryptionStrictMode bool
// EncryptionStrictModeCIDR is the CIDR to use for strict mode
EncryptionStrictModeCIDR netip.Prefix
// EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of node identities.
// This is required when tunneling is used
// or direct routing is used and the node CIDR and pod CIDR overlap.
EncryptionStrictModeAllowRemoteNodeIdentities bool
// EnableWireguardUserspaceFallback enables the fallback to the userspace implementation
EnableWireguardUserspaceFallback bool
// WireguardPersistentKeepalive controls Wireguard PersistentKeepalive option.
WireguardPersistentKeepalive time.Duration
// EnableL2Announcements enables L2 announcement of service IPs
EnableL2Announcements bool
// L2AnnouncerLeaseDuration, if a lease has not been renewed for X amount of time, a new leader can be chosen.
L2AnnouncerLeaseDuration time.Duration
// L2AnnouncerRenewDeadline, the leader will renew the lease every X amount of time.
L2AnnouncerRenewDeadline time.Duration
// L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time.
L2AnnouncerRetryPeriod time.Duration
// NodeEncryptionOptOutLabels contains the label selectors for nodes opting out of
// node-to-node encryption
// This field is ignored when marshalling to JSON in DaemonConfig.StoreInFile,
// because a k8sLabels.Selector cannot be unmarshalled from JSON. The
// string is stored in NodeEncryptionOptOutLabelsString instead.
NodeEncryptionOptOutLabels k8sLabels.Selector `json:"-"`
// NodeEncryptionOptOutLabelsString is the string used to construct
// the label selector in the above field.
NodeEncryptionOptOutLabelsString string
// CLI options
BPFRoot string
BPFSocketLBHostnsOnly bool
CGroupRoot string
BPFCompileDebug string
CompilerFlags []string
ConfigFile string
ConfigDir string
Debug bool
DebugVerbose []string
EnableSocketLB bool
EnableSocketLBTracing bool
EnableSocketLBPeer bool
EnablePolicy string
EnableTracing bool
EnableIPIPTermination bool
EnableUnreachableRoutes bool
FixedIdentityMapping map[string]string
FixedIdentityMappingValidator func(val string) (string, error) `json:"-"`
FixedZoneMapping map[string]uint8
ReverseFixedZoneMapping map[uint8]string
FixedZoneMappingValidator func(val string) (string, error) `json:"-"`
IPv4Range string
IPv6Range string
IPv4ServiceRange string
IPv6ServiceRange string
K8sSyncTimeout time.Duration
AllocatorListTimeout time.Duration
K8sWatcherEndpointSelector string
KVStore string
KVStoreOpt map[string]string
LabelPrefixFile string
Labels []string
LogDriver []string
LogOpt map[string]string
Logstash bool
LogSystemLoadConfig bool
// Masquerade specifies whether or not to masquerade packets from endpoints
// leaving the host.
EnableIPv4Masquerade bool
EnableIPv6Masquerade bool
EnableBPFMasquerade bool
EnableMasqueradeRouteSource bool
EnableIPMasqAgent bool
IPMasqAgentConfigPath string
EnableBPFClockProbe bool
EnableIPv4EgressGateway bool
EnableEnvoyConfig bool
InstallIptRules bool
MonitorAggregation string
PreAllocateMaps bool
IPv6NodeAddr string
IPv4NodeAddr string
SocketPath string
TracePayloadlen int
Version string
PrometheusServeAddr string
ToFQDNsMinTTL int
// DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain
// for each FQDN selector in endpoint's restored DNS rules
DNSMaxIPsPerRestoredRule int
// DNSPolicyUnloadOnShutdown defines whether DNS policy rules should be unloaded on
// graceful shutdown.
DNSPolicyUnloadOnShutdown bool
// ToFQDNsProxyPort is the user-configured global, shared, DNS listen port used
// by the DNS Proxy. Both UDP and TCP are handled on the same port. When it
// is 0 a random port will be assigned, and can be obtained from
// DefaultDNSProxy below.
ToFQDNsProxyPort int
// ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain
// for each FQDN name in an endpoint's FQDN cache
ToFQDNsMaxIPsPerHost int
// ToFQDNsMaxDeferredConnectionDeletes defines the maximum number of IPs to
// retain for expired DNS lookups with still-active connections
ToFQDNsMaxDeferredConnectionDeletes int
// ToFQDNsIdleConnectionGracePeriod is the time during which idle but
// previously active connections with expired DNS lookups are
// still considered alive
ToFQDNsIdleConnectionGracePeriod time.Duration
// FQDNRejectResponse is the dns-proxy response for invalid dns-proxy requests
FQDNRejectResponse string
// FQDNProxyResponseMaxDelay is the maximum time the DNS proxy holds an allowed
// DNS response before sending it along. Responses are sent as soon as the
// datapath is updated with the new IP information.
FQDNProxyResponseMaxDelay time.Duration
// FQDNRegexCompileLRUSize is the size of the FQDN regex compilation LRU.
// Useful for heavy but repeated FQDN MatchName or MatchPattern use.
FQDNRegexCompileLRUSize int
// Path to a file with DNS cache data to preload on startup
ToFQDNsPreCache string
// ToFQDNsEnableDNSCompression allows the DNS proxy to compress responses to
// endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
ToFQDNsEnableDNSCompression bool
// DNSProxyConcurrencyLimit limits parallel processing of DNS messages in
// DNS proxy at any given point in time.
DNSProxyConcurrencyLimit int
// DNSProxyConcurrencyProcessingGracePeriod is the amount of grace time to
// wait while processing DNS messages when the DNSProxyConcurrencyLimit has
// been reached.
DNSProxyConcurrencyProcessingGracePeriod time.Duration
// DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy.
DNSProxyEnableTransparentMode bool
// DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users
// to disable transparent mode even if IPSec is enabled
DNSProxyInsecureSkipTransparentModeCheck bool
// DNSProxyLockCount is the array size containing mutexes which protect
// against parallel handling of DNS response names.
DNSProxyLockCount int
// DNSProxyLockTimeout is timeout when acquiring the locks controlled by
// DNSProxyLockCount.
DNSProxyLockTimeout time.Duration
// DNSProxySocketLingerTimeout defines how many seconds we wait for the connection
// between the DNS proxy and the upstream server to be closed.
DNSProxySocketLingerTimeout int
// EnableXTSocketFallback allows disabling of kernel's ip_early_demux
// sysctl option if `xt_socket` kernel module is not available.
EnableXTSocketFallback bool
// EnableBPFTProxy enables implementing proxy redirection via BPF
// mechanisms rather than iptables rules.
EnableBPFTProxy bool
// EnableAutoDirectRouting enables installation of direct routes to
// other nodes when available
EnableAutoDirectRouting bool
// DirectRoutingSkipUnreachable skips installation of direct routes
// to nodes when they're not on the same L2
DirectRoutingSkipUnreachable bool
// EnableLocalNodeRoute controls installation of the route which points
// the allocation prefix of the local node.
EnableLocalNodeRoute bool
// EnableHealthChecking enables health checking between nodes and
// health endpoints
EnableHealthChecking bool
// EnableEndpointHealthChecking enables health checking between virtual
// health endpoints
EnableEndpointHealthChecking bool
// EnableHealthCheckNodePort enables health checking of NodePort by
// cilium
EnableHealthCheckNodePort bool
// EnableHealthCheckLoadBalancerIP enables health checking of LoadBalancerIP
// by cilium
EnableHealthCheckLoadBalancerIP bool
// KVstoreKeepAliveInterval is the interval in which the lease is being
// renewed. This must be set to a value smaller than the LeaseTTL, ideally
// by a factor of 3.
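// For example, with a LeaseTTL of 15 minutes, a keep-alive interval of roughly
// 5 minutes satisfies this (numbers are illustrative).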
KVstoreKeepAliveInterval time.Duration
// KVstoreLeaseTTL is the time-to-live for kvstore lease.
KVstoreLeaseTTL time.Duration
// KVstoreMaxConsecutiveQuorumErrors is the maximum number of acceptable
// kvstore consecutive quorum errors before the agent assumes permanent failure
KVstoreMaxConsecutiveQuorumErrors uint
// KVstorePeriodicSync is the time interval in which periodic
// synchronization with the kvstore occurs
KVstorePeriodicSync time.Duration
// KVstoreConnectivityTimeout is the timeout when performing kvstore operations
KVstoreConnectivityTimeout time.Duration
// IdentityChangeGracePeriod is the grace period that needs to pass
// before an endpoint that has changed its identity will start using
// that new identity. During the grace period, the new identity has
// already been allocated and other nodes in the cluster have a chance
// to whitelist the new upcoming identity of the endpoint.
IdentityChangeGracePeriod time.Duration
// IdentityRestoreGracePeriod is the grace period that needs to pass before CIDR identities
// restored during agent restart are released. If any of the restored identities remains
// unused after this time, they will be removed from the IP cache. Any of the restored
// identities that are used in network policies will remain in the IP cache until all such
// policies are removed.
//
// The default is 30 seconds for k8s clusters, and 10 minutes for kvstore clusters
IdentityRestoreGracePeriod time.Duration
// PolicyQueueSize is the size of the queues for the policy repository.
// A larger queue means that more events related to policy can be buffered.
PolicyQueueSize int
// EndpointQueueSize is the size of the EventQueue per-endpoint. A larger
// queue means that more events can be buffered per-endpoint. This is useful
// in the case where a cluster might be under high load for endpoint-related
// events, specifically those which cause many regenerations.
EndpointQueueSize int
// ConntrackGCInterval is the connection tracking garbage collection
// interval
ConntrackGCInterval time.Duration
// ConntrackGCMaxInterval, if set, limits the automatic GC interval calculation to
// the specified maximum value.
ConntrackGCMaxInterval time.Duration
// LoopbackIPv4 is the address to use for service loopback SNAT
LoopbackIPv4 string
// LocalRouterIPv4 is the link-local IPv4 address used for Cilium's router device
LocalRouterIPv4 string
// LocalRouterIPv6 is the link-local IPv6 address used for Cilium's router device
LocalRouterIPv6 string
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes bool
// Specifies whether to annotate the Kubernetes nodes or not
AnnotateK8sNode bool
// EnableNodePort enables k8s NodePort service implementation in BPF
EnableNodePort bool
// EnableSVCSourceRangeCheck enables check of loadBalancerSourceRanges
EnableSVCSourceRangeCheck bool
// EnableHealthDatapath enables IPIP health probes data path
EnableHealthDatapath bool
// EnableHostPort enables k8s Pod's hostPort mapping through BPF
EnableHostPort bool
// EnableHostLegacyRouting enables the old routing path via stack.
EnableHostLegacyRouting bool
// NodePortNat46X64 indicates whether NAT46 / NAT64 can be used.
NodePortNat46X64 bool
// NodePortMode indicates in which mode NodePort implementation should run
// ("snat", "dsr" or "hybrid")
NodePortMode string
// NodePortAlg indicates which backend selection algorithm is used
// ("random" or "maglev")
NodePortAlg string
// LoadBalancerDSRDispatch indicates the method for pushing packets to
// backends under DSR ("opt" or "ipip")
LoadBalancerDSRDispatch string
// LoadBalancerDSRL4Xlate indicates the method for L4 DNAT translation
// under IPIP dispatch, that is, whether the inner packet will be
// translated to the frontend or backend port.
LoadBalancerDSRL4Xlate string
// LoadBalancerRSSv4CIDR defines the outer source IPv4 prefix for DSR/IPIP
LoadBalancerRSSv4CIDR string
LoadBalancerRSSv4 net.IPNet
// LoadBalancerRSSv6CIDR defines the outer source IPv6 prefix for DSR/IPIP
LoadBalancerRSSv6CIDR string
LoadBalancerRSSv6 net.IPNet
// LoadBalancerExternalControlPlane tells whether to not use the kube-apiserver as
// the control plane in LB-only mode.
LoadBalancerExternalControlPlane bool
// EnablePMTUDiscovery indicates whether to send ICMP fragmentation-needed
// replies to the client (when needed).
EnablePMTUDiscovery bool
// Maglev backend table size (M) per service. Must be a prime number.
MaglevTableSize int
// MaglevHashSeed contains the cluster-wide seed for the hash(es).
MaglevHashSeed string
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic", "native", or "best-effort")
NodePortAcceleration string
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection bool
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange bool
// KubeProxyReplacement controls how to enable kube-proxy replacement
// features in BPF datapath
KubeProxyReplacement string
// AddressScopeMax controls the maximum address scope for addresses to be
// considered local ones with HOST_ID in the ipcache
AddressScopeMax int
// EnableRecorder enables the datapath pcap recorder
EnableRecorder bool
// EnableMKE enables MKE specific 'chaining' for kube-proxy replacement
EnableMKE bool
// CgroupPathMKE points to the cgroupv1 net_cls mount instance
CgroupPathMKE string
// KubeProxyReplacementHealthzBindAddr is the KubeProxyReplacement healthz server bind addr
KubeProxyReplacementHealthzBindAddr string
// EnableExternalIPs enables implementation of k8s services with externalIPs in datapath
EnableExternalIPs bool
// EnableHostFirewall enables network policies for the host
EnableHostFirewall bool
// EnableLocalRedirectPolicy enables redirect policies to redirect traffic within nodes
EnableLocalRedirectPolicy bool
// NodePortMin is the minimum port address for the NodePort range
NodePortMin int
// NodePortMax is the maximum port address for the NodePort range
NodePortMax int
// EnableSessionAffinity enables support for service sessionAffinity
EnableSessionAffinity bool
// Selection of BPF main clock source (ktime vs jiffies)
ClockSource BPFClockSource
// EnableIdentityMark enables setting the mark field with the identity for
// local traffic. This may be disabled if chaining modes and Cilium use
// conflicting marks.
EnableIdentityMark bool
// EnableHighScaleIPcache enables the special ipcache mode for high scale
// clusters. The ipcache content will be reduced to the strict minimum and
// traffic will be encapsulated to carry security identities.
EnableHighScaleIPcache bool
// KernelHz is the HZ rate the kernel is operating in
KernelHz int
// ExcludeLocalAddresses excludes certain addresses from being recognized as
// local addresses
ExcludeLocalAddresses []*net.IPNet
// IPv4PodSubnets are the available subnets from which IPv4 addresses are assigned to pods
IPv4PodSubnets []*net.IPNet
// IPv6PodSubnets are the available subnets from which IPv6 addresses are assigned to pods
IPv6PodSubnets []*net.IPNet
// IPAM is the IPAM method to use
IPAM string
// IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool
IPAMMultiPoolPreAllocation map[string]string
// IPAMDefaultIPPool the default IP Pool when using multi-pool
IPAMDefaultIPPool string
// AutoCreateCiliumNodeResource enables automatic creation of a
// CiliumNode resource for the local node
AutoCreateCiliumNodeResource bool
// ExcludeNodeLabelPatterns allows excluding unnecessary labels from being propagated from the k8s node to the cilium
// node object. This avoids unnecessary events being broadcast to all nodes in the cluster.
ExcludeNodeLabelPatterns []*regexp.Regexp
// IPv4NativeRoutingCIDR describes a CIDR in which pod IPs are routable
IPv4NativeRoutingCIDR *cidr.CIDR
// IPv6NativeRoutingCIDR describes a CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR *cidr.CIDR
// MasqueradeInterfaces is the selector used to select interfaces subject
// to egress masquerading. EgressMasqueradeInterfaces is the same but as
// a string representation. It's deprecated and can be removed once the GH
// issue https://github.com/cilium/cilium-cli/issues/1896 is fixed.
MasqueradeInterfaces []string
EgressMasqueradeInterfaces string
// PolicyTriggerInterval is the amount of time between successive triggers of
// policy updates.
PolicyTriggerInterval time.Duration
// IdentityAllocationMode specifies what mode to use for identity
// allocation
IdentityAllocationMode string
// AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in
// the network policy for cilium-agent.
AllowICMPFragNeeded bool
// EnableWellKnownIdentities enables the use of well-known identities.
// This is required if identity resolution is needed to bring up the
// control plane, e.g. when using the managed etcd feature
EnableWellKnownIdentities bool
// PolicyAuditMode enables non-drop mode for installed policies. In
// audit mode packets affected by policies will not be dropped.
// Policy related decisions can be checked via the policy verdict messages.
PolicyAuditMode bool
// PolicyAccounting enables policy accounting
PolicyAccounting bool
// EnableHubble specifies whether to enable the hubble server.
EnableHubble bool
// HubbleSocketPath specifies the UNIX domain socket for Hubble server to listen to.
HubbleSocketPath string
// HubbleListenAddress specifies address for Hubble to listen to.
HubbleListenAddress string
// HubblePreferIpv6 controls whether IPv6 or IPv4 addresses should be preferred for
// communication to agents, if both are available.
HubblePreferIpv6 bool
// HubbleTLSDisabled allows the Hubble server to run on the given listen
// address without TLS.
HubbleTLSDisabled bool
// HubbleTLSCertFile specifies the path to the public key file for the
// Hubble server. The file must contain PEM encoded data.
HubbleTLSCertFile string
// HubbleTLSKeyFile specifies the path to the private key file for the
// Hubble server. The file must contain PEM encoded data.
HubbleTLSKeyFile string
// HubbleTLSClientCAFiles specifies the path to one or more client CA
// certificates to use for TLS with mutual authentication (mTLS). The files
// must contain PEM encoded data.
HubbleTLSClientCAFiles []string
// HubbleEventBufferCapacity specifies the capacity of Hubble events buffer.
HubbleEventBufferCapacity int
// HubbleEventQueueSize specifies the buffer size of the channel to receive monitor events.
HubbleEventQueueSize int
// HubbleMetricsServer specifies the addresses to serve Hubble metrics on.
HubbleMetricsServer string
// HubbleMetricsServerTLSEnabled allows the Hubble metrics server to run on the given listen
// address with TLS.
HubbleMetricsServerTLSEnabled bool
// HubbleMetricsServerTLSCertFile specifies the path to the public key file for the
// Hubble server. The file must contain PEM encoded data.
HubbleMetricsServerTLSCertFile string
// HubbleMetricsServerTLSKeyFile specifies the path to the private key file for the
// Hubble server. The file must contain PEM encoded data.
HubbleMetricsServerTLSKeyFile string
// HubbleMetricsServerTLSClientCAFiles specifies the path to one or more client CA
// certificates to use for TLS with mutual authentication (mTLS). The files
// must contain PEM encoded data.
HubbleMetricsServerTLSClientCAFiles []string
// HubbleMetrics specifies enabled metrics and their configuration options.
HubbleMetrics []string
// HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs.
// e.g. "/etc/cilium/flowlog.yaml"
HubbleFlowlogsConfigFilePath string
// HubbleExportFilePath specifies the filepath to write Hubble events to.
// e.g. "/var/run/cilium/hubble/events.log"
HubbleExportFilePath string
// HubbleExportFileMaxSizeMB specifies the file size in MB at which to rotate
// the Hubble export file.
HubbleExportFileMaxSizeMB int
// HubbleExportFileMaxBackups specifies the number of rotated files to keep.
HubbleExportFileMaxBackups int
// HubbleExportFileCompress specifies whether rotated files are compressed.
HubbleExportFileCompress bool
// HubbleExportAllowlist specifies the allow list filters used by the exporter.
HubbleExportAllowlist []*flowpb.FlowFilter
// HubbleExportDenylist specifies the deny list filters used by the exporter.
HubbleExportDenylist []*flowpb.FlowFilter
// HubbleExportFieldmask specifies the list of fields the exporter logs.
HubbleExportFieldmask []string
// EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served
EnableHubbleRecorderAPI bool
// EnableHubbleOpenMetrics enables exporting hubble metrics in OpenMetrics format.
EnableHubbleOpenMetrics bool
// HubbleRecorderStoragePath specifies the directory in which pcap files
// created via the Hubble Recorder API are stored
HubbleRecorderStoragePath string
// HubbleRecorderSinkQueueSize is the queue size for each recorder sink
HubbleRecorderSinkQueueSize int
// HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped
HubbleSkipUnknownCGroupIDs bool
// HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe.
// By default, Hubble observes all monitor events.
HubbleMonitorEvents []string
// HubbleRedactEnabled controls if Hubble will redact sensitive information from L7 flows
HubbleRedactEnabled bool
// HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows
HubbleRedactHttpURLQuery bool
// HubbleRedactHttpUserInfo controls if the user info will be redacted from flows
HubbleRedactHttpUserInfo bool
// HubbleRedactKafkaApiKey controls if Kafka API key will be redacted from flows
HubbleRedactKafkaApiKey bool
// HubbleRedactHttpHeadersAllow controls which http headers will not be redacted from flows
HubbleRedactHttpHeadersAllow []string
// HubbleRedactHttpHeadersDeny controls which http headers will be redacted from flows
HubbleRedactHttpHeadersDeny []string
// HubbleDropEvents controls whether Hubble should create v1.Events
// for packet drops related to pods
HubbleDropEvents bool
// HubbleDropEventsInterval controls the minimum time between emitting events
// with the same source and destination IP
HubbleDropEventsInterval time.Duration
// HubbleDropEventsReasons controls which drop reasons to emit events for
HubbleDropEventsReasons []string
// EnableIPv4FragmentsTracking enables IPv4 fragments tracking for
// L4-based lookups. Needs LRU map support.
EnableIPv4FragmentsTracking bool
// FragmentsMapEntries is the maximum number of fragmented datagrams
// that can simultaneously be tracked in order to retrieve their L4
// ports for all fragments.
FragmentsMapEntries int
// SizeofCTElement is the size of an element (key + value) in the CT map.
SizeofCTElement int
// SizeofNATElement is the size of an element (key + value) in the NAT map.
SizeofNATElement int
// SizeofNeighElement is the size of an element (key + value) in the neigh
// map.
SizeofNeighElement int
// SizeofSockRevElement is the size of an element (key + value) in the sock
// rev NAT map.
SizeofSockRevElement int
// K8sEnableLeasesFallbackDiscovery enables a fallback to API probing to check
// for the support of Leases in Kubernetes when there is an error in discovering
// API groups using the Discovery API.
// We only need to check for Leases capabilities in the operator, which uses
// Leases for leader election purposes in HA mode.
// This is only enabled for cilium-operator
K8sEnableLeasesFallbackDiscovery bool
// LBMapEntries is the maximum number of entries allowed in BPF lbmap.
LBMapEntries int
// LBServiceMapEntries is the maximum number of entries allowed in BPF lbmap for services.
LBServiceMapEntries int
// LBBackendMapEntries is the maximum number of entries allowed in BPF lbmap for service backends.
LBBackendMapEntries int
// LBRevNatEntries is the maximum number of entries allowed in BPF lbmap for reverse NAT.
LBRevNatEntries int
// LBAffinityMapEntries is the maximum number of entries allowed in BPF lbmap for session affinities.
LBAffinityMapEntries int
// LBSourceRangeMapEntries is the maximum number of entries allowed in BPF lbmap for source ranges.
LBSourceRangeMapEntries int
// LBMaglevMapEntries is the maximum number of entries allowed in BPF lbmap for maglev.
LBMaglevMapEntries int
// CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not
// available.
CRDWaitTimeout time.Duration
// EgressMultiHomeIPRuleCompat instructs Cilium to use the new scheme to
// store rules and routes under ENI and Azure IPAM modes when set to false.
// When set to true, the old scheme is used.
EgressMultiHomeIPRuleCompat bool
// InstallNoConntrackIptRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic.
InstallNoConntrackIptRules bool
// ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve
// the provided comma-separated list of ports in the container network namespace
ContainerIPLocalReservedPorts string
// EnableCustomCalls enables tail call hooks for user-defined custom
// eBPF programs, typically used to collect custom per-endpoint
// metrics.
EnableCustomCalls bool
// BGPAnnounceLBIP announces service IPs of type LoadBalancer via BGP.
BGPAnnounceLBIP bool
// BGPAnnouncePodCIDR announces the node's pod CIDR via BGP.
BGPAnnouncePodCIDR bool
// BGPConfigPath is the file path to the BGP configuration. It is
// compatible with MetalLB's configuration.
BGPConfigPath string
// BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
BGPSecretsNamespace string
// ExternalClusterIP enables routing to ClusterIP services from outside
// the cluster. This mirrors the behaviour of kube-proxy.
ExternalClusterIP bool
// ARPPingRefreshPeriod is the ARP entries refresher period.
ARPPingRefreshPeriod time.Duration
// EnableCiliumEndpointSlice enables the cilium endpoint slicing feature.
EnableCiliumEndpointSlice bool
// ARPPingKernelManaged denotes whether kernel can auto-refresh Neighbor entries
ARPPingKernelManaged bool
// VLANBPFBypass is the list of explicitly allowed VLAN IDs for BPF logic bypass
VLANBPFBypass []int
// DisableExternalIPMitigation disables the externalIP mitigation (CVE-2020-8554)
DisableExternalIPMitigation bool
// EnableL2NeighDiscovery determines if cilium should perform L2 neighbor
// discovery.
EnableL2NeighDiscovery bool
// EnableICMPRules enables ICMP-based rule support for Cilium Network Policies.
EnableICMPRules bool
// Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation.
UseCiliumInternalIPForIPsec bool
// BypassIPAvailabilityUponRestore bypasses the IP availability error
// within IPAM upon endpoint restore and allows the use of the restored IP
// regardless of whether it's available in the pool.
BypassIPAvailabilityUponRestore bool
// EnableK8sTerminatingEndpoint enables auto-detect of terminating state for
// Kubernetes service endpoints.
EnableK8sTerminatingEndpoint bool
// EnableVTEP enables the Cilium VXLAN VTEP integration
EnableVTEP bool
// VtepEndpoints is the list of VTEP endpoint IPs
VtepEndpoints []net.IP
// VtepCIDRs is the list of VTEP CIDRs
VtepCIDRs []*cidr.CIDR
// VtepCidrMask is the VTEP CIDR mask
VtepCidrMask net.IP
// VtepMACs is the list of VTEP MAC addresses
VtepMACs []mac.MAC
// TCFilterPriority sets the priority of the cilium tc filter, enabling other
// filters to be inserted prior to the cilium filter.
TCFilterPriority uint16
// Enables BGP control plane features.
EnableBGPControlPlane bool
// BPFMapEventBuffers configures which BPF map event buffers are enabled
// and the options for those buffers.
BPFMapEventBuffers map[string]string
BPFMapEventBuffersValidator func(val string) (string, error) `json:"-"`
bpfMapEventConfigs BPFEventBufferConfigs
// BPFEventsDropEnabled controls whether the Cilium datapath exposes "drop" events to Cilium monitor and Hubble.
BPFEventsDropEnabled bool
// BPFEventsPolicyVerdictEnabled controls whether the Cilium datapath exposes "policy verdict" events to Cilium monitor and Hubble.
BPFEventsPolicyVerdictEnabled bool
// BPFEventsTraceEnabled controls whether the Cilium datapath exposes "trace" events to Cilium monitor and Hubble.
BPFEventsTraceEnabled bool
// IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom
// resource is updated.
IPAMCiliumNodeUpdateRate time.Duration
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy bool
// PolicyCIDRMatchMode is the list of entities that can be selected by CIDR policy.
// Currently supported values:
// - world
// - world, remote-node
PolicyCIDRMatchMode []string
// MaxConnectedClusters sets the maximum number of clusters that can be
// connected in a clustermesh.
// The value is used to determine the bit allocation for cluster ID and
// identity in a numeric identity. Values > 255 will decrease the number of
// allocatable identities.
MaxConnectedClusters uint32
// ForceDeviceRequired enforces the attachment of BPF programs on native devices.
ForceDeviceRequired bool
// ServiceNoBackendResponse determines how we handle traffic to a service with no backends.
ServiceNoBackendResponse string
// EnableNodeSelectorLabels enables use of the node label based identity
EnableNodeSelectorLabels bool
// NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of
// EnableNodeSelectorLabels)
NodeLabels []string
// EnableSocketLBPodConnectionTermination enables the termination of connections from pods
// to deleted service backends when socket-LB is enabled
EnableSocketLBPodConnectionTermination bool
}
var (
// Config represents the daemon configuration
Config = &DaemonConfig{
CreationTime: time.Now(),
Opts: NewIntOptions(&DaemonOptionLibrary),
ConfigPatchMutex: new(lock.RWMutex),
Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0},
IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR,
IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase,
IPAMDefaultIPPool: defaults.IPAMDefaultIPPool,
EnableHealthChecking: defaults.EnableHealthChecking,
EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking,
EnableHealthCheckLoadBalancerIP: defaults.EnableHealthCheckLoadBalancerIP,
EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort,
EnableIPv4: defaults.EnableIPv4,
EnableIPv6: defaults.EnableIPv6,
EnableIPv6NDP: defaults.EnableIPv6NDP,
EnableSCTP: defaults.EnableSCTP,
EnableL7Proxy: defaults.EnableL7Proxy,
DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule,
ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost,
KVstorePeriodicSync: defaults.KVstorePeriodicSync,
KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout,
IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod,
IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriodK8s,
FixedIdentityMapping: make(map[string]string),
KVStoreOpt: make(map[string]string),
LogOpt: make(map[string]string),
LoopbackIPv4: defaults.LoopbackIPv4,
EnableEndpointRoutes: defaults.EnableEndpointRoutes,
AnnotateK8sNode: defaults.AnnotateK8sNode,
K8sServiceCacheSize: defaults.K8sServiceCacheSize,
AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource,
IdentityAllocationMode: IdentityAllocationModeKVstore,
AllowICMPFragNeeded: defaults.AllowICMPFragNeeded,
EnableWellKnownIdentities: defaults.EnableWellKnownIdentities,
AllocatorListTimeout: defaults.AllocatorListTimeout,
EnableICMPRules: defaults.EnableICMPRules,
UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec,
K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery,
ExternalClusterIP: defaults.ExternalClusterIP,
EnableVTEP: defaults.EnableVTEP,
EnableBGPControlPlane: defaults.EnableBGPControlPlane,
EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy,
PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode,
MaxConnectedClusters: defaults.MaxConnectedClusters,
BPFEventsDropEnabled: defaults.BPFEventsDropEnabled,
BPFEventsPolicyVerdictEnabled: defaults.BPFEventsPolicyVerdictEnabled,
BPFEventsTraceEnabled: defaults.BPFEventsTraceEnabled,
EnableEnvoyConfig: defaults.EnableEnvoyConfig,
}
)
// GetIPv4NativeRoutingCIDR returns the native routing CIDR if configured
func (c *DaemonConfig) GetIPv4NativeRoutingCIDR() (cidr *cidr.CIDR) {
c.ConfigPatchMutex.RLock()
cidr = c.IPv4NativeRoutingCIDR
c.ConfigPatchMutex.RUnlock()
return
}
// SetIPv4NativeRoutingCIDR sets the native routing CIDR
func (c *DaemonConfig) SetIPv4NativeRoutingCIDR(cidr *cidr.CIDR) {
c.ConfigPatchMutex.Lock()
c.IPv4NativeRoutingCIDR = cidr
c.ConfigPatchMutex.Unlock()
}
// GetIPv6NativeRoutingCIDR returns the native routing CIDR if configured
func (c *DaemonConfig) GetIPv6NativeRoutingCIDR() (cidr *cidr.CIDR) {
c.ConfigPatchMutex.RLock()
cidr = c.IPv6NativeRoutingCIDR
c.ConfigPatchMutex.RUnlock()
return
}
// SetIPv6NativeRoutingCIDR sets the native routing CIDR
func (c *DaemonConfig) SetIPv6NativeRoutingCIDR(cidr *cidr.CIDR) {
c.ConfigPatchMutex.Lock()
c.IPv6NativeRoutingCIDR = cidr
c.ConfigPatchMutex.Unlock()
}
// IsExcludedLocalAddress returns true if the specified IP matches one of the
// excluded local IP ranges
func (c *DaemonConfig) IsExcludedLocalAddress(ip net.IP) bool {
for _, ipnet := range c.ExcludeLocalAddresses {
if ipnet.Contains(ip) {
return true
}
}
return false
}
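// The sketch below is an illustrative example (not used by the daemon) of how
// IsExcludedLocalAddress consults ExcludeLocalAddresses; the CIDR and IP
// literals are hypothetical.
func exampleIsExcludedLocalAddress(c *DaemonConfig) bool {
	// Exclude a hypothetical range from being treated as local addresses.
	_, ipnet, err := net.ParseCIDR("192.0.2.0/24")
	if err != nil {
		return false
	}
	c.ExcludeLocalAddresses = append(c.ExcludeLocalAddresses, ipnet)
	// Any IP within the excluded range now reports true.
	return c.IsExcludedLocalAddress(net.ParseIP("192.0.2.10"))
}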
// IsPodSubnetsDefined returns true if encryption subnets should be configured at init time.
func (c *DaemonConfig) IsPodSubnetsDefined() bool {
return len(c.IPv4PodSubnets) > 0 || len(c.IPv6PodSubnets) > 0
}
// nodeConfigFile is the name of the C header which contains the node's
// network parameters.
const nodeConfigFile = "node_config.h"
// GetNodeConfigPath returns the full path of nodeConfigFile.
func (c *DaemonConfig) GetNodeConfigPath() string {
return filepath.Join(c.GetGlobalsDir(), nodeConfigFile)
}
// GetGlobalsDir returns the path for the globals directory.
func (c *DaemonConfig) GetGlobalsDir() string {
return filepath.Join(c.StateDir, "globals")
}
// AlwaysAllowLocalhost returns true if the daemon is configured so that
// localhost can always reach local endpoints
func (c *DaemonConfig) AlwaysAllowLocalhost() bool {
switch c.AllowLocalhost {
case AllowLocalhostAlways:
return true
case AllowLocalhostAuto, AllowLocalhostPolicy:
return false
default:
return false
}
}
// TunnelingEnabled returns true if tunneling is enabled.
func (c *DaemonConfig) TunnelingEnabled() bool {
// We check if routing mode is not native rather than checking if it's
// tunneling because, in unit tests, RoutingMode is usually not set and we
// would like for TunnelingEnabled to default to the actual default
// (tunneling is enabled) in that case.
return c.RoutingMode != RoutingModeNative
}
// AreDevicesRequired returns true if the agent needs to attach to the native
// devices to implement some features.
func (c *DaemonConfig) AreDevicesRequired() bool {
return c.EnableNodePort || c.EnableHostFirewall || c.EnableWireguard ||
c.EnableHighScaleIPcache || c.EnableL2Announcements || c.ForceDeviceRequired ||
c.EnableIPSecEncryptedOverlay
}
// NeedBPFHostOnWireGuardDevice returns true when bpf_host must be attached to
// Cilium's WireGuard netdev. When WireGuard & encrypt-node are on, a NodePort
// BPF to-be-forwarded request to a remote node running a selected service
// endpoint must be encrypted. To make the NodePort's rev-{S,D}NAT translations
// happen for a reply from the remote node, we need to attach bpf_host to the
// WireGuard netdev (otherwise, the WireGuard netdev after decrypting the reply
// will pass it to the stack, which drops the packet).
func (c *DaemonConfig) NeedBPFHostOnWireGuardDevice() bool {
return c.EnableNodePort && c.EnableWireguard && c.EncryptNode
}
// MasqueradingEnabled returns true if either IPv4 or IPv6 masquerading is enabled.
func (c *DaemonConfig) MasqueradingEnabled() bool {
return c.EnableIPv4Masquerade || c.EnableIPv6Masquerade
}
// IptablesMasqueradingIPv4Enabled returns true if iptables-based
// masquerading is enabled for IPv4.
func (c *DaemonConfig) IptablesMasqueradingIPv4Enabled() bool {
return !c.EnableBPFMasquerade && c.EnableIPv4Masquerade
}
// IptablesMasqueradingIPv6Enabled returns true if iptables-based
// masquerading is enabled for IPv6.
func (c *DaemonConfig) IptablesMasqueradingIPv6Enabled() bool {
return !c.EnableBPFMasquerade && c.EnableIPv6Masquerade
}
// IptablesMasqueradingEnabled returns true if iptables-based
// masquerading is enabled.
func (c *DaemonConfig) IptablesMasqueradingEnabled() bool {
return c.IptablesMasqueradingIPv4Enabled() || c.IptablesMasqueradingIPv6Enabled()
}
// NodeIpsetNeeded returns true if node ipsets should be used to skip
// masquerading for traffic to cluster nodes.
func (c *DaemonConfig) NodeIpsetNeeded() bool {
return !c.TunnelingEnabled() && c.IptablesMasqueradingEnabled()
}
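// exampleMasqueradeModeSummary is an illustrative sketch (not part of the
// daemon logic) showing how the masquerading predicates above compose: BPF
// masquerading takes precedence, otherwise iptables-based masquerading is
// reported per enabled IP family.
func exampleMasqueradeModeSummary(c *DaemonConfig) string {
	switch {
	case !c.MasqueradingEnabled():
		return "masquerading disabled"
	case c.EnableBPFMasquerade:
		return "BPF masquerading"
	case c.IptablesMasqueradingIPv4Enabled() && c.IptablesMasqueradingIPv6Enabled():
		return "iptables masquerading (IPv4 and IPv6)"
	case c.IptablesMasqueradingIPv4Enabled():
		return "iptables masquerading (IPv4)"
	default:
		return "iptables masquerading (IPv6)"
	}
}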
// NodeEncryptionEnabled returns true if node encryption is enabled
func (c *DaemonConfig) NodeEncryptionEnabled() bool {
return c.EncryptNode
}
// EncryptionEnabled returns true if encryption is enabled
func (c *DaemonConfig) EncryptionEnabled() bool {
return c.EnableIPSec
}
// IPv4Enabled returns true if IPv4 is enabled
func (c *DaemonConfig) IPv4Enabled() bool {
return c.EnableIPv4
}
// IPv6Enabled returns true if IPv6 is enabled
func (c *DaemonConfig) IPv6Enabled() bool {
return c.EnableIPv6
}
// IPv6NDPEnabled returns true if IPv6 NDP support is enabled
func (c *DaemonConfig) IPv6NDPEnabled() bool {
return c.EnableIPv6NDP
}
// SCTPEnabled returns true if SCTP support is enabled
func (c *DaemonConfig) SCTPEnabled() bool {
return c.EnableSCTP
}
// HealthCheckingEnabled returns true if health checking is enabled
func (c *DaemonConfig) HealthCheckingEnabled() bool {
return c.EnableHealthChecking
}
// IPAMMode returns the IPAM mode
func (c *DaemonConfig) IPAMMode() string {
return strings.ToLower(c.IPAM)
}
// TracingEnabled returns true if policy tracing (outlining which rules apply to a
// specific set of labels) is enabled.
func (c *DaemonConfig) TracingEnabled() bool {
return c.Opts.IsEnabled(PolicyTracing)
}
// UnreachableRoutesEnabled returns true if unreachable routes are enabled
func (c *DaemonConfig) UnreachableRoutesEnabled() bool {
return c.EnableUnreachableRoutes
}
// CiliumNamespaceName returns the name of the namespace in which Cilium is
// deployed
func (c *DaemonConfig) CiliumNamespaceName() string {
return c.K8sNamespace
}
// AgentNotReadyNodeTaintValue returns the value of the taint key that cilium agents
// will manage on their nodes
func (c *DaemonConfig) AgentNotReadyNodeTaintValue() string {
if c.AgentNotReadyNodeTaintKey != "" {
return c.AgentNotReadyNodeTaintKey
} else {
return defaults.AgentNotReadyNodeTaint
}
}
// K8sNetworkPolicyEnabled returns true if cilium agent needs to support K8s NetworkPolicy, false otherwise.
func (c *DaemonConfig) K8sNetworkPolicyEnabled() bool {
return c.EnableK8sNetworkPolicy
}
func (c *DaemonConfig) PolicyCIDRMatchesNodes() bool {
for _, mode := range c.PolicyCIDRMatchMode {
if mode == "nodes" {
return true
}
}
return false
}
// PerNodeLabelsEnabled returns true if per-node labels feature
// is enabled
func (c *DaemonConfig) PerNodeLabelsEnabled() bool {
return c.EnableNodeSelectorLabels
}
func (c *DaemonConfig) validatePolicyCIDRMatchMode() error {
// Currently, the only acceptable value is "nodes".
for _, mode := range c.PolicyCIDRMatchMode {
switch mode {
case "nodes":
continue
default:
return fmt.Errorf("unknown CIDR match mode: %s", mode)
}
}
return nil
}
// DirectRoutingDeviceRequired returns whether the Direct Routing Device is needed under
// the current configuration.
func (c *DaemonConfig) DirectRoutingDeviceRequired() bool {
// BPF NodePort and BPF host routing use the direct routing device.
// When tunneling is enabled, node-to-node redirection is done via the tunnel.
bpfHostRoutingEnabled := !c.EnableHostLegacyRouting
// XDP needs IPV4_DIRECT_ROUTING when building tunnel headers:
if c.EnableNodePort && c.NodePortAcceleration != NodePortAccelerationDisabled {
return true
}
return c.EnableNodePort || bpfHostRoutingEnabled || c.EnableWireguard
}
func (c *DaemonConfig) LoadBalancerUsesDSR() bool {
return c.NodePortMode == NodePortModeDSR ||
c.NodePortMode == NodePortModeHybrid
}
func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error {
ip, cidr, err := net.ParseCIDR(c.IPv6ClusterAllocCIDR)
if err != nil {
return err
}
if ones, _ := cidr.Mask.Size(); ones != 64 {
return fmt.Errorf("Prefix length must be /64")
}
c.IPv6ClusterAllocCIDRBase = ip.Mask(cidr.Mask).String()
return nil
}
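// exampleIPv6ClusterAllocCIDRBase is an illustrative sketch of the derivation
// performed above: the address is masked with the /64 prefix, so the
// hypothetical input "f00d:1:2:3::1/64" yields the base "f00d:1:2:3::".
func exampleIPv6ClusterAllocCIDRBase(allocCIDR string) (string, error) {
	ip, ipnet, err := net.ParseCIDR(allocCIDR)
	if err != nil {
		return "", err
	}
	if ones, _ := ipnet.Mask.Size(); ones != 64 {
		return "", fmt.Errorf("prefix length must be /64")
	}
	return ip.Mask(ipnet.Mask).String(), nil
}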
func (c *DaemonConfig) validateIPv6NAT46x64CIDR() error {
parsedPrefix, err := netip.ParsePrefix(c.IPv6NAT46x64CIDR)
if err != nil {
return err
}
if parsedPrefix.Bits() != 96 {
return fmt.Errorf("Prefix length must be /96")
}
c.IPv6NAT46x64CIDRBase = parsedPrefix.Masked().Addr()
return nil
}
func (c *DaemonConfig) validateHubbleRedact() error {
if len(c.HubbleRedactHttpHeadersAllow) > 0 && len(c.HubbleRedactHttpHeadersDeny) > 0 {
return fmt.Errorf("Only one of --hubble-redact-http-headers-allow and --hubble-redact-http-headers-deny can be specified, not both")
}
return nil
}
func (c *DaemonConfig) validateContainerIPLocalReservedPorts() error {
if c.ContainerIPLocalReservedPorts == "" || c.ContainerIPLocalReservedPorts == defaults.ContainerIPLocalReservedPortsAuto {
return nil
}
if regexp.MustCompile(`^(\d+(-\d+)?)(,\d+(-\d+)?)*$`).MatchString(c.ContainerIPLocalReservedPorts) {
return nil
}
return fmt.Errorf("Invalid comma separated list of of ranges for %s option", ContainerIPLocalReservedPorts)
}
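// exampleContainerIPLocalReservedPortsInputs is an illustrative sketch of the
// inputs accepted by the regular expression above: single ports and inclusive
// ranges joined by commas. The values are hypothetical; the "auto" keyword is
// handled separately before the regexp is consulted.
var exampleContainerIPLocalReservedPortsInputs = []string{
	"4242",           // single port
	"1000-2000",      // inclusive range
	"1000-2000,4242", // mixed comma-separated list
}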
// Validate validates the daemon configuration
func (c *DaemonConfig) Validate(vp *viper.Viper) error {
if err := c.validateIPv6ClusterAllocCIDR(); err != nil {
return fmt.Errorf("unable to parse CIDR value '%s' of option --%s: %w",
c.IPv6ClusterAllocCIDR, IPv6ClusterAllocCIDRName, err)
}
if err := c.validateIPv6NAT46x64CIDR(); err != nil {
return fmt.Errorf("unable to parse internal CIDR value '%s': %w",
c.IPv6NAT46x64CIDR, err)
}
if err := c.validateHubbleRedact(); err != nil {
return err
}
if c.MTU < 0 {
return fmt.Errorf("MTU '%d' cannot be negative", c.MTU)
}
if c.RouteMetric < 0 {
return fmt.Errorf("RouteMetric '%d' cannot be negative", c.RouteMetric)
}
if c.IPAM == ipamOption.IPAMENI && c.EnableIPv6 {
return fmt.Errorf("IPv6 cannot be enabled in ENI IPAM mode")
}
if c.EnableIPv6NDP {
if !c.EnableIPv6 {
return fmt.Errorf("IPv6NDP cannot be enabled when IPv6 is not enabled")
}
if len(c.IPv6MCastDevice) == 0 {
return fmt.Errorf("IPv6NDP cannot be enabled without %s", IPv6MCastDevice)
}
}
switch c.RoutingMode {
case RoutingModeNative, RoutingModeTunnel:
default:
return fmt.Errorf("invalid routing mode %q, valid modes = {%q, %q}",
c.RoutingMode, RoutingModeTunnel, RoutingModeNative)
}
cinfo := clustermeshTypes.ClusterInfo{
ID: c.ClusterID,
Name: c.ClusterName,
MaxConnectedClusters: c.MaxConnectedClusters,
}
if err := cinfo.InitClusterIDMax(); err != nil {
return err
}
if err := cinfo.Validate(log); err != nil {
return err
}
if err := c.checkMapSizeLimits(); err != nil {
return err
}
if err := c.checkIPv4NativeRoutingCIDR(); err != nil {
return err
}
if err := c.checkIPv6NativeRoutingCIDR(); err != nil {
return err
}
if err := c.checkIPAMDelegatedPlugin(); err != nil {
return err
}
// Validate that the KVStore Lease TTL value lies between a particular range.
if c.KVstoreLeaseTTL > defaults.KVstoreLeaseMaxTTL || c.KVstoreLeaseTTL < defaults.LockLeaseTTL {
return fmt.Errorf("KVstoreLeaseTTL does not lie in required range(%ds, %ds)",
int64(defaults.LockLeaseTTL.Seconds()),
int64(defaults.KVstoreLeaseMaxTTL.Seconds()))
}
if c.EnableVTEP {
err := c.validateVTEP(vp)
if err != nil {
return fmt.Errorf("Failed to validate VTEP configuration: %w", err)
}
}
if err := c.validatePolicyCIDRMatchMode(); err != nil {
return err
}
if err := c.validateContainerIPLocalReservedPorts(); err != nil {
return err
}
return nil
}
// ReadDirConfig reads the given directory and returns a map that maps the
// filename to the contents of that file.
func ReadDirConfig(dirName string) (map[string]interface{}, error) {
m := map[string]interface{}{}
files, err := os.ReadDir(dirName)
if err != nil && !os.IsNotExist(err) {
return nil, fmt.Errorf("unable to read configuration directory: %w", err)
}
for _, f := range files {
if f.IsDir() {
continue
}
fName := filepath.Join(dirName, f.Name())
// the file can still be a symlink to a directory
if f.Type()&os.ModeSymlink == 0 {
absFileName, err := filepath.EvalSymlinks(fName)
if err != nil {
log.WithError(err).Warnf("Unable to read configuration file %q", absFileName)
continue
}
fName = absFileName
}
fi, err := os.Stat(fName)
if err != nil {
log.WithError(err).Warnf("Unable to read configuration file %q", fName)
continue
}
if fi.Mode().IsDir() {
continue
}
b, err := os.ReadFile(fName)
if err != nil {
log.WithError(err).Warnf("Unable to read configuration file %q", fName)
continue
}
m[f.Name()] = string(bytes.TrimSpace(b))
}
return m, nil
}
// MergeConfig merges the given configuration map with viper's configuration.
func MergeConfig(vp *viper.Viper, m map[string]interface{}) error {
err := vp.MergeConfigMap(m)
if err != nil {
return fmt.Errorf("unable to read merge directory configuration: %w", err)
}
return nil
}
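// exampleLoadConfigDir is an illustrative sketch (the directory path is
// hypothetical) of how ReadDirConfig and MergeConfig are intended to be used
// together: every regular file in the directory becomes a key/value pair that
// is merged into the provided viper instance.
func exampleLoadConfigDir(vp *viper.Viper) error {
	m, err := ReadDirConfig("/tmp/cilium-config-dir")
	if err != nil {
		return err
	}
	return MergeConfig(vp, m)
}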
// ReplaceDeprecatedFields replaces the deprecated options set with the new set
// of options that overwrite the deprecated ones.
// This function replaces the deprecated fields used by environment variables
// with a different name than the option they are setting. This also replaces
// the deprecated names used in the Kubernetes ConfigMap.
// Once we remove them from this function we also need to remove them from
// daemon_main.go and warn users that neither the old environment variable nor
// the option in the configuration map has any effect.
func ReplaceDeprecatedFields(m map[string]interface{}) {
deprecatedFields := map[string]string{
"monitor-aggregation-level": MonitorAggregationName,
"ct-global-max-entries-tcp": CTMapEntriesGlobalTCPName,
"ct-global-max-entries-other": CTMapEntriesGlobalAnyName,
}
for deprecatedOption, newOption := range deprecatedFields {
if deprecatedValue, ok := m[deprecatedOption]; ok {
if _, ok := m[newOption]; !ok {
m[newOption] = deprecatedValue
}
}
}
}
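// exampleReplaceDeprecatedFields is an illustrative sketch: a map that only
// contains the deprecated "monitor-aggregation-level" key ends up with the
// value copied under the current MonitorAggregationName option, while an
// explicitly set new key would be left untouched.
func exampleReplaceDeprecatedFields() map[string]interface{} {
	m := map[string]interface{}{
		"monitor-aggregation-level": "medium", // deprecated spelling, hypothetical value
	}
	ReplaceDeprecatedFields(m)
	// m[MonitorAggregationName] is now "medium".
	return m
}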
func (c *DaemonConfig) parseExcludedLocalAddresses(s []string) error {
for _, ipString := range s {
_, ipnet, err := net.ParseCIDR(ipString)
if err != nil {
return fmt.Errorf("unable to parse excluded local address %s: %w", ipString, err)
}
c.ExcludeLocalAddresses = append(c.ExcludeLocalAddresses, ipnet)
}
return nil
}
// Populate sets all options with the values from viper
func (c *DaemonConfig) Populate(vp *viper.Viper) {
var err error
c.AgentHealthPort = vp.GetInt(AgentHealthPort)
c.ClusterHealthPort = vp.GetInt(ClusterHealthPort)
c.ClusterMeshHealthPort = vp.GetInt(ClusterMeshHealthPort)
c.AgentLabels = vp.GetStringSlice(AgentLabels)
c.AllowICMPFragNeeded = vp.GetBool(AllowICMPFragNeeded)
c.AllowLocalhost = vp.GetString(AllowLocalhost)
c.AnnotateK8sNode = vp.GetBool(AnnotateK8sNode)
c.ARPPingRefreshPeriod = vp.GetDuration(ARPPingRefreshPeriod)
c.EnableL2NeighDiscovery = vp.GetBool(EnableL2NeighDiscovery)
c.AutoCreateCiliumNodeResource = vp.GetBool(AutoCreateCiliumNodeResource)
c.BPFRoot = vp.GetString(BPFRoot)
c.CGroupRoot = vp.GetString(CGroupRoot)
c.ClusterID = vp.GetUint32(clustermeshTypes.OptClusterID)
c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName)
c.MaxConnectedClusters = vp.GetUint32(clustermeshTypes.OptMaxConnectedClusters)
c.DatapathMode = vp.GetString(DatapathMode)
c.Debug = vp.GetBool(DebugArg)
c.DebugVerbose = vp.GetStringSlice(DebugVerbose)
c.DirectRoutingDevice = vp.GetString(DirectRoutingDevice)
c.EnableIPv4 = vp.GetBool(EnableIPv4Name)
c.EnableIPv6 = vp.GetBool(EnableIPv6Name)
c.EnableIPv6NDP = vp.GetBool(EnableIPv6NDPName)
c.EnableSRv6 = vp.GetBool(EnableSRv6)
c.SRv6EncapMode = vp.GetString(SRv6EncapModeName)
c.EnableSCTP = vp.GetBool(EnableSCTPName)
c.IPv6MCastDevice = vp.GetString(IPv6MCastDevice)
c.EnableIPSec = vp.GetBool(EnableIPSecName)
c.EnableWireguard = vp.GetBool(EnableWireguard)
c.EnableL2Announcements = vp.GetBool(EnableL2Announcements)
c.L2AnnouncerLeaseDuration = vp.GetDuration(L2AnnouncerLeaseDuration)
c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline)
c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod)
c.EnableWireguardUserspaceFallback = vp.GetBool(EnableWireguardUserspaceFallback)
c.WireguardPersistentKeepalive = vp.GetDuration(WireguardPersistentKeepalive)
c.EnableWellKnownIdentities = vp.GetBool(EnableWellKnownIdentities)
c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter)
c.EnableTCX = vp.GetBool(EnableTCX)
c.DisableCiliumEndpointCRD = vp.GetBool(DisableCiliumEndpointCRDName)
c.MasqueradeInterfaces = vp.GetStringSlice(MasqueradeInterfaces)
c.EgressMasqueradeInterfaces = strings.Join(c.MasqueradeInterfaces, ",")
c.BPFSocketLBHostnsOnly = vp.GetBool(BPFSocketLBHostnsOnly)
c.EnableSocketLB = vp.GetBool(EnableSocketLB)
c.EnableSocketLBTracing = vp.GetBool(EnableSocketLBTracing)
c.EnableSocketLBPodConnectionTermination = vp.GetBool(EnableSocketLBPodConnectionTermination)
c.EnableBPFTProxy = vp.GetBool(EnableBPFTProxy)
c.EnableXTSocketFallback = vp.GetBool(EnableXTSocketFallbackName)
c.EnableAutoDirectRouting = vp.GetBool(EnableAutoDirectRoutingName)
c.DirectRoutingSkipUnreachable = vp.GetBool(DirectRoutingSkipUnreachableName)
c.EnableEndpointRoutes = vp.GetBool(EnableEndpointRoutes)
c.EnableHealthChecking = vp.GetBool(EnableHealthChecking)
c.EnableEndpointHealthChecking = vp.GetBool(EnableEndpointHealthChecking)
c.EnableHealthCheckNodePort = vp.GetBool(EnableHealthCheckNodePort)
c.EnableHealthCheckLoadBalancerIP = vp.GetBool(EnableHealthCheckLoadBalancerIP)
c.EnableLocalNodeRoute = vp.GetBool(EnableLocalNodeRoute)
c.EnablePolicy = strings.ToLower(vp.GetString(EnablePolicy))
c.EnableExternalIPs = vp.GetBool(EnableExternalIPs)
c.EnableL7Proxy = vp.GetBool(EnableL7Proxy)
c.EnableTracing = vp.GetBool(EnableTracing)
c.EnableIPIPTermination = vp.GetBool(EnableIPIPTermination)
c.EnableUnreachableRoutes = vp.GetBool(EnableUnreachableRoutes)
c.EnableNodePort = vp.GetBool(EnableNodePort)
c.EnableSVCSourceRangeCheck = vp.GetBool(EnableSVCSourceRangeCheck)
c.EnableHostPort = vp.GetBool(EnableHostPort)
c.EnableHostLegacyRouting = vp.GetBool(EnableHostLegacyRouting)
c.MaglevTableSize = vp.GetInt(MaglevTableSize)
c.MaglevHashSeed = vp.GetString(MaglevHashSeed)
c.NodePortBindProtection = vp.GetBool(NodePortBindProtection)
c.EnableAutoProtectNodePortRange = vp.GetBool(EnableAutoProtectNodePortRange)
c.KubeProxyReplacement = vp.GetString(KubeProxyReplacement)
c.EnableSessionAffinity = vp.GetBool(EnableSessionAffinity)
c.EnableRecorder = vp.GetBool(EnableRecorder)
c.EnableMKE = vp.GetBool(EnableMKE)
c.CgroupPathMKE = vp.GetString(CgroupPathMKE)
c.EnableHostFirewall = vp.GetBool(EnableHostFirewall)
c.EnableLocalRedirectPolicy = vp.GetBool(EnableLocalRedirectPolicy)
c.EncryptInterface = vp.GetStringSlice(EncryptInterface)
c.EncryptNode = vp.GetBool(EncryptNode)
c.IdentityChangeGracePeriod = vp.GetDuration(IdentityChangeGracePeriod)
c.IdentityRestoreGracePeriod = vp.GetDuration(IdentityRestoreGracePeriod)
c.IPAM = vp.GetString(IPAM)
c.IPAMDefaultIPPool = vp.GetString(IPAMDefaultIPPool)
c.IPv4Range = vp.GetString(IPv4Range)
c.IPv4NodeAddr = vp.GetString(IPv4NodeAddr)
c.IPv4ServiceRange = vp.GetString(IPv4ServiceRange)
c.IPv6ClusterAllocCIDR = vp.GetString(IPv6ClusterAllocCIDRName)
c.IPv6NodeAddr = vp.GetString(IPv6NodeAddr)
c.IPv6Range = vp.GetString(IPv6Range)
c.IPv6ServiceRange = vp.GetString(IPv6ServiceRange)
c.JoinCluster = vp.GetBool(JoinClusterName)
c.K8sRequireIPv4PodCIDR = vp.GetBool(K8sRequireIPv4PodCIDRName)
c.K8sRequireIPv6PodCIDR = vp.GetBool(K8sRequireIPv6PodCIDRName)
c.K8sServiceCacheSize = uint(vp.GetInt(K8sServiceCacheSize))
c.K8sSyncTimeout = vp.GetDuration(K8sSyncTimeoutName)
c.AllocatorListTimeout = vp.GetDuration(AllocatorListTimeoutName)
c.K8sWatcherEndpointSelector = vp.GetString(K8sWatcherEndpointSelector)
c.KeepConfig = vp.GetBool(KeepConfig)
c.KVStore = vp.GetString(KVStore)
c.KVstoreLeaseTTL = vp.GetDuration(KVstoreLeaseTTL)
c.KVstoreKeepAliveInterval = c.KVstoreLeaseTTL / defaults.KVstoreKeepAliveIntervalFactor
c.KVstorePeriodicSync = vp.GetDuration(KVstorePeriodicSync)
c.KVstoreConnectivityTimeout = vp.GetDuration(KVstoreConnectivityTimeout)
c.KVstoreMaxConsecutiveQuorumErrors = vp.GetUint(KVstoreMaxConsecutiveQuorumErrorsName)
c.LabelPrefixFile = vp.GetString(LabelPrefixFile)
c.Labels = vp.GetStringSlice(Labels)
c.LibDir = vp.GetString(LibDir)
c.LogDriver = vp.GetStringSlice(LogDriver)
c.LogSystemLoadConfig = vp.GetBool(LogSystemLoadConfigName)
c.Logstash = vp.GetBool(Logstash)
c.LoopbackIPv4 = vp.GetString(LoopbackIPv4)
c.LocalRouterIPv4 = vp.GetString(LocalRouterIPv4)
c.LocalRouterIPv6 = vp.GetString(LocalRouterIPv6)
c.EnableBPFClockProbe = vp.GetBool(EnableBPFClockProbe)
c.EnableIPMasqAgent = vp.GetBool(EnableIPMasqAgent)
c.EnableIPv4EgressGateway = vp.GetBool(EnableIPv4EgressGateway)
c.EnableEnvoyConfig = vp.GetBool(EnableEnvoyConfig)
c.IPMasqAgentConfigPath = vp.GetString(IPMasqAgentConfigPath)
c.InstallIptRules = vp.GetBool(InstallIptRules)
c.IPSecKeyFile = vp.GetString(IPSecKeyFileName)
c.IPsecKeyRotationDuration = vp.GetDuration(IPsecKeyRotationDuration)
c.EnableIPsecKeyWatcher = vp.GetBool(EnableIPsecKeyWatcher)
c.EnableIPSecXfrmStateCaching = vp.GetBool(EnableIPSecXfrmStateCaching)
c.MonitorAggregation = vp.GetString(MonitorAggregationName)
c.MonitorAggregationInterval = vp.GetDuration(MonitorAggregationInterval)
c.MTU = vp.GetInt(MTUName)
c.PreAllocateMaps = vp.GetBool(PreAllocateMapsName)
c.ProcFs = vp.GetString(ProcFs)
c.RestoreState = vp.GetBool(Restore)
c.RouteMetric = vp.GetInt(RouteMetric)
c.RunDir = vp.GetString(StateDir)
c.ExternalEnvoyProxy = vp.GetBool(ExternalEnvoyProxy)
c.SocketPath = vp.GetString(SocketPath)
c.TracePayloadlen = vp.GetInt(TracePayloadlen)
c.Version = vp.GetString(Version)
c.PolicyTriggerInterval = vp.GetDuration(PolicyTriggerInterval)
c.CTMapEntriesTimeoutTCP = vp.GetDuration(CTMapEntriesTimeoutTCPName)
c.CTMapEntriesTimeoutAny = vp.GetDuration(CTMapEntriesTimeoutAnyName)
c.CTMapEntriesTimeoutSVCTCP = vp.GetDuration(CTMapEntriesTimeoutSVCTCPName)
c.CTMapEntriesTimeoutSVCTCPGrace = vp.GetDuration(CTMapEntriesTimeoutSVCTCPGraceName)
c.CTMapEntriesTimeoutSVCAny = vp.GetDuration(CTMapEntriesTimeoutSVCAnyName)
c.CTMapEntriesTimeoutSYN = vp.GetDuration(CTMapEntriesTimeoutSYNName)
c.CTMapEntriesTimeoutFIN = vp.GetDuration(CTMapEntriesTimeoutFINName)
c.PolicyAuditMode = vp.GetBool(PolicyAuditModeArg)
c.PolicyAccounting = vp.GetBool(PolicyAccountingArg)
c.EnableIPv4FragmentsTracking = vp.GetBool(EnableIPv4FragmentsTrackingName)
c.FragmentsMapEntries = vp.GetInt(FragmentsMapEntriesName)
c.CRDWaitTimeout = vp.GetDuration(CRDWaitTimeout)
c.LoadBalancerDSRDispatch = vp.GetString(LoadBalancerDSRDispatch)
c.LoadBalancerDSRL4Xlate = vp.GetString(LoadBalancerDSRL4Xlate)
c.LoadBalancerRSSv4CIDR = vp.GetString(LoadBalancerRSSv4CIDR)
c.LoadBalancerRSSv6CIDR = vp.GetString(LoadBalancerRSSv6CIDR)
c.LoadBalancerExternalControlPlane = vp.GetBool(LoadBalancerExternalControlPlane)
c.InstallNoConntrackIptRules = vp.GetBool(InstallNoConntrackIptRules)
c.ContainerIPLocalReservedPorts = vp.GetString(ContainerIPLocalReservedPorts)
c.EnableCustomCalls = vp.GetBool(EnableCustomCallsName)
c.BGPAnnounceLBIP = vp.GetBool(BGPAnnounceLBIP)
c.BGPAnnouncePodCIDR = vp.GetBool(BGPAnnouncePodCIDR)
c.BGPConfigPath = vp.GetString(BGPConfigPath)
c.BGPSecretsNamespace = vp.GetString(BGPSecretsNamespace)
c.ExternalClusterIP = vp.GetBool(ExternalClusterIPName)
c.EnableNat46X64Gateway = vp.GetBool(EnableNat46X64Gateway)
c.EnableHighScaleIPcache = vp.GetBool(EnableHighScaleIPcache)
c.EnableIPv4Masquerade = vp.GetBool(EnableIPv4Masquerade) && c.EnableIPv4
c.EnableIPv6Masquerade = vp.GetBool(EnableIPv6Masquerade) && c.EnableIPv6
c.EnableBPFMasquerade = vp.GetBool(EnableBPFMasquerade)
c.EnableMasqueradeRouteSource = vp.GetBool(EnableMasqueradeRouteSource)
c.EnablePMTUDiscovery = vp.GetBool(EnablePMTUDiscovery)
c.IPv6NAT46x64CIDR = defaults.IPv6NAT46x64CIDR
c.IPAMCiliumNodeUpdateRate = vp.GetDuration(IPAMCiliumNodeUpdateRate)
c.BPFEventsDropEnabled = vp.GetBool(BPFEventsDropEnabled)
c.BPFEventsPolicyVerdictEnabled = vp.GetBool(BPFEventsPolicyVerdictEnabled)
c.BPFEventsTraceEnabled = vp.GetBool(BPFEventsTraceEnabled)
c.EnableIPSecEncryptedOverlay = vp.GetBool(EnableIPSecEncryptedOverlay)
c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse)
switch c.ServiceNoBackendResponse {
case ServiceNoBackendResponseReject, ServiceNoBackendResponseDrop:
case "":
c.ServiceNoBackendResponse = defaults.ServiceNoBackendResponse
default:
log.Fatalf("Invalid value for --%s: %s (must be 'reject' or 'drop')", ServiceNoBackendResponse, c.ServiceNoBackendResponse)
}
c.populateLoadBalancerSettings(vp)
c.EnableRuntimeDeviceDetection = vp.GetBool(EnableRuntimeDeviceDetection)
c.EgressMultiHomeIPRuleCompat = vp.GetBool(EgressMultiHomeIPRuleCompat)
vlanBPFBypassIDs := vp.GetStringSlice(VLANBPFBypass)
c.VLANBPFBypass = make([]int, 0, len(vlanBPFBypassIDs))
for _, vlanIDStr := range vlanBPFBypassIDs {
vlanID, err := strconv.Atoi(vlanIDStr)
if err != nil {
log.WithError(err).Fatalf("Cannot parse vlan ID integer from --%s option", VLANBPFBypass)
}
c.VLANBPFBypass = append(c.VLANBPFBypass, vlanID)
}
c.DisableExternalIPMitigation = vp.GetBool(DisableExternalIPMitigation)
tcFilterPrio := vp.GetUint32(TCFilterPriority)
if tcFilterPrio > math.MaxUint16 {
log.Fatalf("%s cannot be higher than %d", TCFilterPriority, math.MaxUint16)
}
c.TCFilterPriority = uint16(tcFilterPrio)
c.RoutingMode = vp.GetString(RoutingMode)
if vp.IsSet(AddressScopeMax) {
c.AddressScopeMax, err = ip.ParseScope(vp.GetString(AddressScopeMax))
if err != nil {
log.WithError(err).Fatalf("Cannot parse scope integer from --%s option", AddressScopeMax)
}
} else {
c.AddressScopeMax = defaults.AddressScopeMax
}
if c.EnableNat46X64Gateway {
if !c.EnableIPv4 || !c.EnableIPv6 {
log.Fatalf("--%s requires both --%s and --%s enabled",
EnableNat46X64Gateway, EnableIPv4Name, EnableIPv6Name)
}
}
encryptionStrictModeEnabled := vp.GetBool(EnableEncryptionStrictMode)
if encryptionStrictModeEnabled {
if c.EnableIPv6 {
log.Warnf("WireGuard encryption strict mode only support IPv4. IPv6 traffic is not protected and can be leaked.")
}
strictCIDR := vp.GetString(EncryptionStrictModeCIDR)
c.EncryptionStrictModeCIDR, err = netip.ParsePrefix(strictCIDR)
if err != nil {
log.WithError(err).Fatalf("Cannot parse CIDR %s from --%s option", strictCIDR, EncryptionStrictModeCIDR)
}
if !c.EncryptionStrictModeCIDR.Addr().Is4() {
log.Fatalf("%s must be an IPv4 CIDR", EncryptionStrictModeCIDR)
}
c.EncryptionStrictModeAllowRemoteNodeIdentities = vp.GetBool(EncryptionStrictModeAllowRemoteNodeIdentities)
c.EnableEncryptionStrictMode = encryptionStrictModeEnabled
}
ipv4NativeRoutingCIDR := vp.GetString(IPv4NativeRoutingCIDR)
if ipv4NativeRoutingCIDR != "" {
c.IPv4NativeRoutingCIDR, err = cidr.ParseCIDR(ipv4NativeRoutingCIDR)
if err != nil {
log.WithError(err).Fatalf("Unable to parse CIDR '%s'", ipv4NativeRoutingCIDR)
}
if len(c.IPv4NativeRoutingCIDR.IP) != net.IPv4len {
log.Fatalf("%s must be an IPv4 CIDR", IPv4NativeRoutingCIDR)
}
}
if c.EnableIPv4 && ipv4NativeRoutingCIDR == "" && c.EnableAutoDirectRouting {
log.Warnf("If %s is enabled, then you are recommended to also configure %s. If %s is not configured, this may lead to pod to pod traffic being masqueraded, "+
"which can cause problems with performance, observability and policy", EnableAutoDirectRoutingName, IPv4NativeRoutingCIDR, IPv4NativeRoutingCIDR)
}
ipv6NativeRoutingCIDR := vp.GetString(IPv6NativeRoutingCIDR)
if ipv6NativeRoutingCIDR != "" {
c.IPv6NativeRoutingCIDR, err = cidr.ParseCIDR(ipv6NativeRoutingCIDR)
if err != nil {
log.WithError(err).Fatalf("Unable to parse CIDR '%s'", ipv6NativeRoutingCIDR)
}
if len(c.IPv6NativeRoutingCIDR.IP) != net.IPv6len {
log.Fatalf("%s must be an IPv6 CIDR", IPv6NativeRoutingCIDR)
}
}
if c.EnableIPv6 && ipv6NativeRoutingCIDR == "" && c.EnableAutoDirectRouting {
log.Warnf("If %s is enabled, then you are recommended to also configure %s. If %s is not configured, this may lead to pod to pod traffic being masqueraded, "+
"which can cause problems with performance, observability and policy", EnableAutoDirectRoutingName, IPv6NativeRoutingCIDR, IPv6NativeRoutingCIDR)
}
if c.DirectRoutingSkipUnreachable && !c.EnableAutoDirectRouting {
log.Fatalf("Flag %s cannot be enabled when %s is not enabled. As if %s is then enabled, it may lead to unexpected behaviour causing network connectivity issues.", DirectRoutingSkipUnreachableName, EnableAutoDirectRoutingName, EnableAutoDirectRoutingName)
}
if err := c.calculateBPFMapSizes(vp); err != nil {
log.Fatal(err)
}
c.ClockSource = ClockSourceKtime
c.EnableIdentityMark = vp.GetBool(EnableIdentityMark)
// toFQDNs options
c.DNSMaxIPsPerRestoredRule = vp.GetInt(DNSMaxIPsPerRestoredRule)
c.DNSPolicyUnloadOnShutdown = vp.GetBool(DNSPolicyUnloadOnShutdown)
c.FQDNRegexCompileLRUSize = vp.GetInt(FQDNRegexCompileLRUSize)
c.ToFQDNsMaxIPsPerHost = vp.GetInt(ToFQDNsMaxIPsPerHost)
if maxZombies := vp.GetInt(ToFQDNsMaxDeferredConnectionDeletes); maxZombies >= 0 {
c.ToFQDNsMaxDeferredConnectionDeletes = vp.GetInt(ToFQDNsMaxDeferredConnectionDeletes)
} else {
log.Fatalf("%s must be positive, or 0 to disable deferred connection deletion",
ToFQDNsMaxDeferredConnectionDeletes)
}
switch {
case vp.IsSet(ToFQDNsMinTTL): // set by user
c.ToFQDNsMinTTL = vp.GetInt(ToFQDNsMinTTL)
default:
c.ToFQDNsMinTTL = defaults.ToFQDNsMinTTL
}
c.ToFQDNsProxyPort = vp.GetInt(ToFQDNsProxyPort)
c.ToFQDNsPreCache = vp.GetString(ToFQDNsPreCache)
c.ToFQDNsEnableDNSCompression = vp.GetBool(ToFQDNsEnableDNSCompression)
c.ToFQDNsIdleConnectionGracePeriod = vp.GetDuration(ToFQDNsIdleConnectionGracePeriod)
c.FQDNProxyResponseMaxDelay = vp.GetDuration(FQDNProxyResponseMaxDelay)
c.DNSProxyConcurrencyLimit = vp.GetInt(DNSProxyConcurrencyLimit)
c.DNSProxyConcurrencyProcessingGracePeriod = vp.GetDuration(DNSProxyConcurrencyProcessingGracePeriod)
c.DNSProxyEnableTransparentMode = vp.GetBool(DNSProxyEnableTransparentMode)
c.DNSProxyInsecureSkipTransparentModeCheck = vp.GetBool(DNSProxyInsecureSkipTransparentModeCheck)
c.DNSProxyLockCount = vp.GetInt(DNSProxyLockCount)
c.DNSProxyLockTimeout = vp.GetDuration(DNSProxyLockTimeout)
c.DNSProxySocketLingerTimeout = vp.GetInt(DNSProxySocketLingerTimeout)
c.FQDNRejectResponse = vp.GetString(FQDNRejectResponseCode)
// Convert IP strings into net.IPNet types
subnets, invalid := ip.ParseCIDRs(vp.GetStringSlice(IPv4PodSubnets))
if len(invalid) > 0 {
log.WithFields(
logrus.Fields{
"Subnets": invalid,
}).Warning("IPv4PodSubnets parameter can not be parsed.")
}
c.IPv4PodSubnets = subnets
subnets, invalid = ip.ParseCIDRs(vp.GetStringSlice(IPv6PodSubnets))
if len(invalid) > 0 {
log.WithFields(
logrus.Fields{
"Subnets": invalid,
}).Warning("IPv6PodSubnets parameter can not be parsed.")
}
c.IPv6PodSubnets = subnets
c.XDPMode = XDPModeLinkNone
err = c.populateNodePortRange(vp)
if err != nil {
log.WithError(err).Fatal("Failed to populate NodePortRange")
}
monitorAggregationFlags := vp.GetStringSlice(MonitorAggregationFlags)
var ctMonitorReportFlags uint16
for i := 0; i < len(monitorAggregationFlags); i++ {
value := strings.ToLower(monitorAggregationFlags[i])
flag, exists := TCPFlags[value]
if !exists {
log.Fatalf("Unable to parse TCP flag %q for %s!",
value, MonitorAggregationFlags)
}
ctMonitorReportFlags |= flag
}
c.MonitorAggregationFlags = ctMonitorReportFlags
// Map options
if m, err := command.GetStringMapStringE(vp, FixedIdentityMapping); err != nil {
log.Fatalf("unable to parse %s: %s", FixedIdentityMapping, err)
} else if len(m) != 0 {
c.FixedIdentityMapping = m
}
if m, err := command.GetStringMapStringE(vp, FixedZoneMapping); err != nil {
log.Fatalf("unable to parse %s: %s", FixedZoneMapping, err)
} else if len(m) != 0 {
forward := make(map[string]uint8, len(m))
reverse := make(map[uint8]string, len(m))
for k, v := range m {
bigN, _ := strconv.Atoi(v)
n := uint8(bigN)
if oldKey, ok := reverse[n]; ok && oldKey != k {
log.Fatalf("duplicate numeric ID entry for %s: %q and %q map to the same value %d", FixedZoneMapping, oldKey, k, n)
}
if oldN, ok := forward[k]; ok && oldN != n {
log.Fatalf("duplicate zone name entry for %s: %d and %d map to different values %s", FixedZoneMapping, oldN, n, k)
}
forward[k] = n
reverse[n] = k
}
c.FixedZoneMapping = forward
c.ReverseFixedZoneMapping = reverse
}
c.ConntrackGCInterval = vp.GetDuration(ConntrackGCInterval)
c.ConntrackGCMaxInterval = vp.GetDuration(ConntrackGCMaxInterval)
if m, err := command.GetStringMapStringE(vp, KVStoreOpt); err != nil {
log.Fatalf("unable to parse %s: %s", KVStoreOpt, err)
} else {
c.KVStoreOpt = m
}
if m, err := command.GetStringMapStringE(vp, LogOpt); err != nil {
log.Fatalf("unable to parse %s: %s", LogOpt, err)
} else {
c.LogOpt = m
}
bpfEventsDefaultRateLimit := vp.GetUint32(BPFEventsDefaultRateLimit)
bpfEventsDefaultBurstLimit := vp.GetUint32(BPFEventsDefaultBurstLimit)
switch {
case bpfEventsDefaultRateLimit > 0 && bpfEventsDefaultBurstLimit == 0:
log.Fatalf("invalid BPF events default config: burst limit must also be specified when rate limit is provided")
case bpfEventsDefaultRateLimit == 0 && bpfEventsDefaultBurstLimit > 0:
log.Fatalf("invalid BPF events default config: rate limit must also be specified when burst limit is provided")
default:
c.BPFEventsDefaultRateLimit = bpfEventsDefaultRateLimit
c.BPFEventsDefaultBurstLimit = bpfEventsDefaultBurstLimit
}
c.bpfMapEventConfigs = make(BPFEventBufferConfigs)
parseBPFMapEventConfigs(c.bpfMapEventConfigs, defaults.BPFEventBufferConfigs)
if m, err := command.GetStringMapStringE(vp, BPFMapEventBuffers); err != nil {
log.Fatalf("unable to parse %s: %s", BPFMapEventBuffers, err)
} else {
parseBPFMapEventConfigs(c.bpfMapEventConfigs, m)
}
c.NodeEncryptionOptOutLabelsString = vp.GetString(NodeEncryptionOptOutLabels)
if sel, err := k8sLabels.Parse(c.NodeEncryptionOptOutLabelsString); err != nil {
log.Fatalf("unable to parse label selector %s: %s", NodeEncryptionOptOutLabels, err)
} else {
c.NodeEncryptionOptOutLabels = sel
}
if err := c.parseExcludedLocalAddresses(vp.GetStringSlice(ExcludeLocalAddress)); err != nil {
log.WithError(err).Fatalf("Unable to parse excluded local addresses")
}
// Ensure CiliumEndpointSlice is enabled only if CiliumEndpointCRD is enabled too.
c.EnableCiliumEndpointSlice = vp.GetBool(EnableCiliumEndpointSlice)
if c.EnableCiliumEndpointSlice && c.DisableCiliumEndpointCRD {
log.Fatalf("Running Cilium with %s=%t requires %s set to false to enable CiliumEndpoint CRDs.",
EnableCiliumEndpointSlice, c.EnableCiliumEndpointSlice, DisableCiliumEndpointCRDName)
}
c.IdentityAllocationMode = vp.GetString(IdentityAllocationMode)
switch c.IdentityAllocationMode {
// This is here for tests. Some call Populate without the normal init
case "":
c.IdentityAllocationMode = IdentityAllocationModeKVstore
case IdentityAllocationModeKVstore, IdentityAllocationModeCRD:
// c.IdentityAllocationMode is set above
default:
log.Fatalf("Invalid identity allocation mode %q. It must be one of %s or %s", c.IdentityAllocationMode, IdentityAllocationModeKVstore, IdentityAllocationModeCRD)
}
if c.KVStore == "" {
if c.IdentityAllocationMode != IdentityAllocationModeCRD {
log.Warningf("Running Cilium with %q=%q requires identity allocation via CRDs. Changing %s to %q", KVStore, c.KVStore, IdentityAllocationMode, IdentityAllocationModeCRD)
c.IdentityAllocationMode = IdentityAllocationModeCRD
}
if c.DisableCiliumEndpointCRD {
log.Warningf("Running Cilium with %q=%q requires endpoint CRDs. Changing %s to %t", KVStore, c.KVStore, DisableCiliumEndpointCRDName, false)
c.DisableCiliumEndpointCRD = false
}
}
switch c.IPAM {
case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool:
if c.EnableIPv4 {
c.K8sRequireIPv4PodCIDR = true
}
if c.EnableIPv6 {
c.K8sRequireIPv6PodCIDR = true
}
}
if m, err := command.GetStringMapStringE(vp, IPAMMultiPoolPreAllocation); err != nil {
log.Fatalf("unable to parse %s: %s", IPAMMultiPoolPreAllocation, err)
} else {
c.IPAMMultiPoolPreAllocation = m
}
if len(c.IPAMMultiPoolPreAllocation) == 0 {
// Default to pre-allocating 8 IPs from the default IP pool (IPAMDefaultIPPool)
c.IPAMMultiPoolPreAllocation = map[string]string{c.IPAMDefaultIPPool: "8"}
}
c.KubeProxyReplacementHealthzBindAddr = vp.GetString(KubeProxyReplacementHealthzBindAddr)
// Hubble options.
c.EnableHubble = vp.GetBool(EnableHubble)
c.EnableHubbleOpenMetrics = vp.GetBool(EnableHubbleOpenMetrics)
c.HubbleSocketPath = vp.GetString(HubbleSocketPath)
c.HubbleListenAddress = vp.GetString(HubbleListenAddress)
c.HubblePreferIpv6 = vp.GetBool(HubblePreferIpv6)
c.HubbleTLSDisabled = vp.GetBool(HubbleTLSDisabled)
c.HubbleTLSCertFile = vp.GetString(HubbleTLSCertFile)
c.HubbleTLSKeyFile = vp.GetString(HubbleTLSKeyFile)
c.HubbleTLSClientCAFiles = vp.GetStringSlice(HubbleTLSClientCAFiles)
c.HubbleEventBufferCapacity = vp.GetInt(HubbleEventBufferCapacity)
c.HubbleEventQueueSize = vp.GetInt(HubbleEventQueueSize)
if c.HubbleEventQueueSize == 0 {
c.HubbleEventQueueSize = getDefaultMonitorQueueSize(runtime.NumCPU())
}
c.HubbleMetricsServer = vp.GetString(HubbleMetricsServer)
c.HubbleMetricsServerTLSEnabled = vp.GetBool(HubbleMetricsTLSEnabled)
c.HubbleMetricsServerTLSCertFile = vp.GetString(HubbleMetricsTLSCertFile)
c.HubbleMetricsServerTLSKeyFile = vp.GetString(HubbleMetricsTLSKeyFile)
c.HubbleMetricsServerTLSClientCAFiles = vp.GetStringSlice(HubbleMetricsTLSClientCAFiles)
c.HubbleMetrics = vp.GetStringSlice(HubbleMetrics)
c.HubbleExportFilePath = vp.GetString(HubbleExportFilePath)
c.HubbleExportFileMaxSizeMB = vp.GetInt(HubbleExportFileMaxSizeMB)
c.HubbleExportFileMaxBackups = vp.GetInt(HubbleExportFileMaxBackups)
c.HubbleExportFileCompress = vp.GetBool(HubbleExportFileCompress)
for _, enc := range vp.GetStringSlice(HubbleExportAllowlist) {
dec := json.NewDecoder(strings.NewReader(enc))
var result flowpb.FlowFilter
if err := dec.Decode(&result); err != nil {
if errors.Is(err, io.EOF) {
break
}
log.Fatalf("failed to decode hubble-export-allowlist '%v': %s", enc, err)
}
c.HubbleExportAllowlist = append(c.HubbleExportAllowlist, &result)
}
for _, enc := range vp.GetStringSlice(HubbleExportDenylist) {
dec := json.NewDecoder(strings.NewReader(enc))
var result flowpb.FlowFilter
if err := dec.Decode(&result); err != nil {
if errors.Is(err, io.EOF) {
break
}
log.Fatalf("failed to decode hubble-export-denylist '%v': %s", enc, err)
}
c.HubbleExportDenylist = append(c.HubbleExportDenylist, &result)
}
if fm := vp.GetStringSlice(HubbleExportFieldmask); len(fm) > 0 {
_, err := fieldmaskpb.New(&flowpb.Flow{}, fm...)
if err != nil {
log.Fatalf("hubble-export-fieldmask contains invalid fieldmask '%v': %s", fm, err)
}
c.HubbleExportFieldmask = vp.GetStringSlice(HubbleExportFieldmask)
}
c.HubbleFlowlogsConfigFilePath = vp.GetString(HubbleFlowlogsConfigFilePath)
c.EnableHubbleRecorderAPI = vp.GetBool(EnableHubbleRecorderAPI)
c.HubbleRecorderStoragePath = vp.GetString(HubbleRecorderStoragePath)
c.HubbleRecorderSinkQueueSize = vp.GetInt(HubbleRecorderSinkQueueSize)
c.HubbleSkipUnknownCGroupIDs = vp.GetBool(HubbleSkipUnknownCGroupIDs)
c.HubbleMonitorEvents = vp.GetStringSlice(HubbleMonitorEvents)
c.HubbleRedactEnabled = vp.GetBool(HubbleRedactEnabled)
c.HubbleRedactHttpURLQuery = vp.GetBool(HubbleRedactHttpURLQuery)
c.HubbleRedactHttpUserInfo = vp.GetBool(HubbleRedactHttpUserInfo)
c.HubbleRedactKafkaApiKey = vp.GetBool(HubbleRedactKafkaApiKey)
c.HubbleRedactHttpHeadersAllow = vp.GetStringSlice(HubbleRedactHttpHeadersAllow)
c.HubbleRedactHttpHeadersDeny = vp.GetStringSlice(HubbleRedactHttpHeadersDeny)
c.HubbleDropEvents = vp.GetBool(HubbleDropEvents)
c.HubbleDropEventsInterval = vp.GetDuration(HubbleDropEventsInterval)
c.HubbleDropEventsReasons = vp.GetStringSlice(HubbleDropEventsReasons)
// Hidden options
c.CompilerFlags = vp.GetStringSlice(CompilerFlags)
c.ConfigFile = vp.GetString(ConfigFile)
c.HTTP403Message = vp.GetString(HTTP403Message)
c.K8sNamespace = vp.GetString(K8sNamespaceName)
c.AgentNotReadyNodeTaintKey = vp.GetString(AgentNotReadyNodeTaintKeyName)
c.MaxControllerInterval = vp.GetInt(MaxCtrlIntervalName)
c.PolicyQueueSize = sanitizeIntParam(vp, PolicyQueueSize, defaults.PolicyQueueSize)
c.EndpointQueueSize = sanitizeIntParam(vp, EndpointQueueSize, defaults.EndpointQueueSize)
c.EnableICMPRules = vp.GetBool(EnableICMPRules)
c.UseCiliumInternalIPForIPsec = vp.GetBool(UseCiliumInternalIPForIPsec)
c.BypassIPAvailabilityUponRestore = vp.GetBool(BypassIPAvailabilityUponRestore)
c.EnableK8sTerminatingEndpoint = vp.GetBool(EnableK8sTerminatingEndpoint)
// VTEP integration enable option
c.EnableVTEP = vp.GetBool(EnableVTEP)
// Enable BGP control plane features
c.EnableBGPControlPlane = vp.GetBool(EnableBGPControlPlane)
// To support K8s NetworkPolicy
c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy)
c.PolicyCIDRMatchMode = vp.GetStringSlice(PolicyCIDRMatchMode)
c.EnableNodeSelectorLabels = vp.GetBool(EnableNodeSelectorLabels)
c.NodeLabels = vp.GetStringSlice(NodeLabels)
// Parse node label patterns
nodeLabelPatterns := vp.GetStringSlice(ExcludeNodeLabelPatterns)
for _, pattern := range nodeLabelPatterns {
r, err := regexp.Compile(pattern)
if err != nil {
log.WithError(err).Errorf("Unable to compile exclude node label regex pattern %s", pattern)
continue
}
c.ExcludeNodeLabelPatterns = append(c.ExcludeNodeLabelPatterns, r)
}
if c.KVStore != "" {
c.IdentityRestoreGracePeriod = defaults.IdentityRestoreGracePeriodKvstore
}
}
func (c *DaemonConfig) populateLoadBalancerSettings(vp *viper.Viper) {
c.NodePortAcceleration = vp.GetString(LoadBalancerAcceleration)
c.NodePortMode = vp.GetString(LoadBalancerMode)
c.NodePortAlg = vp.GetString(LoadBalancerAlg)
// If old settings were explicitly set by the user, then have them
// override the new ones in order to not break existing setups.
if vp.IsSet(NodePortAcceleration) {
prior := c.NodePortAcceleration
c.NodePortAcceleration = vp.GetString(NodePortAcceleration)
if vp.IsSet(LoadBalancerAcceleration) && prior != c.NodePortAcceleration {
log.Fatalf("Both --%s and --%s were set. Only use --%s instead.",
LoadBalancerAcceleration, NodePortAcceleration, LoadBalancerAcceleration)
}
}
if vp.IsSet(NodePortMode) {
prior := c.NodePortMode
c.NodePortMode = vp.GetString(NodePortMode)
if vp.IsSet(LoadBalancerMode) && prior != c.NodePortMode {
log.Fatalf("Both --%s and --%s were set. Only use --%s instead.",
LoadBalancerMode, NodePortMode, LoadBalancerMode)
}
}
if vp.IsSet(NodePortAlg) {
prior := c.NodePortAlg
c.NodePortAlg = vp.GetString(NodePortAlg)
if vp.IsSet(LoadBalancerAlg) && prior != c.NodePortAlg {
log.Fatalf("Both --%s and --%s were set. Only use --%s instead.",
LoadBalancerAlg, NodePortAlg, LoadBalancerAlg)
}
}
}
func (c *DaemonConfig) populateNodePortRange(vp *viper.Viper) error {
nodePortRange := vp.GetStringSlice(NodePortRange)
// When passed via configmap, we might not get a slice but a single
// string instead, so split it if needed.
if len(nodePortRange) == 1 {
nodePortRange = strings.Split(nodePortRange[0], ",")
}
switch len(nodePortRange) {
case 2:
var err error
c.NodePortMin, err = strconv.Atoi(nodePortRange[0])
if err != nil {
return fmt.Errorf("Unable to parse min port value for NodePort range: %w", err)
}
c.NodePortMax, err = strconv.Atoi(nodePortRange[1])
if err != nil {
return fmt.Errorf("Unable to parse max port value for NodePort range: %w", err)
}
if c.NodePortMax <= c.NodePortMin {
return errors.New("NodePort range min port must be smaller than max port")
}
case 0:
if vp.IsSet(NodePortRange) {
log.Warning("NodePort range was set but is empty.")
}
default:
return fmt.Errorf("Unable to parse min/max port value for NodePort range: %s", NodePortRange)
}
return nil
}
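// Illustrative note for populateNodePortRange (a sketch, not executed here):
// a ConfigMap value of "30000,32767" arrives as a single string and is split
// into NodePortMin=30000 and NodePortMax=32767; a single value, more than two
// values, or a min that is not smaller than the max is rejected.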
func (c *DaemonConfig) checkMapSizeLimits() error {
if c.AuthMapEntries < AuthMapEntriesMin {
return fmt.Errorf("specified AuthMap max entries %d must exceed minimum %d", c.AuthMapEntries, AuthMapEntriesMin)
}
if c.AuthMapEntries > AuthMapEntriesMax {
return fmt.Errorf("specified AuthMap max entries %d must not exceed maximum %d", c.AuthMapEntries, AuthMapEntriesMax)
}
if c.CTMapEntriesGlobalTCP < LimitTableMin || c.CTMapEntriesGlobalAny < LimitTableMin {
return fmt.Errorf("specified CT tables values %d/%d must exceed minimum %d",
c.CTMapEntriesGlobalTCP, c.CTMapEntriesGlobalAny, LimitTableMin)
}
if c.CTMapEntriesGlobalTCP > LimitTableMax || c.CTMapEntriesGlobalAny > LimitTableMax {
return fmt.Errorf("specified CT tables values %d/%d must not exceed maximum %d",
c.CTMapEntriesGlobalTCP, c.CTMapEntriesGlobalAny, LimitTableMax)
}
if c.NATMapEntriesGlobal < LimitTableMin {
return fmt.Errorf("specified NAT table size %d must exceed minimum %d",
c.NATMapEntriesGlobal, LimitTableMin)
}
if c.NATMapEntriesGlobal > LimitTableMax {
return fmt.Errorf("specified NAT tables size %d must not exceed maximum %d",
c.NATMapEntriesGlobal, LimitTableMax)
}
if c.NATMapEntriesGlobal > c.CTMapEntriesGlobalTCP+c.CTMapEntriesGlobalAny {
if c.NATMapEntriesGlobal == NATMapEntriesGlobalDefault {
// Auto-size for the case where CT table size was adapted but NAT still on default
c.NATMapEntriesGlobal = int((c.CTMapEntriesGlobalTCP + c.CTMapEntriesGlobalAny) * 2 / 3)
} else {
return fmt.Errorf("specified NAT tables size %d must not exceed maximum CT table size %d",
c.NATMapEntriesGlobal, c.CTMapEntriesGlobalTCP+c.CTMapEntriesGlobalAny)
}
}
if c.SockRevNatEntries < LimitTableMin {
return fmt.Errorf("specified Socket Reverse NAT table size %d must exceed minimum %d",
c.SockRevNatEntries, LimitTableMin)
}
if c.SockRevNatEntries > LimitTableMax {
return fmt.Errorf("specified Socket Reverse NAT tables size %d must not exceed maximum %d",
c.SockRevNatEntries, LimitTableMax)
}
if c.PolicyMapEntries < PolicyMapMin {
return fmt.Errorf("specified PolicyMap max entries %d must exceed minimum %d",
c.PolicyMapEntries, PolicyMapMin)
}
if c.PolicyMapEntries > PolicyMapMax {
log.Warnf("specified PolicyMap max entries %d must not exceed maximum %d, lowering it to the maximum value",
c.PolicyMapEntries, PolicyMapMax)
c.PolicyMapEntries = PolicyMapMax
}
if c.FragmentsMapEntries < FragmentsMapMin {
return fmt.Errorf("specified max entries %d for fragment-tracking map must exceed minimum %d",
c.FragmentsMapEntries, FragmentsMapMin)
}
if c.FragmentsMapEntries > FragmentsMapMax {
return fmt.Errorf("specified max entries %d for fragment-tracking map must not exceed maximum %d",
c.FragmentsMapEntries, FragmentsMapMax)
}
if c.LBMapEntries <= 0 {
return fmt.Errorf("specified LBMap max entries %d must be a value greater than 0", c.LBMapEntries)
}
if c.LBServiceMapEntries < 0 ||
c.LBBackendMapEntries < 0 ||
c.LBRevNatEntries < 0 ||
c.LBAffinityMapEntries < 0 ||
c.LBSourceRangeMapEntries < 0 ||
c.LBMaglevMapEntries < 0 {
return fmt.Errorf("specified LB Service Map max entries must not be a negative value"+
"(Service Map: %d, Service Backend: %d, Reverse NAT: %d, Session Affinity: %d, Source Range: %d, Maglev: %d)",
c.LBServiceMapEntries,
c.LBBackendMapEntries,
c.LBRevNatEntries,
c.LBAffinityMapEntries,
c.LBSourceRangeMapEntries,
c.LBMaglevMapEntries)
}
return nil
}
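// Worked example for the NAT auto-sizing branch above (numbers chosen purely
// for illustration): with CTMapEntriesGlobalTCP=60000 and
// CTMapEntriesGlobalAny=30000 while NATMapEntriesGlobal is still at its
// default, checkMapSizeLimits shrinks the NAT map to (60000+30000)*2/3 = 60000
// entries instead of returning an error; an explicitly configured NAT size
// larger than the CT sum is rejected.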
func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error {
if c.GetIPv4NativeRoutingCIDR() != nil {
return nil
}
if !c.EnableIPv4 || !c.EnableIPv4Masquerade {
return nil
}
if c.EnableIPMasqAgent {
return nil
}
if c.TunnelingEnabled() {
return nil
}
if c.IPAMMode() == ipamOption.IPAMENI || c.IPAMMode() == ipamOption.IPAMAlibabaCloud {
return nil
}
return fmt.Errorf(
"native routing cidr must be configured with option --%s "+
"in combination with --%s=true --%s=true --%s=false --%s=%s --%s=%s",
IPv4NativeRoutingCIDR,
EnableIPv4Name, EnableIPv4Masquerade,
EnableIPMasqAgent,
RoutingMode, RoutingModeNative,
IPAM, c.IPAMMode())
}
func (c *DaemonConfig) checkIPv6NativeRoutingCIDR() error {
if c.GetIPv6NativeRoutingCIDR() != nil {
return nil
}
if !c.EnableIPv6 || !c.EnableIPv6Masquerade {
return nil
}
if c.EnableIPMasqAgent {
return nil
}
if c.TunnelingEnabled() {
return nil
}
return fmt.Errorf(
"native routing cidr must be configured with option --%s "+
"in combination with --%s=true --%s=true --%s=false --%s=%s",
IPv6NativeRoutingCIDR,
EnableIPv6Name, EnableIPv6Masquerade,
EnableIPMasqAgent,
RoutingMode, RoutingModeNative)
}
func (c *DaemonConfig) checkIPAMDelegatedPlugin() error {
if c.IPAM == ipamOption.IPAMDelegatedPlugin {
// When using IPAM delegated plugin, IP addresses are allocated by the CNI binary,
// not the daemon. Therefore, features which require the daemon to allocate IPs for itself
// must be disabled.
if c.EnableIPv4 && c.LocalRouterIPv4 == "" {
return fmt.Errorf("--%s must be provided when IPv4 is enabled with --%s=%s", LocalRouterIPv4, IPAM, ipamOption.IPAMDelegatedPlugin)
}
if c.EnableIPv6 && c.LocalRouterIPv6 == "" {
return fmt.Errorf("--%s must be provided when IPv6 is enabled with --%s=%s", LocalRouterIPv6, IPAM, ipamOption.IPAMDelegatedPlugin)
}
if c.EnableEndpointHealthChecking {
return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEndpointHealthChecking, IPAM, ipamOption.IPAMDelegatedPlugin)
}
// envoy config (Ingress, Gateway API, ...) require cilium-agent to create an IP address
// specifically for differentiating envoy traffic, which is not possible
// with delegated IPAM.
if c.EnableEnvoyConfig {
return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEnvoyConfig, IPAM, ipamOption.IPAMDelegatedPlugin)
}
}
return nil
}
func (c *DaemonConfig) calculateBPFMapSizes(vp *viper.Viper) error {
// BPF map size options
// Any map size explicitly set via option will override the dynamic
// sizing.
c.AuthMapEntries = vp.GetInt(AuthMapEntriesName)
c.CTMapEntriesGlobalTCP = vp.GetInt(CTMapEntriesGlobalTCPName)
c.CTMapEntriesGlobalAny = vp.GetInt(CTMapEntriesGlobalAnyName)
c.NATMapEntriesGlobal = vp.GetInt(NATMapEntriesGlobalName)
c.NeighMapEntriesGlobal = vp.GetInt(NeighMapEntriesGlobalName)
c.PolicyMapEntries = vp.GetInt(PolicyMapEntriesName)
c.PolicyMapFullReconciliationInterval = vp.GetDuration(PolicyMapFullReconciliationIntervalName)
c.SockRevNatEntries = vp.GetInt(SockRevNatEntriesName)
c.LBMapEntries = vp.GetInt(LBMapEntriesName)
c.LBServiceMapEntries = vp.GetInt(LBServiceMapMaxEntries)
c.LBBackendMapEntries = vp.GetInt(LBBackendMapMaxEntries)
c.LBRevNatEntries = vp.GetInt(LBRevNatMapMaxEntries)
c.LBAffinityMapEntries = vp.GetInt(LBAffinityMapMaxEntries)
c.LBSourceRangeMapEntries = vp.GetInt(LBSourceRangeMapMaxEntries)
c.LBMaglevMapEntries = vp.GetInt(LBMaglevMapMaxEntries)
// Don't attempt dynamic sizing if any of the sizeof members was not
// populated by the daemon (or any other caller).
if c.SizeofCTElement == 0 ||
c.SizeofNATElement == 0 ||
c.SizeofNeighElement == 0 ||
c.SizeofSockRevElement == 0 {
return nil
}
// Allow the range (0.0, 1.0] because the dynamic size will be clamped
// to the table limits anyway. Thus, a ratio of e.g. 0.98 will not lead
// to 98% of the total memory being allocated for BPF maps.
dynamicSizeRatio := vp.GetFloat64(MapEntriesGlobalDynamicSizeRatioName)
if 0.0 < dynamicSizeRatio && dynamicSizeRatio <= 1.0 {
vms, err := memory.Get()
if err != nil || vms == nil {
log.WithError(err).Fatal("Failed to get system memory")
}
c.calculateDynamicBPFMapSizes(vp, vms.Total, dynamicSizeRatio)
c.BPFMapsDynamicSizeRatio = dynamicSizeRatio
} else if dynamicSizeRatio < 0.0 {
return fmt.Errorf("specified dynamic map size ratio %f must be > 0.0", dynamicSizeRatio)
} else if dynamicSizeRatio > 1.0 {
return fmt.Errorf("specified dynamic map size ratio %f must be ≤ 1.0", dynamicSizeRatio)
}
return nil
}
// SetMapElementSizes sets the BPF map element sizes (key + value) used for
// dynamic BPF map size calculations in calculateDynamicBPFMapSizes.
func (c *DaemonConfig) SetMapElementSizes(
sizeofCTElement,
sizeofNATElement,
sizeofNeighElement,
sizeofSockRevElement int) {
c.SizeofCTElement = sizeofCTElement
c.SizeofNATElement = sizeofNATElement
c.SizeofNeighElement = sizeofNeighElement
c.SizeofSockRevElement = sizeofSockRevElement
}
func (c *DaemonConfig) calculateDynamicBPFMapSizes(vp *viper.Viper, totalMemory uint64, dynamicSizeRatio float64) {
// Heuristic:
// Distribute relative to map default entries among the different maps.
// Cap each map size by the maximum. Map size provided by the user will
// override the calculated value and also the max. There will be a check
// for maximum size later on in DaemonConfig.Validate()
//
// Calculation examples:
//
// Memory CT TCP CT Any NAT
//
// 512MB 33140 16570 33140
// 1GB 66280 33140 66280
// 4GB 265121 132560 265121
// 16GB 1060485 530242 1060485
memoryAvailableForMaps := int(float64(totalMemory) * dynamicSizeRatio)
log.Infof("Memory available for map entries (%.3f%% of %dB): %dB", dynamicSizeRatio, totalMemory, memoryAvailableForMaps)
totalMapMemoryDefault := CTMapEntriesGlobalTCPDefault*c.SizeofCTElement +
CTMapEntriesGlobalAnyDefault*c.SizeofCTElement +
NATMapEntriesGlobalDefault*c.SizeofNATElement +
// The neighbor table has the same number of entries as the NAT map.
NATMapEntriesGlobalDefault*c.SizeofNeighElement +
SockRevNATMapEntriesDefault*c.SizeofSockRevElement
log.Debugf("Total memory for default map entries: %d", totalMapMemoryDefault)
getEntries := func(entriesDefault, min, max int) int {
entries := (entriesDefault * memoryAvailableForMaps) / totalMapMemoryDefault
if entries < min {
entries = min
} else if entries > max {
log.Debugf("clamped from %d to %d", entries, max)
entries = max
}
return entries
}
// If the value for a particular map was explicitly set by an
// option, disable dynamic sizing for this map and use the
// provided size.
if !vp.IsSet(CTMapEntriesGlobalTCPName) {
c.CTMapEntriesGlobalTCP =
getEntries(CTMapEntriesGlobalTCPDefault, LimitTableAutoGlobalTCPMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
CTMapEntriesGlobalTCPName, c.CTMapEntriesGlobalTCP)
} else {
log.Debugf("option %s set by user to %v", CTMapEntriesGlobalTCPName, c.CTMapEntriesGlobalTCP)
}
if !vp.IsSet(CTMapEntriesGlobalAnyName) {
c.CTMapEntriesGlobalAny =
getEntries(CTMapEntriesGlobalAnyDefault, LimitTableAutoGlobalAnyMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
CTMapEntriesGlobalAnyName, c.CTMapEntriesGlobalAny)
} else {
log.Debugf("option %s set by user to %v", CTMapEntriesGlobalAnyName, c.CTMapEntriesGlobalAny)
}
if !vp.IsSet(NATMapEntriesGlobalName) {
c.NATMapEntriesGlobal =
getEntries(NATMapEntriesGlobalDefault, LimitTableAutoNatGlobalMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
if c.NATMapEntriesGlobal > c.CTMapEntriesGlobalTCP+c.CTMapEntriesGlobalAny {
// CT table size was specified manually, make sure that the NAT table size
// does not exceed maximum CT table size. See
// (*DaemonConfig).checkMapSizeLimits.
c.NATMapEntriesGlobal = (c.CTMapEntriesGlobalTCP + c.CTMapEntriesGlobalAny) * 2 / 3
log.Warningf("option %s would exceed maximum determined by CT table sizes, capping to %v",
NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
}
} else {
log.Debugf("option %s set by user to %v", NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
}
if !vp.IsSet(NeighMapEntriesGlobalName) {
// By default we auto-size it to the same value as the NAT map since we
// need to keep at least as many neigh entries.
c.NeighMapEntriesGlobal = c.NATMapEntriesGlobal
log.Infof("option %s set by dynamic sizing to %v",
NeighMapEntriesGlobalName, c.NeighMapEntriesGlobal)
} else {
log.Debugf("option %s set by user to %v", NeighMapEntriesGlobalName, c.NeighMapEntriesGlobal)
}
if !vp.IsSet(SockRevNatEntriesName) {
c.SockRevNatEntries =
getEntries(SockRevNATMapEntriesDefault, LimitTableAutoSockRevNatMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
SockRevNatEntriesName, c.SockRevNatEntries)
} else {
log.Debugf("option %s set by user to %v", NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
}
}
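// Worked example for the sizing heuristic above (a sketch; element sizes come
// from SetMapElementSizes and are not repeated here): with totalMemory = 4GiB
// and dynamicSizeRatio = 0.0025, memoryAvailableForMaps is roughly 10.7MB.
// Each map then gets entries proportional to its default share, e.g.
//
//	CTMapEntriesGlobalTCP ≈ CTMapEntriesGlobalTCPDefault *
//		memoryAvailableForMaps / totalMapMemoryDefault
//
// clamped to [LimitTableAutoGlobalTCPMin, LimitTableMax], which is where the
// ~265k entries in the 4GB row of the table above come from.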
// Validate VTEP integration configuration
func (c *DaemonConfig) validateVTEP(vp *viper.Viper) error {
vtepEndpoints := vp.GetStringSlice(VtepEndpoint)
vtepCIDRs := vp.GetStringSlice(VtepCIDR)
vtepCidrMask := vp.GetString(VtepMask)
vtepMACs := vp.GetStringSlice(VtepMAC)
if (len(vtepEndpoints) < 1) ||
len(vtepEndpoints) != len(vtepCIDRs) ||
len(vtepEndpoints) != len(vtepMACs) {
return fmt.Errorf("VTEP configuration must have the same number of Endpoint, VTEP and MAC configurations (Found %d endpoints, %d MACs, %d CIDR ranges)", len(vtepEndpoints), len(vtepMACs), len(vtepCIDRs))
}
if len(vtepEndpoints) > defaults.MaxVTEPDevices {
return fmt.Errorf("VTEP must not exceed %d VTEP devices (Found %d VTEPs)", defaults.MaxVTEPDevices, len(vtepEndpoints))
}
for _, ep := range vtepEndpoints {
endpoint := net.ParseIP(ep)
if endpoint == nil {
return fmt.Errorf("Invalid VTEP IP: %v", ep)
}
ip4 := endpoint.To4()
if ip4 == nil {
return fmt.Errorf("Invalid VTEP IPv4 address %v", ip4)
}
c.VtepEndpoints = append(c.VtepEndpoints, endpoint)
}
for _, v := range vtepCIDRs {
externalCIDR, err := cidr.ParseCIDR(v)
if err != nil {
return fmt.Errorf("Invalid VTEP CIDR: %v", v)
}
c.VtepCIDRs = append(c.VtepCIDRs, externalCIDR)
}
mask := net.ParseIP(vtepCidrMask)
if mask == nil {
return fmt.Errorf("Invalid VTEP CIDR Mask: %v", vtepCidrMask)
}
c.VtepCidrMask = mask
for _, m := range vtepMACs {
externalMAC, err := mac.ParseMAC(m)
if err != nil {
return fmt.Errorf("Invalid VTEP MAC: %v", m)
}
c.VtepMACs = append(c.VtepMACs, externalMAC)
}
return nil
}
// KubeProxyReplacementFullyEnabled returns true if Cilium is _effectively_
// running in full KPR mode.
func (c *DaemonConfig) KubeProxyReplacementFullyEnabled() bool {
return c.EnableHostPort &&
c.EnableNodePort &&
c.EnableExternalIPs &&
c.EnableSocketLB &&
c.EnableSessionAffinity
}
var backupFileNames []string = []string{
"agent-runtime-config.json",
"agent-runtime-config-1.json",
"agent-runtime-config-2.json",
}
// StoreInFile stores the configuration in the given directory under the file
// name 'agent-runtime-config.json'. If this file already exists, it is renamed
// to 'agent-runtime-config-1.json'; if 'agent-runtime-config-1.json' also
// exists, it is renamed to 'agent-runtime-config-2.json'.
func (c *DaemonConfig) StoreInFile(dir string) error {
backupFiles(dir, backupFileNames)
f, err := os.Create(backupFileNames[0])
if err != nil {
return err
}
defer f.Close()
e := json.NewEncoder(f)
e.SetIndent("", " ")
// Exclude concurrent modification of fields protected by c.ConfigPatchMutex
// while we store the file.
c.ConfigPatchMutex.RLock()
err = e.Encode(c)
c.shaSum = c.checksum()
c.ConfigPatchMutex.RUnlock()
return err
}
func (c *DaemonConfig) checksum() [32]byte {
// take a shallow copy for summing
sumConfig := *c
// Ignore variable parts
sumConfig.Opts = nil
cBytes, err := json.Marshal(&sumConfig)
if err != nil {
return [32]byte{}
}
return sha256.Sum256(cBytes)
}
// ValidateUnchanged takes a context that is unused so that it can be used as a doFunc in a
// controller
func (c *DaemonConfig) ValidateUnchanged(context.Context) error {
// Exclude concurrent modification of fields protected by c.ConfigPatchMutex
// while we compute the checksum.
c.ConfigPatchMutex.RLock()
sum := c.checksum()
c.ConfigPatchMutex.RUnlock()
if sum != c.shaSum {
return c.diffFromFile()
}
return nil
}
func (c *DaemonConfig) diffFromFile() error {
f, err := os.Open(backupFileNames[0])
if err != nil {
return err
}
fi, err := f.Stat()
if err != nil {
return err
}
fileBytes := make([]byte, fi.Size())
count, err := f.Read(fileBytes)
if err != nil {
return err
}
fileBytes = fileBytes[:count]
var config DaemonConfig
err = json.Unmarshal(fileBytes, &config)
var diff string
if err != nil {
diff = fmt.Errorf("unmarshal failed %q: %w", string(fileBytes), err).Error()
} else {
// Ignore all unexported fields during Diff.
// from https://github.com/google/go-cmp/issues/313#issuecomment-1315651560
opts := cmp.FilterPath(func(p cmp.Path) bool {
sf, ok := p.Index(-1).(cmp.StructField)
if !ok {
return false
}
r, _ := utf8.DecodeRuneInString(sf.Name())
return !unicode.IsUpper(r)
}, cmp.Ignore())
diff = cmp.Diff(&config, c, opts,
cmpopts.IgnoreTypes(&IntOptions{}),
cmpopts.IgnoreTypes(&OptionLibrary{}))
}
return fmt.Errorf("Config differs:\n%s", diff)
}
func (c *DaemonConfig) BGPControlPlaneEnabled() bool {
return c.EnableBGPControlPlane
}
func (c *DaemonConfig) IsDualStack() bool {
return c.EnableIPv4 && c.EnableIPv6
}
// IsLocalRouterIP checks if provided IP address matches either LocalRouterIPv4
// or LocalRouterIPv6
func (c *DaemonConfig) IsLocalRouterIP(ip string) bool {
return ip != "" && (c.LocalRouterIPv4 == ip || c.LocalRouterIPv6 == ip)
}
// StoreViperInFile stores viper's configuration in the given directory under
// the file name 'viper-agent-config.yaml'. If this file already exists, it is
// renamed to 'viper-agent-config-1.yaml'; if 'viper-agent-config-1.yaml' also
// exists, it is renamed to 'viper-agent-config-2.yaml'.
func StoreViperInFile(dir string) error {
backupFileNames := []string{
"viper-agent-config.yaml",
"viper-agent-config-1.yaml",
"viper-agent-config-2.yaml",
}
backupFiles(dir, backupFileNames)
return viper.WriteConfigAs(backupFileNames[0])
}
func backupFiles(dir string, backupFilenames []string) {
for i := len(backupFilenames) - 1; i > 0; i-- {
newFileName := filepath.Join(dir, backupFilenames[i-1])
oldestFilename := filepath.Join(dir, backupFilenames[i])
if _, err := os.Stat(newFileName); os.IsNotExist(err) {
continue
}
err := os.Rename(newFileName, oldestFilename)
if err != nil {
log.WithError(err).WithFields(logrus.Fields{
"old-name": oldestFilename,
"new-name": newFileName,
}).Error("Unable to rename configuration files")
}
}
}
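// Illustrative note for backupFiles (a sketch of the rotation order): with the
// agent-runtime-config names above, agent-runtime-config-1.json is renamed to
// agent-runtime-config-2.json first, then agent-runtime-config.json to
// agent-runtime-config-1.json, so the caller can write a fresh
// agent-runtime-config.json while keeping the two previous generations.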
func sanitizeIntParam(vp *viper.Viper, paramName string, paramDefault int) int {
intParam := vp.GetInt(paramName)
if intParam <= 0 {
if vp.IsSet(paramName) {
log.WithFields(
logrus.Fields{
"parameter": paramName,
"defaultValue": paramDefault,
}).Warning("user-provided parameter had value <= 0 , which is invalid ; setting to default")
}
return paramDefault
}
return intParam
}
// validateConfigMap checks whether the flag exists and validates its value
func validateConfigMap(cmd *cobra.Command, m map[string]interface{}) error {
flags := cmd.Flags()
for key, value := range m {
flag := flags.Lookup(key)
if flag == nil {
continue
}
var err error
switch t := flag.Value.Type(); t {
case "bool":
_, err = cast.ToBoolE(value)
case "duration":
_, err = cast.ToDurationE(value)
case "float32":
_, err = cast.ToFloat32E(value)
case "float64":
_, err = cast.ToFloat64E(value)
case "int":
_, err = cast.ToIntE(value)
case "int8":
_, err = cast.ToInt8E(value)
case "int16":
_, err = cast.ToInt16E(value)
case "int32":
_, err = cast.ToInt32E(value)
case "int64":
_, err = cast.ToInt64E(value)
case "map":
// custom type, see pkg/option/map_options.go
err = flag.Value.Set(fmt.Sprintf("%s", value))
case "stringSlice":
_, err = cast.ToStringSliceE(value)
case "string":
_, err = cast.ToStringE(value)
case "uint":
_, err = cast.ToUintE(value)
case "uint8":
_, err = cast.ToUint8E(value)
case "uint16":
_, err = cast.ToUint16E(value)
case "uint32":
_, err = cast.ToUint32E(value)
case "uint64":
_, err = cast.ToUint64E(value)
case "stringToString":
_, err = cast.ToStringMapStringE(value)
default:
log.Warnf("Unable to validate option %s value of type %s", key, t)
}
if err != nil {
return fmt.Errorf("option %s: %w", key, err)
}
}
return nil
}
// InitConfig reads in config file and ENV variables if set.
func InitConfig(cmd *cobra.Command, programName, configName string, vp *viper.Viper) func() {
return func() {
if vp.GetBool("version") {
fmt.Printf("%s %s\n", programName, version.Version)
os.Exit(0)
}
if vp.GetString(CMDRef) != "" {
return
}
Config.ConfigFile = vp.GetString(ConfigFile) // enable ability to specify config file via flag
Config.ConfigDir = vp.GetString(ConfigDir)
vp.SetEnvPrefix("cilium")
if Config.ConfigDir != "" {
if _, err := os.Stat(Config.ConfigDir); os.IsNotExist(err) {
log.Fatalf("Non-existent configuration directory %s", Config.ConfigDir)
}
if m, err := ReadDirConfig(Config.ConfigDir); err != nil {
log.WithError(err).Fatalf("Unable to read configuration directory %s", Config.ConfigDir)
} else {
// replace deprecated fields with new fields
ReplaceDeprecatedFields(m)
// validate the config-map
if err := validateConfigMap(cmd, m); err != nil {
log.WithError(err).Fatal("Incorrect config-map flag value")
}
if err := MergeConfig(vp, m); err != nil {
log.WithError(err).Fatal("Unable to merge configuration")
}
}
}
if Config.ConfigFile != "" {
vp.SetConfigFile(Config.ConfigFile)
} else {
vp.SetConfigName(configName) // name of config file (without extension)
vp.AddConfigPath("$HOME") // adding home directory as first search path
}
// We need to check for the debug environment variable or CLI flag before
// loading the configuration file since on configuration file read failure
// we will emit a debug log entry.
if vp.GetBool(DebugArg) {
logging.SetLogLevelToDebug()
}
// If a config file is found, read it in.
if err := vp.ReadInConfig(); err == nil {
log.WithField(logfields.Path, vp.ConfigFileUsed()).
Info("Using config from file")
} else if Config.ConfigFile != "" {
log.WithField(logfields.Path, Config.ConfigFile).
Fatal("Error reading config file")
} else {
log.WithError(err).Debug("Skipped reading configuration file")
}
// Check for the debug flag again now that the configuration file may have
// been loaded, as it might have changed.
if vp.GetBool("debug") {
logging.SetLogLevelToDebug()
}
}
}
func getDefaultMonitorQueueSize(numCPU int) int {
monitorQueueSize := numCPU * defaults.MonitorQueueSizePerCPU
if monitorQueueSize > defaults.MonitorQueueSizePerCPUMaximum {
monitorQueueSize = defaults.MonitorQueueSizePerCPUMaximum
}
return monitorQueueSize
}
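// Worked example for getDefaultMonitorQueueSize (a sketch; the concrete
// values live in the defaults package): on an 8-CPU node the queue size is
// 8*defaults.MonitorQueueSizePerCPU, unless that product exceeds
// defaults.MonitorQueueSizePerCPUMaximum, in which case the maximum is used.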
// BPFEventBufferConfig contains parsed configuration for a bpf map event buffer.
type BPFEventBufferConfig struct {
Enabled bool
MaxSize int
TTL time.Duration
}
// BPFEventBufferConfigs contains parsed bpf event buffer configs, indexed by map name.
type BPFEventBufferConfigs map[string]BPFEventBufferConfig
// GetEventBufferConfig returns either the relevant config for a map name, or a default
// one with enabled=false otherwise.
func (d *DaemonConfig) GetEventBufferConfig(name string) BPFEventBufferConfig {
return d.bpfMapEventConfigs.get(name)
}
func (cs BPFEventBufferConfigs) get(name string) BPFEventBufferConfig {
return cs[name]
}
// ParseEventBufferTupleString parses an event buffer configuration tuple string.
// For example: "enabled,100,24h"
// which refers to enabled=true, maxSize=100, ttl=24 hours.
func ParseEventBufferTupleString(optsStr string) (BPFEventBufferConfig, error) {
opts := strings.Split(optsStr, ",")
enabled := false
conf := BPFEventBufferConfig{}
if len(opts) != 3 {
return conf, fmt.Errorf("unexpected event buffer config value format, should be in format 'mapname=enabled,100,24h'")
}
if opts[0] != "enabled" && opts[0] != "disabled" {
return conf, fmt.Errorf("could not parse event buffer enabled: must be either 'enabled' or 'disabled'")
}
if opts[0] == "enabled" {
enabled = true
}
size, err := strconv.Atoi(opts[1])
if err != nil {
return conf, fmt.Errorf("could not parse event buffer maxSize int: %w", err)
}
ttl, err := time.ParseDuration(opts[2])
if err != nil {
return conf, fmt.Errorf("could not parse event buffer ttl duration: %w", err)
}
if size < 0 {
return conf, fmt.Errorf("event buffer max size cannot be less than zero (%d)", conf.MaxSize)
}
conf.TTL = ttl
conf.Enabled = enabled && size != 0
conf.MaxSize = size
return conf, nil
}
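// exampleParseEventBufferTupleString is a minimal usage sketch (not called by
// the agent) for the tuple format parsed above.
func exampleParseEventBufferTupleString() {
	conf, err := ParseEventBufferTupleString("enabled,100,24h")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// conf.Enabled == true, conf.MaxSize == 100, conf.TTL == 24*time.Hour
	fmt.Println(conf.Enabled, conf.MaxSize, conf.TTL)
}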
func parseBPFMapEventConfigs(confs BPFEventBufferConfigs, confMap map[string]string) error {
for name, confStr := range confMap {
conf, err := ParseEventBufferTupleString(confStr)
if err != nil {
return fmt.Errorf("unable to parse %s: %w", BPFMapEventBuffers, err)
}
confs[name] = conf
}
return nil
}
func (d *DaemonConfig) EnforceLXCFibLookup() bool {
// See https://github.com/cilium/cilium/issues/27343 for the symptoms.
//
// We want to enforce FIB lookup if EndpointRoutes are enabled, because
// this was a config dependency change which caused different behaviour
// since v1.14.0-snapshot.2. We will remove this hack later, once we
// have auto-device detection on by default.
return d.EnableEndpointRoutes
}
func (d *DaemonConfig) GetZone(id uint8) string {
return d.ReverseFixedZoneMapping[id]
}
func (d *DaemonConfig) GetZoneID(zone string) uint8 {
return d.FixedZoneMapping[zone]
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
var (
specPolicyTracing = Option{
Description: "Enable tracing when resolving policy (Debug)",
}
// DaemonOptionLibrary is the daemon's option library and should be
// treated as read-only.
DaemonOptionLibrary = OptionLibrary{
PolicyTracing: &specPolicyTracing,
}
DaemonMutableOptionLibrary = OptionLibrary{
ConntrackAccounting: &specConntrackAccounting,
PolicyAccounting: &specPolicyAccounting,
ConntrackLocal: &specConntrackLocal,
Debug: &specDebug,
DebugLB: &specDebugLB,
DebugPolicy: &specDebugPolicy,
DropNotify: &specDropNotify,
TraceNotify: &specTraceNotify,
PolicyVerdictNotify: &specPolicyVerdictNotify,
PolicyAuditMode: &specPolicyAuditMode,
MonitorAggregation: &specMonitorAggregation,
SourceIPVerification: &specSourceIPVerification,
}
)
func init() {
for k, v := range DaemonMutableOptionLibrary {
DaemonOptionLibrary[k] = v
}
}
// ParseDaemonOption parses a string as daemon option
func ParseDaemonOption(opt string) (string, OptionSetting, error) {
return ParseOption(opt, &DaemonOptionLibrary)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
var (
endpointMutableOptionLibrary = OptionLibrary{
ConntrackAccounting: &specConntrackAccounting,
PolicyAccounting: &specPolicyAccounting,
ConntrackLocal: &specConntrackLocal,
Debug: &specDebug,
DebugLB: &specDebugLB,
DebugPolicy: &specDebugPolicy,
DropNotify: &specDropNotify,
TraceNotify: &specTraceNotify,
PolicyVerdictNotify: &specPolicyVerdictNotify,
PolicyAuditMode: &specPolicyAuditMode,
MonitorAggregation: &specMonitorAggregation,
SourceIPVerification: &specSourceIPVerification,
}
)
func GetEndpointMutableOptionLibrary() OptionLibrary {
opt := OptionLibrary{}
for k, v := range endpointMutableOptionLibrary {
opt[k] = v
}
return opt
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"strings"
)
// Validator returns a validated string along with a possible error.
type Validator func(val string) (string, error)
// MapOptions holds a map of values and a validation function.
type MapOptions struct {
vals map[string]string
validator Validator
}
// NamedMapOptions is a MapOptions struct with a configuration name.
// This struct is useful to keep a reference to the assigned
// field name in the internal configuration struct.
type NamedMapOptions struct {
name string
MapOptions
}
// NewNamedMapOptions creates a reference to a new NamedMapOptions struct.
func NewNamedMapOptions(name string, values *map[string]string, validator Validator) *NamedMapOptions {
return &NamedMapOptions{
name: name,
MapOptions: *NewMapOpts(*values, validator),
}
}
// NewMapOpts creates a new MapOpts with the specified map of values and an
// optional validator.
func NewMapOpts(values map[string]string, validator Validator) *MapOptions {
if values == nil {
values = make(map[string]string)
}
return &MapOptions{
vals: values,
validator: validator,
}
}
func (opts *MapOptions) String() string {
var kvs []string
for k, v := range opts.vals {
kvs = append(kvs, fmt.Sprintf("%s=%s", k, v))
}
return strings.Join(kvs, ",")
}
// Type returns a string name for this Option type
func (opts *MapOptions) Type() string {
return "map"
}
// Set validates, if needed, the input value and adds it to the internal map,
// by splitting on '='.
func (opts *MapOptions) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
vals := strings.SplitN(value, "=", 2)
if len(vals) == 1 {
(opts.vals)[vals[0]] = ""
} else {
(opts.vals)[vals[0]] = vals[1]
}
return nil
}
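// exampleMapOptionsSet is a minimal usage sketch (not used by the agent)
// showing how Set splits "key=value" pairs; no validator is supplied here, so
// values are stored as-is.
func exampleMapOptionsSet() {
	vals := map[string]string{}
	opts := NewNamedMapOptions("example-map", &vals, nil)
	_ = opts.Set("cluster=prod") // stored as vals["cluster"] = "prod"
	_ = opts.Set("debug")        // no '=', stored with an empty value
	fmt.Println(opts.String())   // e.g. "cluster=prod,debug=" (order not guaranteed)
}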
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"strconv"
"strings"
)
// MonitorAggregationLevel represents a level of aggregation for monitor events
// from the datapath. Low values represent no aggregation, that is, a larger
// number of events emitted from the datapath; higher values represent more
// aggregation, to minimize the number of events emitted from the datapath.
//
// The MonitorAggregationLevel does not affect the Debug option in the daemon
// or endpoint, so debug notifications will continue uninhibited by this
// setting.
type MonitorAggregationLevel OptionSetting
const (
// MonitorAggregationLevelNone represents no aggregation in the
// datapath; all packets will be monitored.
MonitorAggregationLevelNone OptionSetting = 0
// MonitorAggregationLevelLowest represents aggregation of monitor events
// to emit a maximum of one trace event per packet. Trace events when
// packets are received are disabled.
MonitorAggregationLevelLowest OptionSetting = 1
// MonitorAggregationLevelLow is the same as
// MonitorAggregationLevelLowest, but may aggregate additional traffic
// in future.
MonitorAggregationLevelLow OptionSetting = 2
// MonitorAggregationLevelMedium represents aggregation of monitor
// events to only emit notifications periodically for each connection
// unless there is new information (eg, a TCP connection is closed).
MonitorAggregationLevelMedium OptionSetting = 3
// MonitorAggregationLevelMax is the maximum level of aggregation
// currently supported.
MonitorAggregationLevelMax OptionSetting = 4
)
// monitorAggregationOption maps a user-specified string to a monitor
// aggregation level.
var monitorAggregationOption = map[string]OptionSetting{
"": MonitorAggregationLevelNone,
"none": MonitorAggregationLevelNone,
"disabled": MonitorAggregationLevelNone,
"lowest": MonitorAggregationLevelLowest,
"low": MonitorAggregationLevelLow,
"medium": MonitorAggregationLevelMedium,
"max": MonitorAggregationLevelMax,
"maximum": MonitorAggregationLevelMax,
}
func init() {
for i := MonitorAggregationLevelNone; i <= MonitorAggregationLevelMax; i++ {
number := strconv.Itoa(int(i))
monitorAggregationOption[number] = OptionSetting(i)
}
}
// monitorAggregationFormat maps an aggregation level to a formatted string.
var monitorAggregationFormat = map[OptionSetting]string{
MonitorAggregationLevelNone: "None",
MonitorAggregationLevelLowest: "Lowest",
MonitorAggregationLevelLow: "Low",
MonitorAggregationLevelMedium: "Medium",
MonitorAggregationLevelMax: "Max",
}
// VerifyMonitorAggregationLevel validates the specified key/value for a
// monitor aggregation level.
func VerifyMonitorAggregationLevel(key, value string) error {
_, err := ParseMonitorAggregationLevel(value)
return err
}
// ParseMonitorAggregationLevel turns a string into a monitor aggregation
// level. The string may contain an integer value or a string representation of
// a particular monitor aggregation level.
func ParseMonitorAggregationLevel(value string) (OptionSetting, error) {
// First, attempt the string representation.
if level, ok := monitorAggregationOption[strings.ToLower(value)]; ok {
return level, nil
}
// If it's not a valid string option, attempt to parse an integer.
valueParsed, err := strconv.Atoi(value)
if err != nil {
err = fmt.Errorf("invalid monitor aggregation level %q", value)
return MonitorAggregationLevelNone, err
}
parsed := OptionSetting(valueParsed)
if parsed < MonitorAggregationLevelNone || parsed > MonitorAggregationLevelMax {
err = fmt.Errorf("monitor aggregation level must be between %d and %d",
MonitorAggregationLevelNone, MonitorAggregationLevelMax)
return MonitorAggregationLevelNone, err
}
return parsed, nil
}
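// exampleParseMonitorAggregationLevel is a minimal usage sketch (not called by
// the agent) showing that textual names and their numeric equivalents parse to
// the same aggregation level.
func exampleParseMonitorAggregationLevel() {
	byName, _ := ParseMonitorAggregationLevel("medium")
	byNumber, _ := ParseMonitorAggregationLevel("3")
	fmt.Println(byName == MonitorAggregationLevelMedium, byName == byNumber) // true true
}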
// FormatMonitorAggregationLevel maps a MonitorAggregationLevel to a string.
func FormatMonitorAggregationLevel(level OptionSetting) string {
return monitorAggregationFormat[level]
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"sort"
"strings"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/lock"
)
// VerifyFunc validates option key with value and may return an error if the
// option should not be applied
type VerifyFunc func(key string, value string) error
// ParseFunc parses the option value and may return an error if the option
// cannot be parsed or applied.
type ParseFunc func(value string) (OptionSetting, error)
// FormatFunc formats the specified option value as its textual representation.
type FormatFunc func(value OptionSetting) string
// Option is the structure used to specify the semantics of a configurable
// boolean option
type Option struct {
// Define is the name of the #define used for BPF programs
Define string
// Description is a short human readable description
Description string
// Immutable marks an option which is read-only
Immutable bool
// Requires is a list of required options; such options will be
// automatically enabled as required.
Requires []string
// Parse is called to parse the option. If not specified, defaults to
// NormalizeBool().
Parse ParseFunc
// FormatFunc is called to format the value for an option. If not
// specified, defaults to formatting 0 as "Disabled" and other values
// as "Enabled".
Format FormatFunc
// Verify is called prior to applying the option
Verify VerifyFunc
}
// OptionSetting specifies the different choices each Option has.
type OptionSetting int
const (
OptionDisabled OptionSetting = iota
OptionEnabled
)
// RequiresOption returns true if the option requires the specified option `name`.
func (o Option) RequiresOption(name string) bool {
for _, o := range o.Requires {
if o == name {
return true
}
}
return false
}
type OptionLibrary map[string]*Option
func (l OptionLibrary) Lookup(name string) (string, *Option) {
nameLower := strings.ToLower(name)
for k := range l {
if strings.ToLower(k) == nameLower {
return k, l[k]
}
}
return "", nil
}
func (l OptionLibrary) Define(name string) string {
if _, ok := l[name]; ok {
return l[name].Define
}
return name
}
func NormalizeBool(value string) (OptionSetting, error) {
switch strings.ToLower(value) {
case "true", "on", "enable", "enabled", "1":
return OptionEnabled, nil
case "false", "off", "disable", "disabled", "0":
return OptionDisabled, nil
default:
return OptionDisabled, fmt.Errorf("invalid option value %s", value)
}
}
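// exampleNormalizeBool is a minimal usage sketch (not used by the agent)
// showing the accepted boolean spellings.
func exampleNormalizeBool() {
	on, _ := NormalizeBool("Enable")   // case-insensitive, maps to OptionEnabled
	off, _ := NormalizeBool("0")       // "0" maps to OptionDisabled
	_, err := NormalizeBool("perhaps") // unrecognized values return an error
	fmt.Println(on == OptionEnabled, off == OptionDisabled, err != nil) // true true true
}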
// ValidateConfigurationMap validates a given configuration map based on the
// option library
func (l *OptionLibrary) ValidateConfigurationMap(n models.ConfigurationMap) (OptionMap, error) {
o := make(OptionMap)
for k, v := range n {
_, newVal, err := ParseKeyValue(l, k, v)
if err != nil {
return nil, err
}
if err := l.Validate(k, v); err != nil {
return nil, err
}
o[k] = newVal
}
return o, nil
}
func (l OptionLibrary) Validate(name string, value string) error {
key, spec := l.Lookup(name)
if key == "" {
return fmt.Errorf("unknown option %s", name)
}
if spec.Immutable {
return fmt.Errorf("specified option is immutable (read-only)")
}
if spec.Verify != nil {
return spec.Verify(key, value)
}
return nil
}
type OptionMap map[string]OptionSetting
func (om OptionMap) DeepCopy() OptionMap {
cpy := make(OptionMap, len(om))
for k, v := range om {
cpy[k] = v
}
return cpy
}
// IntOptions member functions with external access do not require
// locking by the caller, while functions with internal access presume
// the caller to have taken care of any locking needed.
type IntOptions struct {
optsMU lock.RWMutex // Protects all variables from this structure below this line
Opts OptionMap `json:"map"`
Library *OptionLibrary `json:"-"`
}
// GetImmutableModel returns the set of immutable options as a ConfigurationMap API model.
func (o *IntOptions) GetImmutableModel() *models.ConfigurationMap {
immutableCfg := make(models.ConfigurationMap)
return &immutableCfg
}
// GetMutableModel returns the set of mutable options as a ConfigurationMap API model.
func (o *IntOptions) GetMutableModel() *models.ConfigurationMap {
mutableCfg := make(models.ConfigurationMap)
o.optsMU.RLock()
for k, v := range o.Opts {
_, config := o.Library.Lookup(k)
// It's possible that an option has since been removed and thus has
// no corresponding configuration; need to check if configuration is
// nil accordingly.
if config != nil {
if config.Format == nil {
if v == OptionDisabled {
mutableCfg[k] = "Disabled"
} else {
mutableCfg[k] = "Enabled"
}
} else {
mutableCfg[k] = config.Format(v)
}
}
}
o.optsMU.RUnlock()
return &mutableCfg
}
func (o *IntOptions) DeepCopy() *IntOptions {
o.optsMU.RLock()
cpy := &IntOptions{
Opts: o.Opts.DeepCopy(),
Library: o.Library,
}
o.optsMU.RUnlock()
return cpy
}
func NewIntOptions(lib *OptionLibrary) *IntOptions {
return &IntOptions{
Opts: OptionMap{},
Library: lib,
}
}
func (o *IntOptions) getValue(key string) OptionSetting {
value, exists := o.Opts[key]
if !exists {
return OptionDisabled
}
return value
}
func (o *IntOptions) GetValue(key string) OptionSetting {
o.optsMU.RLock()
v := o.getValue(key)
o.optsMU.RUnlock()
return v
}
func (o *IntOptions) IsEnabled(key string) bool {
return o.GetValue(key) != OptionDisabled
}
// SetValidated sets the option `key` to the specified value. The caller is
// expected to have validated the input to this function.
func (o *IntOptions) SetValidated(key string, value OptionSetting) {
o.optsMU.Lock()
o.Opts[key] = value
o.optsMU.Unlock()
}
// SetBool sets the specified option to Enabled.
func (o *IntOptions) SetBool(key string, value bool) {
intValue := OptionDisabled
if value {
intValue = OptionEnabled
}
o.optsMU.Lock()
o.Opts[key] = intValue
o.optsMU.Unlock()
}
func (o *IntOptions) Delete(key string) {
o.optsMU.Lock()
delete(o.Opts, key)
o.optsMU.Unlock()
}
func (o *IntOptions) SetIfUnset(key string, value OptionSetting) {
o.optsMU.Lock()
if _, exists := o.Opts[key]; !exists {
o.Opts[key] = value
}
o.optsMU.Unlock()
}
func (o *IntOptions) InheritDefault(parent *IntOptions, key string) {
o.optsMU.RLock()
o.Opts[key] = parent.GetValue(key)
o.optsMU.RUnlock()
}
func ParseOption(arg string, lib *OptionLibrary) (string, OptionSetting, error) {
result := OptionEnabled
if arg[0] == '!' {
result = OptionDisabled
arg = arg[1:]
}
optionSplit := strings.SplitN(arg, "=", 2)
arg = optionSplit[0]
if len(optionSplit) > 1 {
if result == OptionDisabled {
return "", OptionDisabled, fmt.Errorf("invalid boolean format")
}
return ParseKeyValue(lib, arg, optionSplit[1])
}
return "", OptionDisabled, fmt.Errorf("invalid option format")
}
func ParseKeyValue(lib *OptionLibrary, arg, value string) (string, OptionSetting, error) {
var result OptionSetting
key, spec := lib.Lookup(arg)
if key == "" {
return "", OptionDisabled, fmt.Errorf("unknown option %q", arg)
}
var err error
if spec.Parse != nil {
result, err = spec.Parse(value)
} else {
result, err = NormalizeBool(value)
}
if err != nil {
return "", OptionDisabled, err
}
if spec.Immutable {
return "", OptionDisabled, fmt.Errorf("specified option is immutable (read-only)")
}
return key, result, nil
}
// getFmtOpt returns "#define <name>" if the option exists and is enabled in the
// endpoint's Opts map, or "#undef <name>" if the option does not exist or is disabled.
func (o *IntOptions) getFmtOpt(name string) string {
define := o.Library.Define(name)
if define == "" {
return ""
}
value := o.getValue(name)
if value != OptionDisabled {
return fmt.Sprintf("#define %s %d", o.Library.Define(name), value)
}
return "#undef " + o.Library.Define(name)
}
func (o *IntOptions) GetFmtList() string {
txt := ""
o.optsMU.RLock()
opts := make([]string, 0, len(o.Opts))
for k := range o.Opts {
opts = append(opts, k)
}
sort.Strings(opts)
for _, k := range opts {
def := o.getFmtOpt(k)
if def != "" {
txt += def + "\n"
}
}
o.optsMU.RUnlock()
return txt
}
func (o *IntOptions) Dump() {
if o == nil {
return
}
o.optsMU.RLock()
opts := make([]string, 0, len(o.Opts))
for k := range o.Opts {
opts = append(opts, k)
}
sort.Strings(opts)
for _, k := range opts {
var text string
_, option := o.Library.Lookup(k)
if option == nil || option.Format == nil {
if o.Opts[k] == OptionDisabled {
text = "Disabled"
} else {
text = "Enabled"
}
} else {
text = option.Format(o.Opts[k])
}
fmt.Printf("%-24s %s\n", k, text)
}
o.optsMU.RUnlock()
}
// Validate validates a given configuration map based on the option library
func (o *IntOptions) Validate(n models.ConfigurationMap) error {
o.optsMU.RLock()
defer o.optsMU.RUnlock()
for k, v := range n {
_, newVal, err := ParseKeyValue(o.Library, k, v)
if err != nil {
return err
}
// Ignore validation if value is identical
if oldVal, ok := o.Opts[k]; ok && oldVal == newVal {
continue
}
if err := o.Library.Validate(k, v); err != nil {
return err
}
}
return nil
}
// ChangedFunc is called by `Apply()` for each option changed
type ChangedFunc func(key string, value OptionSetting, data interface{})
// enable enables the option `name` with all its dependencies
func (o *IntOptions) enable(name string) {
if o.Library != nil {
if _, opt := o.Library.Lookup(name); opt != nil {
for _, dependency := range opt.Requires {
o.enable(dependency)
}
}
}
o.Opts[name] = OptionEnabled
}
// set enables the option `name` with all its dependencies, and sets the
// integer level of the option to `value`.
func (o *IntOptions) set(name string, value OptionSetting) {
o.enable(name)
o.Opts[name] = value
}
// disable disables the option `name`. All options which depend on the option
// to be disabled will be disabled. Options which have previously been enabled
// as a dependency will not be automatically disabled.
func (o *IntOptions) disable(name string) {
o.Opts[name] = OptionDisabled
if o.Library != nil {
// Disable all options which have a dependency on the option
// that was just disabled
for key, opt := range *o.Library {
if opt.RequiresOption(name) && o.Opts[key] != OptionDisabled {
o.disable(key)
}
}
}
}
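// exampleOptionDependencies is a minimal sketch (not used by the agent) of how
// Requires chains behave: enabling an option pulls its dependencies in, while
// disabling a dependency cascades back to everything that requires it. The
// option names here are made up for the example.
func exampleOptionDependencies() {
	lib := OptionLibrary{
		"ExampleTracing": &Option{Description: "example option", Requires: []string{"ExampleLogging"}},
		"ExampleLogging": &Option{Description: "example dependency"},
	}
	opts := NewIntOptions(&lib)
	// enable and disable expect the caller to hold the options lock.
	opts.optsMU.Lock()
	opts.enable("ExampleTracing")  // also enables "ExampleLogging" via Requires
	opts.disable("ExampleLogging") // cascades and disables "ExampleTracing" too
	opts.optsMU.Unlock()
	fmt.Println(opts.IsEnabled("ExampleTracing"), opts.IsEnabled("ExampleLogging")) // false false
}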
type changedOptions struct {
key string
value OptionSetting
}
// ApplyValidated takes a configuration map and applies the changes. For an
// option which is changed, the `ChangedFunc` function is called with the
// `data` argument passed in as well. Returns the number of options changed if
// any.
//
// The caller is expected to have validated the configuration options prior to
// calling this function.
func (o *IntOptions) ApplyValidated(n OptionMap, changed ChangedFunc, data interface{}) int {
changes := make([]changedOptions, 0, len(n))
o.optsMU.Lock()
for k, optVal := range n {
val, ok := o.Opts[k]
if optVal == OptionDisabled {
/* Only disable if enabled already */
if ok && val != OptionDisabled {
o.disable(k)
changes = append(changes, changedOptions{key: k, value: optVal})
}
} else {
/* Only enable if not enabled already */
if !ok || val == OptionDisabled {
o.set(k, optVal)
changes = append(changes, changedOptions{key: k, value: optVal})
}
}
}
o.optsMU.Unlock()
for _, change := range changes {
changed(change.key, change.value, data)
}
return len(changes)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"net/netip"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/policy/api"
)
// getPrefixesFromCIDR fetches all CIDRs referred to by the specified slice
// and returns them as regular golang CIDR objects.
func getPrefixesFromCIDR(cidrs api.CIDRSlice) []netip.Prefix {
result, _, _ := ip.ParsePrefixes(cidrs.StringSlice())
return result
}
// GetPrefixesFromCIDRSet fetches all CIDRs referred to by the specified slice
// and returns them as regular golang CIDR objects.
//
// Assumes that validation already occurred on 'rules'.
func GetPrefixesFromCIDRSet(rules api.CIDRRuleSlice) []netip.Prefix {
cidrs := api.ComputeResultantCIDRSet(rules)
return getPrefixesFromCIDR(cidrs)
}
// GetCIDRPrefixes runs through the specified 'rules' to find every reference
// to a CIDR in the rules, and returns a slice containing all of these CIDRs.
// Multiple rules referring to the same CIDR will result in multiple copies of
// the CIDR in the returned slice.
//
// Assumes that validation already occurred on 'rules'.
func GetCIDRPrefixes(rules api.Rules) []netip.Prefix {
if len(rules) == 0 {
return nil
}
res := make([]netip.Prefix, 0, 32)
for _, r := range rules {
for _, ir := range r.Ingress {
if len(ir.FromCIDR) > 0 {
res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...)
}
if len(ir.FromCIDRSet) > 0 {
res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...)
}
}
for _, ir := range r.IngressDeny {
if len(ir.FromCIDR) > 0 {
res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...)
}
if len(ir.FromCIDRSet) > 0 {
res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...)
}
}
for _, er := range r.Egress {
if len(er.ToCIDR) > 0 {
res = append(res, getPrefixesFromCIDR(er.ToCIDR)...)
}
if len(er.ToCIDRSet) > 0 {
res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...)
}
}
for _, er := range r.EgressDeny {
if len(er.ToCIDR) > 0 {
res = append(res, getPrefixesFromCIDR(er.ToCIDR)...)
}
if len(er.ToCIDRSet) > 0 {
res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...)
}
}
}
return res
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
ipcacheTypes "github.com/cilium/cilium/pkg/ipcache/types"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "policy")
mutex lock.RWMutex // Protects enablePolicy
enablePolicy string // Whether policy enforcement is enabled.
)
// SetPolicyEnabled sets the policy enablement configuration. Valid values are:
// - endpoint.AlwaysEnforce
// - endpoint.NeverEnforce
// - endpoint.DefaultEnforcement
func SetPolicyEnabled(val string) {
mutex.Lock()
enablePolicy = val
mutex.Unlock()
}
// GetPolicyEnabled returns the policy enablement configuration
func GetPolicyEnabled() string {
mutex.RLock()
val := enablePolicy
mutex.RUnlock()
return val
}
// AddOptions are options which can be passed to PolicyAdd
type AddOptions struct {
// Replace if true indicates that existing rules with identical labels should be replaced
Replace bool
// ReplaceWithLabels if present indicates that existing rules with the
// given LabelArray should be deleted.
ReplaceWithLabels labels.LabelArray
// Generated should be set to true to signal that the policy being inserted
// was generated by cilium-agent, e.g. the DNS poller.
Generated bool
// The source of this policy, one of api, fqdn or k8s
Source source.Source
// The time the policy initially began to be processed in Cilium, such as when the
// policy was received from the API server.
ProcessingStartTime time.Time
// Resource provides the object ID for the underlying object that backs
// this information from 'source'.
Resource ipcacheTypes.ResourceID
// ReplaceByResource indicates the policy repository should replace any
// rules owned by the given Resource with the new set of rules
ReplaceByResource bool
}
// DeleteOptions are options which can be passed to PolicyDelete
type DeleteOptions struct {
// The source of this policy, one of api, fqdn or k8s
Source source.Source
// Resource provides the object ID for the underlying object that backs
// this information from 'source'.
Resource ipcacheTypes.ResourceID
// DeleteByResource should be true if the resource should be used to identify
// which rules should be deleted.
DeleteByResource bool
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"sync/atomic"
identityPkg "github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/identitymanager"
"github.com/cilium/cilium/pkg/lock"
)
// SelectorPolicy represents a cached selectorPolicy, previously resolved from
// the policy repository and ready to be distilled against a set of identities
// to compute datapath-level policy configuration.
type SelectorPolicy interface {
// Consume returns the policy in terms of connectivity to peer
// Identities.
Consume(owner PolicyOwner) *EndpointPolicy
}
// PolicyCache represents a cache of resolved policies for identities.
type PolicyCache struct {
lock.Mutex
// repo is a circular reference back to the Repository, but as
// we create only one Repository and one PolicyCache for each
// Cilium Agent process, these will never need to be garbage
// collected.
repo *Repository
policies map[identityPkg.NumericIdentity]*cachedSelectorPolicy
}
// NewPolicyCache creates a new cache of SelectorPolicy.
func NewPolicyCache(repo *Repository, subscribe bool) *PolicyCache {
cache := &PolicyCache{
repo: repo,
policies: make(map[identityPkg.NumericIdentity]*cachedSelectorPolicy),
}
if subscribe {
identitymanager.Subscribe(cache)
}
return cache
}
// lookupOrCreate adds the specified Identity to the policy cache, with a reference
// from the specified Endpoint, then returns the threadsafe copy of the policy.
func (cache *PolicyCache) lookupOrCreate(identity *identityPkg.Identity, create bool) SelectorPolicy {
cache.Lock()
defer cache.Unlock()
cip, ok := cache.policies[identity.ID]
if !ok {
if !create {
return nil
}
cip = newCachedSelectorPolicy(identity, cache.repo.GetSelectorCache())
cache.policies[identity.ID] = cip
}
return cip
}
// insert adds the specified Identity to the policy cache, with a reference
// from the specified Endpoint, then returns the threadsafe copy of the policy.
func (cache *PolicyCache) insert(identity *identityPkg.Identity) SelectorPolicy {
return cache.lookupOrCreate(identity, true)
}
// delete forgets about any cached SelectorPolicy that this endpoint uses.
//
// Returns true if the SelectorPolicy was removed from the cache.
func (cache *PolicyCache) delete(identity *identityPkg.Identity) bool {
cache.Lock()
defer cache.Unlock()
cip, ok := cache.policies[identity.ID]
if ok {
delete(cache.policies, identity.ID)
cip.getPolicy().Detach()
}
return ok
}
// updateSelectorPolicy resolves the policy for the security identity of the
// specified endpoint and stores it internally. It will skip policy resolution
// if the cached policy is already at the revision specified in the repo.
//
// Returns whether the cache was updated, or an error.
//
// Must be called with repo.Mutex held for reading.
func (cache *PolicyCache) updateSelectorPolicy(identity *identityPkg.Identity) (bool, error) {
cache.Lock()
cip, ok := cache.policies[identity.ID]
cache.Unlock()
if !ok {
return false, fmt.Errorf("SelectorPolicy not found in cache for ID %d", identity.ID)
}
// As long as UpdatePolicy() is triggered from endpoint
// regeneration, it's possible for two endpoints with the
// *same* identity to race to update the policy here. Such
// racing would lead to first of the endpoints using a
// selectorPolicy that is already detached from the selector
// cache, and thus not getting any incremental updates.
//
// Lock the 'cip' for the duration of the revision check and
// the possible policy update.
cip.Lock()
defer cip.Unlock()
// Don't resolve policy if it was already done for this or later revision.
if cip.getPolicy().Revision >= cache.repo.GetRevision() {
return false, nil
}
// Resolve the policies, which could fail
selPolicy, err := cache.repo.resolvePolicyLocked(identity)
if err != nil {
return false, err
}
cip.setPolicy(selPolicy)
return true, nil
}
// LocalEndpointIdentityAdded creates a SelectorPolicy cache entry for the
// specified Identity, without calculating any policy for it.
func (cache *PolicyCache) LocalEndpointIdentityAdded(identity *identityPkg.Identity) {
cache.insert(identity)
}
// LocalEndpointIdentityRemoved deletes the cached SelectorPolicy for the
// specified Identity.
func (cache *PolicyCache) LocalEndpointIdentityRemoved(identity *identityPkg.Identity) {
cache.delete(identity)
}
// Lookup attempts to locate the SelectorPolicy corresponding to the specified
// identity. If policy is not cached for the identity, it returns nil.
func (cache *PolicyCache) Lookup(identity *identityPkg.Identity) SelectorPolicy {
return cache.lookupOrCreate(identity, false)
}
// UpdatePolicy resolves the policy for the security identity of the specified
// endpoint and caches it for future use.
//
// The caller must provide threadsafety for iteration over the policy
// repository.
func (cache *PolicyCache) UpdatePolicy(identity *identityPkg.Identity) error {
_, err := cache.updateSelectorPolicy(identity)
return err
}
// GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID, if
// any, otherwise returns nil.
func (cache *PolicyCache) GetAuthTypes(localID, remoteID identityPkg.NumericIdentity) AuthTypes {
cache.Lock()
cip, ok := cache.policies[localID]
cache.Unlock()
if !ok {
return nil // No policy for localID (no endpoint with localID)
}
// SelectorPolicy is const after it has been created, so no locking needed to access it
selPolicy := cip.getPolicy()
var resTypes AuthTypes
for cs, authTypes := range selPolicy.L4Policy.AuthMap {
missing := false
for authType := range authTypes {
if _, exists := resTypes[authType]; !exists {
missing = true
break
}
}
// Only check if 'cs' selects 'remoteID' if one of the authTypes is still missing
// from the result
if missing && cs.Selects(remoteID) {
if resTypes == nil {
resTypes = make(AuthTypes, 1)
}
for authType := range authTypes {
resTypes[authType] = struct{}{}
}
}
}
return resTypes
}
// cachedSelectorPolicy is a wrapper around a selectorPolicy (stored in the
// 'policy' field). It is always nested directly in the owning policyCache,
// and is protected against concurrent writes via the policyCache mutex.
type cachedSelectorPolicy struct {
lock.Mutex // lock is needed to synchronize parallel policy updates
identity *identityPkg.Identity
policy atomic.Pointer[selectorPolicy]
}
func newCachedSelectorPolicy(identity *identityPkg.Identity, selectorCache *SelectorCache) *cachedSelectorPolicy {
cip := &cachedSelectorPolicy{
identity: identity,
}
cip.setPolicy(newSelectorPolicy(selectorCache))
return cip
}
// getPolicy returns a reference to the selectorPolicy that is cached.
//
// Users should treat the result as immutable state that MUST NOT be modified.
func (cip *cachedSelectorPolicy) getPolicy() *selectorPolicy {
return cip.policy.Load()
}
// setPolicy updates the reference to the SelectorPolicy that is cached.
// Calls Detach() on the old policy, if any.
func (cip *cachedSelectorPolicy) setPolicy(policy *selectorPolicy) {
oldPolicy := cip.policy.Swap(policy)
if oldPolicy != nil {
// Release the references the previous policy holds on the selector cache.
oldPolicy.Detach()
}
}
// Consume returns the EndpointPolicy that defines connectivity policy to
// Identities in the specified cache.
//
// This denotes that a particular endpoint is 'consuming' the policy from the
// selector policy cache.
func (cip *cachedSelectorPolicy) Consume(owner PolicyOwner) *EndpointPolicy {
// TODO: This currently computes the EndpointPolicy from SelectorPolicy
// on-demand, however in future the cip is intended to cache the
// EndpointPolicy for this Identity and emit datapath deltas instead.
isHost := cip.identity.ID == identityPkg.ReservedIdentityHost
return cip.getPolicy().DistillPolicy(owner, isHost)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"sync"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/lock"
)
// Endpoint refers to any structure which has the following properties:
// * a node-local ID stored as a uint16
// * a security identity
// * a means of incrementing its policy revision
// * a means of checking if it represents a node or a pod.
// * a set of labels
// * a kubernetes namespace
type Endpoint interface {
GetID16() uint16
GetSecurityIdentity() (*identity.Identity, error)
PolicyRevisionBumpEvent(rev uint64)
IsHost() bool
GetOpLabels() []string
GetK8sNamespace() string
}
// EndpointSet is used to be able to group together a given set of Endpoints
// that need to have a specific operation performed upon them (e.g., policy
// revision updates).
type EndpointSet struct {
mutex lock.RWMutex
endpoints map[Endpoint]struct{}
}
// NewEndpointSet returns an EndpointSet with the given Endpoints map
func NewEndpointSet(m map[Endpoint]struct{}) *EndpointSet {
if m != nil {
return &EndpointSet{
endpoints: m,
}
}
return &EndpointSet{
endpoints: map[Endpoint]struct{}{},
}
}
// ForEachGo runs epFunc asynchronously inside a goroutine for each endpoint in
// the EndpointSet. It signals to the provided WaitGroup when epFunc has been
// executed for each endpoint.
func (e *EndpointSet) ForEachGo(wg *sync.WaitGroup, epFunc func(epp Endpoint)) {
e.mutex.RLock()
defer e.mutex.RUnlock()
wg.Add(len(e.endpoints))
for ep := range e.endpoints {
go func(eppp Endpoint) {
epFunc(eppp)
wg.Done()
}(ep)
}
}
// Delete removes ep from the EndpointSet.
func (e *EndpointSet) Delete(ep Endpoint) {
e.mutex.Lock()
delete(e.endpoints, ep)
e.mutex.Unlock()
}
// Insert adds ep to the EndpointSet.
func (e *EndpointSet) Insert(ep Endpoint) {
e.mutex.Lock()
e.endpoints[ep] = struct{}{}
e.mutex.Unlock()
}
// Len returns the number of elements in the EndpointSet.
func (e *EndpointSet) Len() (nElem int) {
e.mutex.RLock()
nElem = len(e.endpoints)
e.mutex.RUnlock()
return
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"encoding/json"
"fmt"
"math/bits"
"sort"
"strconv"
"strings"
"sync/atomic"
"testing"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/bitlpm"
"github.com/cilium/cilium/pkg/iana"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/u8proto"
)
// covers returns true if 'l4rule' has the effect needed for the 'l3l4rule', when 'l4rule' is added
// to the datapath, due to the l4-only rule matching if l3l4-rule is not present. This determination
// can be done here only when both rules have the same port number (or both have a wildcarded port).
func (l4rule *PerSelectorPolicy) covers(l3l4rule *PerSelectorPolicy) bool {
// Deny takes highest precedence so it is dealt with first
if l4rule != nil && l4rule.IsDeny {
// l4-only deny takes precedence
return true
} else if l3l4rule != nil && l3l4rule.IsDeny {
// Must not skip if l3l4 rule is deny while l4-only rule is not
return false
}
// Cannot skip if currentRule has an explicit auth type and wildcardRule does not, or if
// the two have different auth types. In all other cases the auth type from the wildcardRule
// can also be used for the current rule.
// Note that the caller must deal with inheriting redirect from wildcardRule to currentRule,
// if any.
cHasAuth, cAuthType := l3l4rule.GetAuthType()
wHasAuth, wAuthType := l4rule.GetAuthType()
if cHasAuth && !wHasAuth || cHasAuth && wHasAuth && cAuthType != wAuthType {
return false
}
l3l4IsRedirect := l3l4rule.IsRedirect()
l4OnlyIsRedirect := l4rule.IsRedirect()
if l3l4IsRedirect && !l4OnlyIsRedirect {
// Can not skip if l3l4-rule is redirect while l4-only is not
return false
} else if l3l4IsRedirect && l4OnlyIsRedirect && l3l4rule.Listener != l4rule.Listener {
// L3l4 rule has a different listener, it can not be skipped
return false
}
// else can skip
return true
}
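// Illustrative sketch of covers() semantics; the PerSelectorPolicy values below are
// assumed for the sake of the example and are not taken from any real policy:
//
//	l4OnlyDeny := &PerSelectorPolicy{IsDeny: true}
//	l3l4Allow := &PerSelectorPolicy{}
//	_ = l4OnlyDeny.covers(l3l4Allow) // true: the L4-only deny takes precedence, the L3/L4 entry can be skipped
//
//	l4OnlyAllow := &PerSelectorPolicy{}
//	l3l4Deny := &PerSelectorPolicy{IsDeny: true}
//	_ = l4OnlyAllow.covers(l3l4Deny) // false: the L3/L4 deny must not be skipped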
// TLS context holds the secret values resolved from an 'api.TLSContext'
type TLSContext struct {
TrustedCA string `json:"trustedCA,omitempty"`
CertificateChain string `json:"certificateChain,omitempty"`
PrivateKey string `json:"privateKey,omitempty"`
}
// Equal returns true if 'a' and 'b' have the same contents.
func (a *TLSContext) Equal(b *TLSContext) bool {
return a == nil && b == nil || a != nil && b != nil && *a == *b
}
// MarshalJSON marshals a redacted version of the TLSContext. We want
// to see which fields are present, but not reveal their values in any
// logs, etc.
func (t *TLSContext) MarshalJSON() ([]byte, error) {
type tlsContext TLSContext
var redacted tlsContext
if t.TrustedCA != "" {
redacted.TrustedCA = "[redacted]"
}
if t.CertificateChain != "" {
redacted.CertificateChain = "[redacted]"
}
if t.PrivateKey != "" {
redacted.PrivateKey = "[redacted]"
}
return json.Marshal(&redacted)
}
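// Illustrative sketch of the redacted output; the key material below is a placeholder,
// not real data:
//
//	t := &TLSContext{PrivateKey: "-----BEGIN PRIVATE KEY-----..."}
//	b, _ := json.Marshal(t) // uses the MarshalJSON above
//	// string(b) == `{"privateKey":"[redacted]"}`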
type StringSet map[string]struct{}
func (a StringSet) Equal(b StringSet) bool {
if len(a) != len(b) {
return false
}
for k := range a {
if _, exists := b[k]; !exists {
return false
}
}
return true
}
// NewStringSet returns a StringSet initialized from a slice of strings.
// Returns nil for an empty slice.
func NewStringSet(from []string) StringSet {
if len(from) == 0 {
return nil
}
set := make(StringSet, len(from))
for _, s := range from {
set[s] = struct{}{}
}
return set
}
// Merge returns StringSet with strings from both a and b.
// Returns a or b, possibly with modifications.
func (a StringSet) Merge(b StringSet) StringSet {
if len(a) == 0 {
return b
}
for s := range b {
a[s] = struct{}{}
}
return a
}
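// Illustrative sketch of the StringSet helpers; the SNI values are assumed for the example:
//
//	a := NewStringSet([]string{"foo.example.com"})
//	b := NewStringSet([]string{"bar.example.com"})
//	merged := a.Merge(b)       // contains both names; note that 'a' is modified in place
//	empty := NewStringSet(nil) // nil, not an empty map
//	_, _ = merged, empty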
// PerSelectorPolicy contains policy rules for a CachedSelector, i.e. for a
// selection of numerical identities.
type PerSelectorPolicy struct {
// TerminatingTLS is the TLS context for the connection terminated by
// the L7 proxy. For egress policy this specifies the server-side TLS
// parameters to be applied on the connections originated from the local
// POD and terminated by the L7 proxy. For ingress policy this specifies
// the server-side TLS parameters to be applied on the connections
// originated from a remote source and terminated by the L7 proxy.
TerminatingTLS *TLSContext `json:"terminatingTLS,omitempty"`
// OriginatingTLS is the TLS context for the connections originated by
// the L7 proxy. For egress policy this specifies the client-side TLS
// parameters for the upstream connection originating from the L7 proxy
// to the remote destination. For ingress policy this specifies the
// client-side TLS parameters for the connection from the L7 proxy to
// the local POD.
OriginatingTLS *TLSContext `json:"originatingTLS,omitempty"`
// ServerNames is a list of allowed TLS SNI values. If not empty, then
// TLS must be present and one of the provided SNIs must be indicated in the
// TLS handshake.
ServerNames StringSet `json:"serverNames,omitempty"`
// isRedirect is 'true' when traffic must be redirected
isRedirect bool `json:"-"`
// Listener is an optional fully qualified name of an Envoy Listener defined in a CiliumEnvoyConfig CRD that should be
// used for this traffic instead of the default listener.
Listener string `json:"listener,omitempty"`
// Priority of the listener used when multiple listeners would apply to the same
// MapStateEntry.
// Lower numbers indicate higher priority. If left out, the proxy
// port number (10000-20000) is used as the priority, so that traffic is consistently
// redirected to the same listener. If a higher priority is desired, a low unique number like 1,
// 2, or 3 should be explicitly specified here. If a lower-than-default priority is needed,
// then a unique number higher than 20000 should be explicitly specified. Numbers in the
// default range (10000-20000) are not allowed.
Priority uint16 `json:"priority,omitempty"`
// Pre-computed HTTP rules, computed after rule merging is complete
EnvoyHTTPRules *cilium.HttpNetworkPolicyRules `json:"-"`
// CanShortCircuit is true if all 'EnvoyHTTPRules' may be
// short-circuited by other matches.
CanShortCircuit bool `json:"-"`
api.L7Rules
// Authentication is the kind of cryptographic authentication required for the traffic to be allowed
// at L3, if any.
Authentication *api.Authentication `json:"auth,omitempty"`
// IsDeny is set if traffic matching this L4Filter should be denied
IsDeny bool `json:",omitempty"`
}
// Equal returns true if 'a' and 'b' represent the same L7 Rules
func (a *PerSelectorPolicy) Equal(b *PerSelectorPolicy) bool {
return a == nil && b == nil || a != nil && b != nil &&
a.TerminatingTLS.Equal(b.TerminatingTLS) &&
a.OriginatingTLS.Equal(b.OriginatingTLS) &&
a.ServerNames.Equal(b.ServerNames) &&
a.isRedirect == b.isRedirect &&
a.Listener == b.Listener &&
a.Priority == b.Priority &&
(a.Authentication == nil && b.Authentication == nil || a.Authentication != nil && a.Authentication.DeepEqual(b.Authentication)) &&
a.IsDeny == b.IsDeny &&
a.L7Rules.DeepEqual(&b.L7Rules)
}
// GetListener returns the listener of the PerSelectorPolicy.
func (a *PerSelectorPolicy) GetListener() string {
if a == nil {
return ""
}
return a.Listener
}
// GetPriority returns the priority of the listener of the PerSelectorPolicy.
func (a *PerSelectorPolicy) GetPriority() uint16 {
if a == nil {
return 0
}
return a.Priority
}
// AuthType enumerates the supported authentication types in api.
// Numerically higher type takes precedence in case of conflicting auth types.
type AuthType uint8
// AuthTypes is a set of AuthTypes, usually nil if empty
type AuthTypes map[AuthType]struct{}
// AuthMap maps remote selectors to their needed AuthTypes, if any.
type AuthMap map[CachedSelector]AuthTypes
const (
// AuthTypeDisabled means no authentication required
AuthTypeDisabled AuthType = iota
// AuthTypeSpire is a mutual auth type that uses SPIFFE identities with a SPIRE server
AuthTypeSpire
// AuthTypeAlwaysFail is a simple auth type that always denies the request
AuthTypeAlwaysFail
)
type HasAuthType bool
const (
DefaultAuthType HasAuthType = false
ExplicitAuthType HasAuthType = true
)
// GetAuthType returns the AuthType of the L4Filter.
func (a *PerSelectorPolicy) GetAuthType() (HasAuthType, AuthType) {
if a == nil {
return DefaultAuthType, AuthTypeDisabled
}
return GetAuthType(a.Authentication)
}
// GetAuthType returns the boolean HasAuthType and the AuthType for the given api.Authentication.
// If there is no explicit auth type, (DefaultAuthType, AuthTypeDisabled) is returned.
func GetAuthType(auth *api.Authentication) (HasAuthType, AuthType) {
if auth == nil {
return DefaultAuthType, AuthTypeDisabled
}
switch auth.Mode {
case api.AuthenticationModeDisabled:
return ExplicitAuthType, AuthTypeDisabled
case api.AuthenticationModeRequired:
return ExplicitAuthType, AuthTypeSpire
case api.AuthenticationModeAlwaysFail:
return ExplicitAuthType, AuthTypeAlwaysFail
default:
return DefaultAuthType, AuthTypeDisabled
}
}
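// Illustrative sketch of the mode-to-AuthType mapping implemented above:
//
//	hasAuth, authType := GetAuthType(&api.Authentication{Mode: api.AuthenticationModeRequired})
//	// hasAuth == ExplicitAuthType, authType == AuthTypeSpire
//	hasAuth, authType = GetAuthType(nil)
//	// hasAuth == DefaultAuthType, authType == AuthTypeDisabled
//	_, _ = hasAuth, authType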
// Uint8 returns AuthType as a uint8
func (a AuthType) Uint8() uint8 {
return uint8(a)
}
// String returns AuthType as a string
// This must return the strings accepted for api.AuthType
func (a AuthType) String() string {
switch a {
case AuthTypeDisabled:
return "disabled"
case AuthTypeSpire:
return "spire"
case AuthTypeAlwaysFail:
return "test-always-fail"
}
return "Unknown-auth-type-" + strconv.FormatUint(uint64(a.Uint8()), 10)
}
// IsRedirect returns true if the L7Rules are a redirect.
func (a *PerSelectorPolicy) IsRedirect() bool {
return a != nil && a.isRedirect
}
// HasL7Rules returns whether the `L7Rules` contains any L7 rules.
func (a *PerSelectorPolicy) HasL7Rules() bool {
return !a.L7Rules.IsEmpty()
}
// L7DataMap contains a map of L7 rules per endpoint where key is a CachedSelector
type L7DataMap map[CachedSelector]*PerSelectorPolicy
func (l7 L7DataMap) MarshalJSON() ([]byte, error) {
if len(l7) == 0 {
return []byte("[]"), nil
}
/* First, create a sorted slice of the selectors so we can get
* consistent JSON output */
selectors := make(CachedSelectorSlice, 0, len(l7))
for cs := range l7 {
selectors = append(selectors, cs)
}
sort.Sort(selectors)
/* Now we can iterate the slice and generate JSON entries. */
var err error
buffer := bytes.NewBufferString("[")
for _, cs := range selectors {
buffer.WriteString("{\"")
buffer.WriteString(cs.String())
buffer.WriteString("\":")
b, err := json.Marshal(l7[cs])
if err == nil {
buffer.Write(b)
} else {
buffer.WriteString("\"L7DataMap error: ")
buffer.WriteString(err.Error())
buffer.WriteString("\"")
}
buffer.WriteString("},")
}
buffer.Truncate(buffer.Len() - 1) // Drop the final ","
buffer.WriteString("]")
return buffer.Bytes(), err
}
// ShallowCopy returns a shallow copy of the L7DataMap.
func (l7 L7DataMap) ShallowCopy() L7DataMap {
m := make(L7DataMap, len(l7))
for k, v := range l7 {
m[k] = v
}
return m
}
// L7ParserType is the type used to indicate what L7 parser to use.
// Consts are defined for all well known L7 parsers.
// Unknown string values are created for key-value pair policies, which
// are then transparently used in redirect configuration.
type L7ParserType string
func (l7 L7ParserType) String() string {
return (string)(l7)
}
const (
// ParserTypeNone represents the case where no parser type is provided.
ParserTypeNone L7ParserType = ""
// ParserTypeTLS is used for TLS origination, termination, or SNI filtering without any L7
// parsing. If TLS policies are used with HTTP rules, ParserTypeHTTP is used instead.
ParserTypeTLS L7ParserType = "tls"
// ParserTypeCRD is used with a custom CiliumEnvoyConfig redirection. Incompatible with any
// parser type with L7 enforcement (HTTP, Kafka, proxylib), as the custom Listener generally
// does not support them.
ParserTypeCRD L7ParserType = "crd"
// ParserTypeHTTP specifies an HTTP parser type
ParserTypeHTTP L7ParserType = "http"
// ParserTypeKafka specifies a Kafka parser type
ParserTypeKafka L7ParserType = "kafka"
// ParserTypeDNS specifies a DNS parser type
ParserTypeDNS L7ParserType = "dns"
)
// redirectTypes is a bitmask of redirection types of multiple filters
type redirectTypes uint16
const (
// redirectTypeDNS bit is set when policy contains a redirection to DNS proxy
redirectTypeDNS redirectTypes = 1 << iota
// redirectTypeEnvoy bit is set when policy contains a redirection to Envoy
redirectTypeEnvoy
// redirectTypeProxylib bits are set when policy contains a redirection to Proxylib (via
// Envoy)
redirectTypeProxylib redirectTypes = 1<<iota | redirectTypeEnvoy
// redirectTypeNone represents the case where there is no proxy redirect
redirectTypeNone redirectTypes = redirectTypes(0)
)
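// Illustrative sketch of the bit layout produced by the iota expressions above:
//
//	redirectTypeNone     == 0b000
//	redirectTypeDNS      == 0b001
//	redirectTypeEnvoy    == 0b010
//	redirectTypeProxylib == 0b110 // includes the Envoy bit, as proxylib redirects go via Envoy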
func (from L7ParserType) canPromoteTo(to L7ParserType) bool {
switch from {
case ParserTypeNone:
// ParserTypeNone can be promoted to any other type
return true
case ParserTypeTLS:
// ParserTypeTLS can be promoted to any other type, except for DNS or CRD,
// but ParserTypeTLS can not be demoted to ParserTypeNone
if to != ParserTypeNone && to != ParserTypeDNS && to != ParserTypeCRD {
return true
}
}
return false
}
// Merge ParserTypes 'a' to 'b' if possible
func (a L7ParserType) Merge(b L7ParserType) (L7ParserType, error) {
if a == b {
return a, nil
}
if a.canPromoteTo(b) {
return b, nil
}
if b.canPromoteTo(a) {
return a, nil
}
return ParserTypeNone, fmt.Errorf("cannot merge conflicting L7 parsers (%s/%s)", a, b)
}
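// Illustrative sketch of Merge behaviour based on the promotion rules above:
//
//	p, err := ParserTypeTLS.Merge(ParserTypeHTTP)  // "http", nil: TLS is promoted to HTTP
//	p, err = ParserTypeNone.Merge(ParserTypeKafka) // "kafka", nil: no parser promotes to any parser
//	p, err = ParserTypeTLS.Merge(ParserTypeDNS)    // "", error: TLS cannot be merged with DNS
//	_, _ = p, err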
// L4Filter represents the policy (allowed remote sources / destinations of
// traffic) that applies at a specific L4 port/protocol combination (including
// all ports and protocols), at either ingress or egress. The policy here is
// specified in terms of selectors that are mapped to security identities via
// the selector cache.
type L4Filter struct {
// Port is the destination port to allow. Port 0 indicates that all traffic
// is allowed at L4.
Port uint16 `json:"port"`
// EndPort is zero for a singular port
EndPort uint16 `json:"endPort,omitempty"`
PortName string `json:"port-name,omitempty"`
// Protocol is the L4 protocol to allow or NONE
Protocol api.L4Proto `json:"protocol"`
// U8Proto is the Protocol in numeric format, or 0 for NONE
U8Proto u8proto.U8proto `json:"-"`
// wildcard is the cached selector representing a wildcard in this filter, if any.
// This is nil if the wildcard selector is not in 'PerSelectorPolicies'.
// When the wildcard selector is in 'PerSelectorPolicies' this is set to that
// same selector, which can then be used as a map key to find the corresponding
// L4-only L7 policy (which can be nil).
wildcard CachedSelector
// PerSelectorPolicies is a map of policies for selectors, including any L7 rules passed to
// the L7 proxy. nil values represent cached selectors that have no selector-specific policy
// restriction (such as no L7 rules). Holds references to the cached selectors, which must
// be released!
PerSelectorPolicies L7DataMap `json:"l7-rules,omitempty"`
// L7Parser specifies the L7 protocol parser (optional). If specified as
// an empty string, it means that no L7 proxy redirect is performed.
L7Parser L7ParserType `json:"-"`
// Ingress is true if filter applies at ingress; false if it applies at egress.
Ingress bool `json:"-"`
// RuleOrigin tracks which policy rules (identified by labels) are the origin for this L3/L4
// (i.e. selector and port) filter. This information is used when distilling a policy to an
// EndpointPolicy, to track which policy rules were involved for a specific verdict.
// Each LabelArrayList is in sorted order.
RuleOrigin map[CachedSelector]labels.LabelArrayList `json:"-"`
// This reference is circular, but it is cleaned up at Detach()
policy atomic.Pointer[L4Policy]
}
// SelectsAllEndpoints returns whether the L4Filter selects all
// endpoints, which is true if the wildcard endpoint selector is present in the
// map.
func (l4 *L4Filter) SelectsAllEndpoints() bool {
for cs := range l4.PerSelectorPolicies {
if cs.IsWildcard() {
return true
}
}
return false
}
// CopyL7RulesPerEndpoint returns a shallow copy of the PerSelectorPolicies of the
// L4Filter.
func (l4 *L4Filter) CopyL7RulesPerEndpoint() L7DataMap {
return l4.PerSelectorPolicies.ShallowCopy()
}
// GetL7Parser returns the L7ParserType of the L4Filter.
func (l4 *L4Filter) GetL7Parser() L7ParserType {
return l4.L7Parser
}
// GetIngress returns whether the L4Filter applies at ingress or egress.
func (l4 *L4Filter) GetIngress() bool {
return l4.Ingress
}
// GetPort returns the port at which the L4Filter applies as a uint16.
func (l4 *L4Filter) GetPort() uint16 {
return l4.Port
}
// Equals returns true if two L4Filters are equal
func (l4 *L4Filter) Equals(_ *testing.T, bL4 *L4Filter) bool {
if l4.Port == bL4.Port &&
l4.EndPort == bL4.EndPort &&
l4.PortName == bL4.PortName &&
l4.Protocol == bL4.Protocol &&
l4.Ingress == bL4.Ingress &&
l4.L7Parser == bL4.L7Parser &&
l4.wildcard == bL4.wildcard {
if len(l4.PerSelectorPolicies) != len(bL4.PerSelectorPolicies) {
return false
}
for k, v := range l4.PerSelectorPolicies {
bV, ok := bL4.PerSelectorPolicies[k]
if !ok || !bV.Equal(v) {
return false
}
}
return true
}
return false
}
// ChangeState allows the caller to revert changes made by (multiple) toMapState call(s)
type ChangeState struct {
Adds Keys // Added or modified keys, if not nil
Deletes Keys // deleted keys, if not nil
Old map[Key]MapStateEntry // Old values of all modified or deleted keys, if not nil
}
// toMapState converts a single filter into a MapState entries added to 'p.PolicyMapState'.
//
// Note: It is possible for two selectors to select the same security ID. To give priority to deny,
// AuthType, and L7 redirection (e.g., for visibility purposes), the mapstate entries are added to
// 'p.PolicyMapState' using denyPreferredInsertWithChanges().
// Keys and old values of any added or deleted entries are added to 'changes'.
// 'redirects' is the map of currently realized redirects; it is used to find the proxy port for any redirects.
// p.SelectorCache is used as Identities interface during this call, which only has GetPrefix() that
// needs no lock.
func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, redirects map[string]uint16, changes ChangeState) {
port := l4.Port
proto := uint8(l4.U8Proto)
direction := trafficdirection.Egress
if l4.Ingress {
direction = trafficdirection.Ingress
}
logger := log
if option.Config.Debug {
logger = log.WithFields(logrus.Fields{
logfields.Port: port,
logfields.PortName: l4.PortName,
logfields.Protocol: proto,
logfields.TrafficDirection: direction,
})
}
// resolve named port
if port == 0 && l4.PortName != "" {
port = p.PolicyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
if port == 0 {
return // nothing to be done for undefined named port
}
}
var keysToAdd []Key
for _, mp := range PortRangeToMaskedPorts(port, l4.EndPort) {
keysToAdd = append(keysToAdd, Key{
Identity: 0, // Set in the loop below (if not wildcard)
DestPort: mp.port, // NOTE: Port is in host byte-order!
InvertedPortMask: ^mp.mask,
Nexthdr: proto,
TrafficDirection: direction.Uint8(),
})
}
// find the L7 rules for the wildcard entry, if any
var wildcardRule *PerSelectorPolicy
if l4.wildcard != nil {
wildcardRule = l4.PerSelectorPolicies[l4.wildcard]
}
for cs, currentRule := range l4.PerSelectorPolicies {
// have wildcard and this is an L3L4 key?
isL3L4withWildcardPresent := (l4.Port != 0 || l4.PortName != "") && l4.wildcard != nil && cs != l4.wildcard
if isL3L4withWildcardPresent && wildcardRule.covers(currentRule) {
logger.WithField(logfields.EndpointSelector, cs).Debug("ToMapState: Skipping L3/L4 key due to existing L4-only key")
continue
}
isDenyRule := currentRule != nil && currentRule.IsDeny
isRedirect := currentRule.IsRedirect()
listener := currentRule.GetListener()
if !isDenyRule && isL3L4withWildcardPresent && !isRedirect {
// Inherit the redirect status from the wildcard rule.
// This is now needed as 'covers()' can pass non-redirect L3L4 rules
// that must inherit the redirect status from the L4-only (== L3-wildcard)
// rule due to auth type on the L3L4 rule being different than in the
// L4-only rule.
isRedirect = wildcardRule.IsRedirect()
listener = wildcardRule.GetListener()
}
hasAuth, authType := currentRule.GetAuthType()
var proxyPort uint16
if isRedirect {
var exists bool
proxyID := ProxyID(uint16(p.PolicyOwner.GetID()), l4.Ingress, string(l4.Protocol), port, listener)
proxyPort, exists = redirects[proxyID]
if !exists {
// Skip unrealized redirects; this happens routinely just
// before new redirects are realized. Once created, we are called
// again.
logger.WithField(logfields.EndpointSelector, cs).Debugf("Skipping unrealized redirect %s (%v)", proxyID, redirects)
continue
}
}
entry := NewMapStateEntry(cs, l4.RuleOrigin[cs], proxyPort, currentRule.GetListener(), currentRule.GetPriority(), isDenyRule, hasAuth, authType)
if cs.IsWildcard() {
for _, keyToAdd := range keysToAdd {
keyToAdd.Identity = 0
p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, p.SelectorCache, features, changes)
if port == 0 {
// Allow-all
logger.WithField(logfields.EndpointSelector, cs).Debug("ToMapState: allow all")
} else {
// L4 allow
logger.WithField(logfields.EndpointSelector, cs).Debug("ToMapState: L4 allow all")
}
}
continue
}
idents := cs.GetSelections()
if option.Config.Debug {
if isDenyRule {
logger.WithFields(logrus.Fields{
logfields.EndpointSelector: cs,
logfields.PolicyID: idents,
}).Debug("ToMapState: Denied remote IDs")
} else {
logger.WithFields(logrus.Fields{
logfields.EndpointSelector: cs,
logfields.PolicyID: idents,
}).Debug("ToMapState: Allowed remote IDs")
}
}
for _, id := range idents {
for _, keyToAdd := range keysToAdd {
keyToAdd.Identity = id.Uint32()
p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, p.SelectorCache, features, changes)
// If Cilium is in dual-stack mode then the "World" identity
// needs to be split into two identities to represent World
// IPv6 and IPv4 traffic distinctly from one another.
if id == identity.ReservedIdentityWorld && option.Config.IsDualStack() {
keyToAdd.Identity = identity.ReservedIdentityWorldIPv4.Uint32()
p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, p.SelectorCache, features, changes)
keyToAdd.Identity = identity.ReservedIdentityWorldIPv6.Uint32()
p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, p.SelectorCache, features, changes)
}
}
}
}
if option.Config.Debug {
log.WithFields(logrus.Fields{
logfields.PolicyKeysAdded: changes.Adds,
logfields.PolicyKeysDeleted: changes.Deletes,
logfields.PolicyEntriesOld: changes.Old,
}).Debug("ToMapChange changes")
}
}
// IdentitySelectionUpdated implements CachedSelectionUser interface
// This call is made from a single goroutine in FIFO order to keep add
// and delete events ordered properly. No locks are held.
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
func (l4 *L4Filter) IdentitySelectionUpdated(cs CachedSelector, added, deleted []identity.NumericIdentity) {
log.WithFields(logrus.Fields{
logfields.EndpointSelector: cs,
logfields.AddedPolicyID: added,
logfields.DeletedPolicyID: deleted,
}).Debug("identities selected by L4Filter updated")
// Skip updates on wildcard selectors, as datapath and L7
// proxies do not need enumeration of all ids for L3 wildcard.
// This mirrors the per-selector logic in ToMapState().
if cs.IsWildcard() {
return
}
// Push endpoint policy changes.
//
// `l4.policy` is nil when the filter is detached, so
// that we do not push updates on an unstable policy.
l4Policy := l4.policy.Load()
if l4Policy != nil {
l4Policy.AccumulateMapChanges(l4, cs, added, deleted)
}
}
func (l4 *L4Filter) cacheIdentitySelector(sel api.EndpointSelector, lbls labels.LabelArray, selectorCache *SelectorCache) CachedSelector {
cs, added := selectorCache.AddIdentitySelector(l4, lbls, sel)
if added {
l4.PerSelectorPolicies[cs] = nil // no per-selector policy (yet)
}
return cs
}
func (l4 *L4Filter) cacheIdentitySelectors(selectors api.EndpointSelectorSlice, lbls labels.LabelArray, selectorCache *SelectorCache) {
for _, sel := range selectors {
l4.cacheIdentitySelector(sel, lbls, selectorCache)
}
}
func (l4 *L4Filter) cacheFQDNSelectors(selectors api.FQDNSelectorSlice, lbls labels.LabelArray, selectorCache *SelectorCache) {
for _, fqdnSel := range selectors {
l4.cacheFQDNSelector(fqdnSel, lbls, selectorCache)
}
}
func (l4 *L4Filter) cacheFQDNSelector(sel api.FQDNSelector, lbls labels.LabelArray, selectorCache *SelectorCache) CachedSelector {
cs, added := selectorCache.AddFQDNSelector(l4, lbls, sel)
if added {
l4.PerSelectorPolicies[cs] = nil // no per-selector policy (yet)
}
return cs
}
// add L7 rules for all endpoints in the L7DataMap
func (l7 L7DataMap) addPolicyForSelector(rules *api.L7Rules, terminatingTLS, originatingTLS *TLSContext, auth *api.Authentication, deny bool, sni []string, listener string, priority uint16) {
isRedirect := !deny && (listener != "" || terminatingTLS != nil || originatingTLS != nil || len(sni) > 0 || !rules.IsEmpty())
for epsel := range l7 {
l7policy := &PerSelectorPolicy{
TerminatingTLS: terminatingTLS,
OriginatingTLS: originatingTLS,
Authentication: auth,
IsDeny: deny,
ServerNames: NewStringSet(sni),
isRedirect: isRedirect,
Listener: listener,
Priority: priority,
}
if rules != nil {
l7policy.L7Rules = *rules
}
l7[epsel] = l7policy
}
}
type TLSDirection string
const (
TerminatingTLS TLSDirection = "terminating"
OriginatingTLS TLSDirection = "originating"
)
func (l4 *L4Filter) getCerts(policyCtx PolicyContext, tls *api.TLSContext, direction TLSDirection) (*TLSContext, error) {
if tls == nil {
return nil, nil
}
ca, public, private, err := policyCtx.GetTLSContext(tls)
if err != nil {
log.WithError(err).Warningf("policy: Error getting %s TLS Context.", direction)
return nil, err
}
switch direction {
case TerminatingTLS:
if public == "" || private == "" {
return nil, fmt.Errorf("Terminating TLS context is missing certs.")
}
case OriginatingTLS:
if ca == "" {
return nil, fmt.Errorf("Originating TLS context is missing CA certs.")
}
default:
return nil, fmt.Errorf("invalid TLS direction: %s", direction)
}
return &TLSContext{
TrustedCA: ca,
CertificateChain: public,
PrivateKey: private,
}, nil
}
// createL4Filter creates a filter for L4 policy that applies to the specified
// endpoints and port/protocol, with reference to the original rules that the
// filter is derived from. This filter may be associated with a series of L7
// rules via the `rule` parameter.
// Not called with an empty peerEndpoints.
func createL4Filter(policyCtx PolicyContext, peerEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
protocol api.L4Proto, ruleLabels labels.LabelArray, ingress bool, fqdns api.FQDNSelectorSlice) (*L4Filter, error) {
selectorCache := policyCtx.GetSelectorCache()
portName := ""
p := uint64(0)
if iana.IsSvcName(port.Port) {
portName = port.Port
} else {
// already validated via PortRule.Validate()
p, _ = strconv.ParseUint(port.Port, 0, 16)
}
// already validated via L4Proto.Validate(), never "ANY"
// NOTE: "ANY" for wildcarded port/proto!
u8p, _ := u8proto.ParseProtocol(string(protocol))
l4 := &L4Filter{
Port: uint16(p), // 0 for L3-only rules and named ports
EndPort: uint16(port.EndPort), // 0 for a single port, >= 'Port' for a range
PortName: portName, // non-"" for named ports
Protocol: protocol,
U8Proto: u8p,
PerSelectorPolicies: make(L7DataMap),
RuleOrigin: make(map[CachedSelector]labels.LabelArrayList), // Filled in below.
Ingress: ingress,
}
if peerEndpoints.SelectsAllEndpoints() {
l4.wildcard = l4.cacheIdentitySelector(api.WildcardEndpointSelector, ruleLabels, selectorCache)
} else {
l4.cacheIdentitySelectors(peerEndpoints, ruleLabels, selectorCache)
l4.cacheFQDNSelectors(fqdns, ruleLabels, selectorCache)
}
var terminatingTLS *TLSContext
var originatingTLS *TLSContext
var rules *api.L7Rules
var sni []string
listener := ""
var priority uint16
pr := rule.GetPortRule()
if pr != nil {
rules = pr.Rules
sni = pr.ServerNames
// Get TLS contexts, if any
var err error
terminatingTLS, err = l4.getCerts(policyCtx, pr.TerminatingTLS, TerminatingTLS)
if err != nil {
return nil, err
}
originatingTLS, err = l4.getCerts(policyCtx, pr.OriginatingTLS, OriginatingTLS)
if err != nil {
return nil, err
}
// Set parser type to TLS, if TLS. This will be overridden by L7 below, if rules
// exists.
if terminatingTLS != nil || originatingTLS != nil || len(pr.ServerNames) > 0 {
l4.L7Parser = ParserTypeTLS
}
// Determine L7ParserType from rules present. Earlier validation ensures rules
// for multiple protocols are not present here.
if rules != nil {
// we need this to redirect DNS UDP (or ANY, which is more useful)
if len(rules.DNS) > 0 {
l4.L7Parser = ParserTypeDNS
} else if protocol == api.ProtoTCP { // Other than DNS only support TCP
switch {
case len(rules.HTTP) > 0:
l4.L7Parser = ParserTypeHTTP
case len(rules.Kafka) > 0:
l4.L7Parser = ParserTypeKafka
case rules.L7Proto != "":
l4.L7Parser = (L7ParserType)(rules.L7Proto)
}
}
}
// Override the parser type to CRD if applicable.
if pr.Listener != nil {
l4.L7Parser = ParserTypeCRD
ns := policyCtx.GetNamespace()
resource := pr.Listener.EnvoyConfig
switch resource.Kind {
case "CiliumEnvoyConfig":
if ns == "" {
// Cluster-scoped CCNP tries to use namespaced
// CiliumEnvoyConfig
//
// TODO: Catch this in rule validation once we have a
// validation context in there so that we can differentiate
// between CNP and CCNP at validation time.
return nil, fmt.Errorf("Listener %q in CCNP can not use Kind CiliumEnvoyConfig", pr.Listener.Name)
}
case "CiliumClusterwideEnvoyConfig":
// CNP refers to a cluster-scoped listener
ns = ""
default:
}
listener, _ = api.ResourceQualifiedName(ns, resource.Name, pr.Listener.Name, api.ForceNamespace)
priority = pr.Listener.Priority
}
}
if l4.L7Parser != ParserTypeNone || auth != nil || policyCtx.IsDeny() {
l4.PerSelectorPolicies.addPolicyForSelector(rules, terminatingTLS, originatingTLS, auth, policyCtx.IsDeny(), sni, listener, priority)
}
for cs := range l4.PerSelectorPolicies {
l4.RuleOrigin[cs] = labels.LabelArrayList{ruleLabels}
}
return l4, nil
}
func (l4 *L4Filter) removeSelectors(selectorCache *SelectorCache) {
selectors := make(CachedSelectorSlice, 0, len(l4.PerSelectorPolicies))
for cs := range l4.PerSelectorPolicies {
selectors = append(selectors, cs)
}
selectorCache.RemoveSelectors(selectors, l4)
}
// detach releases the references held in the L4Filter and must be called before
// the filter is left to be garbage collected.
// L4Filter may still be accessed concurrently after it has been detached.
func (l4 *L4Filter) detach(selectorCache *SelectorCache) {
l4.removeSelectors(selectorCache)
l4.policy.Store(nil)
}
// attach signifies that the L4Filter is ready and reachable for updates
// from SelectorCache. L4Filter (and L4Policy) is read-only after this is called,
// multiple goroutines will be reading the fields from that point on.
func (l4 *L4Filter) attach(ctx PolicyContext, l4Policy *L4Policy) policyFeatures {
// All rules have been added to the L4Filter at this point.
// Sort the rules label array list for more efficient equality comparison.
for _, labels := range l4.RuleOrigin {
labels.Sort()
}
var features policyFeatures
for cs, cp := range l4.PerSelectorPolicies {
if cp != nil {
if cp.IsDeny {
features.setFeature(denyRules)
}
hasAuth, authType := GetAuthType(cp.Authentication)
if hasAuth {
features.setFeature(authRules)
if authType != AuthTypeDisabled {
if l4Policy.AuthMap == nil {
l4Policy.AuthMap = make(AuthMap, 1)
}
authTypes := l4Policy.AuthMap[cs]
if authTypes == nil {
authTypes = make(AuthTypes, 1)
}
authTypes[authType] = struct{}{}
l4Policy.AuthMap[cs] = authTypes
}
}
// Compute Envoy policies when a policy is ready to be used
if len(cp.L7Rules.HTTP) > 0 {
cp.EnvoyHTTPRules, cp.CanShortCircuit = ctx.GetEnvoyHTTPRules(&cp.L7Rules)
}
}
}
l4.policy.Store(l4Policy)
return features
}
// createL4IngressFilter creates a filter for L4 policy that applies to the
// specified endpoints and port/protocol for ingress traffic, with reference
// to the original rules that the filter is derived from. This filter may be
// associated with a series of L7 rules via the `rule` parameter.
//
// hostWildcardL7 determines if L7 traffic from Host should be
// wildcarded (in the relevant daemon mode).
func createL4IngressFilter(policyCtx PolicyContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string, rule api.Ports, port api.PortProtocol,
protocol api.L4Proto, ruleLabels labels.LabelArray) (*L4Filter, error) {
filter, err := createL4Filter(policyCtx, fromEndpoints, auth, rule, port, protocol, ruleLabels, true, nil)
if err != nil {
return nil, err
}
// If the filter would apply proxy redirection for the Host, but we should accept
// everything from the host, then wildcard Host at L7.
if len(hostWildcardL7) > 0 {
for cs, l7 := range filter.PerSelectorPolicies {
if l7.IsRedirect() && cs.Selects(identity.ReservedIdentityHost) {
for _, name := range hostWildcardL7 {
selector := api.ReservedEndpointSelectors[name]
filter.cacheIdentitySelector(selector, ruleLabels, policyCtx.GetSelectorCache())
}
}
}
}
return filter, nil
}
// createL4EgressFilter creates a filter for L4 policy that applies to the
// specified endpoints and port/protocol for egress traffic, with reference
// to the original rules that the filter is derived from. This filter may be
// associated with a series of L7 rules via the `rule` parameter.
func createL4EgressFilter(policyCtx PolicyContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
protocol api.L4Proto, ruleLabels labels.LabelArray, fqdns api.FQDNSelectorSlice) (*L4Filter, error) {
return createL4Filter(policyCtx, toEndpoints, auth, rule, port, protocol, ruleLabels, false, fqdns)
}
// redirectType returns the redirectType for this filter
func (l4 *L4Filter) redirectType() redirectTypes {
switch l4.L7Parser {
case ParserTypeNone:
return redirectTypeNone
case ParserTypeDNS:
return redirectTypeDNS
case ParserTypeHTTP, ParserTypeTLS, ParserTypeCRD:
return redirectTypeEnvoy
default:
// all other (non-empty) values are used for proxylib redirects
return redirectTypeProxylib
}
}
// IsRedirect returns true if the L4 filter contains a port redirection
func (l4 *L4Filter) IsRedirect() bool {
return l4.L7Parser != ParserTypeNone
}
// Marshal returns the `L4Filter` in a JSON string.
func (l4 *L4Filter) Marshal() string {
b, err := json.Marshal(l4)
if err != nil {
b = []byte("\"L4Filter error: " + err.Error() + "\"")
}
return string(b)
}
// String returns the `L4Filter` in a human-readable string.
func (l4 *L4Filter) String() string {
b, err := json.Marshal(l4)
if err != nil {
return err.Error()
}
return string(b)
}
// Note: Only used for policy tracing
func (l4 *L4Filter) matchesLabels(labels labels.LabelArray) (bool, bool) {
if l4.wildcard != nil {
perSelectorPolicy := l4.PerSelectorPolicies[l4.wildcard]
isDeny := perSelectorPolicy != nil && perSelectorPolicy.IsDeny
return true, isDeny
} else if len(labels) == 0 {
return false, false
}
var selected bool
for sel, rule := range l4.PerSelectorPolicies {
// slow, but OK for tracing
idSel := sel.(*identitySelector)
if lis, ok := idSel.source.(*labelIdentitySelector); ok && lis.xxxMatches(labels) {
isDeny := rule != nil && rule.IsDeny
selected = true
if isDeny {
return true, isDeny
}
}
}
return selected, false
}
// addL4Filter adds 'filterToMerge' into the 'resMap'. Returns an error if
// 'filterToMerge' cannot be merged with an existing filter for the same
// port and proto.
func addL4Filter(policyCtx PolicyContext,
ctx *SearchContext, resMap L4PolicyMap,
p api.PortProtocol, proto api.L4Proto,
filterToMerge *L4Filter,
ruleLabels labels.LabelArray) error {
existingFilter := resMap.ExactLookup(p.Port, uint16(p.EndPort), string(proto))
if existingFilter == nil {
resMap.Upsert(p.Port, uint16(p.EndPort), string(proto), filterToMerge)
return nil
}
selectorCache := policyCtx.GetSelectorCache()
if err := mergePortProto(ctx, existingFilter, filterToMerge, selectorCache); err != nil {
filterToMerge.detach(selectorCache)
return err
}
// To keep the rule origin tracking correct, merge the rule label arrays for each CachedSelector
// we know about. New CachedSelectors are added.
for cs, newLabels := range filterToMerge.RuleOrigin {
if existingLabels, ok := existingFilter.RuleOrigin[cs]; ok {
existingFilter.RuleOrigin[cs] = existingLabels.MergeSorted(newLabels)
} else {
existingFilter.RuleOrigin[cs] = newLabels
}
}
resMap.Upsert(p.Port, uint16(p.EndPort), string(proto), existingFilter)
return nil
}
// L4PolicyMap is a list of L4 filters indexable by port/endport/protocol
type L4PolicyMap interface {
Upsert(port string, endPort uint16, protocol string, l4 *L4Filter)
Delete(port string, endPort uint16, protocol string)
ExactLookup(port string, endPort uint16, protocol string) *L4Filter
LongestPrefixMatch(port string, protocol string) *L4Filter
Detach(selectorCache *SelectorCache)
IngressCoversContext(ctx *SearchContext) api.Decision
EgressCoversContext(ctx *SearchContext) api.Decision
ForEach(func(l4 *L4Filter) bool)
Equals(t *testing.T, bMap L4PolicyMap) bool
Diff(t *testing.T, expectedMap L4PolicyMap) string
Len() int
}
// NewL4PolicyMap creates a new L4PolicyMap.
func NewL4PolicyMap() L4PolicyMap {
return &l4PolicyMap{
namedPortMap: make(map[string]*L4Filter),
rangePortMap: bitlpm.NewUintTrie[uint32, map[portProtoKey]*L4Filter](),
}
}
// NewL4PolicyMapWithValues creates a new L4PolicyMap with an initial
// set of values. The initMap argument does not support port ranges.
func NewL4PolicyMapWithValues(initMap map[string]*L4Filter) L4PolicyMap {
l4M := &l4PolicyMap{
namedPortMap: make(map[string]*L4Filter),
rangePortMap: bitlpm.NewUintTrie[uint32, map[portProtoKey]*L4Filter](),
}
for k, v := range initMap {
portProtoSlice := strings.Split(k, "/")
if len(portProtoSlice) < 2 {
continue
}
l4M.Upsert(portProtoSlice[0], 0, portProtoSlice[1], v)
}
return l4M
}
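// Illustrative sketch of the initMap key format expected by the split above
// ("port/protocol", single ports only); 'httpFilter' is a placeholder *L4Filter:
//
//	m := NewL4PolicyMapWithValues(map[string]*L4Filter{
//		"80/TCP": httpFilter,
//	})
//	_ = m.ExactLookup("80", 0, "TCP") // returns httpFilter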
type portProtoKey struct {
port, endPort uint16
proto uint8
}
// l4PolicyMap is the implementation of L4PolicyMap
type l4PolicyMap struct {
// namedPortMap represents the named ports (a Kubernetes feature)
// that map to an L4Filter. They must be tracked at the selection
// level, because they can only be resolved at the endpoint/identity
// level. Named ports cannot have ranges.
namedPortMap map[string]*L4Filter
// rangePortMap has to keep a map of L4Filters rather than
// a single L4Filter reference so that the l4PolicyMap does
// not merge L4Filters that do not span the same port range, but
// share an overlapping range in the trie.
rangePortMap *bitlpm.UintTrie[uint32, map[portProtoKey]*L4Filter]
// rangeMapLen counts the number of unique L4Filters in
// the rangePortMap. It must be tracked separately from
// rangePortMap as L4Filters are split up when
// the port range length is not a power of two.
rangeMapLen int
}
func parsePortProtocol(port, protocol string) (uint16, uint8) {
// These string values have been validated many times
// over at this point.
prt, _ := strconv.ParseUint(port, 10, 16)
proto, _ := u8proto.ParseProtocol(protocol)
return uint16(prt), uint8(proto)
}
// makePolicyMapKey creates a protocol-port uint32 with the
// upper 16 bits containing the protocol and the lower 16
// bits containing the port.
func makePolicyMapKey(port, mask uint16, proto uint8) uint32 {
return (uint32(proto) << 16) | uint32(port&mask)
}
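// Illustrative sketch: a single port uses the full mask 0xffff, so the key packs the
// whole port and the trie prefix length is 32 (16 protocol bits + 16 port bits).
// The values below are assumed for the example (6 is the TCP protocol number):
//
//	key := makePolicyMapKey(8080, 0xffff, 6)          // == (6 << 16) | 8080
//	prefix := 32 - uint(bits.TrailingZeros16(0xffff)) // == 32
//	_, _ = key, prefix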
// Upsert adds an L4Filter indexed by protocol/port-endPort.
func (l4M *l4PolicyMap) Upsert(port string, endPort uint16, protocol string, l4 *L4Filter) {
if iana.IsSvcName(port) {
l4M.namedPortMap[port+"/"+protocol] = l4
return
}
portU, protoU := parsePortProtocol(port, protocol)
ppK := portProtoKey{
port: portU,
endPort: endPort,
proto: protoU,
}
var upsertHappened bool
for _, mp := range PortRangeToMaskedPorts(portU, endPort) {
k := makePolicyMapKey(mp.port, mp.mask, protoU)
prefix := 32 - uint(bits.TrailingZeros16(mp.mask))
portProtoMap, ok := l4M.rangePortMap.ExactLookup(prefix, k)
if !ok {
portProtoMap = make(map[portProtoKey]*L4Filter)
l4M.rangePortMap.Upsert(prefix, k, portProtoMap)
}
if !upsertHappened {
if _, ok := portProtoMap[ppK]; !ok {
l4M.rangeMapLen += 1
upsertHappened = true
}
}
portProtoMap[ppK] = l4
}
}
// Delete an L4Filter from the index by protocol/port-endPort
func (l4M *l4PolicyMap) Delete(port string, endPort uint16, protocol string) {
if iana.IsSvcName(port) {
delete(l4M.namedPortMap, port+"/"+protocol)
return
}
portU, protoU := parsePortProtocol(port, protocol)
ppK := portProtoKey{
port: portU,
endPort: endPort,
proto: protoU,
}
var deleteHappened bool
for _, mp := range PortRangeToMaskedPorts(portU, endPort) {
k := makePolicyMapKey(mp.port, mp.mask, protoU)
prefix := 32 - uint(bits.TrailingZeros16(mp.mask))
portProtoMap, ok := l4M.rangePortMap.ExactLookup(prefix, k)
if !ok {
return
}
if _, ok := portProtoMap[ppK]; ok {
delete(portProtoMap, ppK)
if !deleteHappened {
l4M.rangeMapLen -= 1
deleteHappened = true
}
}
if len(portProtoMap) == 0 {
l4M.rangePortMap.Delete(prefix, k)
}
}
}
// ExactLookup looks up an L4Filter by protocol/port-endPort and looks for an exact match.
func (l4M *l4PolicyMap) ExactLookup(port string, endPort uint16, protocol string) *L4Filter {
if iana.IsSvcName(port) {
return l4M.namedPortMap[port+"/"+protocol]
}
portU, protoU := parsePortProtocol(port, protocol)
ppK := portProtoKey{
port: portU,
endPort: endPort,
proto: protoU,
}
for _, mp := range PortRangeToMaskedPorts(portU, endPort) {
k := makePolicyMapKey(mp.port, mp.mask, protoU)
prefix := 32 - uint(bits.TrailingZeros16(mp.mask))
portProtoMap, ok := l4M.rangePortMap.ExactLookup(prefix, k)
if !ok {
return nil
}
if l4, ok := portProtoMap[ppK]; ok {
return l4
}
}
return nil
}
// LongestPrefixMatch looks up an L4Filter by protocol/port that contains the port and protocol
// by longest prefix match. If a named port is passed, then a simple lookup in the named port
// map is used, not a prefix match.
func (l4M *l4PolicyMap) LongestPrefixMatch(port string, protocol string) *L4Filter {
if iana.IsSvcName(port) {
return l4M.namedPortMap[port+"/"+protocol]
}
portU, protoU := parsePortProtocol(port, protocol)
portProtoMap, ok := l4M.rangePortMap.LongestPrefixMatch(makePolicyMapKey(portU, 0xffff, protoU))
if !ok {
return nil
}
var (
shortestPortRange uint16 = 0xffff
lastL4 *L4Filter
)
// Use the smallest port range
for k, v := range portProtoMap {
// single port match
if k.endPort <= k.port {
return v
}
if shortestPortRange > (k.endPort - k.port) {
lastL4 = v
shortestPortRange = (k.endPort - k.port)
}
}
return lastL4
}
// ForEach iterates over all L4Filters in the l4PolicyMap.
func (l4M *l4PolicyMap) ForEach(fn func(l4 *L4Filter) bool) {
for _, f := range l4M.namedPortMap {
fn(f)
}
l4PortProtoKeys := make(map[portProtoKey]struct{})
l4M.rangePortMap.ForEach(func(prefix uint, key uint32, portProtoMap map[portProtoKey]*L4Filter) bool {
for k, v := range portProtoMap {
// We check for redundant L4Filters, because we split them apart in the index.
if _, ok := l4PortProtoKeys[k]; !ok {
fn(v)
l4PortProtoKeys[k] = struct{}{}
}
}
return true
})
}
// Equals returns true if both L4PolicyMaps are equal.
func (l4M *l4PolicyMap) Equals(_ *testing.T, bMap L4PolicyMap) bool {
if l4M.Len() != bMap.Len() {
return false
}
equal := true
l4M.ForEach(func(l4 *L4Filter) bool {
port := l4.PortName
if len(port) == 0 {
port = fmt.Sprintf("%d", l4.Port)
}
l4B := bMap.ExactLookup(port, l4.EndPort, string(l4.Protocol))
equal = l4.Equals(nil, l4B)
return equal
})
return equal
}
// Diff returns the difference between two L4PolicyMaps.
func (l4M *l4PolicyMap) Diff(_ *testing.T, expected L4PolicyMap) (res string) {
res += "Missing (-), Unexpected (+):\n"
expected.ForEach(func(eV *L4Filter) bool {
port := eV.PortName
if len(port) == 0 {
port = fmt.Sprintf("%d", eV.Port)
}
oV := l4M.ExactLookup(port, eV.Port, string(eV.Protocol))
if oV != nil {
if !eV.Equals(nil, oV) {
res += "- " + eV.String() + "\n"
res += "+ " + oV.String() + "\n"
}
} else {
res += "- " + eV.String() + "\n"
}
return true
})
l4M.ForEach(func(oV *L4Filter) bool {
port := oV.PortName
if len(port) == 0 {
port = fmt.Sprintf("%d", oV.Port)
}
eV := expected.ExactLookup(port, oV.Port, string(oV.Protocol))
if eV == nil {
res += "+ " + oV.String() + "\n"
}
return true
})
return
}
// Len returns the number of entries in the map.
func (l4M *l4PolicyMap) Len() int {
if l4M == nil {
return 0
}
return len(l4M.namedPortMap) + l4M.rangeMapLen
}
type policyFeatures uint8
const (
denyRules policyFeatures = 1 << iota
authRules
allFeatures policyFeatures = ^policyFeatures(0)
)
func (pf *policyFeatures) setFeature(feature policyFeatures) {
*pf |= feature
}
func (pf policyFeatures) contains(feature policyFeatures) bool {
return pf&feature != 0
}
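// Illustrative sketch of the policyFeatures bitmask helpers above:
//
//	var f policyFeatures
//	f.setFeature(denyRules)
//	_ = f.contains(denyRules) // true
//	_ = f.contains(authRules) // false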
type L4DirectionPolicy struct {
PortRules L4PolicyMap
// features tracks properties of PortRules to skip code when features are not used
features policyFeatures
}
func newL4DirectionPolicy() L4DirectionPolicy {
return L4DirectionPolicy{
PortRules: NewL4PolicyMap(),
}
}
// Detach removes the cached selectors held by L4PolicyMap from the
// selectorCache, allowing the map to be garbage collected when there
// are no more references to it.
func (l4 L4DirectionPolicy) Detach(selectorCache *SelectorCache) {
l4.PortRules.Detach(selectorCache)
}
// detach is used directly from tracing and testing functions
func (l4M *l4PolicyMap) Detach(selectorCache *SelectorCache) {
l4M.ForEach(func(l4 *L4Filter) bool {
l4.detach(selectorCache)
return true
})
}
// Attach makes all the L4Filters point back to the L4Policy that contains them.
// This is done before the L4PolicyMap is exposed to concurrent access.
// Returns the bitmask of all redirect types for this policymap.
func (l4 *L4DirectionPolicy) attach(ctx PolicyContext, l4Policy *L4Policy) redirectTypes {
var redirectTypes redirectTypes
var features policyFeatures
l4.PortRules.ForEach(func(f *L4Filter) bool {
features |= f.attach(ctx, l4Policy)
redirectTypes |= f.redirectType()
return true
})
l4.features = features
return redirectTypes
}
// containsAllL3L4 checks if the L4PolicyMap contains all L4 ports in `ports`.
// For L4Filters that specify ToEndpoints or FromEndpoints, uses `labels` to
// determine whether the policy allows L4 communication between the corresponding
// endpoints.
// Returns api.Denied in the following conditions:
// - If a single port is not present in the `L4PolicyMap` and is not allowed
// by the distilled L3 policy
// - If a port is present in the `L4PolicyMap`, but it applies ToEndpoints or
// FromEndpoints constraints that require labels not present in `labels`.
//
// Otherwise, returns api.Allowed.
//
// Note: Only used for policy tracing
func (l4M *l4PolicyMap) containsAllL3L4(labels labels.LabelArray, ports []*models.Port) api.Decision {
if l4M.Len() == 0 {
return api.Allowed
}
// Check L3-only filters first.
filter := l4M.ExactLookup("0", 0, "ANY")
if filter != nil {
matches, isDeny := filter.matchesLabels(labels)
switch {
case matches && isDeny:
return api.Denied
case matches:
return api.Allowed
}
}
for _, l4Ctx := range ports {
portStr := l4Ctx.Name
if !iana.IsSvcName(portStr) {
portStr = strconv.FormatUint(uint64(l4Ctx.Port), 10)
}
lwrProtocol := l4Ctx.Protocol
var isUDPDeny, isTCPDeny, isSCTPDeny bool
switch lwrProtocol {
case "", models.PortProtocolANY:
tcpFilter := l4M.LongestPrefixMatch(portStr, "TCP")
tcpmatch := tcpFilter != nil
if tcpmatch {
tcpmatch, isTCPDeny = tcpFilter.matchesLabels(labels)
}
udpFilter := l4M.LongestPrefixMatch(portStr, "UDP")
udpmatch := udpFilter != nil
if udpmatch {
udpmatch, isUDPDeny = udpFilter.matchesLabels(labels)
}
sctpFilter := l4M.LongestPrefixMatch(portStr, "SCTP")
sctpmatch := sctpFilter != nil
if sctpmatch {
sctpmatch, isSCTPDeny = sctpFilter.matchesLabels(labels)
}
if (!tcpmatch && !udpmatch && !sctpmatch) || (isTCPDeny && isUDPDeny && isSCTPDeny) {
return api.Denied
}
default:
filter := l4M.LongestPrefixMatch(portStr, lwrProtocol)
if filter == nil {
return api.Denied
}
matches, isDeny := filter.matchesLabels(labels)
if !matches || isDeny {
return api.Denied
}
}
}
return api.Allowed
}
type L4Policy struct {
Ingress L4DirectionPolicy
Egress L4DirectionPolicy
AuthMap AuthMap
// Revision is the repository revision used to generate this policy.
Revision uint64
// redirectTypes is a bitmap containing the types of redirect contained by this policy. It
// is computed when the policy maps are attached, to avoid scanning them repeatedly when
// using the L4Policy.
redirectTypes redirectTypes
// Endpoint policies using this L4Policy
// These are circular references, cleaned up in Detach()
// This mutex is taken while Endpoint mutex is held, so Endpoint lock
// MUST always be taken before this mutex.
mutex lock.RWMutex
users map[*EndpointPolicy]struct{}
}
// NewL4Policy creates a new L4Policy
func NewL4Policy(revision uint64) L4Policy {
return L4Policy{
Ingress: newL4DirectionPolicy(),
Egress: newL4DirectionPolicy(),
Revision: revision,
users: make(map[*EndpointPolicy]struct{}),
}
}
// insertUser adds a user to the L4Policy so that incremental
// updates of the L4Policy may be forwarded to the users of it.
func (l4 *L4Policy) insertUser(user *EndpointPolicy) {
l4.mutex.Lock()
// 'users' is set to nil when the policy is detached. This
// happens to the old policy when it is being replaced with a
// new one, or when the last endpoint using this policy is
// removed.
// In the case of a policy update it is possible that an
// endpoint has started regeneration before the policy was
// updated, and that the policy was updated before the said
// endpoint reached this point. In this case the endpoint's
// policy is going to be recomputed soon after and we do
// nothing here.
if l4.users != nil {
l4.users[user] = struct{}{}
}
l4.mutex.Unlock()
}
// removeUser removes a user that no longer needs incremental updates
// from the L4Policy.
func (l4 *L4Policy) removeUser(user *EndpointPolicy) {
// 'users' is set to nil when the policy is detached. This
// happens to the old policy when it is being replaced with a
// new one, or when the last endpoint using this policy is
// removed.
l4.mutex.Lock()
if l4.users != nil {
delete(l4.users, user)
}
l4.mutex.Unlock()
}
// AccumulateMapChanges distributes the given changes to the registered users.
//
// The caller is responsible for making sure the same identity is not
// present in both 'adds' and 'deletes'.
func (l4Policy *L4Policy) AccumulateMapChanges(l4 *L4Filter, cs CachedSelector, adds, deletes []identity.NumericIdentity) {
port := uint16(l4.Port)
proto := uint8(l4.U8Proto)
derivedFrom := l4.RuleOrigin[cs]
direction := trafficdirection.Egress
if l4.Ingress {
direction = trafficdirection.Ingress
}
perSelectorPolicy := l4.PerSelectorPolicies[cs]
redirect := perSelectorPolicy.IsRedirect()
listener := perSelectorPolicy.GetListener()
priority := perSelectorPolicy.GetPriority()
hasAuth, authType := perSelectorPolicy.GetAuthType()
isDeny := perSelectorPolicy != nil && perSelectorPolicy.IsDeny
// Must take a copy of 'users' as GetNamedPort() will lock the Endpoint below and
// the Endpoint lock may not be taken while 'l4.mutex' is held.
l4Policy.mutex.RLock()
users := make(map[*EndpointPolicy]struct{}, len(l4Policy.users))
for user := range l4Policy.users {
users[user] = struct{}{}
}
l4Policy.mutex.RUnlock()
for epPolicy := range users {
// Skip if endpoint has no policy maps
if !epPolicy.PolicyOwner.HasBPFPolicyMap() {
continue
}
// resolve named port
if port == 0 && l4.PortName != "" {
port = epPolicy.PolicyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
if port == 0 {
continue
}
}
var proxyPort uint16
if redirect {
var err error
proxyPort, err = epPolicy.PolicyOwner.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, listener)
if err != nil {
// This happens for new redirects that have not been realized
// yet. The accumulated changes should only be consumed after new
// redirects have been realized. ConsumeMapChanges then maps this
// invalid value to the real redirect port before the entry is
// visible to the endpoint package.
proxyPort = unrealizedRedirectPort
}
}
var keysToAdd []Key
for _, mp := range PortRangeToMaskedPorts(port, l4.EndPort) {
keysToAdd = append(keysToAdd, Key{
DestPort: mp.port, // NOTE: Port is in host byte-order!
InvertedPortMask: ^mp.mask,
Nexthdr: proto,
TrafficDirection: direction.Uint8(),
})
}
value := NewMapStateEntry(cs, derivedFrom, proxyPort, listener, priority, isDeny, hasAuth, authType)
if option.Config.Debug {
authString := "default"
if hasAuth {
authString = authType.String()
}
log.WithFields(logrus.Fields{
logfields.EndpointSelector: cs,
logfields.AddedPolicyID: adds,
logfields.DeletedPolicyID: deletes,
logfields.Port: port,
logfields.Protocol: proto,
logfields.TrafficDirection: direction,
logfields.IsRedirect: redirect,
logfields.AuthType: authString,
logfields.Listener: listener,
logfields.ListenerPriority: priority,
}).Debug("AccumulateMapChanges")
}
epPolicy.policyMapChanges.AccumulateMapChanges(cs, adds, deletes, keysToAdd, value)
}
}
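// A rough sketch of the key expansion performed above, assuming
// PortRangeToMaskedPorts splits the [port, EndPort] range into masked port
// prefixes (the concrete ports and protocol here are illustrative):
//
//	for _, mp := range PortRangeToMaskedPorts(80, 88) {
//		k := Key{
//			DestPort:         mp.port,  // host byte-order
//			InvertedPortMask: ^mp.mask, // 0 means "exact port match"
//			Nexthdr:          6,        // TCP
//			TrafficDirection: trafficdirection.Ingress.Uint8(),
//		}
//		_ = k // each key is later paired with the same MapStateEntry value
//	}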
// Detach makes the L4Policy ready for garbage collection, removing
// circular pointer references.
// Note that the L4Policy itself is not modified in any way, so that it may still
// be used concurrently.
func (l4 *L4Policy) Detach(selectorCache *SelectorCache) {
l4.Ingress.Detach(selectorCache)
l4.Egress.Detach(selectorCache)
l4.mutex.Lock()
l4.users = nil
l4.mutex.Unlock()
}
// Attach makes all the L4Filters point back to the L4Policy that contains them.
// This is done before the L4Policy is exposed to concurrent access.
func (l4 *L4Policy) Attach(ctx PolicyContext) {
ingressRedirects := l4.Ingress.attach(ctx, l4)
egressRedirects := l4.Egress.attach(ctx, l4)
l4.redirectTypes = ingressRedirects | egressRedirects
}
// IngressCoversContext checks if the receiver's ingress L4Policy contains
// all `dPorts` and `labels`.
//
// Note: Only used for policy tracing
func (l4M *l4PolicyMap) IngressCoversContext(ctx *SearchContext) api.Decision {
return l4M.containsAllL3L4(ctx.From, ctx.DPorts)
}
// EgressCoversContext checks if the receiver's egress L4Policy contains
// all `dPorts` and `labels`.
//
// Note: Only used for policy tracing
func (l4M *l4PolicyMap) EgressCoversContext(ctx *SearchContext) api.Decision {
return l4M.containsAllL3L4(ctx.To, ctx.DPorts)
}
// HasRedirect returns true if the L4 policy contains at least one port redirection
func (l4 *L4Policy) HasRedirect() bool {
return l4 != nil && l4.redirectTypes != redirectTypeNone
}
// HasEnvoyRedirect returns true if the L4 policy contains at least one port redirection to Envoy
func (l4 *L4Policy) HasEnvoyRedirect() bool {
return l4 != nil && l4.redirectTypes&redirectTypeEnvoy == redirectTypeEnvoy
}
// HasProxylibRedirect returns true if the L4 policy contains at least one port redirection to Proxylib
func (l4 *L4Policy) HasProxylibRedirect() bool {
return l4 != nil && l4.redirectTypes&redirectTypeProxylib == redirectTypeProxylib
}
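// GetModel returns the API model representation of this L4Policy, or nil if
// the policy is nil.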
func (l4 *L4Policy) GetModel() *models.L4Policy {
if l4 == nil {
return nil
}
ingress := []*models.PolicyRule{}
l4.Ingress.PortRules.ForEach(func(v *L4Filter) bool {
rulesBySelector := map[string][][]string{}
derivedFrom := labels.LabelArrayList{}
for sel, rules := range v.RuleOrigin {
derivedFrom.MergeSorted(rules)
rulesBySelector[sel.String()] = rules.GetModel()
}
ingress = append(ingress, &models.PolicyRule{
Rule: v.Marshal(),
DerivedFromRules: derivedFrom.GetModel(),
RulesBySelector: rulesBySelector,
})
return true
})
egress := []*models.PolicyRule{}
l4.Egress.PortRules.ForEach(func(v *L4Filter) bool {
derivedFrom := labels.LabelArrayList{}
for _, rules := range v.RuleOrigin {
derivedFrom.MergeSorted(rules)
}
egress = append(egress, &models.PolicyRule{
Rule: v.Marshal(),
DerivedFromRules: derivedFrom.GetModel(),
})
return true
})
return &models.L4Policy{
Ingress: ingress,
Egress: egress,
}
}
// ProxyPolicy is any type which encodes state needed to redirect to an L7
// proxy.
type ProxyPolicy interface {
CopyL7RulesPerEndpoint() L7DataMap
GetL7Parser() L7ParserType
GetIngress() bool
GetPort() uint16
GetProtocol() uint8
GetListener() string
}
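// A minimal sketch of a type satisfying ProxyPolicy, using hypothetical fixed
// values (illustrative only; the real implementers live elsewhere in this
// package):
//
//	type staticProxyPolicy struct{ l7 L7DataMap }
//
//	func (p staticProxyPolicy) CopyL7RulesPerEndpoint() L7DataMap { return p.l7 }
//	func (p staticProxyPolicy) GetL7Parser() L7ParserType         { return ParserTypeHTTP }
//	func (p staticProxyPolicy) GetIngress() bool                  { return true }
//	func (p staticProxyPolicy) GetPort() uint16                   { return 80 }
//	func (p staticProxyPolicy) GetProtocol() uint8                { return 6 } // TCP
//	func (p staticProxyPolicy) GetListener() string               { return "" }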
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"fmt"
stdlog "log"
"sync"
"testing"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/fqdn/re"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
)
var (
hostSelector = api.ReservedEndpointSelectors[labels.IDNameHost]
toFoo = &SearchContext{To: labels.ParseSelectLabelArray("foo")}
dummySelectorCacheUser = &DummySelectorCacheUser{}
fooSelector = api.NewESFromLabels(labels.ParseSelectLabel("foo"))
bazSelector = api.NewESFromLabels(labels.ParseSelectLabel("baz"))
selFoo = api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar1 = api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
selBar2 = api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
)
type testData struct {
sc *SelectorCache
repo *Repository
testPolicyContext *testPolicyContextType
cachedSelectorA CachedSelector
cachedSelectorC CachedSelector
cachedSelectorHost CachedSelector
wildcardCachedSelector CachedSelector
cachedFooSelector CachedSelector
cachedBazSelector CachedSelector
cachedSelectorBar1 CachedSelector
cachedSelectorBar2 CachedSelector
}
func newTestData() *testData {
td := &testData{
sc: testNewSelectorCache(nil),
repo: NewPolicyRepository(nil, nil, nil),
testPolicyContext: &testPolicyContextType{},
}
td.testPolicyContext.sc = td.sc
td.repo.selectorCache = td.sc
td.wildcardCachedSelector, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, api.WildcardEndpointSelector)
td.cachedSelectorA, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, endpointSelectorA)
td.cachedSelectorC, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, endpointSelectorC)
td.cachedSelectorHost, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, hostSelector)
td.cachedFooSelector, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, fooSelector)
td.cachedBazSelector, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, bazSelector)
td.cachedSelectorBar1, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, selBar1)
td.cachedSelectorBar2, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, nil, selBar2)
return td
}
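// Typical usage in the tests below (a sketch following the pattern used
// throughout this file):
//
//	td := newTestData()
//	td.repo.MustAddList(api.Rules{ /* rules under test */ })
//	ctx := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
//	pol, err := td.repo.ResolveL4IngressPolicy(&ctx)
//	require.NoError(t, err)
//	// ... assertions on the resolved filters ...
//	pol.Detach(td.repo.GetSelectorCache())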
// resetRepo clears only the policy repository.
// Some tests rely on the accumulated state, but need a clean repo.
func (td *testData) resetRepo() *Repository {
td.repo = NewPolicyRepository(nil, nil, nil)
td.repo.selectorCache = td.sc
return td.repo
}
func (td *testData) addIdentity(id *identity.Identity) {
wg := &sync.WaitGroup{}
td.sc.UpdateIdentities(
identity.IdentityMap{
id.ID: id.LabelArray,
}, nil, wg)
wg.Wait()
}
// testPolicyContextType is a dummy context used when evaluating rules.
type testPolicyContextType struct {
isDeny bool
ns string
sc *SelectorCache
}
func (p *testPolicyContextType) GetNamespace() string {
return p.ns
}
func (p *testPolicyContextType) GetSelectorCache() *SelectorCache {
return p.sc
}
func (p *testPolicyContextType) GetTLSContext(tls *api.TLSContext) (ca, public, private string, err error) {
switch tls.Secret.Name {
case "tls-cert":
return "", "fake public cert", "fake private key", nil
case "tls-ca-certs":
return "fake CA certs", "", "", nil
}
return "", "", "", fmt.Errorf("Unknown test secret '%s'", tls.Secret.Name)
}
func (p *testPolicyContextType) GetEnvoyHTTPRules(*api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
return nil, true
}
func (p *testPolicyContextType) SetDeny(isDeny bool) bool {
oldDeny := p.isDeny
p.isDeny = isDeny
return oldDeny
}
func (p *testPolicyContextType) IsDeny() bool {
return p.isDeny
}
func init() {
re.InitRegexCompileLRU(defaults.FQDNRegexCompileLRUSize)
}
// Tests in this file:
//
// How to read this table:
// Case: The test / subtest number.
// L3: Matches at L3 for rule 1, followed by rule 2.
// L4: Matches at L4.
// L7: Rules at L7 for rule 1, followed by rule 2.
// Notes: Extra information about the test.
//
// +-----+-----------------+----------+-----------------+------------------------------------------------------+
// |Case | L3 (1, 2) match | L4 match | L7 match (1, 2) | Notes |
// +=====+=================+==========+=================+======================================================+
// | 1A | *, * | 80/TCP | *, * | Allow all communication on the specified port |
// | 1B | -, - | 80/TCP | *, * | Deny all with an empty FromEndpoints slice |
// | 2A | *, * | 80/TCP | *, "GET /" | Rule 1 shadows rule 2 |
// | 2B | *, * | 80/TCP | "GET /", * | Same as 2A, but import in reverse order |
// | 3 | *, * | 80/TCP | "GET /","GET /" | Exactly duplicate rules (HTTP) |
// | 4 | *, * | 9092/TCP | "foo","foo" | Exactly duplicate rules (Kafka) |
// | 5A | *, * | 80/TCP | "foo","GET /" | Rules with conflicting L7 parser |
// | 5B | *, * | 80/TCP | "GET /","foo" | Same as 5A, but import in reverse order |
// | 6A | "id=a", * | 80/TCP | *, * | Rule 2 is a superset of rule 1 |
// | 6B | *, "id=a" | 80/TCP | *, * | Same as 6A, but import in reverse order |
// | 7A | "id=a", * | 80/TCP | "GET /", * | All traffic is allowed; traffic to A goes via proxy |
// | 7B | *, "id=a" | 80/TCP | *, "GET /" | Same as 7A, but import in reverse order |
// | 8A | "id=a", * | 80/TCP | "GET /","GET /" | Rule 2 is the same as rule 1, except matching all L3 |
// | 8B | *, "id=a" | 80/TCP | "GET /","GET /" | Same as 8A, but import in reverse order |
// | 9A | "id=a", * | 80/TCP | "foo","GET /" | Rules with conflicting L7 parser (+L3 match) |
// | 9B | *, "id=a" | 80/TCP | "GET /","foo" | Same as 9A, but import in reverse order |
// | 10 | "id=a", "id=c" | 80/TCP | "GET /","GET /" | Allow at L7 for two distinct labels (disjoint set) |
// | 11 | "id=a", "id=c" | 80/TCP | *, * | Allow at L4 for two distinct labels (disjoint set) |
// | 12 | "id=a", | 80/TCP | "GET /" | Configure to allow localhost traffic always |
// | 13 | -, - | 80/TCP | *, * | Deny all with an empty ToEndpoints slice |
// +-----+-----------------+----------+-----------------+------------------------------------------------------+
func TestMergeAllowAllL3AndAllowAllL7(t *testing.T) {
td := newTestData()
// Case 1A: Specify WildcardEndpointSelector explicitly.
td.repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer := new(bytes.Buffer)
ctx := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err := td.repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.True(t, filter.SelectsAllEndpoints())
require.Equal(t, ParserTypeNone, filter.L7Parser)
require.Equal(t, 1, len(filter.PerSelectorPolicies))
l4IngressPolicy.Detach(td.repo.GetSelectorCache())
// Case 1B: an empty non-nil FromEndpoints does not select any identity.
td = newTestData()
td.repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err = td.repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.Nil(t, filter)
l4IngressPolicy.Detach(td.repo.GetSelectorCache())
}
// Case 2: allow all at L3 in both rules. Allow all in one L7 rule, but second
// rule restricts at L7. Because one L7 rule allows at L7, all traffic is allowed
// at L7, but still redirected at the proxy.
// Should resolve to one rule.
func TestMergeAllowAllL3AndShadowedL7(t *testing.T) {
td := newTestData()
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctx := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
ingressState := traceState{}
res, err := rule1.resolveIngressPolicy(td.testPolicyContext, &ctx, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
}})
require.EqualValues(t, expected, res)
require.Equal(t, 1, ingressState.selectedRules)
require.Equal(t, 1, ingressState.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
// Case 2B: Flip the order of case 2A so that the rule being merged with is
// different from the rule being consumed.
td = newTestData()
td.repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err := td.repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.True(t, filter.SelectsAllEndpoints())
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Equal(t, 1, len(filter.PerSelectorPolicies))
l4IngressPolicy.Detach(td.repo.GetSelectorCache())
}
// Case 3: allow all at L3 in both rules. Both rules have same parser type and
// same API resource specified at L7 for HTTP.
func TestMergeIdenticalAllowAllL3AndRestrictedL7HTTP(t *testing.T) {
td := newTestData()
identicalHTTPRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
}})
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := identicalHTTPRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = identicalHTTPRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 4: identical allow all at L3 with identical restrictions on Kafka.
func TestMergeIdenticalAllowAllL3AndRestrictedL7Kafka(t *testing.T) {
td := newTestData()
identicalKafkaRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"9092/TCP": {
Port: 9092,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeKafka,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
}})
state := traceState{}
res, err := identicalKafkaRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = identicalKafkaRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 5: use conflicting protocols on the same port in different rules. This
// is not supported, so return an error.
func TestMergeIdenticalAllowAllL3AndMismatchingParsers(t *testing.T) {
td := newTestData()
// Case 5A: Kafka first, HTTP second.
conflictingParsersRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := conflictingParsersRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NotNil(t, err)
require.Nil(t, res)
// Case 5B: HTTP first, Kafka second.
conflictingParsersRule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = conflictingParsersRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NotNil(t, err)
require.Nil(t, res)
// Case 5B+: HTTP first, generic L7 second.
conflictingParsersIngressRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testing",
L7: []api.PortRuleL7{
{"method": "PUT", "path": "/Foo"},
},
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = conflictingParsersIngressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
res, err = conflictingParsersIngressRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NotNil(t, err)
require.Nil(t, res)
// Case 5B++: generic L7 without rules first, HTTP second.
conflictingParsersEgressRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testing",
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxAToC := SearchContext{From: labelsA, To: labelsC, Trace: TRACE_VERBOSE}
ctxAToC.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = conflictingParsersEgressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
res, err = conflictingParsersEgressRule.resolveEgressPolicy(td.testPolicyContext, &ctxAToC, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NotNil(t, err)
require.Nil(t, res)
}
// TLS policies with and without interception
// TLS policy without L7 rules does not inspect L7, uses L7ParserType "tls"
func TestMergeTLSTCPPolicy(t *testing.T) {
td := newTestData()
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
TerminatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-cert",
},
},
OriginatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-ca-certs",
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeTLS,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
TerminatingTLS: &TLSContext{
CertificateChain: "fake public cert",
PrivateKey: "fake private key",
},
OriginatingTLS: &TLSContext{
TrustedCA: "fake CA certs",
},
EnvoyHTTPRules: nil,
CanShortCircuit: false,
L7Rules: api.L7Rules{},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
require.EqualValues(t, expected, res)
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeTLS, l4Filter.L7Parser)
log.Infof("res: %v", res)
}
func TestMergeTLSHTTPPolicy(t *testing.T) {
td := newTestData()
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
TerminatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-cert",
},
},
OriginatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-ca-certs",
},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
TerminatingTLS: &TLSContext{
CertificateChain: "fake public cert",
PrivateKey: "fake private key",
},
OriginatingTLS: &TLSContext{
TrustedCA: "fake CA certs",
},
EnvoyHTTPRules: nil,
CanShortCircuit: false,
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
require.True(t, res.Equals(t, expected), res.Diff(t, expected))
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeHTTP, l4Filter.L7Parser)
log.Infof("res: %v", res)
}
func TestMergeTLSSNIPolicy(t *testing.T) {
td := newTestData()
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
TerminatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-cert",
},
},
OriginatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-ca-certs",
},
},
ServerNames: []string{"www.foo.com"},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
ServerNames: []string{"www.bar.com"},
}, {
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
TerminatingTLS: &TLSContext{
CertificateChain: "fake public cert",
PrivateKey: "fake private key",
},
OriginatingTLS: &TLSContext{
TrustedCA: "fake CA certs",
},
ServerNames: StringSet{"www.foo.com": {}, "www.bar.com": {}},
EnvoyHTTPRules: nil,
CanShortCircuit: false,
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
require.EqualValues(t, expected, res)
require.True(t, res.Equals(t, expected), res.Diff(t, expected))
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeHTTP, l4Filter.L7Parser)
log.Infof("res: %v", res)
}
func TestMergeListenerPolicy(t *testing.T) {
td := newTestData()
//
// no namespace in policyContext (Clusterwide policy): Can not refer to EnvoyConfig
//
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumEnvoyConfig",
Name: "test-cec",
},
Name: "test",
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.ErrorContains(t, err, "Listener \"test\" in CCNP can not use Kind CiliumEnvoyConfig")
require.Nil(t, res)
//
// no namespace in policyContext (Clusterwide policy): Must refer to a CiliumClusterwideEnvoyConfig
//
egressRule = &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumClusterwideEnvoyConfig",
Name: "shared-cec",
},
Name: "test",
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxFromFoo = SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = egressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
res, err = egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeCRD,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
EnvoyHTTPRules: nil,
CanShortCircuit: false,
isRedirect: true,
Listener: "/shared-cec/test",
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
require.EqualValues(t, expected, res)
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeCRD, l4Filter.L7Parser)
log.Infof("res: %v", res)
//
// namespace in policyContext (Namespaced policy): Can refer to EnvoyConfig
//
egressRule = &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumEnvoyConfig",
Name: "test-cec",
},
Name: "test",
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxFromFoo = SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = egressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
td.testPolicyContext.ns = "default"
res, err = egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeCRD,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
EnvoyHTTPRules: nil,
CanShortCircuit: false,
isRedirect: true,
Listener: "default/test-cec/test",
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
require.EqualValues(t, expected, res)
l4Filter = res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeCRD, l4Filter.L7Parser)
log.Infof("res: %v", res)
//
// namespace in policyContext (Namespaced policy): Can refer to Cluster-scoped
// CiliumClusterwideEnvoyConfig
//
egressRule = &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumClusterwideEnvoyConfig",
Name: "shared-cec",
},
Name: "test",
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxFromFoo = SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = egressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
td.testPolicyContext.ns = "default"
res, err = egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeCRD,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
EnvoyHTTPRules: nil,
CanShortCircuit: false,
isRedirect: true,
Listener: "/shared-cec/test",
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
require.EqualValues(t, expected, res)
l4Filter = res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeCRD, l4Filter.L7Parser)
log.Infof("res: %v", res)
}
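// As exercised above, the resolved Listener reference appears to follow the
// "<namespace>/<resource-name>/<listener-name>" pattern, with an empty
// namespace component for cluster-scoped configs:
//
//	"default/test-cec/test" // namespaced CiliumEnvoyConfig
//	"/shared-cec/test"      // CiliumClusterwideEnvoyConfig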
// Case 6: allow all at L3/L7 in one rule, and select an endpoint and allow all on L7
// in another rule. Should resolve to just allowing all on L3/L7 (first rule
// shadows the second).
func TestL3RuleShadowedByL3AllowAll(t *testing.T) {
td := newTestData()
// Case 6A: Specify WildcardEndpointSelector explicitly.
shadowRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil,
td.wildcardCachedSelector: nil,
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state := traceState{}
res, err := shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 6B: Reverse the ordering of the rules. Result should be the same.
shadowRule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorA: nil,
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 7: allow all at L3/L7 in one rule, and in another rule, select an endpoint
// which restricts on L7. Should resolve to just allowing all on L3/L7 (first rule
// shadows the second), but sending traffic to the HTTP proxy.
func TestL3RuleWithL7RulePartiallyShadowedByL3AllowAll(t *testing.T) {
td := newTestData()
// Case 7A: the rule selecting a specific endpoint with L7 restrictions comes
// first, followed by the rule which selects all endpoints and allows all on L7.
// The net result sets the parser type to that of the L7 rule, while the
// wildcard entry carries no L7 restriction.
shadowRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state := traceState{}
res, err := shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 7B: the rule selecting all endpoints and allowing all on L7 comes first,
// followed by the rule selecting a specific endpoint with L7 restrictions.
// The net result is the same as in 7A: the parser type comes from the L7 rule,
// and the wildcard entry carries no L7 restriction.
shadowRule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.wildcardCachedSelector: {nil},
td.cachedSelectorA: {nil},
},
}})
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 8: allow all at L3 and restricts on L7 in one rule, and in another rule,
// select an endpoint which restricts the same as the first rule on L7.
// Should resolve to just allowing all on L3, but restricting on L7 for both
// wildcard and the specified endpoint.
func TestL3RuleWithL7RuleShadowedByL3AllowAll(t *testing.T) {
td := newTestData()
// Case 8A: the rule selecting a specific endpoint with L7 restrictions comes
// first, followed by a rule which selects all endpoints and restricts on the
// same resource at L7. PerSelectorPolicies contains an entry for the selector
// of each rule, both carrying the L7 restriction.
case8Rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state := traceState{}
res, err := case8Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = case8Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 8B: first insert the rule which selects all endpoints and restricts on
// the same resource at L7, then insert the rule which selects a specific
// endpoint with the same L7 restrictions. PerSelectorPolicies contains an entry
// for the selector of each rule, both carrying the L7 restriction.
case8Rule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state = traceState{}
res, err = case8Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = case8Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 9: allow all at L3 and restricts on L7 in one rule, and in another rule,
// select an endpoint which restricts on different L7 protocol.
// Should fail as cannot have conflicting parsers on same port.
func TestL3SelectingEndpointAndL3AllowAllMergeConflictingL7(t *testing.T) {
td := newTestData()
// Case 9A: Kafka first, then HTTP.
conflictingL7Rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NotNil(t, err)
require.Nil(t, res)
state = traceState{}
res, err = conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 9B: HTTP first, then Kafka.
conflictingL7Rule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
}}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NotNil(t, err)
require.Nil(t, res)
state = traceState{}
res, err = conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 10: restrict same path / method on L7 in both rules,
// but select different endpoints in each rule.
func TestMergingWithDifferentEndpointsSelectedAllowSameL7(t *testing.T) {
td := newTestData()
selectDifferentEndpointsRestrictL7 := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
state := traceState{}
res, err := selectDifferentEndpointsRestrictL7.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
buffer = new(bytes.Buffer)
ctxToC := SearchContext{To: labelsC, Trace: TRACE_VERBOSE}
ctxToC.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = selectDifferentEndpointsRestrictL7.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 11: allow all on L7 in both rules, but select different endpoints in each rule.
func TestMergingWithDifferentEndpointSelectedAllowAllL7(t *testing.T) {
td := newTestData()
selectDifferentEndpointsAllowAllL7 := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil,
td.cachedSelectorC: nil,
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
},
}})
state := traceState{}
res, err := selectDifferentEndpointsAllowAllL7.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
buffer = new(bytes.Buffer)
ctxToC := SearchContext{To: labelsC, Trace: TRACE_VERBOSE}
ctxToC.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = selectDifferentEndpointsAllowAllL7.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 12: allow all at L3 in one rule with restrictions at L7. Determine that
// the host should always be allowed. From Host should go to proxy allow all;
// other L3 should restrict at L7 in a separate filter.
func TestAllowingLocalhostShadowsL7(t *testing.T) {
td := newTestData()
// This test checks that when the AllowLocalhost=always option is
// enabled, we always wildcard the host at L7. That means we need to
// set the option in the config, and of course clean up afterwards so
// that this test doesn't affect subsequent tests.
// XXX: Does this affect other tests being run concurrently?
oldLocalhostOpt := option.Config.AllowLocalhost
option.Config.AllowLocalhost = option.AllowLocalhostAlways
defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()
rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorHost: nil, // no proxy redirect
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
}})
state := traceState{}
res, err := rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
// Endpoints not selected by the rule should not match the rule.
buffer = new(bytes.Buffer)
ctxToC := SearchContext{To: labelsC, Trace: TRACE_VERBOSE}
ctxToC.Logging = stdlog.New(buffer, "", 0)
state = traceState{}
res, err = rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
func TestEntitiesL3(t *testing.T) {
td := newTestData()
allowWorldRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: api.EntitySlice{api.EntityAll},
},
},
},
}}
buffer := new(bytes.Buffer)
ctxFromA := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctxFromA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"0/ANY": {
Port: 0,
Protocol: api.ProtoAny,
U8Proto: 0,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
}})
state := traceState{}
res, err := allowWorldRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
}
// Case 13: deny all at L3 in case of an empty non-nil toEndpoints slice.
func TestEgressEmptyToEndpoints(t *testing.T) {
td := newTestData()
rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}}
buffer := new(bytes.Buffer)
ctxFromA := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctxFromA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := rule.resolveEgressPolicy(td.testPolicyContext, &ctxFromA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"net/netip"
"slices"
"strconv"
"github.com/hashicorp/go-hclog"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"github.com/cilium/cilium/pkg/container/bitlpm"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
policyTypes "github.com/cilium/cilium/pkg/policy/types"
)
// Key and Keys are types used both internally and externally.
// The types have been lifted out, but an alias is being used
// so we don't have to change all the code everywhere.
//
// Do not use these types outside of pkg/policy or pkg/endpoint,
// lest ye find yourself with hundreds of unnecessary imports.
type Key = policyTypes.Key
type Keys = policyTypes.Keys
var (
// localHostKey represents an ingress L3 allow from the local host.
localHostKey = Key{
Identity: identity.ReservedIdentityHost.Uint32(),
InvertedPortMask: 0xffff, // This is a wildcard
TrafficDirection: trafficdirection.Ingress.Uint8(),
}
// allKey represents a key for unknown traffic, i.e., all traffic.
// We have one for each traffic direction
allKey = [2]Key{{
Identity: identity.IdentityUnknown.Uint32(),
InvertedPortMask: 0xffff,
TrafficDirection: 0,
}, {
Identity: identity.IdentityUnknown.Uint32(),
InvertedPortMask: 0xffff,
TrafficDirection: 1,
}}
)
const (
LabelKeyPolicyDerivedFrom = "io.cilium.policy.derived-from"
LabelAllowLocalHostIngress = "allow-localhost-ingress"
LabelAllowAnyIngress = "allow-any-ingress"
LabelAllowAnyEgress = "allow-any-egress"
LabelVisibilityAnnotation = "visibility-annotation"
// Using largest possible port value since it has the lowest priority
unrealizedRedirectPort = uint16(65535)
)
// MapState is a map interface for policy maps
type MapState interface {
Get(Key) (MapStateEntry, bool)
// ForEach allows iteration over the MapStateEntries. It returns true if
// the iteration was not stopped early by the callback.
ForEach(func(Key, MapStateEntry) (cont bool)) (complete bool)
GetIdentities(*logrus.Logger) ([]int64, []int64)
GetDenyIdentities(*logrus.Logger) ([]int64, []int64)
Len() int
// private accessors
deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool
//
// modifiers are private
//
delete(Key, Identities)
insert(Key, MapStateEntry, Identities)
revertChanges(Identities, ChangeState)
addVisibilityKeys(PolicyOwner, uint16, *VisibilityMetadata, Identities, ChangeState)
allowAllIdentities(ingress, egress bool)
determineAllowLocalhostIngress()
denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures, changes ChangeState)
deleteKeyWithChanges(key Key, owner MapStateOwner, identities Identities, changes ChangeState)
// For testing from other packages only
Equals(MapState) bool
Diff(expected MapState) string
WithState(initMap map[Key]MapStateEntry, identities Identities) MapState
}
type mapStateValidator interface {
// identity relations tests
isSupersetOf(ancestor, descendant Key, identities Identities)
isSupersetOrSame(ancestor, descendant Key, identities Identities)
// trafficdirection/protocol/port tests
isBroader(ancestor, descendant Key)
isBroaderOrEqual(ancestor, descendant Key)
}
// mapState is a state of a policy map.
type mapState struct {
allows mapStateMap
denies mapStateMap
validator mapStateValidator
}
// Identities is a convenience interface for looking up CIDRs
// associated with an identity
type Identities interface {
GetPrefix(identity.NumericIdentity) netip.Prefix
}
// mapStateMap is a convenience type representing the actual structure mapping
// policymap keys to policymap entries.
//
// The `bitlpm.Trie` indexes the TrafficDirection, Protocol, and Port of
// a policy Key but does **not** index the identity. Instead identities
// that share TrafficDirection, Protocol, and Port are indexed in a builtin
// map type that is the associated value of the key-prefix of TrafficDirection,
// Protocol, and Port. This is done so that Identity does not explode
// the size of the Trie. Consider the case of a policy that selects
// many identities. In this case, if Identity was indexed then every
// identity associated with the policy would create at least one
// intermediate node in the Trie with its own sub node associated with
// TrafficDirection, Protocol, and Port. When identity is not indexed
// then one policy will map to one key-prefix with a builtin map type
// that associates each identity with a MapStateEntry. This strategy
// greatly enhances the usefulness of the Trie and improves lookup,
// deletion, and insertion times. (An illustrative sketch of this sharing
// follows the upsert method below.)
type mapStateMap struct {
// entries is the map containing the MapStateEntries
entries map[Key]MapStateEntry
// trie is a Trie that indexes policy Keys without their identity
// and stores the identities in an associated builtin map.
trie bitlpm.Trie[bitlpm.Key[Key], IDSet]
}
type IDSet struct {
// ids contains all IDs in the set
ids map[identity.NumericIdentity]struct{}
// cidr contains the subset of IDs that have a valid prefix
// nil if not needed
cidr *bitlpm.CIDRTrie[map[identity.NumericIdentity]struct{}]
}
func (msm *mapStateMap) Lookup(k Key) (MapStateEntry, bool) {
v, ok := msm.entries[k]
return v, ok
}
var ip4ZeroPrefix = netip.MustParsePrefix("0.0.0.0/0")
var ip6ZeroPrefix = netip.MustParsePrefix("::/0")
func (msm *mapStateMap) upsert(k Key, e MapStateEntry, identities Identities) {
_, exists := msm.entries[k]
// upsert entry
msm.entries[k] = e
// Update indices if 'k' is a new key
if !exists {
// Update trie
idSet, ok := msm.trie.ExactLookup(k.PrefixLength(), k)
if !ok {
idSet = IDSet{ids: make(map[identity.NumericIdentity]struct{})}
kCpy := k
kCpy.Identity = 0
msm.trie.Upsert(kCpy.PrefixLength(), kCpy, idSet)
}
id := identity.NumericIdentity(k.Identity)
idSet.ids[id] = struct{}{}
// update CIDR and ANY indices
switch {
case id == identity.ReservedIdentityWorld:
msm.insertCidr(ip4ZeroPrefix, k, &idSet)
msm.insertCidr(ip6ZeroPrefix, k, &idSet)
case id == identity.ReservedIdentityWorldIPv4:
msm.insertCidr(ip4ZeroPrefix, k, &idSet)
case id == identity.ReservedIdentityWorldIPv6:
msm.insertCidr(ip6ZeroPrefix, k, &idSet)
case id.HasLocalScope() && identities != nil:
prefix := identities.GetPrefix(id)
if prefix.IsValid() {
msm.insertCidr(prefix, k, &idSet)
}
}
}
}
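// The function below is an illustrative sketch, not part of the upstream code:
// it demonstrates the indexing strategy described in the mapStateMap comment
// above. Two keys that differ only in Identity share a single port/proto trie
// node, while their identities are collected in that node's IDSet. The
// identity and port numbers are arbitrary example values.
func exampleSharedTrieNode() int {
	msm := newMapStateMap()
	a := Key{
		Identity:         1001,
		DestPort:         80,
		Nexthdr:          6,
		TrafficDirection: trafficdirection.Ingress.Uint8(),
	}
	b := a
	b.Identity = 1002
	msm.upsert(a, MapStateEntry{}, nil)
	msm.upsert(b, MapStateEntry{}, nil)
	// Both identities hang off the same trie node; the trie lookup ignores the
	// Identity field, exactly as upsert does above.
	idSet, _ := msm.trie.ExactLookup(a.PrefixLength(), a)
	return len(idSet.ids) // 2
}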
func (msm *mapStateMap) insertCidr(prefix netip.Prefix, k Key, idSet *IDSet) {
if idSet.cidr == nil {
idSet.cidr = bitlpm.NewCIDRTrie[map[identity.NumericIdentity]struct{}]()
kCpy := k
kCpy.Identity = 0
msm.trie.Upsert(kCpy.PrefixLength(), kCpy, *idSet)
}
idMap, ok := idSet.cidr.ExactLookup(prefix)
if !ok || idMap == nil {
idMap = make(map[identity.NumericIdentity]struct{})
idSet.cidr.Upsert(prefix, idMap)
}
idMap[identity.NumericIdentity(k.Identity)] = struct{}{}
}
func (msm *mapStateMap) delete(k Key, identities Identities) {
_, exists := msm.entries[k]
if exists {
delete(msm.entries, k)
id := identity.NumericIdentity(k.Identity)
idSet, ok := msm.trie.ExactLookup(k.PrefixLength(), k)
if ok {
delete(idSet.ids, id)
if len(idSet.ids) == 0 {
msm.trie.Delete(k.PrefixLength(), k)
// IDSet is no longer in the trie
idSet.cidr = nil
}
}
// update CIDR and ANY indices
switch {
case id == identity.ReservedIdentityWorld:
msm.deleteCidr(ip4ZeroPrefix, k, &idSet)
msm.deleteCidr(ip6ZeroPrefix, k, &idSet)
case id == identity.ReservedIdentityWorldIPv4:
msm.deleteCidr(ip4ZeroPrefix, k, &idSet)
case id == identity.ReservedIdentityWorldIPv6:
msm.deleteCidr(ip6ZeroPrefix, k, &idSet)
case id.HasLocalScope() && identities != nil:
prefix := identities.GetPrefix(id)
if prefix.IsValid() {
msm.deleteCidr(prefix, k, &idSet)
}
}
}
}
func (msm *mapStateMap) deleteCidr(prefix netip.Prefix, k Key, idSet *IDSet) {
if idSet.cidr != nil {
idMap, ok := idSet.cidr.ExactLookup(prefix)
if ok {
if idMap != nil {
delete(idMap, identity.NumericIdentity(k.Identity))
}
// remove the idMap if empty
if len(idMap) == 0 {
idSet.cidr.Delete(prefix)
// remove the CIDR index if empty
if idSet.cidr.Len() == 0 {
idSet.cidr = nil
kCpy := k
kCpy.Identity = 0
msm.trie.Upsert(kCpy.PrefixLength(), kCpy, *idSet)
}
}
}
}
}
func (msm *mapStateMap) ForEach(f func(Key, MapStateEntry) bool) bool {
for k, e := range msm.entries {
if !f(k, e) {
return false
}
}
return true
}
func (msm *mapStateMap) forKey(k Key, f func(Key, MapStateEntry) bool) bool {
e, ok := msm.entries[k]
if ok {
return f(k, e)
}
stacktrace := hclog.Stacktrace()
log.Errorf("Missing MapStateEntry for key: %v. Stacktrace: %s", k, stacktrace)
return true
}
// ForEachNarrowerKeyWithBroaderID iterates over narrower port/protos and broader IDs in the trie.
// Equal port/protos or identities are not included.
func (msm *mapStateMap) ForEachNarrowerKeyWithBroaderID(key Key, prefixes []netip.Prefix, f func(Key, MapStateEntry) bool) {
msm.trie.Descendants(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.Key], idSet IDSet) bool {
// k is the key from trie with 0'ed ID
k := lpmKey.Value()
// Descendants iterates over equal port/proto, caller expects to see only narrower keys so skip it
if k.PortProtoIsEqual(key) {
return true
}
// ANY identities are not in the CIDR trie, but they are ancestors of all
// identities, visit them first, but not if key is also ANY
if key.Identity != 0 {
if _, exists := idSet.ids[0]; exists {
k.Identity = 0
if !msm.forKey(k, f) {
return false
}
}
}
// cidr is nil when empty
if idSet.cidr == nil {
return true
}
for _, prefix := range prefixes {
bailed := false
idSet.cidr.Ancestors(prefix, func(cidr netip.Prefix, ids map[identity.NumericIdentity]struct{}) bool {
for id := range ids {
if id != identity.NumericIdentity(key.Identity) {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
bailed = true
return false
}
}
}
return true
})
if bailed {
return false
}
}
return true
})
}
// ForEachBroaderOrEqualKey iterates over broader or equal keys in the trie.
func (msm *mapStateMap) ForEachBroaderOrEqualKey(key Key, prefixes []netip.Prefix, f func(Key, MapStateEntry) bool) {
msm.trie.Ancestors(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.Key], idSet IDSet) bool {
// k is the key from trie with 0'ed ID
k := lpmKey.Value()
// ANY identities are not in the CIDR trie, but they are ancestors of all
// identities, visit them first
if _, exists := idSet.ids[0]; exists {
k.Identity = 0
if !msm.forKey(k, f) {
return false
}
}
// identities without prefixes are not in the cidr trie,
// but need to visit all keys with the same identity
// ANY identity was already visited above
if len(prefixes) == 0 && key.Identity != 0 {
_, exists := idSet.ids[identity.NumericIdentity(key.Identity)]
if exists {
k.Identity = key.Identity
if !msm.forKey(k, f) {
return false
}
}
return true
}
// cidr is nil when empty
if idSet.cidr == nil {
return true
}
for _, prefix := range prefixes {
bailed := false
idSet.cidr.Ancestors(prefix, func(cidr netip.Prefix, ids map[identity.NumericIdentity]struct{}) bool {
for id := range ids {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
bailed = true
return false
}
}
return true
})
if bailed {
return false
}
}
return true
})
}
// ForEachNarrowerOrEqualKey iterates over narrower or equal keys in the trie.
func (msm *mapStateMap) ForEachNarrowerOrEqualKey(key Key, prefixes []netip.Prefix, f func(Key, MapStateEntry) bool) {
msm.trie.Descendants(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.Key], idSet IDSet) bool {
// k is the key from trie with 0'ed ID
k := lpmKey.Value()
// ANY identities are not in the CIDR trie, but all identities are descendants of
// them.
if key.Identity == 0 {
for id := range idSet.ids {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
return false
}
}
}
// identities without prefixes are not in the cidr trie,
// but need to visit all keys with the same identity
// ANY identity was already visited above
if len(prefixes) == 0 && key.Identity != 0 {
_, exists := idSet.ids[identity.NumericIdentity(key.Identity)]
if exists {
k.Identity = key.Identity
if !msm.forKey(k, f) {
return false
}
}
return true
}
// cidr is nil when empty
if idSet.cidr == nil {
return true
}
for _, prefix := range prefixes {
bailed := false
idSet.cidr.Descendants(prefix, func(cidr netip.Prefix, ids map[identity.NumericIdentity]struct{}) bool {
for id := range ids {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
bailed = true
return false
}
}
return true
})
if bailed {
return false
}
}
return true
})
}
// ForEachBroaderKeyWithNarrowerID iterates over broader proto/port with narrower identity in the trie.
// Equal port/protos or identities are not included.
func (msm *mapStateMap) ForEachBroaderKeyWithNarrowerID(key Key, prefixes []netip.Prefix, f func(Key, MapStateEntry) bool) {
msm.trie.Ancestors(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.Key], idSet IDSet) bool {
// k is the key from trie with 0'ed ID
k := lpmKey.Value()
// Skip equal PortProto
if k.PortProtoIsEqual(key) {
return true
}
// ANY identities are not in the CIDR trie, but all identities are descendants of
// them.
if key.Identity == 0 {
for id := range idSet.ids {
if id != 0 {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
return false
}
}
}
}
// cidr is nil when empty
if idSet.cidr == nil {
return true
}
for _, prefix := range prefixes {
bailed := false
idSet.cidr.Descendants(prefix, func(cidr netip.Prefix, ids map[identity.NumericIdentity]struct{}) bool {
for id := range ids {
if id != identity.NumericIdentity(key.Identity) {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
bailed = true
return false
}
}
}
return true
})
if bailed {
return false
}
}
return true
})
}
// ForEachKeyWithBroaderOrEqualPortProto iterates over broader or equal port/proto entries in the trie.
func (msm *mapStateMap) ForEachKeyWithBroaderOrEqualPortProto(key Key, f func(Key, MapStateEntry) bool) {
msm.trie.Ancestors(key.PrefixLength(), key, func(prefix uint, lpmKey bitlpm.Key[Key], idSet IDSet) bool {
k := lpmKey.Value()
for id := range idSet.ids {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
return false
}
}
return true
})
}
// ForEachKeyWithNarrowerOrEqualPortProto iterates over narrower or equal port/proto entries in the trie.
func (msm *mapStateMap) ForEachKeyWithNarrowerOrEqualPortProto(key Key, f func(Key, MapStateEntry) bool) {
msm.trie.Descendants(key.PrefixLength(), key, func(prefix uint, lpmKey bitlpm.Key[Key], idSet IDSet) bool {
k := lpmKey.Value()
for id := range idSet.ids {
k.Identity = uint32(id)
if !msm.forKey(k, f) {
return false
}
}
return true
})
}
func (msm *mapStateMap) Len() int {
return len(msm.entries)
}
// MapStateOwner is the owner of a MapStateEntry: in practice either a cached
// selector or another Key whose entry required this entry to be created.
type MapStateOwner interface{}
// MapStateEntry is the configuration associated with a Key in a
// MapState. This is a minimized version of policymap.PolicyEntry.
type MapStateEntry struct {
// The proxy port, in host byte order.
// If 0 (default), there is no proxy redirection for the corresponding
// Key. Any other value signifies proxy redirection.
ProxyPort uint16
// priority is used to select the Listener if multiple rules would apply different listeners
// to a policy map entry. Lower numbers indicate higher priority. If left out, the proxy
// port number (10000-20000) is used.
priority uint16
// Listener name for proxy redirection, if any
Listener string
// IsDeny is true when the policy should be denied.
IsDeny bool
// hasAuthType is 'DefaultAuthType' when policy has no explicit AuthType set. In this case the
// value of AuthType is derived from more generic entries covering this entry.
hasAuthType HasAuthType
// AuthType is non-zero when authentication is required for the traffic to be allowed.
AuthType AuthType
// DerivedFromRules tracks the policy rules this entry derives from,
// in sorted order.
DerivedFromRules labels.LabelArrayList
// Owners collects the keys in the map and selectors in the policy that require this key to be present.
// TODO: keep track which selector needed the entry to be deny, redirect, or just allow.
owners map[MapStateOwner]struct{}
// dependents contains the keys for entries created based on this entry. These entries
// will be deleted once all of the owners are deleted.
dependents Keys
}
// NewMapStateEntry creates a map state entry. If redirect is true, the
// caller is expected to replace the ProxyPort field before it is added to
// the actual BPF map.
// 'cs' is used to keep track of which policy selectors need this entry. If it is 'nil' this entry
// will become sticky and cannot be completely removed via incremental updates. Even in this case
// the entry may be overridden or removed by a deny entry.
func NewMapStateEntry(cs MapStateOwner, derivedFrom labels.LabelArrayList, proxyPort uint16, listener string, priority uint16, deny bool, hasAuth HasAuthType, authType AuthType) MapStateEntry {
if proxyPort == 0 {
listener = ""
priority = 0
} else if priority == 0 {
priority = proxyPort // default for tie-breaking
}
return MapStateEntry{
ProxyPort: proxyPort,
Listener: listener,
priority: priority,
DerivedFromRules: derivedFrom,
IsDeny: deny,
hasAuthType: hasAuth,
AuthType: authType,
owners: map[MapStateOwner]struct{}{cs: {}},
}
}
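// The function below is an illustrative sketch, not part of the upstream code:
// it shows the defaulting performed by NewMapStateEntry. A redirect entry
// without an explicit priority falls back to its proxy port for tie-breaking,
// while a non-redirect entry gets its listener and priority cleared. The nil
// owner makes the entries "sticky" as described above; the port and listener
// values are arbitrary examples.
func exampleNewMapStateEntryDefaults() (MapStateEntry, MapStateEntry) {
	redirect := NewMapStateEntry(nil, nil, 14000, "example-listener", 0, false, DefaultAuthType, AuthTypeDisabled)
	// redirect.priority == 14000, defaulted from the proxy port.
	plain := NewMapStateEntry(nil, nil, 0, "example-listener", 42, false, DefaultAuthType, AuthTypeDisabled)
	// plain.Listener == "" and plain.priority == 0, since there is no redirect.
	return redirect, plain
}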
// AddDependent adds 'key' to the set of dependent keys.
func (e *MapStateEntry) AddDependent(key Key) {
if e.dependents == nil {
e.dependents = make(Keys, 1)
}
e.dependents[key] = struct{}{}
}
// RemoveDependent removes 'key' from the set of dependent keys.
func (e *MapStateEntry) RemoveDependent(key Key) {
delete(e.dependents, key)
// Nil the map when empty. This is mainly to make unit testing easier.
if len(e.dependents) == 0 {
e.dependents = nil
}
}
// HasDependent returns true if the 'key' is contained
// within the set of dependent keys
func (e *MapStateEntry) HasDependent(key Key) bool {
if e.dependents == nil {
return false
}
_, ok := e.dependents[key]
return ok
}
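// The function below is an illustrative sketch, not part of the upstream code:
// it exercises the dependent-key helpers above. The key values are arbitrary.
func exampleDependents() bool {
	var e MapStateEntry
	k := Key{DestPort: 443, Nexthdr: 6}
	e.AddDependent(k)
	had := e.HasDependent(k) // true
	e.RemoveDependent(k)     // the dependents map is nilled once empty
	return had && !e.HasDependent(k)
}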
// HasSameOwners returns true if both MapStateEntries
// have the same owners as one another (which means that
// one of the entries is redundant).
func (e *MapStateEntry) HasSameOwners(bEntry *MapStateEntry) bool {
if e == nil && bEntry == nil {
return true
}
if len(e.owners) != len(bEntry.owners) {
return false
}
for owner := range e.owners {
if _, ok := bEntry.owners[owner]; !ok {
return false
}
}
return true
}
var worldNets = map[identity.NumericIdentity][]netip.Prefix{
identity.ReservedIdentityWorld: {
netip.PrefixFrom(netip.IPv4Unspecified(), 0),
netip.PrefixFrom(netip.IPv6Unspecified(), 0),
},
identity.ReservedIdentityWorldIPv4: {
netip.PrefixFrom(netip.IPv4Unspecified(), 0),
},
identity.ReservedIdentityWorldIPv6: {
netip.PrefixFrom(netip.IPv6Unspecified(), 0),
},
}
// getNets returns the most specific CIDR for an identity. For the "World" identity
// it returns both IPv4 and IPv6.
func getNets(identities Identities, ident uint32) []netip.Prefix {
// World identities are handled explicitly for three reasons:
// 1. 'identities' may be nil, but world identities are still expected to be considered
// 2. SelectorCache is not informed of reserved/world identities in all test cases
// 3. identities.GetPrefix() does not return world identities
id := identity.NumericIdentity(ident)
if id <= identity.ReservedIdentityWorldIPv6 {
return worldNets[id]
}
// CIDR identities have a local scope, so we can skip the rest if id is not of local scope.
if !id.HasLocalScope() || identities == nil {
return nil
}
prefix := identities.GetPrefix(id)
if prefix.IsValid() {
return []netip.Prefix{prefix}
}
return nil
}
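// The function below is an illustrative sketch, not part of the upstream code:
// for the reserved world identity, getNets returns both zero prefixes
// (0.0.0.0/0 and ::/0) even when no Identities lookup is available.
func exampleGetNetsWorld() []netip.Prefix {
	return getNets(nil, identity.ReservedIdentityWorld.Uint32())
}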
// NewMapState creates a new MapState interface
func NewMapState() MapState {
return newMapState()
}
func (ms *mapState) WithState(initMap map[Key]MapStateEntry, identities Identities) MapState {
return ms.withState(initMap, identities)
}
func (ms *mapState) withState(initMap map[Key]MapStateEntry, identities Identities) *mapState {
for k, v := range initMap {
ms.insert(k, v, identities)
}
return ms
}
func newMapStateMap() mapStateMap {
return mapStateMap{
entries: make(map[Key]MapStateEntry),
trie: bitlpm.NewTrie[Key, IDSet](policyTypes.MapStatePrefixLen),
}
}
func newMapState() *mapState {
return &mapState{
allows: newMapStateMap(),
denies: newMapStateMap(),
}
}
// Get the MapStateEntry that matches the Key.
func (ms *mapState) Get(k Key) (MapStateEntry, bool) {
if k.DestPort == 0 && k.InvertedPortMask != 0xffff {
stacktrace := hclog.Stacktrace()
log.Errorf("mapState.Get: invalid wildcard port with non-zero mask: %v. Stacktrace: %s", k, stacktrace)
}
v, ok := ms.denies.Lookup(k)
if ok {
return v, ok
}
return ms.allows.Lookup(k)
}
// insert adds the Key and matching MapStateEntry to the
// MapState.
func (ms *mapState) insert(k Key, v MapStateEntry, identities Identities) {
if k.DestPort == 0 && k.InvertedPortMask != 0xffff {
stacktrace := hclog.Stacktrace()
log.Errorf("mapState.insert: invalid wildcard port with non-zero mask: %v. Stacktrace: %s", k, stacktrace)
}
if v.IsDeny {
ms.allows.delete(k, identities)
ms.denies.upsert(k, v, identities)
} else {
ms.denies.delete(k, identities)
ms.allows.upsert(k, v, identities)
}
}
// delete removes the Key and related MapStateEntry.
func (ms *mapState) delete(k Key, identities Identities) {
ms.allows.delete(k, identities)
ms.denies.delete(k, identities)
}
// ForEach iterates over every Key and MapStateEntry and stops when the function
// argument returns false. It returns false iff the iteration was cut short.
func (ms *mapState) ForEach(f func(Key, MapStateEntry) (cont bool)) (complete bool) {
return ms.allows.ForEach(f) && ms.denies.ForEach(f)
}
// Len returns the length of the map
func (ms *mapState) Len() int {
return ms.allows.Len() + ms.denies.Len()
}
// Equals determines if this MapState is equal to the
// argument MapState
// Only used for testing, but also from the endpoint package!
func (msA *mapState) Equals(msB MapState) bool {
if msA.Len() != msB.Len() {
return false
}
return msA.ForEach(func(kA Key, vA MapStateEntry) bool {
vB, ok := msB.Get(kA)
return ok && (&vB).DatapathEqual(&vA)
})
}
// Diff returns the string of differences between 'obtained' and 'expected' prefixed with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging.
func (obtained *mapState) Diff(expected MapState) (res string) {
res += "Missing (-), Unexpected (+):\n"
expected.ForEach(func(kE Key, vE MapStateEntry) bool {
if vO, ok := obtained.Get(kE); ok {
if !(&vO).DatapathEqual(&vE) {
res += "- " + kE.String() + ": " + vE.String() + "\n"
res += "+ " + kE.String() + ": " + vO.String() + "\n"
}
} else {
res += "- " + kE.String() + ": " + vE.String() + "\n"
}
return true
})
obtained.ForEach(func(kE Key, vE MapStateEntry) bool {
if _, ok := expected.Get(kE); !ok {
res += "+ " + kE.String() + ": " + vE.String() + "\n"
}
return true
})
return res
}
// AddDependent adds 'dependent' to the set of dependent keys of the entry at 'owner'.
func (ms *mapState) AddDependent(owner Key, dependent Key, identities Identities, changes ChangeState) {
if e, exists := ms.allows.Lookup(owner); exists {
ms.addDependentOnEntry(owner, e, dependent, identities, changes)
} else if e, exists := ms.denies.Lookup(owner); exists {
ms.addDependentOnEntry(owner, e, dependent, identities, changes)
}
}
// addDependentOnEntry adds 'dependent' to the set of dependent keys of 'e'.
func (ms *mapState) addDependentOnEntry(owner Key, e MapStateEntry, dependent Key, identities Identities, changes ChangeState) {
if _, exists := e.dependents[dependent]; !exists {
if changes.Old != nil {
changes.Old[owner] = e
}
e.AddDependent(dependent)
ms.insert(owner, e, identities)
}
}
// RemoveDependent removes 'dependent' from the set of dependent keys of the entry at 'owner'.
// This is called when a dependent entry is being deleted.
// If 'old' is not nil, then old value is added there before any modifications.
func (ms *mapState) RemoveDependent(owner Key, dependent Key, identities Identities, changes ChangeState) {
if e, exists := ms.allows.Lookup(owner); exists {
changes.insertOldIfNotExists(owner, e)
e.RemoveDependent(dependent)
ms.denies.delete(owner, identities)
ms.allows.upsert(owner, e, identities)
return
}
if e, exists := ms.denies.Lookup(owner); exists {
changes.insertOldIfNotExists(owner, e)
e.RemoveDependent(dependent)
ms.allows.delete(owner, identities)
ms.denies.upsert(owner, e, identities)
}
}
// Merge adds owners, dependents, and DerivedFromRules from a new 'entry' to an existing
// entry 'e'. 'entry' is not modified.
// IsDeny, ProxyPort, and AuthType are merged by giving precedence to deny over non-deny, proxy
// redirection over no proxy redirection, and explicit auth type over default auth type.
func (e *MapStateEntry) Merge(entry *MapStateEntry) {
// Deny is sticky
if !e.IsDeny {
e.IsDeny = entry.IsDeny
}
// Deny entries have no proxy redirection nor auth requirement
if e.IsDeny {
e.ProxyPort = 0
e.Listener = ""
e.priority = 0
e.hasAuthType = DefaultAuthType
e.AuthType = AuthTypeDisabled
} else {
// Proxy port takes precedence, but may be updated due to priority
if entry.IsRedirectEntry() {
// Lower number has higher priority, but non-redirects have 0 priority
// value.
// Proxy port value is the tie-breaker when priorities have the same value.
if !e.IsRedirectEntry() || entry.priority < e.priority || entry.priority == e.priority && entry.ProxyPort < e.ProxyPort {
e.ProxyPort = entry.ProxyPort
e.Listener = entry.Listener
e.priority = entry.priority
}
}
// Explicit auth takes precedence over defaulted one.
if entry.hasAuthType == ExplicitAuthType {
if e.hasAuthType == ExplicitAuthType {
// Numerically higher AuthType takes precedence when both are explicitly defined
if entry.AuthType > e.AuthType {
e.AuthType = entry.AuthType
}
} else {
e.hasAuthType = ExplicitAuthType
e.AuthType = entry.AuthType
}
} else if e.hasAuthType == DefaultAuthType {
e.AuthType = entry.AuthType // new default takes precedence
}
}
if e.owners == nil && len(entry.owners) > 0 {
e.owners = make(map[MapStateOwner]struct{}, len(entry.owners))
}
for k, v := range entry.owners {
e.owners[k] = v
}
// merge dependents
for k := range entry.dependents {
e.AddDependent(k)
}
// merge DerivedFromRules
if len(entry.DerivedFromRules) > 0 {
e.DerivedFromRules.MergeSorted(entry.DerivedFromRules)
}
}
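// The function below is an illustrative sketch, not part of the upstream code:
// it demonstrates the precedence rules documented above. A proxy redirect wins
// over a plain allow, and a later deny is sticky, clearing the redirect and
// auth information. The proxy port value is an arbitrary example.
func exampleMergePrecedence() MapStateEntry {
	merged := NewMapStateEntry(nil, nil, 0, "", 0, false, DefaultAuthType, AuthTypeDisabled)
	redirect := NewMapStateEntry(nil, nil, 14000, "", 0, false, DefaultAuthType, AuthTypeDisabled)
	merged.Merge(&redirect) // merged now redirects to proxy port 14000
	deny := NewMapStateEntry(nil, nil, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled)
	merged.Merge(&deny) // deny is sticky: ProxyPort, Listener, and AuthType are reset
	return merged
}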
// IsRedirectEntry returns true if the entry redirects to a proxy port
func (e *MapStateEntry) IsRedirectEntry() bool {
return e.ProxyPort != 0
}
// DatapathEqual returns true if two entries are equal in the datapath's PoV,
// i.e., IsDeny, ProxyPort and AuthType are the same for both entries.
func (e *MapStateEntry) DatapathEqual(o *MapStateEntry) bool {
if e == nil || o == nil {
return e == o
}
return e.IsDeny == o.IsDeny && e.ProxyPort == o.ProxyPort && e.AuthType == o.AuthType
}
// DeepEqual is a manually generated deepequal function, deeply comparing the
// receiver with 'o', which must be non-nil.
// Defined manually due to deepequal-gen not supporting interface types.
// 'cachedNets' member is ignored in comparison, as it is a cached value and
// makes no functional difference.
func (e *MapStateEntry) DeepEqual(o *MapStateEntry) bool {
if !e.DatapathEqual(o) {
return false
}
if e.Listener != o.Listener || e.priority != o.priority {
return false
}
if !e.DerivedFromRules.DeepEqual(&o.DerivedFromRules) {
return false
}
if len(e.owners) != len(o.owners) {
return false
}
for k := range o.owners {
if _, exists := e.owners[k]; !exists {
return false
}
}
if len(e.dependents) != len(o.dependents) {
return false
}
for k := range o.dependents {
if _, exists := e.dependents[k]; !exists {
return false
}
}
// ignoring cachedNets
return true
}
// String returns a string representation of the MapStateEntry
func (e MapStateEntry) String() string {
return "ProxyPort=" + strconv.FormatUint(uint64(e.ProxyPort), 10) +
",Listener=" + e.Listener +
",IsDeny=" + strconv.FormatBool(e.IsDeny) +
",AuthType=" + e.AuthType.String() +
",DerivedFromRules=" + fmt.Sprintf("%v", e.DerivedFromRules)
}
// denyPreferredInsert inserts a key and entry into the map by giving preference
// to deny entries, and L3-only deny entries over L3-L4 allows.
// This form may be used when a full policy is computed and we are not yet interested
// in accumulating incremental changes.
// Caller may insert the same MapStateEntry multiple times for different Keys, but all from the same
// owner.
func (ms *mapState) denyPreferredInsert(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures) {
// Enforce nil values from NewMapStateEntry
newEntry.dependents = nil
ms.denyPreferredInsertWithChanges(newKey, newEntry, identities, features, ChangeState{})
}
// addKeyWithChanges adds a 'key' with value 'entry' to 'keys' keeping track of incremental changes in 'adds' and 'deletes', and any changed or removed old values in 'old', if not nil.
func (ms *mapState) addKeyWithChanges(key Key, entry MapStateEntry, identities Identities, changes ChangeState) {
// Keep all owners that need this entry so that it is deleted only if all the owners delete their contribution
var datapathEqual bool
oldEntry, exists := ms.Get(key)
if exists {
// Deny entry can only be overridden by another deny entry
if oldEntry.IsDeny && !entry.IsDeny {
return
}
// Do nothing if entries are equal
if entry.DeepEqual(&oldEntry) {
return // nothing to do
}
// Save old value before any changes, if desired
if changes.Old != nil {
changes.insertOldIfNotExists(key, oldEntry)
}
// Compare for datapath equalness before merging, as the old entry is updated in
// place!
datapathEqual = oldEntry.DatapathEqual(&entry)
oldEntry.Merge(&entry)
ms.insert(key, oldEntry, identities)
} else {
// Newly inserted entries must have their own containers, so that they
// remain separate when new owners/dependents are added to existing entries
entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules)
entry.owners = maps.Clone(entry.owners)
entry.dependents = maps.Clone(entry.dependents)
ms.insert(key, entry, identities)
}
// Record an incremental Add if desired and entry is new or changed
if changes.Adds != nil && (!exists || !datapathEqual) {
changes.Adds[key] = struct{}{}
// Key add overrides any previous delete of the same key
if changes.Deletes != nil {
delete(changes.Deletes, key)
}
}
}
// deleteKeyWithChanges deletes a 'key' from 'keys' keeping track of incremental changes in 'adds' and 'deletes'.
// The key is unconditionally deleted if 'owner' is nil, otherwise only the contribution of this 'owner' is removed.
func (ms *mapState) deleteKeyWithChanges(key Key, owner MapStateOwner, identities Identities, changes ChangeState) {
if entry, exists := ms.Get(key); exists {
// Save old value before any changes, if desired
oldAdded := changes.insertOldIfNotExists(key, entry)
if owner != nil {
// remove the contribution of the given selector only
if _, exists = entry.owners[owner]; exists {
// Remove the contribution of this selector from the entry
delete(entry.owners, owner)
if ownerKey, ok := owner.(Key); ok {
ms.RemoveDependent(ownerKey, key, identities, changes)
}
// key is not deleted if other owners still need it
if len(entry.owners) > 0 {
return
}
} else {
// 'owner' was not found, do not change anything
if oldAdded {
delete(changes.Old, key)
}
return
}
}
// Remove this key from all owners' dependents maps if no owner was given.
// Owner is nil when deleting more specific entries (e.g., L3/L4) when
// adding deny entries that cover them (e.g., L3-deny).
if owner == nil {
for owner := range entry.owners {
if owner != nil {
if ownerKey, ok := owner.(Key); ok {
ms.RemoveDependent(ownerKey, key, identities, changes)
}
}
}
}
// Check if dependent entries need to be deleted as well
for k := range entry.dependents {
ms.deleteKeyWithChanges(k, key, identities, changes)
}
if changes.Deletes != nil {
changes.Deletes[key] = struct{}{}
// Remove a potential previously added key
if changes.Adds != nil {
delete(changes.Adds, key)
}
}
ms.allows.delete(key, identities)
ms.denies.delete(key, identities)
}
}
// protocolsMatch checks to see if two given keys match on protocol.
// This means that either one of them covers all protocols or they
// are equal.
func protocolsMatch(a, b Key) bool {
return a.Nexthdr == 0 || b.Nexthdr == 0 || a.Nexthdr == b.Nexthdr
}
// revertChanges undoes changes to 'keys' as indicated by 'changes.Adds' and 'changes.Old' collected via
// denyPreferredInsertWithChanges().
func (ms *mapState) revertChanges(identities Identities, changes ChangeState) {
for k := range changes.Adds {
ms.allows.delete(k, identities)
ms.denies.delete(k, identities)
}
// 'old' contains all the original values of both modified and deleted entries
for k, v := range changes.Old {
ms.insert(k, v, identities)
}
}
// denyPreferredInsertWithChanges contains the most important business logic for policy insertions. It inserts
// a key and entry into the map by giving preference to deny entries, and L3-only deny entries over L3-L4 allows.
// Incremental changes performed are recorded in 'adds' and 'deletes', if not nil.
// See https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw#gid=2109052536 for details
func (ms *mapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures, changes ChangeState) {
// Skip deny rules processing if the policy in this direction has no deny rules
if !features.contains(denyRules) {
ms.authPreferredInsert(newKey, newEntry, identities, features, changes)
return
}
// If we have a deny "all" we don't accept any kind of map entry.
if _, ok := ms.denies.Lookup(allKey[newKey.TrafficDirection]); ok {
return
}
// We cannot update the map while we are
// iterating through it, so we record the
// changes to be made and then apply them.
// Additionally, we need to perform deletes
// first so that deny entries do not get
// merged with allows that are set to be
// deleted.
var (
updates, deletes []MapChange
)
prefixes := getNets(identities, newKey.Identity)
if newEntry.IsDeny {
ms.allows.ForEachNarrowerKeyWithBroaderID(newKey, prefixes, func(k Key, v MapStateEntry) bool {
if ms.validator != nil {
ms.validator.isBroader(newKey, k)
ms.validator.isSupersetOf(k, newKey, identities)
}
// If this iterated-allow-entry is a superset of the new-entry
// and it has a more specific port-protocol than the new-entry
// then an additional copy of the new-entry with the more
// specific port-protocol of the iterated-allow-entry must be inserted.
newKeyCpy := k
newKeyCpy.Identity = newKey.Identity
l3l4DenyEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled)
updates = append(updates, MapChange{
Add: true,
Key: newKeyCpy,
Value: l3l4DenyEntry,
})
return true
})
ms.allows.ForEachNarrowerOrEqualKey(newKey, prefixes, func(k Key, v MapStateEntry) bool {
if ms.validator != nil {
ms.validator.isBroaderOrEqual(newKey, k)
ms.validator.isSupersetOrSame(newKey, k, identities)
}
// If the new-entry is a superset (or equal) of the iterated-allow-entry and
// the new-entry has a broader (or equal) port-protocol then we
// should delete the iterated-allow-entry
deletes = append(deletes, MapChange{
Key: k,
})
return true
})
for _, delete := range deletes {
if !delete.Add {
ms.deleteKeyWithChanges(delete.Key, nil, identities, changes)
}
}
for _, update := range updates {
if update.Add {
ms.addKeyWithChanges(update.Key, update.Value, identities, changes)
// L3-only entries can be deleted incrementally so we need to track their
// effects on other entries so that those effects can be reverted when the
// identity is removed.
newEntry.AddDependent(update.Key)
}
}
updates = nil
bailed := false
ms.denies.ForEachBroaderOrEqualKey(newKey, prefixes, func(k Key, v MapStateEntry) bool {
if ms.validator != nil {
ms.validator.isBroaderOrEqual(k, newKey)
ms.validator.isSupersetOrSame(k, newKey, identities)
}
if !v.HasDependent(newKey) && v.HasSameOwners(&newEntry) {
// If this iterated-deny-entry is a superset (or equal) of the new-entry and
// the iterated-deny-entry has a broader (or equal) port-protocol and
// the ownership between the entries is the same then we
// should not insert the new entry (as long as it is not one
// of the special L4-only denies we created to cover the special
// case of a superset-allow with a more specific port-protocol).
//
// NOTE: This condition could be broader to reject more deny entries,
// but there *may* be performance tradeoffs.
bailed = true
return false
}
return true
})
ms.denies.ForEachNarrowerOrEqualKey(newKey, prefixes, func(k Key, v MapStateEntry) bool {
if ms.validator != nil {
ms.validator.isBroaderOrEqual(newKey, k)
ms.validator.isSupersetOrSame(newKey, k, identities)
}
if !newEntry.HasDependent(k) && newEntry.HasSameOwners(&v) {
// If this iterated-deny-entry is a subset (or equal) of the new-entry and
// the new-entry has a broader (or equal) port-protocol and
// the ownership between the entries is the same then we
// should delete the iterated-deny-entry (as long as it is not one
// of the special L4-only denies we created to cover the special
// case of a superset-allow with a more specific port-protocol).
//
// NOTE: This condition could be broader to reject more deny entries,
// but there *may* be performance tradeoffs.
updates = append(updates, MapChange{
Key: k,
})
}
return true
})
for _, update := range updates {
if !update.Add {
ms.deleteKeyWithChanges(update.Key, nil, identities, changes)
}
}
if !bailed {
ms.addKeyWithChanges(newKey, newEntry, identities, changes)
}
} else {
// NOTE: We do not delete redundant allow entries.
updates = nil
var dependents []MapChange
bailed := false
ms.denies.ForEachBroaderKeyWithNarrowerID(newKey, prefixes, func(k Key, v MapStateEntry) bool {
if ms.validator != nil {
ms.validator.isBroader(k, newKey)
ms.validator.isSupersetOf(newKey, k, identities)
}
// If the new-entry is *only* superset of the iterated-deny-entry
// and the new-entry has a more specific port-protocol than the
// iterated-deny-entry then an additional copy of the iterated-deny-entry
// with the more specific port-protocol of the new-entry must
// be added.
denyKeyCpy := newKey
denyKeyCpy.Identity = k.Identity
l3l4DenyEntry := NewMapStateEntry(k, v.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled)
updates = append(updates, MapChange{
Add: true,
Key: denyKeyCpy,
Value: l3l4DenyEntry,
})
// L3-only entries can be deleted incrementally so we need to track their
// effects on other entries so that those effects can be reverted when the
// identity is removed.
dependents = append(dependents, MapChange{
Key: k,
Value: v,
})
return true
})
ms.denies.ForEachBroaderOrEqualKey(newKey, prefixes, func(k Key, v MapStateEntry) bool {
if ms.validator != nil {
ms.validator.isBroaderOrEqual(k, newKey)
ms.validator.isSupersetOrSame(k, newKey, identities)
}
if !v.HasDependent(newKey) {
// If the iterated-deny-entry is a superset (or equal) of the new-entry and has a
// broader (or equal) port-protocol than the new-entry then the new
// entry should not be inserted.
bailed = true
return false
}
return true
})
for i, update := range updates {
if update.Add {
ms.addKeyWithChanges(update.Key, update.Value, identities, changes)
dep := dependents[i]
ms.addDependentOnEntry(dep.Key, dep.Value, update.Key, identities, changes)
}
}
if !bailed {
ms.authPreferredInsert(newKey, newEntry, identities, features, changes)
}
}
}
// IsSuperSetOf checks if the receiver Key is a superset of the argument Key, and returns a
// specificity score of the receiver key (higher score is more specific), if so. Being a superset
// means that the receiver key would match all the traffic of the argument key without being the
// same key. Hence, a L3-only key is not a superset of a L4-only key, as the L3-only key would match
// the traffic for the given L3 only, while the L4-only key matches traffic on the given port for
// all the L3's.
// Returns 0 if the receiver key is not a superset of the argument key.
//
// Specificity score for all possible superset wildcard patterns. Datapath requires proto to be specified if port is specified.
// x. L3/proto/port
// 1. */*/*
// 2. */proto/*
// 3. */proto/port
// 4. ID/*/*
// 5. ID/proto/*
// (ID/proto/port cannot be a superset of anything)
func IsSuperSetOf(k, other Key) int {
if k.TrafficDirection != other.TrafficDirection {
return 0 // TrafficDirection must match for 'k' to be a superset of 'other'
}
if k.Identity == 0 {
if other.Identity == 0 {
if k.Nexthdr == 0 { // k.DestPort == 0 is implied
if other.Nexthdr != 0 {
return 1 // */*/* is a superset of */proto/x
} // else both are */*/*
} else if k.Nexthdr == other.Nexthdr {
if k.PortIsBroader(other) {
return 2 // */proto/* is a superset of */proto/port
} // else more specific or different ports
} // else more specific or different protocol
} else {
// Wildcard L3 is a superset of a specific L3 only if wildcard L3 is also wildcard L4, or the L4's match between the keys
if k.Nexthdr == 0 { // k.DestPort == 0 is implied
return 1 // */*/* is a superset of ID/x/x
} else if k.Nexthdr == other.Nexthdr {
if k.PortIsBroader(other) {
return 2 // */proto/* is a superset of ID/proto/x
} else if k.PortIsEqual(other) {
return 3 // */proto/port is a superset of ID/proto/port
} // else more specific or different ports
} // else more specific or different protocol
}
} else if k.Identity == other.Identity {
if k.Nexthdr == 0 {
if other.Nexthdr != 0 {
return 4 // ID/*/* is a superset of ID/proto/x
} // else both are ID/*/*
} else if k.Nexthdr == other.Nexthdr {
if k.PortIsBroader(other) {
return 5 // ID/proto/* is a superset of ID/proto/port
} // else more specific or different ports
} // else more specific or different protocol
} // else more specific or different identity
return 0
}
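// The function below is an illustrative sketch, not part of the upstream code:
// it evaluates the specificity scores listed above for a few hand-built keys.
// The identity and port values are arbitrary examples.
func exampleSuperSetScores() []int {
	l3Only := Key{Identity: 1001, InvertedPortMask: 0xffff} // ID/*/*
	l4Only := Key{Nexthdr: 6, DestPort: 80}                 // */proto/port
	l3l4 := Key{Identity: 1001, Nexthdr: 6, DestPort: 80}   // ID/proto/port
	return []int{
		IsSuperSetOf(l3Only, l3l4),   // 4: ID/*/* covers ID/proto/port
		IsSuperSetOf(l4Only, l3l4),   // 3: */proto/port covers ID/proto/port
		IsSuperSetOf(l3Only, l4Only), // 0: an L3-only key is not a superset of an L4-only key
	}
}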
// authPreferredInsert applies AuthType of a more generic entry to more specific entries, if not
// explicitly specified.
//
// This function is expected to be called for a map insertion after deny
// entry evaluation. If there is a map entry that is a superset of 'newKey'
// which denies traffic matching 'newKey', then this function should not be called.
func (ms *mapState) authPreferredInsert(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures, changes ChangeState) {
if features.contains(authRules) {
if newEntry.hasAuthType == DefaultAuthType {
// New entry has a default auth type.
// Fill in the AuthType from more generic entries with an explicit auth type
maxSpecificity := 0
l3l4State := newMapStateMap()
ms.allows.ForEachKeyWithBroaderOrEqualPortProto(newKey, func(k Key, v MapStateEntry) bool {
// Nothing to be done if entry has default AuthType
if v.hasAuthType == DefaultAuthType {
return true
}
// Find out if 'k' is an identity-port-proto superset of 'newKey'
if specificity := IsSuperSetOf(k, newKey); specificity > 0 {
if specificity > maxSpecificity {
// AuthType from the most specific superset is
// applied to 'newEntry'
newEntry.AuthType = v.AuthType
maxSpecificity = specificity
}
} else {
// Check if a new L3L4 entry must be created due to L3-only
// 'k' specifying an explicit AuthType and an L4-only 'newKey' not
// having an explicit AuthType. In this case AuthType should
// only override the AuthType for the L3 & L4 combination,
// not L4 in general.
//
// These need to be collected and only added if there is a
// superset key of newKey with an explicit auth type. In
// this case AuthType of the new L4-only entry was
// overridden by a more generic entry and 'maxSpecificity > 0' after the loop.
if newKey.Identity == 0 && newKey.Nexthdr != 0 && newKey.DestPort != 0 &&
k.Identity != 0 && (k.Nexthdr == 0 || k.Nexthdr == newKey.Nexthdr && k.DestPort == 0) {
newKeyCpy := newKey
newKeyCpy.Identity = k.Identity
l3l4AuthEntry := NewMapStateEntry(k, v.DerivedFromRules, newEntry.ProxyPort, newEntry.Listener, newEntry.priority, false, DefaultAuthType, v.AuthType)
l3l4AuthEntry.DerivedFromRules.MergeSorted(newEntry.DerivedFromRules)
l3l4State.upsert(newKeyCpy, l3l4AuthEntry, identities)
}
}
return true
})
// Add collected L3/L4 entries if the auth type of the new entry was not
// overridden by a more generic entry. If it was overridden, the new L3L4
// entries are not needed as the L4-only entry with an overridden AuthType
// will be matched before the L3-only entries in the datapath.
if maxSpecificity == 0 {
l3l4State.ForEach(func(k Key, v MapStateEntry) bool {
ms.addKeyWithChanges(k, v, identities, changes)
// L3-only entries can be deleted incrementally so we need to track their
// effects on other entries so that those effects can be reverted when the
// identity is removed.
newEntry.AddDependent(k)
return true
})
}
} else {
// New entry has an explicit auth type.
// Check if the new entry is the most specific superset of any other entry
// with the default auth type, and propagate the auth type from the new
// entry to such entries.
explicitSubsetKeys := make(Keys)
defaultSubsetKeys := make(map[Key]int)
ms.allows.ForEachKeyWithNarrowerOrEqualPortProto(newKey, func(k Key, v MapStateEntry) bool {
// Find out if 'newKey' is a superset of 'k'
if specificity := IsSuperSetOf(newKey, k); specificity > 0 {
if v.hasAuthType == ExplicitAuthType {
// store for later comparison
explicitSubsetKeys[k] = struct{}{}
} else {
defaultSubsetKeys[k] = specificity
}
} else if v.hasAuthType == DefaultAuthType {
// Check if a new L3L4 entry must be created due to L3-only
// 'newKey' with an explicit AuthType and an L4-only 'k' not
// having an explicit AuthType. In this case AuthType should
// only override the AuthType for the L3 & L4 combination,
// not L4 in general.
if newKey.Identity != 0 && (newKey.Nexthdr == 0 || newKey.Nexthdr == k.Nexthdr && newKey.DestPort == 0) &&
k.Identity == 0 && k.Nexthdr != 0 && k.DestPort != 0 {
newKeyCpy := k
newKeyCpy.Identity = newKey.Identity
l3l4AuthEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, v.ProxyPort, v.Listener, v.priority, false, DefaultAuthType, newEntry.AuthType)
l3l4AuthEntry.DerivedFromRules.MergeSorted(v.DerivedFromRules)
ms.addKeyWithChanges(newKeyCpy, l3l4AuthEntry, identities, changes)
// L3-only entries can be deleted incrementally so we need to track their
// effects on other entries so that those effects can be reverted when the
// identity is removed.
newEntry.AddDependent(newKeyCpy)
}
}
return true
})
// Find out if this newKey is the most specific superset for all the subset keys with default auth type
Next:
for k, specificity := range defaultSubsetKeys {
for l := range explicitSubsetKeys {
if s := IsSuperSetOf(l, k); s > specificity {
// k has a more specific superset key than the newKey, skip
continue Next
}
}
// newKey is the most specific superset with an explicit auth type,
// propagate auth type from newEntry to the entry of k
v, _ := ms.Get(k)
v.AuthType = newEntry.AuthType
ms.addKeyWithChanges(k, v, identities, changes) // Update the map value
}
}
}
ms.addKeyWithChanges(newKey, newEntry, identities, changes)
}
var visibilityDerivedFromLabels = labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelVisibilityAnnotation, labels.LabelSourceReserved),
}
var visibilityDerivedFrom = labels.LabelArrayList{visibilityDerivedFromLabels}
// insertOldIfNotExists inserts 'key=entry' into 'changes.Old' only if 'key' does not already
// exist there and was not added during this round of changes.
// Returns 'true' if 'key=entry' was added to 'changes.Old'.
func (changes *ChangeState) insertOldIfNotExists(key Key, entry MapStateEntry) bool {
if changes == nil || changes.Old == nil {
return false
}
if _, exists := changes.Old[key]; !exists {
// Only insert the old entry if the entry was not first added on this round of
// changes.
if _, added := changes.Adds[key]; !added {
// new containers to keep this entry separate from the one that may remain in 'keys'
entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules)
entry.owners = maps.Clone(entry.owners)
entry.dependents = maps.Clone(entry.dependents)
changes.Old[key] = entry
return true
}
}
return false
}
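// The function below is an illustrative sketch, not part of the upstream code.
// It assumes ChangeState.Old is a plain map[Key]MapStateEntry, consistent with
// how it is indexed and deleted from elsewhere in this file; the key and entry
// values are arbitrary examples.
func exampleInsertOldOnce() bool {
	changes := ChangeState{Old: make(map[Key]MapStateEntry)}
	k := Key{DestPort: 8080, Nexthdr: 6}
	first := changes.insertOldIfNotExists(k, MapStateEntry{ProxyPort: 1})  // true: old value recorded
	second := changes.insertOldIfNotExists(k, MapStateEntry{ProxyPort: 2}) // false: already recorded
	return first && !second
}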
// ForEachKeyWithPortProto calls 'f' for each Key and MapStateEntry, where the Key has the same traffic direction and L4 fields (protocol, destination port and mask).
func (msm *mapStateMap) ForEachKeyWithPortProto(key Key, f func(Key, MapStateEntry) bool) {
// The 'Identity' field in 'key' is ignored by ExactLookup.
idSet, ok := msm.trie.ExactLookup(key.PrefixLength(), key)
if ok {
for id := range idSet.ids {
k := key
k.Identity = uint32(id)
if !msm.forKey(k, f) {
return
}
}
}
}
// addVisibilityKeys adjusts and expands PolicyMapState keys
// and values to redirect for visibility on the port of the visibility
// annotation while still denying traffic on this port for identities
// for which the traffic is denied.
//
// Datapath lookup order is, from highest to lowest precedence:
// 1. L3/L4
// 2. L4-only (wildcard L3)
// 3. L3-only (wildcard L4)
// 4. Allow-all
//
// This means that the L4-only allow visibility key can only be added if there is an
// allow-all key, and all L3-only deny keys are expanded to L3/L4 keys. If no
// L4-only key is added then also the L3-only allow keys need to be expanded to
// L3/L4 keys for visibility redirection. In addition the existing L3/L4 and L4-only
// allow keys need to be redirected to the proxy port, if not already redirected.
//
// The above can be accomplished by:
//
// 1. Change existing L4-only ALLOW key on matching port that does not already
// redirect to redirect.
// - e.g., 0:80=allow,0 -> 0:80=allow,<proxyport>
// 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only
// key does not already exist.
// - e.g., 0:0=allow,0 -> add 0:80=allow,<proxyport> if 0:80 does not exist
// - this allows all traffic on port 80, but see step 5 below.
// 3. Change all L3/L4 ALLOW keys on matching port that do not already redirect to
// redirect.
// - e.g, <ID1>:80=allow,0 -> <ID1>:80=allow,<proxyport>
// 4. For each L3-only ALLOW key add the corresponding L3/L4 ALLOW redirect if no
// L3/L4 key already exists and no L4-only key already exists and one is not added.
// - e.g., <ID2>:0=allow,0 -> add <ID2>:80=allow,<proxyport> if <ID2>:80
// and 0:80 do not exist
// 5. If a new L4-only key was added: For each L3-only DENY key add the
// corresponding L3/L4 DENY key if no L3/L4 key already exists.
// - e.g., <ID3>:0=deny,0 -> add <ID3>:80=deny,0 if <ID3>:80 does not exist
//
// With the above we only change/expand existing allow keys to redirect, and
// expand existing drop keys to also drop on the port of interest, if a new
// L4-only key allowing the port is added.
//
// 'changes' is updated with the changes made: 'changes.Adds' contains both the added and
// changed keys, while 'changes.Old' contains the old values for changed keys. This function
// does not delete any keys.
func (ms *mapState) addVisibilityKeys(e PolicyOwner, redirectPort uint16, visMeta *VisibilityMetadata, identities Identities, changes ChangeState) {
direction := trafficdirection.Egress
if visMeta.Ingress {
direction = trafficdirection.Ingress
}
var invertedPortMask uint16
if visMeta.Port == 0 {
invertedPortMask = 0xffff
}
key := Key{
DestPort: visMeta.Port,
InvertedPortMask: invertedPortMask,
Nexthdr: uint8(visMeta.Proto),
TrafficDirection: direction.Uint8(),
}
entry := NewMapStateEntry(nil, visibilityDerivedFrom, redirectPort, "", 0, false, DefaultAuthType, AuthTypeDisabled)
_, haveAllowAllKey := ms.Get(allKey[direction])
l4Only, haveL4OnlyKey := ms.Get(key)
addL4OnlyKey := false
if haveL4OnlyKey && !l4Only.IsDeny && l4Only.ProxyPort == 0 {
// 1. Change existing L4-only ALLOW key on matching port that does not already
// redirect to redirect.
e.PolicyDebug(logrus.Fields{
logfields.BPFMapKey: key,
logfields.BPFMapValue: entry,
}, "addVisibilityKeys: Changing L4-only ALLOW key for visibility redirect")
ms.addKeyWithChanges(key, entry, identities, changes)
}
if haveAllowAllKey && !haveL4OnlyKey {
// 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only
// key does not already exist.
e.PolicyDebug(logrus.Fields{
logfields.BPFMapKey: key,
logfields.BPFMapValue: entry,
}, "addVisibilityKeys: Adding L4-only ALLOW key for visibility redirect")
addL4OnlyKey = true
ms.addKeyWithChanges(key, entry, identities, changes)
}
// We need to make changes to the map
// outside of iteration.
var updates []MapChange
//
// Loop through all L3 keys in the traffic direction of the new key
//
// Find entries with the same L4
ms.allows.ForEachKeyWithPortProto(key, func(k Key, v MapStateEntry) bool {
if k.Identity != 0 {
if v.ProxyPort == 0 {
// 3. Change all L3/L4 ALLOW keys on matching port that do not
// already redirect to redirect.
v.ProxyPort = redirectPort
// redirect port is used as the default priority for tie-breaking
// purposes when two different selectors have conflicting
// redirects. Explicit listener references in the policy can specify
// a priority, but only the default is used for visibility policy,
// as visibility will be achieved by any of the redirects.
v.priority = redirectPort
v.Listener = ""
v.DerivedFromRules = visibilityDerivedFrom
e.PolicyDebug(logrus.Fields{
logfields.BPFMapKey: k,
logfields.BPFMapValue: v,
}, "addVisibilityKeys: Changing L3/L4 ALLOW key for visibility redirect")
updates = append(updates, MapChange{
Add: true,
Key: k,
Value: v,
})
}
}
return true
})
// Find Wildcarded L4 allows, i.e., L3-only entries
if !haveL4OnlyKey && !addL4OnlyKey {
ms.allows.ForEachKeyWithPortProto(allKey[key.TrafficDirection], func(k Key, v MapStateEntry) bool {
if k.Identity != 0 {
k2 := key
k2.Identity = k.Identity
// 4. For each L3-only ALLOW key add the corresponding L3/L4
// ALLOW redirect if no L3/L4 key already exists and no
// L4-only key already exists and one is not added.
if _, ok := ms.Get(k2); !ok {
d2 := labels.LabelArrayList{visibilityDerivedFromLabels}
d2.MergeSorted(v.DerivedFromRules)
v2 := NewMapStateEntry(k, d2, redirectPort, "", 0, false, v.hasAuthType, v.AuthType)
e.PolicyDebug(logrus.Fields{
logfields.BPFMapKey: k2,
logfields.BPFMapValue: v2,
}, "addVisibilityKeys: Extending L3-only ALLOW key to L3/L4 key for visibility redirect")
updates = append(updates, MapChange{
Add: true,
Key: k2,
Value: v2,
})
// Mark the new entry as a dependent of 'v'
ms.addDependentOnEntry(k, v, k2, identities, changes)
}
}
return true
})
}
// Find Wildcarded L4 denies, i.e., L3-only entries
if addL4OnlyKey {
ms.denies.ForEachKeyWithPortProto(allKey[key.TrafficDirection], func(k Key, v MapStateEntry) bool {
if k.Identity != 0 {
k2 := k
k2.DestPort = key.DestPort
k2.InvertedPortMask = key.InvertedPortMask
k2.Nexthdr = key.Nexthdr
// 5. If a new L4-only key was added: For each L3-only DENY
// key add the corresponding L3/L4 DENY key if no L3/L4
// key already exists.
if _, ok := ms.Get(k2); !ok {
v2 := NewMapStateEntry(k, v.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled)
e.PolicyDebug(logrus.Fields{
logfields.BPFMapKey: k2,
logfields.BPFMapValue: v2,
}, "addVisibilityKeys: Extending L3-only DENY key to L3/L4 key to deny a port with visibility annotation")
updates = append(updates, MapChange{
Add: true,
Key: k2,
Value: v2,
})
// Mark the new entry as a dependent of 'v'
ms.addDependentOnEntry(k, v, k2, identities, changes)
}
}
return true
})
}
for _, update := range updates {
ms.addKeyWithChanges(update.Key, update.Value, identities, changes)
}
}
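// Illustrative sketch (not part of the original code) of the addVisibilityKeys steps
// documented above: given visibility on ingress port 80 with an assumed proxy port of 15001
// and an initial state of
//
//	0:0=allow,0        (allow-all)
//	<ID3>:0=deny,0     (L3-only deny)
//
// the steps would add
//
//	0:80=allow,15001   (step 2: L4-only visibility redirect)
//	<ID3>:80=deny,0    (step 5: L3/L4 deny expanded from the L3-only deny)
//
// so traffic from <ID3> remains denied on port 80 while other traffic on it is redirected.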
// determineAllowLocalhostIngress determines whether communication should be allowed
// from the localhost. It inserts the Key corresponding to the localhost into
// the map state if the localhost is allowed to communicate with the
// endpoint. Authentication for localhost traffic is not required.
func (ms *mapState) determineAllowLocalhostIngress() {
if option.Config.AlwaysAllowLocalhost() {
derivedFrom := labels.LabelArrayList{
labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowLocalHostIngress, labels.LabelSourceReserved),
},
}
es := NewMapStateEntry(nil, derivedFrom, 0, "", 0, false, ExplicitAuthType, AuthTypeDisabled) // Authentication never required for local host ingress
ms.denyPreferredInsert(localHostKey, es, nil, allFeatures)
}
}
// allowAllIdentities translates all identities in selectorCache to their
// corresponding Keys in the specified direction (ingress, egress) which allows
// all at L3.
// Note that this is used when policy is not enforced, so authentication is explicitly not required.
func (ms *mapState) allowAllIdentities(ingress, egress bool) {
if ingress {
derivedFrom := labels.LabelArrayList{
labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved),
},
}
ms.allows.upsert(allKey[trafficdirection.Ingress], NewMapStateEntry(nil, derivedFrom, 0, "", 0, false, ExplicitAuthType, AuthTypeDisabled), nil)
}
if egress {
derivedFrom := labels.LabelArrayList{
labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
},
}
ms.allows.upsert(allKey[trafficdirection.Egress], NewMapStateEntry(nil, derivedFrom, 0, "", 0, false, ExplicitAuthType, AuthTypeDisabled), nil)
}
}
func (ms *mapState) deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool {
port := uint16(l4.Port)
proto := uint8(l4.U8Proto)
// resolve named port
if port == 0 && l4.PortName != "" {
port = policyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
if port == 0 {
return true
}
}
var key Key
if l4.Ingress {
key = allKey[trafficdirection.Ingress]
} else {
key = allKey[trafficdirection.Egress]
}
// Are we explicitly denying all traffic?
v, ok := ms.Get(key)
if ok && v.IsDeny {
return true
}
// Are we explicitly denying this L4-only traffic?
key.DestPort = port
key.Nexthdr = proto
v, ok = ms.Get(key)
if ok && v.IsDeny {
return true
}
// The given L4 is not categorically denied.
// Traffic to/from a specific L3 on any of the selectors can still be denied.
return false
}
func (ms *mapState) GetIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) {
return ms.getIdentities(log, false)
}
func (ms *mapState) GetDenyIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) {
return ms.getIdentities(log, true)
}
// getIdentities returns the ingress and egress identities stored in the
// MapState, restricted to either allowed or denied entries according to 'denied'.
// Used only for API requests.
func (ms *mapState) getIdentities(log *logrus.Logger, denied bool) (ingIdentities, egIdentities []int64) {
ms.ForEach(func(policyMapKey Key, policyMapValue MapStateEntry) bool {
if denied != policyMapValue.IsDeny {
return true
}
if policyMapKey.DestPort != 0 {
// If the port is non-zero, then the Key no longer only applies
// at L3. AllowedIngressIdentities and AllowedEgressIdentities
// contain sets of which identities (i.e., label-based L3 only)
// are allowed, so anything which contains L4-related policy should
// not be added to these sets.
return true
}
switch trafficdirection.TrafficDirection(policyMapKey.TrafficDirection) {
case trafficdirection.Ingress:
ingIdentities = append(ingIdentities, int64(policyMapKey.Identity))
case trafficdirection.Egress:
egIdentities = append(egIdentities, int64(policyMapKey.Identity))
default:
td := trafficdirection.TrafficDirection(policyMapKey.TrafficDirection)
log.WithField(logfields.TrafficDirection, td).
Errorf("Unexpected traffic direction present in policy map state for endpoint")
}
return true
})
return ingIdentities, egIdentities
}
// MapChanges collects updates to the endpoint policy on the
// granularity of individual mapstate key-value pairs for both adds
// and deletes. 'mutex' must be held for any access.
type MapChanges struct {
mutex lock.Mutex
changes []MapChange
}
type MapChange struct {
Add bool // false deletes
Key Key
Value MapStateEntry
}
// AccumulateMapChanges accumulates the given changes to the
// MapChanges.
//
// The caller is responsible for making sure the same identity is not
// present in both 'adds' and 'deletes'.
func (mc *MapChanges) AccumulateMapChanges(cs CachedSelector, adds, deletes []identity.NumericIdentity, keys []Key, value MapStateEntry) {
mc.mutex.Lock()
defer mc.mutex.Unlock()
for _, id := range adds {
for _, k := range keys {
k.Identity = id.Uint32()
mc.changes = append(mc.changes, MapChange{Add: true, Key: k, Value: value})
}
}
for _, id := range deletes {
for _, k := range keys {
k.Identity = id.Uint32()
mc.changes = append(mc.changes, MapChange{Add: false, Key: k, Value: value})
}
}
}
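// Illustrative sketch (not part of the original code): accumulating an allow entry for two
// newly added identities on a single key; 'cs', 'entry' and the identity values are assumed
// to be provided by the caller:
//
//	adds := []identity.NumericIdentity{1001, 1002}
//	keys := []Key{{DestPort: 443, Nexthdr: 6, TrafficDirection: trafficdirection.Ingress.Uint8()}}
//	mc.AccumulateMapChanges(cs, adds, nil, keys, entry)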
// consumeMapChanges transfers the incremental changes from MapChanges to the caller,
// while applying the changes to PolicyMapState.
func (mc *MapChanges) consumeMapChanges(policyOwner PolicyOwner, policyMapState MapState, identities Identities, features policyFeatures) (adds, deletes Keys) {
mc.mutex.Lock()
changes := ChangeState{
Adds: make(Keys, len(mc.changes)),
Deletes: make(Keys, len(mc.changes)),
}
var redirects map[string]uint16
if policyOwner != nil {
redirects = policyOwner.GetRealizedRedirects()
}
for i := range mc.changes {
if mc.changes[i].Add {
// Redirect entries for unrealized redirects come in with an invalid
// redirect port (65535), replace it with the actual proxy port number.
key := mc.changes[i].Key
entry := mc.changes[i].Value
if entry.ProxyPort == unrealizedRedirectPort {
var exists bool
proxyID := ProxyIDFromKey(uint16(policyOwner.GetID()), key, entry.Listener)
entry.ProxyPort, exists = redirects[proxyID]
if !exists {
log.WithFields(logrus.Fields{
logfields.PolicyKey: key,
logfields.PolicyEntry: entry,
}).Warn("consumeMapChanges: Skipping entry for unrealized redirect")
continue
}
}
// insert but do not allow non-redirect entries to overwrite a redirect entry,
// nor allow non-deny entries to overwrite deny entries.
// Collect the incremental changes to the overall state in 'mc.adds' and 'mc.deletes'.
policyMapState.denyPreferredInsertWithChanges(key, entry, identities, features, changes)
} else {
// Delete the contribution of this cs to the key and collect incremental changes
for cs := range mc.changes[i].Value.owners { // get the sole selector
policyMapState.deleteKeyWithChanges(mc.changes[i].Key, cs, identities, changes)
}
}
}
mc.changes = nil
mc.mutex.Unlock()
return changes.Adds, changes.Deletes
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"io"
stdlog "log"
"strconv"
"strings"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/labels"
)
type Tracing int
const (
TRACE_DISABLED Tracing = iota
TRACE_ENABLED
TRACE_VERBOSE
)
// TraceEnabled returns true if the SearchContext requests tracing.
func (s *SearchContext) TraceEnabled() bool {
return s.Trace != TRACE_DISABLED
}
// PolicyTrace logs the given message into the SearchContext logger only if
// TRACE_ENABLED or TRACE_VERBOSE is enabled in the receiver's SearchContext.
func (s *SearchContext) PolicyTrace(format string, a ...interface{}) {
if s.TraceEnabled() {
log.Debugf(format, a...)
if s.Logging != nil {
format = "%-" + s.CallDepth() + "s" + format
a = append([]interface{}{""}, a...)
s.Logging.Printf(format, a...)
}
}
}
// PolicyTraceVerbose logs the given message into the SearchContext logger only
// if TRACE_VERBOSE is enabled in the receiver's SearchContext.
func (s *SearchContext) PolicyTraceVerbose(format string, a ...interface{}) {
switch s.Trace {
case TRACE_VERBOSE:
log.Debugf(format, a...)
if s.Logging != nil {
s.Logging.Printf(format, a...)
}
}
}
// SearchContext defines the context while evaluating policy
type SearchContext struct {
Trace Tracing
Depth int
Logging *stdlog.Logger
From labels.LabelArray
To labels.LabelArray
DPorts []*models.Port
// rulesSelect specifies whether or not to check whether a rule which is
// being analyzed using this SearchContext matches either From or To.
// This is used to avoid using EndpointSelector.Matches() if possible,
// since it is costly in terms of performance.
rulesSelect bool
}
func (s *SearchContext) String() string {
from := make([]string, 0, len(s.From))
to := make([]string, 0, len(s.To))
dports := make([]string, 0, len(s.DPorts))
for _, fromLabel := range s.From {
from = append(from, fromLabel.String())
}
for _, toLabel := range s.To {
to = append(to, toLabel.String())
}
// We should avoid using `fmt.Sprintf()` here since
// it is well known for not being optimal in terms of
// CPU and memory allocations.
// See https://github.com/cilium/cilium/issues/19571
for _, dport := range s.DPorts {
dportStr := dport.Name
if dportStr == "" {
dportStr = strconv.FormatUint(uint64(dport.Port), 10)
}
dports = append(dports, dportStr+"/"+dport.Protocol)
}
fromStr := strings.Join(from, ", ")
toStr := strings.Join(to, ", ")
if len(dports) != 0 {
dportStr := strings.Join(dports, ", ")
return "From: [" + fromStr + "] => To: [" + toStr + "] Ports: [" + dportStr + "]"
}
return "From: [" + fromStr + "] => To: [" + toStr + "]"
}
func (s *SearchContext) CallDepth() string {
return strconv.Itoa(s.Depth * 2)
}
// WithLogger returns a shallow copy of the received SearchContext with the
// logging set to write to 'log'.
func (s *SearchContext) WithLogger(log io.Writer) *SearchContext {
result := *s
result.Logging = stdlog.New(log, "", 0)
if result.Trace == TRACE_DISABLED {
result.Trace = TRACE_ENABLED
}
return &result
}
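// Illustrative usage sketch (not part of the original code): capturing policy trace output
// into a buffer; assumes a SearchContext value 'sc' built by the caller and a "bytes" import:
//
//	var buf bytes.Buffer
//	traced := sc.WithLogger(&buf)
//	traced.PolicyTrace("evaluating %d rules\n", 3)
//	fmt.Print(buf.String())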
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/labels"
)
func TestSearchContextString(t *testing.T) {
for expected, sc := range map[string]SearchContext{
"From: [unspec:a, unspec:b, unspec:c] => To: [unspec:d, unspec:e, unspec:f] Ports: [HTTP/TCP, HTTPs/TCP]": {
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "c", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Name: "HTTP",
Port: 80,
Protocol: "TCP",
},
{
Name: "HTTPs",
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
"From: [unspec:a, unspec:b, unspec:c] => To: [unspec:d, unspec:e, unspec:f] Ports: [80/TCP, 442/TCP]": {
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "c", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
"From: [k8s:a, local:b, unspec:c] => To: [unspec:d, unspec:e, unspec:f]": {
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("k8s:a", "unspec:c", "local:b"),
To: labels.ParseLabelArray("d", "e", "f"),
rulesSelect: false,
},
} {
str := sc.String()
require.Equal(t, expected, str)
}
}
func BenchmarkSearchContextString(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, sc := range []SearchContext{
{
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "t", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Name: "HTTP",
Port: 80,
Protocol: "TCP",
},
{
Name: "HTTPs",
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
{
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "t", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
} {
_ = sc.String()
}
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"math"
"math/bits"
)
// MaskedPort is a port with a wild card mask value.
// The port range is represented by a masked port
// because we need to use masks for policy Keys
// that are indexed in the datapath by a bitwise
// longest-prefix-match trie.
type MaskedPort struct {
port uint16
mask uint16
}
func (m MaskedPort) String() string {
return fmt.Sprintf("{port: 0x%x, mask: 0x%x}", m.port, m.mask)
}
// maskedPort returns a new MaskedPort where 'wildcardBits' lowest bits are wildcarded.
func maskedPort(port uint16, wildcardBits int) MaskedPort {
mask := uint16(math.MaxUint16) << wildcardBits
return MaskedPort{port & mask, mask}
}
// PortRangeToMaskedPorts returns a slice of masked ports for the given port range.
// If the end port is equal to or less than the start port, then the start port is returned
// as a single, fully masked port.
// Ports are not returned in any particular order, so testing code needs to sort them
// for consistency.
func PortRangeToMaskedPorts(start uint16, end uint16) (ports []MaskedPort) {
// This is a wildcard.
if start == 0 && (end == 0 || end == math.MaxUint16) {
return []MaskedPort{{0, 0}}
}
// This is a single port.
if end <= start {
return []MaskedPort{{start, 0xffff}}
}
// Find the number of common leading bits. The first uncommon bit will be 0 for the start
// and 1 for the end.
commonBits := bits.LeadingZeros16(start ^ end)
// Cover the case where all the bits after the common bits are zeros on start and ones on
// end. In this case the range can be represented by a single masked port instead of two
// that would be produced below.
// For example, if the range is from 16-31 (0b10000 - 0b11111), then we return 0b1xxxx
// instead of 0b10xxx and 0b11xxx that would be produced when approaching the middle from
// the two sides.
//
// This also covers the trivial case where all the bits are in common (i.e., start == end).
mask := uint16(math.MaxUint16) >> commonBits
if start&mask == 0 && ^end&mask == 0 {
return []MaskedPort{maskedPort(start, 16-commonBits)}
}
// Find the "middle point" toward which the masked ports approach from both sides.
// This "middle point" is the highest bit that differs between the range start and end.
middleBit := 16 - 1 - commonBits
middle := uint16(1 << middleBit)
// Wildcard the trailing zeroes to the right of the middle bit of the range start.
// This covers the values immediately following the port range start, including the start itself.
// The middle bit is added to avoid counting zeroes past it.
bit := bits.TrailingZeros16(start | middle)
ports = append(ports, maskedPort(start, bit))
// Find all 0-bits between the trailing zeroes and the middle bit and add MaskedPorts where
// each found 0-bit is set and the lower bits are wildcarded. This covers the range from the
// start to the middle not covered by the trailing zeroes above.
// The current 'bit' is skipped since we know it is 1.
for bit++; bit < middleBit; bit++ {
if start&(1<<bit) == 0 {
// Adding 1<<bit will set the bit since we know it is not set
ports = append(ports, maskedPort(start+1<<bit, bit))
}
}
// Wildcard the trailing ones to the right of the middle bit of the range end.
// This covers the values immediately preceding and including the range end.
// The middle bit is added to avoid counting ones past it.
bit = bits.TrailingZeros16(^end | middle)
ports = append(ports, maskedPort(end, bit))
// Find all 1-bits between the trailing ones and the middle bit and add MaskedPorts where
// each found 1-bit is cleared and the lower bits are wildcarded. This covers the range from
// the end to the middle not covered by the trailing ones above.
// The current 'bit' is skipped since we know it is 0.
for bit++; bit < middleBit; bit++ {
if end&(1<<bit) != 0 {
// Subtracting 1<<bit will clear the bit since we know it is set
ports = append(ports, maskedPort(end-1<<bit, bit))
}
}
return ports
}
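// Illustrative sketch (not part of the original code) of the masked ports produced for two
// ranges; the expected values follow from the algorithm above:
//
//	// Single port: end <= start yields one fully masked port.
//	PortRangeToMaskedPorts(80, 80) // => [{port: 0x50, mask: 0xffff}]
//
//	// 16-31 (0b10000 - 0b11111) collapses to a single masked port 0b1xxxx.
//	PortRangeToMaskedPorts(16, 31) // => [{port: 0x10, mask: 0xfff0}]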
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/u8proto"
)
// ProxyStatsKey returns a key for endpoint's proxy stats, which may aggregate stats from multiple
// proxy redirects on the same port.
func ProxyStatsKey(ingress bool, protocol string, port, proxyPort uint16) string {
direction := "egress"
if ingress {
direction = "ingress"
}
portStr := strconv.FormatUint(uint64(port), 10)
proxyPortStr := strconv.FormatUint(uint64(proxyPort), 10)
var str strings.Builder
str.Grow(len(direction) + 1 + len(protocol) + 1 + len(portStr) + 1 + len(proxyPortStr))
str.WriteString(direction)
str.WriteRune(':')
str.WriteString(protocol)
str.WriteRune(':')
str.WriteString(portStr)
str.WriteRune(':')
str.WriteString(proxyPortStr)
return str.String()
}
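// Illustrative sketch (not part of the original code): the resulting key is a simple
// colon-separated string; the proxy port value below is an arbitrary example:
//
//	ProxyStatsKey(true, "TCP", 80, 15001) // => "ingress:TCP:80:15001"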
// ProxyID returns a unique string to identify a proxy mapping.
func ProxyID(endpointID uint16, ingress bool, protocol string, port uint16, listener string) string {
direction := "egress"
if ingress {
direction = "ingress"
}
epStr := strconv.FormatUint(uint64(endpointID), 10)
portStr := strconv.FormatUint(uint64(port), 10)
var str strings.Builder
str.Grow(len(epStr) + 1 + len(direction) + 1 + len(protocol) + 1 + len(portStr) + 1 + len(listener))
str.WriteString(epStr)
str.WriteRune(':')
str.WriteString(direction)
str.WriteRune(':')
str.WriteString(protocol)
str.WriteRune(':')
str.WriteString(portStr)
str.WriteRune(':')
str.WriteString(listener)
return str.String()
}
// ProxyIDFromKey returns a unique string to identify a proxy mapping.
func ProxyIDFromKey(endpointID uint16, key Key, listener string) string {
return ProxyID(endpointID, key.TrafficDirection == trafficdirection.Ingress.Uint8(), u8proto.U8proto(key.Nexthdr).String(), key.DestPort, listener)
}
// ParseProxyID parses a proxy ID returned by ProxyID and returns its components.
func ParseProxyID(proxyID string) (endpointID uint16, ingress bool, protocol string, port uint16, listener string, err error) {
comps := strings.Split(proxyID, ":")
if len(comps) != 5 {
err = fmt.Errorf("invalid proxy ID structure: %s", proxyID)
return
}
epID, err := strconv.ParseUint(comps[0], 10, 16)
if err != nil {
return
}
endpointID = uint16(epID)
ingress = comps[1] == "ingress"
protocol = comps[2]
l4port, err := strconv.ParseUint(comps[3], 10, 16)
if err != nil {
return
}
port = uint16(l4port)
listener = comps[4]
return
}
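// Illustrative sketch (not part of the original code): ProxyID and ParseProxyID round-trip
// the same colon-separated representation; the endpoint ID, port and listener name below are
// arbitrary:
//
//	id := ProxyID(42, true, "TCP", 8080, "listener-0")
//	// id == "42:ingress:TCP:8080:listener-0"
//	epID, ingress, proto, port, listener, err := ParseProxyID(id)
//	// epID == 42, ingress == true, proto == "TCP", port == 8080,
//	// listener == "listener-0", err == nil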
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"cmp"
"context"
"encoding/json"
"fmt"
"slices"
"sync"
"sync/atomic"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/crypto/certificatemanager"
"github.com/cilium/cilium/pkg/eventqueue"
"github.com/cilium/cilium/pkg/identity"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
)
// PolicyContext is an interface policy resolution functions use to access the Repository.
// This way testing code can run without mocking a full Repository.
type PolicyContext interface {
// return the namespace in which the policy rule is being resolved
GetNamespace() string
// return the SelectorCache
GetSelectorCache() *SelectorCache
// GetTLSContext resolves the given 'api.TLSContext' into CA
// certs and the public and private keys, using secrets from
// k8s or from the local file system.
GetTLSContext(tls *api.TLSContext) (ca, public, private string, err error)
// GetEnvoyHTTPRules translates the given 'api.L7Rules' into
// the protobuf representation that Envoy can consume. The bool
// return parameter tells whether rule enforcement can
// be short-circuited upon the first allowing rule. This is
// false if any of the rules has side effects, requiring all
// such rules to be evaluated.
GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool)
// IsDeny returns true if the policy computation should be done for the
// policy deny case. This function returns different values depending on the
// code path as it can be changed during the policy calculation.
IsDeny() bool
// SetDeny sets the Deny field of the PolicyContext and returns the old
// value stored.
SetDeny(newValue bool) (oldValue bool)
}
type policyContext struct {
repo *Repository
ns string
// isDeny is set to true if the given policy computation should
// be done for the policy deny case.
isDeny bool
}
// GetNamespace() returns the namespace for the policy rule being resolved
func (p *policyContext) GetNamespace() string {
return p.ns
}
// GetSelectorCache() returns the selector cache used by the Repository
func (p *policyContext) GetSelectorCache() *SelectorCache {
return p.repo.GetSelectorCache()
}
// GetTLSContext() returns data for TLS Context via a CertificateManager
func (p *policyContext) GetTLSContext(tls *api.TLSContext) (ca, public, private string, err error) {
if p.repo.certManager == nil {
return "", "", "", fmt.Errorf("No Certificate Manager set on Policy Repository")
}
return p.repo.certManager.GetTLSContext(context.TODO(), tls, p.ns)
}
func (p *policyContext) GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
return p.repo.GetEnvoyHTTPRules(l7Rules, p.ns)
}
// IsDeny returns true if the policy computation should be done for the
// policy deny case. This function returns different values depending on the
// code path, as it can be changed during the policy calculation.
func (p *policyContext) IsDeny() bool {
return p.isDeny
}
// SetDeny sets the Deny field of the PolicyContext and returns the old
// value stored.
func (p *policyContext) SetDeny(deny bool) bool {
oldDeny := p.isDeny
p.isDeny = deny
return oldDeny
}
// Repository is a list of policy rules which in combination form the security
// policy.
type Repository struct {
// Mutex protects the whole policy tree
Mutex lock.RWMutex
rules map[ruleKey]*rule
rulesByResource map[ipcachetypes.ResourceID]map[ruleKey]*rule
// We will need a way to synthesize a rule key for rules without a resource;
// these are - in practice - very rare, as they only come from the local API,
// never via k8s.
nextID uint
// revision is the revision of the policy repository. It will be
// incremented whenever the policy repository is changed.
// Always positive (>0).
revision atomic.Uint64
// RepositoryChangeQueue is a queue which serializes changes to the policy
// repository.
RepositoryChangeQueue *eventqueue.EventQueue
// RuleReactionQueue is a queue which serializes the resultant events that
// need to occur after updating the state of the policy repository. This
// can include queueing endpoint regenerations, policy revision increments
// for endpoints, etc.
RuleReactionQueue *eventqueue.EventQueue
// SelectorCache tracks the selectors used in the policies
// resolved from the repository.
selectorCache *SelectorCache
// PolicyCache tracks the selector policies created from this repo
policyCache *PolicyCache
certManager certificatemanager.CertificateManager
secretManager certificatemanager.SecretManager
getEnvoyHTTPRules func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool)
}
// GetSelectorCache() returns the selector cache used by the Repository
func (p *Repository) GetSelectorCache() *SelectorCache {
return p.selectorCache
}
// GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID
func (p *Repository) GetAuthTypes(localID, remoteID identity.NumericIdentity) AuthTypes {
return p.policyCache.GetAuthTypes(localID, remoteID)
}
func (p *Repository) SetEnvoyRulesFunc(f func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool)) {
p.getEnvoyHTTPRules = f
}
func (p *Repository) GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool) {
if p.getEnvoyHTTPRules == nil {
return nil, true
}
return p.getEnvoyHTTPRules(p.secretManager, l7Rules, ns)
}
// GetPolicyCache() returns the policy cache used by the Repository
func (p *Repository) GetPolicyCache() *PolicyCache {
return p.policyCache
}
// NewPolicyRepository creates a new policy repository.
// Only used for unit tests.
func NewPolicyRepository(
initialIDs identity.IdentityMap,
certManager certificatemanager.CertificateManager,
secretManager certificatemanager.SecretManager,
) *Repository {
repo := NewStoppedPolicyRepository(initialIDs, certManager, secretManager)
repo.Start()
return repo
}
// NewStoppedPolicyRepository creates a new policy repository without starting
// queues.
//
// Queues must be allocated via [Repository.Start]. The function serves to
// satisfy hive invariants.
func NewStoppedPolicyRepository(
initialIDs identity.IdentityMap,
certManager certificatemanager.CertificateManager,
secretManager certificatemanager.SecretManager,
) *Repository {
selectorCache := NewSelectorCache(initialIDs)
repo := &Repository{
rules: make(map[ruleKey]*rule),
rulesByResource: make(map[ipcachetypes.ResourceID]map[ruleKey]*rule),
selectorCache: selectorCache,
certManager: certManager,
secretManager: secretManager,
}
repo.revision.Store(1)
repo.policyCache = NewPolicyCache(repo, true)
return repo
}
// traceState is an internal structure used to collect information
// while determining policy decision
type traceState struct {
// selectedRules is the number of rules with matching EndpointSelector
selectedRules int
// matchedRules is the number of rules that have allowed traffic
matchedRules int
// matchedDenyRules is the number of rules that have denied traffic
matchedDenyRules int
// constrainedRules counts how many "FromRequires" constraints are
// unsatisfied
constrainedRules int
// ruleID is the rule ID currently being evaluated
ruleID int
}
func (state *traceState) trace(rules int, ctx *SearchContext) {
ctx.PolicyTrace("%d/%d rules selected\n", state.selectedRules, rules)
if state.constrainedRules > 0 {
ctx.PolicyTrace("Found unsatisfied FromRequires constraint\n")
} else {
if state.matchedRules > 0 {
ctx.PolicyTrace("Found allow rule\n")
} else {
ctx.PolicyTrace("Found no allow rule\n")
}
if state.matchedDenyRules > 0 {
ctx.PolicyTrace("Found deny rule\n")
} else {
ctx.PolicyTrace("Found no deny rule\n")
}
}
}
// Start allocates and starts various queues used by the Repository.
//
// Must only be called if using [NewStoppedPolicyRepository]
func (p *Repository) Start() {
p.RepositoryChangeQueue = eventqueue.NewEventQueueBuffered("repository-change-queue", option.Config.PolicyQueueSize)
p.RuleReactionQueue = eventqueue.NewEventQueueBuffered("repository-reaction-queue", option.Config.PolicyQueueSize)
p.RepositoryChangeQueue.Run()
p.RuleReactionQueue.Run()
}
// ResolveL4IngressPolicy resolves the L4 ingress policy for a set of endpoints
// by searching the policy repository for `PortRule` rules that are attached to
// a `Rule` where the EndpointSelector matches `ctx.To`. `ctx.From` takes no effect and
// is ignored in the search. If multiple `PortRule` rules are found, all rules
// are merged together. If rules contain overlapping port definitions, the first
// rule found in the repository takes precedence.
//
// TODO: Coalesce l7 rules?
//
// Caller must release resources by calling Detach() on the returned map!
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) ResolveL4IngressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
policyCtx := policyContext{
repo: p,
ns: ctx.To.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
rules := make(ruleSlice, 0, len(p.rules))
for _, rule := range p.rules {
rules = append(rules, rule)
}
// Sort for unit tests
slices.SortFunc[ruleSlice](rules, func(a, b *rule) int {
return cmp.Compare(a.key.idx, b.key.idx)
})
result, err := rules.resolveL4IngressPolicy(&policyCtx, ctx)
if err != nil {
return nil, err
}
return result, nil
}
// ResolveL4EgressPolicy resolves the L4 egress policy for a set of endpoints
// by searching the policy repository for `PortRule` rules that are attached to
// a `Rule` where the EndpointSelector matches `ctx.From`. `ctx.To` takes no effect and
// is ignored in the search. If multiple `PortRule` rules are found, all rules
// are merged together. If rules contain overlapping port definitions, the first
// rule found in the repository takes precedence.
//
// Caller must release resources by calling Detach() on the returned map!
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) ResolveL4EgressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
policyCtx := policyContext{
repo: p,
ns: ctx.From.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
rules := make(ruleSlice, 0, len(p.rules))
for _, rule := range p.rules {
rules = append(rules, rule)
}
slices.SortFunc[ruleSlice](rules, func(a, b *rule) int {
return cmp.Compare(a.key.idx, b.key.idx)
})
result, err := rules.resolveL4EgressPolicy(&policyCtx, ctx)
if err != nil {
return nil, err
}
return result, nil
}
// AllowsIngressRLocked evaluates the policy repository for the provided search
// context and returns the verdict for ingress. If no matching policy allows for
// the connection, the request will be denied. The policy repository mutex must
// be held.
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) AllowsIngressRLocked(ctx *SearchContext) api.Decision {
// Lack of DPorts in the SearchContext means L3-only search
if len(ctx.DPorts) == 0 {
newCtx := *ctx
newCtx.DPorts = []*models.Port{{
Port: 0,
Protocol: models.PortProtocolANY,
}}
ctx = &newCtx
}
ctx.PolicyTrace("Tracing %s", ctx.String())
ingressPolicy, err := p.ResolveL4IngressPolicy(ctx)
if err != nil {
log.WithError(err).Warn("Evaluation error while resolving L4 ingress policy")
}
verdict := api.Denied
if err == nil && ingressPolicy.Len() > 0 {
verdict = ingressPolicy.IngressCoversContext(ctx)
}
ctx.PolicyTrace("Ingress verdict: %s", verdict.String())
ingressPolicy.Detach(p.GetSelectorCache())
return verdict
}
// AllowsEgressRLocked evaluates the policy repository for the provided search
// context and returns the verdict. If no matching policy allows for the
// connection, the request will be denied. The policy repository mutex must be
// held.
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) AllowsEgressRLocked(ctx *SearchContext) api.Decision {
// Lack of DPorts in the SearchContext means L3-only search
if len(ctx.DPorts) == 0 {
newCtx := *ctx
newCtx.DPorts = []*models.Port{{
Port: 0,
Protocol: models.PortProtocolANY,
}}
ctx = &newCtx
}
ctx.PolicyTrace("Tracing %s\n", ctx.String())
egressPolicy, err := p.ResolveL4EgressPolicy(ctx)
if err != nil {
log.WithError(err).Warn("Evaluation error while resolving L4 egress policy")
}
verdict := api.Denied
if err == nil && egressPolicy.Len() > 0 {
verdict = egressPolicy.EgressCoversContext(ctx)
}
ctx.PolicyTrace("Egress verdict: %s", verdict.String())
egressPolicy.Detach(p.GetSelectorCache())
return verdict
}
// SearchRLocked searches the policy repository for rules which match the
// specified labels and will return an array of all rules which matched.
func (p *Repository) SearchRLocked(lbls labels.LabelArray) api.Rules {
result := api.Rules{}
for _, r := range p.rules {
if r.Labels.Contains(lbls) {
result = append(result, &r.Rule)
}
}
return result
}
// AddListLocked inserts a rule into the policy repository with the repository already locked
// Expects that the entire rule list has already been sanitized.
func (p *Repository) AddListLocked(rules api.Rules) (ruleSlice, uint64) {
newRules := make(ruleSlice, 0, len(rules))
for _, r := range rules {
newRule := p.newRule(*r, ruleKey{idx: p.nextID})
newRules = append(newRules, newRule)
p.insert(newRule)
p.nextID++
}
return newRules, p.BumpRevision()
}
// ReplaceByResourceLocked replaces all rules that belong to a given resource with a
// new set. The set of rules added and removed is returned, along with the new revision number.
// Resource must not be empty
func (p *Repository) ReplaceByResourceLocked(rules api.Rules, resource ipcachetypes.ResourceID) (newRules ruleSlice, oldRules ruleSlice, revision uint64) {
if len(resource) == 0 {
// This should never ever be hit, as the caller should have already validated the resource.
// However, if it does happen, it means something very wrong has happened and we are at risk
// of removing all network policies. So, we must panic rather than risk disabling network security.
panic("may not replace API rules with an empty resource")
}
if old, ok := p.rulesByResource[resource]; ok {
oldRules = make(ruleSlice, 0, len(old))
for key, oldRule := range old {
oldRules = append(oldRules, oldRule)
p.del(key)
}
}
newRules = make(ruleSlice, 0, len(rules))
if len(rules) > 0 {
p.rulesByResource[resource] = make(map[ruleKey]*rule, len(rules))
for i, r := range rules {
newRule := p.newRule(*r, ruleKey{resource: resource, idx: uint(i)})
newRules = append(newRules, newRule)
p.insert(newRule)
}
}
return newRules, oldRules, p.BumpRevision()
}
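// Illustrative sketch (not part of the original code): a caller might wrap the locked
// variant with the repository mutex and release the removed rules via Release (defined
// later in this file) once they are no longer needed; 'repo', 'rules' and 'resource' are
// assumed to come from the caller:
//
//	repo.Mutex.Lock()
//	added, removed, rev := repo.ReplaceByResourceLocked(rules, resource)
//	repo.Mutex.Unlock()
//	_, _ = added, rev
//	repo.Release(removed)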
func (p *Repository) insert(r *rule) {
p.rules[r.key] = r
rid := r.key.resource
if len(rid) > 0 {
if p.rulesByResource[rid] == nil {
p.rulesByResource[rid] = map[ruleKey]*rule{}
}
p.rulesByResource[rid][r.key] = r
}
metrics.Policy.Inc()
}
func (p *Repository) del(key ruleKey) {
if p.rules[key] == nil {
return
}
delete(p.rules, key)
rid := key.resource
if len(rid) > 0 && p.rulesByResource[rid] != nil {
delete(p.rulesByResource[rid], key)
if len(p.rulesByResource[rid]) == 0 {
delete(p.rulesByResource, rid)
}
}
metrics.Policy.Dec()
}
// newRule allocates a CachedSelector for a given rule.
func (p *Repository) newRule(apiRule api.Rule, key ruleKey) *rule {
r := &rule{
Rule: apiRule,
key: key,
}
r.subjectSelector, _ = p.selectorCache.AddIdentitySelector(r, r.Labels, *r.getSelector())
return r
}
// Release releases resources owned by a given rule slice.
// This is needed because we need to evaluate deleted rules after they
// are removed from the repository, so we must allow for a specific lifecycle
func (p *Repository) Release(rs ruleSlice) {
for _, r := range rs {
if r.subjectSelector != nil {
p.selectorCache.RemoveSelector(r.subjectSelector, r)
}
}
}
// MustAddList inserts a rule into the policy repository. It is used for
// unit-testing purposes only. Panics if the rule is invalid
func (p *Repository) MustAddList(rules api.Rules) (ruleSlice, uint64) {
for i := range rules {
// FIXME(GH-31162): Many unit tests provide invalid rules
err := rules[i].Sanitize()
if err != nil {
panic(err)
}
}
p.Mutex.Lock()
defer p.Mutex.Unlock()
return p.AddListLocked(rules)
}
// Iterate iterates the policy repository, calling f for each rule. It is safe
// to execute Iterate concurrently.
func (p *Repository) Iterate(f func(rule *api.Rule)) {
p.Mutex.RWMutex.Lock()
defer p.Mutex.RWMutex.Unlock()
for _, r := range p.rules {
f(&r.Rule)
}
}
// FindSelectedEndpoints finds all endpoints selected by a given ruleSlice.
// All endpoints that are selected will be added to endpointsToRegenerate; all
// endpoints that are *not* selected (but still valid) remain in endpointsToBumpRevision.
// policySelectionWG is done when all endpoints have been considered.
func (r ruleSlice) FindSelectedEndpoints(endpointsToBumpRevision, endpointsToRegenerate *EndpointSet, policySelectionWG *sync.WaitGroup) {
endpointsToBumpRevision.ForEachGo(policySelectionWG, func(epp Endpoint) {
securityIdentity, err := epp.GetSecurityIdentity()
if err != nil || securityIdentity == nil {
// The endpoint is no longer alive, or it does not have a security identity.
// We should remove it from the set of endpoints that will be bumped
endpointsToBumpRevision.Delete(epp)
return
}
if r.matchesSubject(securityIdentity) {
endpointsToRegenerate.Insert(epp)
endpointsToBumpRevision.Delete(epp)
}
})
}
// DeleteByLabelsLocked deletes all rules in the policy repository which
// contain the specified labels. Returns the revision of the policy repository
// after deleting the rules, as well as how many rules were deleted.
func (p *Repository) DeleteByLabelsLocked(lbls labels.LabelArray) (ruleSlice, uint64, int) {
deletedRules := ruleSlice{}
for key, r := range p.rules {
if r.Labels.Contains(lbls) {
deletedRules = append(deletedRules, r)
p.del(key)
}
}
l := len(deletedRules)
if l > 0 {
p.BumpRevision()
}
return deletedRules, p.GetRevision(), l
}
func (p *Repository) DeleteByResourceLocked(rid ipcachetypes.ResourceID) (ruleSlice, uint64) {
rules := p.rulesByResource[rid]
if len(rules) == 0 {
delete(p.rulesByResource, rid)
return nil, p.GetRevision()
}
deletedRules := make(ruleSlice, 0, len(rules))
for key, rule := range rules {
p.del(key)
deletedRules = append(deletedRules, rule)
}
return deletedRules, p.BumpRevision()
}
// DeleteByLabels deletes all rules in the policy repository which contain the
// specified labels
func (p *Repository) DeleteByLabels(lbls labels.LabelArray) (uint64, int) {
p.Mutex.Lock()
defer p.Mutex.Unlock()
_, rev, numDeleted := p.DeleteByLabelsLocked(lbls)
return rev, numDeleted
}
// JSONMarshalRules returns a slice of policy rules as string in JSON
// representation
func JSONMarshalRules(rules api.Rules) string {
b, err := json.MarshalIndent(rules, "", " ")
if err != nil {
return err.Error()
}
return string(b)
}
// GetJSON returns all rules of the policy repository as string in JSON
// representation
func (p *Repository) GetJSON() string {
p.Mutex.RLock()
defer p.Mutex.RUnlock()
result := api.Rules{}
for _, r := range p.rules {
result = append(result, &r.Rule)
}
return JSONMarshalRules(result)
}
// GetRulesMatching returns whether any of the rules in a repository contain a
// rule with labels matching the labels in the provided LabelArray.
//
// Must be called with p.Mutex held
func (p *Repository) GetRulesMatching(lbls labels.LabelArray) (ingressMatch bool, egressMatch bool) {
ingressMatch = false
egressMatch = false
for _, r := range p.rules {
rulesMatch := r.getSelector().Matches(lbls)
if rulesMatch {
if len(r.Ingress) > 0 {
ingressMatch = true
}
if len(r.IngressDeny) > 0 {
ingressMatch = true
}
if len(r.Egress) > 0 {
egressMatch = true
}
if len(r.EgressDeny) > 0 {
egressMatch = true
}
}
if ingressMatch && egressMatch {
return
}
}
return
}
// NumRules returns the amount of rules in the policy repository.
//
// Must be called with p.Mutex held
func (p *Repository) NumRules() int {
return len(p.rules)
}
// GetRevision returns the revision of the policy repository
func (p *Repository) GetRevision() uint64 {
return p.revision.Load()
}
// Empty returns 'true' if repository has no rules, 'false' otherwise.
//
// Must be called without p.Mutex held
func (p *Repository) Empty() bool {
p.Mutex.Lock()
defer p.Mutex.Unlock()
return p.NumRules() == 0
}
// BumpRevision allows forcing policy regeneration
func (p *Repository) BumpRevision() uint64 {
metrics.PolicyRevision.Inc()
return p.revision.Add(1)
}
// GetRulesList returns the current policy
func (p *Repository) GetRulesList() *models.Policy {
p.Mutex.RLock()
defer p.Mutex.RUnlock()
lbls := labels.ParseSelectLabelArrayFromArray([]string{})
ruleList := p.SearchRLocked(lbls)
return &models.Policy{
Revision: int64(p.GetRevision()),
Policy: JSONMarshalRules(ruleList),
}
}
// resolvePolicyLocked returns the selectorPolicy for the provided
// identity from the set of rules in the repository. If the policy
// cannot be generated due to conflicts at L4 or L7, returns an error.
//
// Must be performed while holding the Repository lock.
func (p *Repository) resolvePolicyLocked(securityIdentity *identity.Identity) (*selectorPolicy, error) {
// First obtain whether policy applies in both traffic directions, as well
// as list of rules which actually select this endpoint. This allows us
// to not have to iterate through the entire rule list multiple times and
// perform the matching decision again when computing policy for each
// protocol layer, which is quite costly in terms of performance.
ingressEnabled, egressEnabled,
matchingRules :=
p.computePolicyEnforcementAndRules(securityIdentity)
calculatedPolicy := &selectorPolicy{
Revision: p.GetRevision(),
SelectorCache: p.GetSelectorCache(),
L4Policy: NewL4Policy(p.GetRevision()),
IngressPolicyEnabled: ingressEnabled,
EgressPolicyEnabled: egressEnabled,
}
lbls := securityIdentity.LabelArray
ingressCtx := SearchContext{
To: lbls,
rulesSelect: true,
}
egressCtx := SearchContext{
From: lbls,
rulesSelect: true,
}
if option.Config.TracingEnabled() {
ingressCtx.Trace = TRACE_ENABLED
egressCtx.Trace = TRACE_ENABLED
}
policyCtx := policyContext{
repo: p,
ns: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
if ingressEnabled {
newL4IngressPolicy, err := matchingRules.resolveL4IngressPolicy(&policyCtx, &ingressCtx)
if err != nil {
return nil, err
}
calculatedPolicy.L4Policy.Ingress.PortRules = newL4IngressPolicy
}
if egressEnabled {
newL4EgressPolicy, err := matchingRules.resolveL4EgressPolicy(&policyCtx, &egressCtx)
if err != nil {
return nil, err
}
calculatedPolicy.L4Policy.Egress.PortRules = newL4EgressPolicy
}
// Make the calculated policy ready for incremental updates
calculatedPolicy.Attach(&policyCtx)
return calculatedPolicy, nil
}
// computePolicyEnforcementAndRules returns whether policy applies at ingress or egress
// for the given security identity, as well as a list of any rules which select
// the set of labels of the given security identity.
//
// Must be called with repo mutex held for reading.
func (p *Repository) computePolicyEnforcementAndRules(securityIdentity *identity.Identity) (
ingress, egress bool,
matchingRules ruleSlice,
) {
lbls := securityIdentity.LabelArray
// Check if policy enforcement should be enabled at the daemon level.
if lbls.Has(labels.IDNameHost) && !option.Config.EnableHostFirewall {
return false, false, nil
}
policyMode := GetPolicyEnabled()
// If policy enforcement isn't enabled, we do not enable policy
// enforcement for the endpoint. We don't care about returning any
// rules that match.
if policyMode == option.NeverEnforce {
return false, false, nil
}
matchingRules = []*rule{}
for _, r := range p.rules {
if r.matchesSubject(securityIdentity) {
matchingRules = append(matchingRules, r)
}
}
// If policy enforcement is enabled for the daemon, then it has to be
// enabled for the endpoint.
// If the endpoint has the reserved:init label, i.e. if it has not yet
// received any labels, always enforce policy (default deny).
if policyMode == option.AlwaysEnforce || lbls.Has(labels.IDNameInit) {
return true, true, matchingRules
}
// Determine the default policy for each direction.
//
// By default, endpoints have no policy and all traffic is allowed.
// If any rules select the endpoint, then the endpoint switches to a
// default-deny mode (same as traffic being enabled), per-direction.
//
// Rules, however, can optionally be configured to not enable default-deny mode.
// If no rules enable default-deny, then all traffic is allowed except that explicitly
// denied by a Deny rule.
//
// There are three possible cases _per direction_:
// 1: No rules are present,
// 2: At least one default-deny rule is present. Then, policy is enabled
// 3: Only non-default-deny rules are present. Then, policy is enabled, but we must insert
// an additional allow-all rule. We must do this, even if all traffic is allowed, because
// rules may have additional effects such as enabling L7 proxy.
hasIngressDefaultDeny := false
hasEgressDefaultDeny := false
for _, r := range matchingRules {
if !ingress || !hasIngressDefaultDeny { // short-circuit len()
if len(r.Ingress) > 0 || len(r.IngressDeny) > 0 {
ingress = true
if *r.EnableDefaultDeny.Ingress {
hasIngressDefaultDeny = true
}
}
}
if !egress || !hasEgressDefaultDeny { // short-circuit len()
if len(r.Egress) > 0 || len(r.EgressDeny) > 0 {
egress = true
if *r.EnableDefaultDeny.Egress {
hasEgressDefaultDeny = true
}
}
}
if ingress && egress && hasIngressDefaultDeny && hasEgressDefaultDeny {
break
}
}
// If there are only ingress default-allow rules, then insert a wildcard rule
if !hasIngressDefaultDeny && ingress {
log.WithField(logfields.Identity, securityIdentity).Info("Only default-allow policies, synthesizing ingress wildcard-allow rule")
matchingRules = append(matchingRules, wildcardRule(securityIdentity.LabelArray, true /*ingress*/))
}
// Same for egress -- synthesize a wildcard rule
if !hasEgressDefaultDeny && egress {
log.WithField(logfields.Identity, securityIdentity).Info("Only default-allow policies, synthesizing egress wildcard-allow rule")
matchingRules = append(matchingRules, wildcardRule(securityIdentity.LabelArray, false /*egress*/))
}
return
}
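// Illustrative sketch (not part of the original code) of case 3 above: a rule that selects
// the endpoint but opts out of default deny on ingress. The label in the selector is a
// placeholder; the EnableDefaultDeny field is the one dereferenced in the loop above:
//
//	no := false
//	r := api.Rule{
//		EndpointSelector: api.NewESFromLabels(labels.NewLabel("app", "foo", labels.LabelSourceK8s)),
//		Ingress:          []api.IngressRule{{}},
//	}
//	r.EnableDefaultDeny.Ingress = &no
//	// With only such rules selecting the endpoint, ingress policy is enabled but a
//	// wildcard allow rule is synthesized (see wildcardRule below).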
// wildcardRule generates a wildcard rule that only selects the given identity.
func wildcardRule(lbls labels.LabelArray, ingress bool) *rule {
r := &rule{}
if ingress {
r.Ingress = []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityAll},
},
},
}
} else {
r.Egress = []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityAll},
},
},
}
}
es := api.NewESFromLabels(lbls...)
if lbls.Has(labels.IDNameHost) {
r.NodeSelector = es
} else {
r.EndpointSelector = es
}
_ = r.Sanitize()
return r
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"github.com/sirupsen/logrus"
)
// selectorPolicy is a structure which contains the resolved policy for a
// particular Identity across all layers (L3, L4, and L7), with the policy
// still determined in terms of EndpointSelectors.
type selectorPolicy struct {
// Revision is the revision of the policy repository used to generate
// this selectorPolicy.
Revision uint64
// SelectorCache managing selectors in L4Policy
SelectorCache *SelectorCache
// L4Policy contains the computed L4 and L7 policy.
L4Policy L4Policy
// IngressPolicyEnabled specifies whether this policy contains any policy
// at ingress.
IngressPolicyEnabled bool
// EgressPolicyEnabled specifies whether this policy contains any policy
// at egress.
EgressPolicyEnabled bool
}
func (p *selectorPolicy) Attach(ctx PolicyContext) {
p.L4Policy.Attach(ctx)
}
// EndpointPolicy is a structure which contains the resolved policy across all
// layers (L3, L4, and L7), distilled against a set of identities.
type EndpointPolicy struct {
// Note that all Endpoints sharing the same identity will be
// referring to a shared selectorPolicy!
*selectorPolicy
// policyMapState contains the state of this policy as it relates to the
// datapath. In the future, this will be factored out of this object to
// decouple the policy as it relates to the datapath vs. its userspace
// representation.
// It maps each Key to the proxy port if proxy redirection is needed.
// Proxy port 0 indicates no proxy redirection.
// All fields within the Key and the proxy port must be in host byte-order.
// Must only be accessed with PolicyOwner (aka Endpoint) lock taken.
policyMapState MapState
// policyMapChanges collects pending changes to the PolicyMapState
policyMapChanges MapChanges
// PolicyOwner describes any type which consumes this EndpointPolicy object.
PolicyOwner PolicyOwner
}
// PolicyOwner is anything which consumes a EndpointPolicy.
type PolicyOwner interface {
GetID() uint64
LookupRedirectPort(ingress bool, protocol string, port uint16, listener string) (uint16, error)
GetRealizedRedirects() map[string]uint16
HasBPFPolicyMap() bool
GetNamedPort(ingress bool, name string, proto uint8) uint16
PolicyDebug(fields logrus.Fields, msg string)
}
// newSelectorPolicy returns an empty selectorPolicy stub.
func newSelectorPolicy(selectorCache *SelectorCache) *selectorPolicy {
return &selectorPolicy{
Revision: 0,
SelectorCache: selectorCache,
L4Policy: NewL4Policy(0),
}
}
// insertUser adds a user to the L4Policy so that incremental
// updates of the L4Policy may be forwarded.
func (p *selectorPolicy) insertUser(user *EndpointPolicy) {
p.L4Policy.insertUser(user)
}
// removeUser removes a user from the L4Policy so the EndpointPolicy
// can be freed when not needed any more
func (p *selectorPolicy) removeUser(user *EndpointPolicy) {
p.L4Policy.removeUser(user)
}
// Detach releases resources held by a selectorPolicy to enable
// successful eventual GC. Note that the selectorPolicy itself is not
// modified in any way, so that it can be used concurrently.
func (p *selectorPolicy) Detach() {
p.L4Policy.Detach(p.SelectorCache)
}
// DistillPolicy filters down the specified selectorPolicy (which acts
// upon selectors) into a set of concrete map entries based on the
// SelectorCache. These can subsequently be plumbed into the datapath.
//
// Called without holding the Selector cache or Repository locks.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, isHost bool) *EndpointPolicy {
calculatedPolicy := &EndpointPolicy{
selectorPolicy: p,
policyMapState: NewMapState(),
PolicyOwner: policyOwner,
}
if !p.IngressPolicyEnabled || !p.EgressPolicyEnabled {
calculatedPolicy.policyMapState.allowAllIdentities(
!p.IngressPolicyEnabled, !p.EgressPolicyEnabled)
}
// Register the new EndpointPolicy as a receiver of delta
// updates. Any updates happening after this, but before
// computeDesiredL4PolicyMapEntries() call finishes may
// already be applied to the PolicyMapState, specifically:
//
// - policyMapChanges may contain an addition of an entry that
// is already added to the PolicyMapState
//
// - policyMapChanges may contain a deletion of an entry that
// has already been deleted from PolicyMapState
p.insertUser(calculatedPolicy)
// Must come after the 'insertUser()' above to guarantee
// PolicyMapChanges will contain all changes that are applied
// after the computation of PolicyMapState has started.
calculatedPolicy.toMapState()
if !isHost {
calculatedPolicy.policyMapState.determineAllowLocalhostIngress()
}
return calculatedPolicy
}
// GetPolicyMap gets the policy map state as the interface
// MapState
func (p *EndpointPolicy) GetPolicyMap() MapState {
return p.policyMapState
}
// SetPolicyMap sets the policy map state as the interface
// MapState. If the given MapState is nil, then this method
// initializes a new MapState object for the caller.
func (p *EndpointPolicy) SetPolicyMap(ms MapState) {
if ms == nil {
p.policyMapState = NewMapState()
return
}
p.policyMapState = ms
}
// Detach removes EndpointPolicy references from selectorPolicy
// to allow the EndpointPolicy to be GC'd.
// PolicyOwner (aka Endpoint) is also locked during this call.
func (p *EndpointPolicy) Detach() {
p.selectorPolicy.removeUser(p)
}
// NewMapStateWithInsert returns a new MapState and an insert function that can be used to populate
// it. We keep general insert functions private so that the caller can only insert to this specific
// map.
func NewMapStateWithInsert() (MapState, func(k Key, e MapStateEntry)) {
currentMap := NewMapState()
return currentMap, func(k Key, e MapStateEntry) {
currentMap.insert(k, e, nil)
}
}
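// Illustrative sketch (not part of the original code): building a small map state with the
// returned insert function; the Key fields and NewMapStateEntry arguments mirror their use
// elsewhere in this package and are otherwise arbitrary:
//
//	ms, insert := NewMapStateWithInsert()
//	k := Key{DestPort: 53, Nexthdr: 17, TrafficDirection: trafficdirection.Ingress.Uint8()}
//	e := NewMapStateEntry(nil, nil, 0, "", 0, false, DefaultAuthType, AuthTypeDisabled)
//	insert(k, e)
//	_ = ms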
func (p *EndpointPolicy) InsertMapState(key Key, entry MapStateEntry) {
// SelectorCache used as Identities interface which only has GetPrefix() that needs no lock
p.policyMapState.insert(key, entry, p.SelectorCache)
}
func (p *EndpointPolicy) DeleteMapState(key Key) {
// SelectorCache used as Identities interface which only has GetPrefix() that needs no lock
p.policyMapState.delete(key, p.SelectorCache)
}
func (p *EndpointPolicy) RevertChanges(changes ChangeState) {
// SelectorCache used as Identities interface which only has GetPrefix() that needs no lock
p.policyMapState.revertChanges(p.SelectorCache, changes)
}
func (p *EndpointPolicy) AddVisibilityKeys(e PolicyOwner, redirectPort uint16, visMeta *VisibilityMetadata, changes ChangeState) {
// SelectorCache used as Identities interface which only has GetPrefix() that needs no lock
p.policyMapState.addVisibilityKeys(e, redirectPort, visMeta, p.SelectorCache, changes)
}
// toMapState transforms the EndpointPolicy.L4Policy into
// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
// Called with selectorcache locked for reading.
// Called without holding the Repository lock.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (p *EndpointPolicy) toMapState() {
p.L4Policy.Ingress.toMapState(p)
p.L4Policy.Egress.toMapState(p)
}
// toMapState transforms the L4DirectionPolicy into
// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
// Called with selectorcache locked for reading.
// Called without holding the Repository lock.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (l4policy L4DirectionPolicy) toMapState(p *EndpointPolicy) {
l4policy.PortRules.ForEach(func(l4 *L4Filter) bool {
l4.toMapState(p, l4policy.features, p.PolicyOwner.GetRealizedRedirects(), ChangeState{})
return true
})
}
// createRedirectsFunc returns 'nil' if map changes should not be applied immediately,
// otherwise the returned map is to be used to find redirect ports for map updates.
type createRedirectsFunc func(*L4Filter) map[string]uint16
// UpdateRedirects updates redirects in the EndpointPolicy's PolicyMapState by using the provided
// function to create redirects. Changes to 'p.PolicyMapState' are collected in
// 'changes' so that they can be reverted when needed.
func (p *EndpointPolicy) UpdateRedirects(ingress bool, createRedirects createRedirectsFunc, changes ChangeState) {
l4policy := &p.L4Policy.Ingress
if !ingress {
l4policy = &p.L4Policy.Egress
}
l4policy.updateRedirects(p, createRedirects, changes)
}
func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, createRedirects createRedirectsFunc, changes ChangeState) {
l4policy.PortRules.ForEach(func(l4 *L4Filter) bool {
if l4.IsRedirect() {
// Check if we are denying this specific L4 first regardless the L3, if there are any deny policies
if l4policy.features.contains(denyRules) && p.policyMapState.deniesL4(p.PolicyOwner, l4) {
return true
}
redirects := createRedirects(l4)
if redirects != nil {
// Set the proxy port in the policy map.
l4.toMapState(p, l4policy.features, redirects, changes)
}
}
return true
})
}
// ConsumeMapChanges transfers the changes from MapChanges to the caller.
// The SelectorCache is used as the Identities interface, which only has GetPrefix() and needs no lock.
// Endpoints explicitly wait for a WaitGroup signaling completion of AccumulatePolicyMapChanges
// calls before calling ConsumeMapChanges so that if we see any partial changes here, there will be
// another call after to cover for the rest.
// PolicyOwner (aka Endpoint) is locked during this call.
func (p *EndpointPolicy) ConsumeMapChanges() (adds, deletes Keys) {
features := p.selectorPolicy.L4Policy.Ingress.features | p.selectorPolicy.L4Policy.Egress.features
return p.policyMapChanges.consumeMapChanges(p.PolicyOwner, p.policyMapState, p.SelectorCache, features)
}
// NewEndpointPolicy returns an empty EndpointPolicy stub.
func NewEndpointPolicy(repo *Repository) *EndpointPolicy {
return &EndpointPolicy{
selectorPolicy: newSelectorPolicy(repo.GetSelectorCache()),
policyMapState: NewMapState(),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"strconv"
"strings"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/cilium/cilium/pkg/identity"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
)
// ruleKey is a synthetic unique identifier for a Rule
type ruleKey struct {
// resource is the owning resource of this rule
resource ipcachetypes.ResourceID
// idx is an arbitrary unique index, as resources can own multiple rules
idx uint
}
type rule struct {
api.Rule
key ruleKey
// subjectSelector is the entry in the SelectorCache that selects subjects (endpoints or nodes).
subjectSelector CachedSelector
}
// IdentitySelectionUpdated is called by the SelectorCache when a new identity is added.
// We can ignore it here because the endpoint will be regenerated anyway when
// identities are updated.
func (r *rule) IdentitySelectionUpdated(_ CachedSelector, _, _ []identity.NumericIdentity) {
}
func (r *rule) String() string {
return r.EndpointSelector.String()
}
func (r *rule) getSelector() *api.EndpointSelector {
if r.NodeSelector.LabelSelector != nil {
return &r.NodeSelector
}
return &r.EndpointSelector
}
func (epd *PerSelectorPolicy) appendL7WildcardRule(ctx *SearchContext) api.L7Rules {
// Wildcard rule only needs to be appended if some rules already exist
switch {
case len(epd.L7Rules.HTTP) > 0:
rule := api.PortRuleHTTP{}
if !rule.Exists(epd.L7Rules) {
ctx.PolicyTrace(" Merging HTTP wildcard rule: %+v\n", rule)
epd.L7Rules.HTTP = append(epd.L7Rules.HTTP, rule)
} else {
ctx.PolicyTrace(" Merging HTTP wildcard rule, equal rule already exists: %+v\n", rule)
}
case len(epd.L7Rules.Kafka) > 0:
rule := kafka.PortRule{}
rule.Sanitize()
if !rule.Exists(epd.L7Rules.Kafka) {
ctx.PolicyTrace(" Merging Kafka wildcard rule: %+v\n", rule)
epd.L7Rules.Kafka = append(epd.L7Rules.Kafka, rule)
} else {
ctx.PolicyTrace(" Merging Kafka wildcard rule, equal rule already exists: %+v\n", rule)
}
case len(epd.L7Rules.DNS) > 0:
// Wildcarding at L7 for DNS is expressed by allowing everything via
// MatchPattern.
rule := api.PortRuleDNS{MatchPattern: "*"}
rule.Sanitize()
if !rule.Exists(epd.L7Rules) {
ctx.PolicyTrace(" Merging DNS wildcard rule: %+v\n", rule)
epd.L7Rules.DNS = append(epd.L7Rules.DNS, rule)
} else {
ctx.PolicyTrace(" Merging DNS wildcard rule, equal rule already exists: %+v\n", rule)
}
case epd.L7Rules.L7Proto != "" && len(epd.L7Rules.L7) > 0:
rule := api.PortRuleL7{}
if !rule.Exists(epd.L7Rules) {
ctx.PolicyTrace(" Merging L7 wildcard rule: %+v\n", rule)
epd.L7Rules.L7 = append(epd.L7Rules.L7, rule)
} else {
ctx.PolicyTrace(" Merging L7 wildcard rule, equal rule already exists: %+v\n", rule)
}
}
return epd.L7Rules
}
// takesListenerPrecedenceOver returns true if the listener reference in 'l7Rules' takes precedence
// over the listener reference in 'other'.
func (l7Rules *PerSelectorPolicy) takesListenerPrecedenceOver(other *PerSelectorPolicy) bool {
var priority, otherPriority uint16
// Decrement by one so that the undefined value (0) wraps around to the highest
// numerical value of the uint16, which is the lowest possible priority.
priority = l7Rules.Priority - 1
otherPriority = other.Priority - 1
return priority < otherPriority
}
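// Worked example of the wrap-around above (a sketch, not exhaustive):
//
//	Priority 0 (undefined) -> 0 - 1 == 65535, the lowest precedence
//	Priority 1             -> 1 - 1 == 0,     the highest precedence
//
// so any explicitly set priority takes precedence over an unset one.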
// mergeListenerReference merges listener reference from 'newL7Rules' to 'l7Rules', giving
// precedence to listener with the lowest priority, if any.
func (l7Rules *PerSelectorPolicy) mergeListenerReference(newL7Rules *PerSelectorPolicy) error {
// Nothing to do if 'newL7Rules' has no listener reference
if newL7Rules.Listener == "" {
return nil
}
// Nothing to do if the listeners are already the same and have the same priority
if newL7Rules.Listener == l7Rules.Listener && l7Rules.Priority == newL7Rules.Priority {
return nil
}
// Nothing to do if 'l7Rules' takes precedence
if l7Rules.takesListenerPrecedenceOver(newL7Rules) {
return nil
}
// override if 'l7Rules' has no listener or 'newL7Rules' takes precedence
if l7Rules.Listener == "" || newL7Rules.takesListenerPrecedenceOver(l7Rules) {
l7Rules.Listener = newL7Rules.Listener
l7Rules.Priority = newL7Rules.Priority
return nil
}
// otherwise error on conflict
return fmt.Errorf("cannot merge conflicting CiliumEnvoyConfig Listeners (%v/%v) with the same priority (%d)", newL7Rules.Listener, l7Rules.Listener, l7Rules.Priority)
}
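// Illustrative merge outcomes under the rules above (a sketch):
//
//	existing ""  (any prio) + new "a"          -> "a" adopted
//	existing "a" prio 1     + new "b" prio 2   -> "a" kept, it has higher precedence
//	existing "a" prio 1     + new "b" prio 1   -> error: conflicting listeners with the same priority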
func mergePortProto(ctx *SearchContext, existingFilter, filterToMerge *L4Filter, selectorCache *SelectorCache) (err error) {
// Merge the L7-related data from the filter to merge
// with the L7-related data already in the existing filter.
existingFilter.L7Parser, err = existingFilter.L7Parser.Merge(filterToMerge.L7Parser)
if err != nil {
ctx.PolicyTrace(" Merge conflict: mismatching parsers %s/%s\n", filterToMerge.L7Parser, existingFilter.L7Parser)
return err
}
for cs, newL7Rules := range filterToMerge.PerSelectorPolicies {
// 'cs' will be merged or moved (see below), either way it needs
// to be removed from the map it is in now.
delete(filterToMerge.PerSelectorPolicies, cs)
if l7Rules, ok := existingFilter.PerSelectorPolicies[cs]; ok {
// existing filter already has 'cs', release and merge L7 rules
selectorCache.RemoveSelector(cs, filterToMerge)
// skip merging for reserved:none, as it is never
// selected, and toFQDN rules currently translate to
// reserved:none as an endpoint selector, causing a
// merge conflict for different toFQDN destinations
// with different TLS contexts.
if cs.IsNone() {
continue
}
if l7Rules.Equal(newL7Rules) {
continue // identical rules need no merging
}
// Merge two non-identical sets of non-nil rules
if l7Rules != nil && l7Rules.IsDeny {
// If the existing rule is a deny, this is a no-op:
// deny takes priority over any other rule.
continue
} else if newL7Rules != nil && newL7Rules.IsDeny {
// Overwrite the existing filter if the new rule is a deny:
// deny takes priority over any other rule.
existingFilter.PerSelectorPolicies[cs] = newL7Rules
continue
}
// One of the rules may be a nil rule, expand it to an empty non-nil rule
if l7Rules == nil {
l7Rules = &PerSelectorPolicy{}
}
if newL7Rules == nil {
newL7Rules = &PerSelectorPolicy{}
}
// Merge isRedirect flag
l7Rules.isRedirect = l7Rules.isRedirect || newL7Rules.isRedirect
// Merge listener reference
if err := l7Rules.mergeListenerReference(newL7Rules); err != nil {
ctx.PolicyTrace(" Merge conflict: %s\n", err.Error())
return err
}
if l7Rules.Authentication == nil || newL7Rules.Authentication == nil {
if newL7Rules.Authentication != nil {
l7Rules.Authentication = newL7Rules.Authentication
}
} else if !newL7Rules.Authentication.DeepEqual(l7Rules.Authentication) {
ctx.PolicyTrace(" Merge conflict: mismatching auth types %s/%s\n", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
return fmt.Errorf("cannot merge conflicting authentication types (%s/%s)", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
}
if l7Rules.TerminatingTLS == nil || newL7Rules.TerminatingTLS == nil {
if newL7Rules.TerminatingTLS != nil {
l7Rules.TerminatingTLS = newL7Rules.TerminatingTLS
}
} else if !newL7Rules.TerminatingTLS.Equal(l7Rules.TerminatingTLS) {
ctx.PolicyTrace(" Merge conflict: mismatching terminating TLS contexts %v/%v\n", newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
return fmt.Errorf("cannot merge conflicting terminating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
}
if l7Rules.OriginatingTLS == nil || newL7Rules.OriginatingTLS == nil {
if newL7Rules.OriginatingTLS != nil {
l7Rules.OriginatingTLS = newL7Rules.OriginatingTLS
}
} else if !newL7Rules.OriginatingTLS.Equal(l7Rules.OriginatingTLS) {
ctx.PolicyTrace(" Merge conflict: mismatching originating TLS contexts %v/%v\n", newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
return fmt.Errorf("cannot merge conflicting originating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
}
// For now we simply merge the set of allowed SNIs from different rules
// to/from the *same remote*, port, and protocol. This means that if any
// rule requires SNI, then all traffic to that remote/port requires TLS,
// even if other merged rules would be fine without TLS. Any SNI from all
// applicable rules is allowed.
//
// Preferably we could allow different rules for each SNI, but for now the
// combination of all L7 rules is allowed for all the SNIs. For example, if
// SNI and TLS termination are used together so that L7 filtering is
// possible, in this example:
//
// - existing: SNI: public.example.com
// - new: SNI: private.example.com HTTP: path="/public"
//
// Separately, these rules allow access to all paths at SNI
// public.example.com and to path private.example.com/public, but currently we
// also allow all paths at private.example.com. This may be clamped down if
// there is sufficient demand for SNI and TLS termination together.
//
// Note however that SNI rules are typically used with `toFQDNs`, each of
// which defines a separate destination, so that SNIs for different
// `toFQDNs` will not be merged together.
l7Rules.ServerNames = l7Rules.ServerNames.Merge(newL7Rules.ServerNames)
// L7 rules can be applied with SNI filtering only if the TLS is also
// terminated
if len(l7Rules.ServerNames) > 0 && !l7Rules.L7Rules.IsEmpty() && l7Rules.TerminatingTLS == nil {
ctx.PolicyTrace(" Merge conflict: cannot use SNI filtering with L7 rules without TLS termination: %v\n", l7Rules.ServerNames)
return fmt.Errorf("cannot merge L7 rules for cached selector %s with SNI filtering without TLS termination: %v", cs.String(), l7Rules.ServerNames)
}
// Empty L7 rules effectively wildcard L7. When merging with a non-empty
// rule, the empty rule must be expanded into an actual wildcard rule for the
// specific L7 protocol.
if !l7Rules.HasL7Rules() && newL7Rules.HasL7Rules() {
l7Rules.L7Rules = newL7Rules.appendL7WildcardRule(ctx)
existingFilter.PerSelectorPolicies[cs] = l7Rules
continue
}
if l7Rules.HasL7Rules() && !newL7Rules.HasL7Rules() {
l7Rules.appendL7WildcardRule(ctx)
existingFilter.PerSelectorPolicies[cs] = l7Rules
continue
}
// We already know from the L7Parser.Merge() above that there are no
// conflicting parser types, and rule validation only allows one type of L7
// rules in a rule, so we can just merge the rules here.
for _, newRule := range newL7Rules.HTTP {
if !newRule.Exists(l7Rules.L7Rules) {
l7Rules.HTTP = append(l7Rules.HTTP, newRule)
}
}
for _, newRule := range newL7Rules.Kafka {
if !newRule.Exists(l7Rules.L7Rules.Kafka) {
l7Rules.Kafka = append(l7Rules.Kafka, newRule)
}
}
if l7Rules.L7Proto == "" && newL7Rules.L7Proto != "" {
l7Rules.L7Proto = newL7Rules.L7Proto
}
for _, newRule := range newL7Rules.L7 {
if !newRule.Exists(l7Rules.L7Rules) {
l7Rules.L7 = append(l7Rules.L7, newRule)
}
}
for _, newRule := range newL7Rules.DNS {
if !newRule.Exists(l7Rules.L7Rules) {
l7Rules.DNS = append(l7Rules.DNS, newRule)
}
}
// Update the pointer in the map in case it was newly allocated
existingFilter.PerSelectorPolicies[cs] = l7Rules
} else { // 'cs' is not in the existing filter yet
// Update selector owner to the existing filter
selectorCache.ChangeUser(cs, filterToMerge, existingFilter)
// Move L7 rules over.
existingFilter.PerSelectorPolicies[cs] = newL7Rules
if cs.IsWildcard() {
existingFilter.wildcard = cs
}
}
}
return nil
}
// mergeIngressPortProto merges all rules which share the same port & protocol that
// select a given set of endpoints. It updates the L4Filter mapped to by the specified
// port and protocol with the contents of the provided PortRule. If the rule
// being merged has conflicting L7 rules with those already in the provided
// L4PolicyMap for the specified port-protocol tuple, it returns an error.
//
// If any rules contain L7 rules that select Host or Remote Node and we should
// accept all traffic from host, the L7 rules will be translated into L7
// wildcards via 'hostWildcardL7'. That is to say, traffic will be
// forwarded to the proxy for endpoints matching those labels, but the proxy
// will allow all such traffic.
func mergeIngressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string,
r api.Ports, p api.PortProtocol, proto api.L4Proto, ruleLabels labels.LabelArray, resMap L4PolicyMap) (int, error) {
// Create a new L4Filter
filterToMerge, err := createL4IngressFilter(policyCtx, endpoints, auth, hostWildcardL7, r, p, proto, ruleLabels)
if err != nil {
return 0, err
}
err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels)
if err != nil {
return 0, err
}
return 1, err
}
func traceL3(ctx *SearchContext, peerEndpoints api.EndpointSelectorSlice, direction string, isDeny bool) {
var result strings.Builder
// Requirements are cloned into every selector, so only trace them once.
if len(peerEndpoints[0].MatchExpressions) > 0 {
sel := peerEndpoints[0]
result.WriteString(" Enforcing requirements ")
result.WriteString(fmt.Sprintf("%+v", sel.MatchExpressions))
result.WriteString("\n")
}
// EndpointSelector
for _, sel := range peerEndpoints {
if len(sel.MatchLabels) > 0 {
if !isDeny {
result.WriteString(" Allows ")
} else {
result.WriteString(" Denies ")
}
result.WriteString(direction)
result.WriteString(" labels ")
result.WriteString(sel.String())
result.WriteString("\n")
}
}
ctx.PolicyTrace(result.String())
}
// rulePortsCoverSearchContext determines whether the L4 portions of rules cover the
// specified port models.
//
// Returns true if the context's list of ports is empty, or if the rules match the ports.
func rulePortsCoverSearchContext(ports []api.PortProtocol, ctx *SearchContext) bool {
if len(ctx.DPorts) == 0 {
return true
}
for _, p := range ports {
for _, dp := range ctx.DPorts {
tracePort := api.PortProtocol{
Protocol: api.L4Proto(dp.Protocol),
}
if dp.Name != "" {
tracePort.Port = dp.Name
} else {
tracePort.Port = strconv.FormatUint(uint64(dp.Port), 10)
}
if p.Covers(tracePort) {
return true
}
}
}
return false
}
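// Example (a sketch): with ctx.DPorts describing destination port 80/TCP,
//
//	rulePortsCoverSearchContext([]api.PortProtocol{{Port: "80", Protocol: api.ProtoTCP}}, ctx)  // true
//	rulePortsCoverSearchContext([]api.PortProtocol{{Port: "443", Protocol: api.ProtoTCP}}, ctx) // false
//
// and with an empty ctx.DPorts it returns true for any rule ports.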
func mergeIngress(policyCtx PolicyContext, ctx *SearchContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels labels.LabelArray, resMap L4PolicyMap) (int, error) {
found := 0
// short-circuit if no endpoint is selected
if fromEndpoints == nil {
return found, nil
}
if ctx.From != nil && len(fromEndpoints) > 0 {
if ctx.TraceEnabled() {
traceL3(ctx, fromEndpoints, "from", policyCtx.IsDeny())
}
if !fromEndpoints.Matches(ctx.From) {
ctx.PolicyTrace(" No label match for %s", ctx.From)
return 0, nil
}
ctx.PolicyTrace(" Found all required labels")
}
// Daemon options may induce L3 allows for host/world. In this case, if
// we find any L7 rules matching host/world then we need to turn any L7
// restrictions on these endpoints into L7 allow-all so that the
// traffic is always allowed, but is also always redirected through the
// proxy
hostWildcardL7 := make([]string, 0, 2)
if option.Config.AlwaysAllowLocalhost() {
hostWildcardL7 = append(hostWildcardL7, labels.IDNameHost)
}
var (
cnt int
err error
)
// L3-only rule (with requirements folded into fromEndpoints).
if toPorts.Len() == 0 && icmp.Len() == 0 && len(fromEndpoints) > 0 {
cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap)
if err != nil {
return found, err
}
}
found += cnt
err = toPorts.Iterate(func(r api.Ports) error {
// For L4 Policy, an empty slice of EndpointSelector indicates that the
// rule allows all at L3 - explicitly specify this by creating a slice
// with the WildcardEndpointSelector.
if len(fromEndpoints) == 0 {
fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows port %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies port %v\n", r.GetPortProtocols())
}
if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) {
ctx.PolicyTrace(" No port match found\n")
return nil
}
pr := r.GetPortRule()
if pr != nil {
if pr.Rules != nil && pr.Rules.L7Proto != "" {
ctx.PolicyTrace(" l7proto: \"%s\"\n", pr.Rules.L7Proto)
}
if !pr.Rules.IsEmpty() {
for _, l7 := range pr.Rules.HTTP {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.Kafka {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.L7 {
ctx.PolicyTrace(" %+v\n", l7)
}
}
}
for _, p := range r.GetPortProtocols() {
if p.Protocol.IsAny() {
cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoTCP, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoUDP, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoSCTP, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
} else {
cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
}
}
return nil
})
if err != nil {
return found, err
}
err = icmp.Iterate(func(r api.Ports) error {
if len(fromEndpoints) == 0 {
fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows ICMP type %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies ICMP type %v\n", r.GetPortProtocols())
}
if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) {
ctx.PolicyTrace(" No ICMP type match found\n")
return nil
}
for _, p := range r.GetPortProtocols() {
cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
}
return nil
})
return found, err
}
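// Worked example (a sketch of the flow above): a rule carrying only
// FromEndpoints and no ToPorts/ICMPs is recorded as a single L3-only entry
// keyed by port "0" and protocol ANY; a ToPorts entry of {Port: "80",
// Protocol: TCP} produces one 80/TCP entry; and a port whose protocol is ANY
// fans out into separate TCP, UDP and SCTP entries.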
func (state *traceState) selectRule(ctx *SearchContext, r *rule) {
ctx.PolicyTrace("* Rule %s: selected\n", r)
state.selectedRules++
}
func (state *traceState) unSelectRule(ctx *SearchContext, labels labels.LabelArray, r *rule) {
ctx.PolicyTraceVerbose(" Rule %s: did not select %+v\n", r, labels)
}
// resolveIngressPolicy analyzes the rule against the given SearchContext, and
// merges it with any prior-generated policy within the provided L4Policy.
// Requirements based off of all Ingress requirements (set in FromRequires) in
// other rules are stored in the specified slice of LabelSelectorRequirement.
// These requirements are dynamically inserted into a copy of the receiver rule,
// as requirements form conjunctions across all rules.
func (r *rule) resolveIngressPolicy(
policyCtx PolicyContext,
ctx *SearchContext,
state *traceState,
result L4PolicyMap,
requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
) (
L4PolicyMap, error,
) {
if !ctx.rulesSelect {
if !r.getSelector().Matches(ctx.To) {
state.unSelectRule(ctx, ctx.To, r)
return nil, nil
}
}
state.selectRule(ctx, r)
found, foundDeny := 0, 0
if len(r.Ingress) == 0 && len(r.IngressDeny) == 0 {
ctx.PolicyTrace(" No ingress rules\n")
}
for _, ingressRule := range r.Ingress {
fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirements)
cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, ingressRule.Authentication, ingressRule.ToPorts, ingressRule.ICMPs, r.Rule.Labels.DeepCopy(), result)
if err != nil {
return nil, err
}
if cnt > 0 {
found += cnt
}
}
oldDeny := policyCtx.SetDeny(true)
defer func() {
policyCtx.SetDeny(oldDeny)
}()
for _, ingressRule := range r.IngressDeny {
fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirementsDeny)
cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, nil, ingressRule.ToPorts, ingressRule.ICMPs, r.Rule.Labels.DeepCopy(), result)
if err != nil {
return nil, err
}
if cnt > 0 {
foundDeny += cnt
}
}
if found+foundDeny > 0 {
if found != 0 {
state.matchedRules++
}
if foundDeny != 0 {
state.matchedDenyRules++
}
return result, nil
}
return nil, nil
}
func (r *rule) matchesSubject(securityIdentity *identity.Identity) bool {
subjectIsNode := securityIdentity.ID == identity.ReservedIdentityHost
ruleSelectsNode := r.NodeSelector.LabelSelector != nil
// Short-circuit if the rule's selector type (node vs. endpoint) does not match the
// identity's type
if ruleSelectsNode != subjectIsNode {
return false
}
// Fall back to explicit label matching for the local node, because the local
// node has mutable labels which are applied asynchronously to the SelectorCache.
if r.subjectSelector == nil || ruleSelectsNode {
return r.getSelector().Matches(securityIdentity.LabelArray)
}
return r.subjectSelector.Selects(securityIdentity.ID)
}
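// Illustrative sketch of the distinction above ('hostID' and 'podID' are
// hypothetical identities):
//
//	nodeRule := &rule{Rule: api.Rule{NodeSelector: api.NewESFromLabels(labels.ParseSelectLabel("node"))}}
//	nodeRule.matchesSubject(hostID) // label match against the (mutable) local node labels
//	nodeRule.matchesSubject(podID)  // false: node rule, non-node subject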
// ****************** EGRESS POLICY ******************
func mergeEgress(policyCtx PolicyContext, ctx *SearchContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels labels.LabelArray, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
found := 0
// short-circuit if no endpoint is selected
if toEndpoints == nil {
return found, nil
}
if ctx.To != nil && len(toEndpoints) > 0 {
if ctx.TraceEnabled() {
traceL3(ctx, toEndpoints, "to", policyCtx.IsDeny())
}
if !toEndpoints.Matches(ctx.To) {
ctx.PolicyTrace(" No label match for %s", ctx.To)
return 0, nil
}
ctx.PolicyTrace(" Found all required labels")
}
var (
cnt int
err error
)
// L3-only rule (with requirements folded into toEndpoints).
if toPorts.Len() == 0 && icmp.Len() == 0 && len(toEndpoints) > 0 {
cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap, fqdns)
if err != nil {
return found, err
}
}
found += cnt
err = toPorts.Iterate(func(r api.Ports) error {
// For L4 Policy, an empty slice of EndpointSelector indicates that the
// rule allows all at L3 - explicitly specify this by creating a slice
// with the WildcardEndpointSelector.
if len(toEndpoints) == 0 {
toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows port %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies port %v\n", r.GetPortProtocols())
}
pr := r.GetPortRule()
if pr != nil {
if !pr.Rules.IsEmpty() {
for _, l7 := range pr.Rules.HTTP {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.Kafka {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.L7 {
ctx.PolicyTrace(" %+v\n", l7)
}
}
}
for _, p := range r.GetPortProtocols() {
if p.Protocol.IsAny() {
cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoTCP, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoUDP, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoSCTP, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
} else {
cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
}
}
return nil
},
)
if err != nil {
return found, err
}
err = icmp.Iterate(func(r api.Ports) error {
if len(toEndpoints) == 0 {
toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows ICMP type %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies ICMP type %v\n", r.GetPortProtocols())
}
for _, p := range r.GetPortProtocols() {
cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
}
return nil
})
return found, err
}
// mergeEgressPortProto merges all rules which share the same port & protocol that
// select a given set of endpoints. It updates the L4Filter mapped to by the specified
// port and protocol with the contents of the provided PortRule. If the rule
// being merged has conflicting L7 rules with those already in the provided
// L4PolicyMap for the specified port-protocol tuple, it returns an error.
func mergeEgressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, r api.Ports, p api.PortProtocol,
proto api.L4Proto, ruleLabels labels.LabelArray, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
// Create a new L4Filter
filterToMerge, err := createL4EgressFilter(policyCtx, endpoints, auth, r, p, proto, ruleLabels, fqdns)
if err != nil {
return 0, err
}
err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels)
if err != nil {
return 0, err
}
return 1, err
}
func (r *rule) resolveEgressPolicy(
policyCtx PolicyContext,
ctx *SearchContext,
state *traceState,
result L4PolicyMap,
requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
) (
L4PolicyMap, error,
) {
if !ctx.rulesSelect {
if !r.getSelector().Matches(ctx.From) {
state.unSelectRule(ctx, ctx.From, r)
return nil, nil
}
}
state.selectRule(ctx, r)
found, foundDeny := 0, 0
if len(r.Egress) == 0 && len(r.EgressDeny) == 0 {
ctx.PolicyTrace(" No egress rules\n")
}
for _, egressRule := range r.Egress {
toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirements)
cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, egressRule.Authentication, egressRule.ToPorts, egressRule.ICMPs, r.Rule.Labels.DeepCopy(), result, egressRule.ToFQDNs)
if err != nil {
return nil, err
}
if cnt > 0 {
found += cnt
}
}
oldDeny := policyCtx.SetDeny(true)
defer func() {
policyCtx.SetDeny(oldDeny)
}()
for _, egressRule := range r.EgressDeny {
toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirementsDeny)
cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, nil, egressRule.ToPorts, egressRule.ICMPs, r.Rule.Labels.DeepCopy(), result, nil)
if err != nil {
return nil, err
}
if cnt > 0 {
foundDeny += cnt
}
}
if found+foundDeny > 0 {
if found != 0 {
state.matchedRules++
}
if foundDeny != 0 {
state.matchedDenyRules++
}
return result, nil
}
return nil, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"fmt"
stdlog "log"
"strings"
"testing"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/u8proto"
)
func TestL4Policy(t *testing.T) {
td := newTestData()
toBar := &SearchContext{To: labels.ParseSelectLabelArray("bar")}
fromBar := &SearchContext{From: labels.ParseSelectLabelArray("bar")}
toFoo := &SearchContext{To: labels.ParseSelectLabelArray("foo")}
fromFoo := &SearchContext{From: labels.ParseSelectLabelArray("foo")}
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
{Port: "8080", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "3000", Protocol: api.ProtoAny},
},
}},
},
},
},
}
l7rules := api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
}
l7map := L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: l7rules,
isRedirect: true,
},
}
expected := NewL4Policy(0)
expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Ingress.PortRules.Upsert("8080", 0, "TCP", &L4Filter{
Port: 8080, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Egress.PortRules.Upsert("3000", 0, "TCP", &L4Filter{
Port: 3000, Protocol: api.ProtoTCP, U8Proto: 6, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Egress.PortRules.Upsert("3000", 0, "UDP", &L4Filter{
Port: 3000, Protocol: api.ProtoUDP, U8Proto: 17, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Egress.PortRules.Upsert("3000", 0, "SCTP", &L4Filter{
Port: 3000, Protocol: api.ProtoSCTP, U8Proto: 132, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
ingressState := traceState{}
egressState := traceState{}
res := NewL4Policy(0)
var err error
res.Ingress.PortRules, err =
rule1.resolveIngressPolicy(td.testPolicyContext, toBar, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Ingress)
res.Egress.PortRules, err =
rule1.resolveEgressPolicy(td.testPolicyContext, fromBar, &egressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Egress)
require.Equal(t, &expected, &res)
require.Equal(t, 1, ingressState.selectedRules)
require.Equal(t, 1, ingressState.matchedRules)
require.Equal(t, 1, egressState.selectedRules)
require.Equal(t, 1, egressState.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
// Foo isn't selected by rule1's policy.
ingressState = traceState{}
egressState = traceState{}
res1, err := rule1.resolveIngressPolicy(td.testPolicyContext, toFoo, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
res2, err := rule1.resolveEgressPolicy(td.testPolicyContext, fromFoo, &egressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res1)
require.Nil(t, res2)
require.Equal(t, 0, ingressState.selectedRules)
require.Equal(t, 0, ingressState.matchedRules)
require.Equal(t, 0, egressState.selectedRules)
require.Equal(t, 0, egressState.matchedRules)
// This rule actually overlaps with the existing ingress "http" rule,
// so we'd expect it to merge.
rule2 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
// Note that this allows all on 80, so the result should wildcard HTTP
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "3000", Protocol: api.ProtoAny},
},
}},
},
},
},
}
expected = NewL4Policy(0)
expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Egress.PortRules.Upsert("3000", 0, "TCP", &L4Filter{
Port: 3000, Protocol: api.ProtoTCP, U8Proto: 6, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Egress.PortRules.Upsert("3000", 0, "UDP", &L4Filter{
Port: 3000, Protocol: api.ProtoUDP, U8Proto: 17, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Egress.PortRules.Upsert("3000", 0, "SCTP", &L4Filter{
Port: 3000, Protocol: api.ProtoSCTP, U8Proto: 132, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
ingressState = traceState{}
egressState = traceState{}
res = NewL4Policy(0)
buffer := new(bytes.Buffer)
ctx := SearchContext{To: labels.ParseSelectLabelArray("bar"), Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
res.Ingress.PortRules, err = rule2.resolveIngressPolicy(td.testPolicyContext, &ctx, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Ingress)
t.Log(buffer)
res.Egress.PortRules, err = rule2.resolveEgressPolicy(td.testPolicyContext, fromBar, &egressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Egress)
require.Equal(t, 1, res.Ingress.PortRules.Len())
require.Equal(t, &expected, &res)
require.Equal(t, 1, ingressState.selectedRules)
require.Equal(t, 1, ingressState.matchedRules)
require.Equal(t, 1, egressState.selectedRules)
require.Equal(t, 1, egressState.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
ingressState = traceState{}
egressState = traceState{}
res1, err = rule2.resolveIngressPolicy(td.testPolicyContext, toFoo, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res1)
res2, err = rule2.resolveEgressPolicy(td.testPolicyContext, fromFoo, &egressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res2)
require.Equal(t, 0, ingressState.selectedRules)
require.Equal(t, 0, ingressState.matchedRules)
require.Equal(t, 0, egressState.selectedRules)
require.Equal(t, 0, egressState.matchedRules)
}
func TestMergeL4PolicyIngress(t *testing.T) {
td := newTestData()
toBar := &SearchContext{To: labels.ParseSelectLabelArray("bar")}
//toFoo := &SearchContext{To: labels.ParseSelectLabelArray("foo")}
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{fooSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{bazSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
}
mergedES := L7DataMap{
td.cachedFooSelector: nil,
td.cachedBazSelector: nil,
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: ParserTypeNone, PerSelectorPolicies: mergedES, Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.cachedBazSelector: {nil},
},
}})
state := traceState{}
res, err := rule1.resolveIngressPolicy(td.testPolicyContext, toBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
}
func TestMergeL4PolicyEgress(t *testing.T) {
td := newTestData()
buffer := new(bytes.Buffer)
fromBar := &SearchContext{
From: labels.ParseSelectLabelArray("bar"),
Logging: stdlog.New(buffer, "", 0),
Trace: TRACE_VERBOSE,
}
// bar can access foo with TCP on port 80, and baz with TCP on port 80.
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{fooSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{bazSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
}
mergedES := L7DataMap{
td.cachedFooSelector: nil,
td.cachedBazSelector: nil,
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: ParserTypeNone, PerSelectorPolicies: mergedES, Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.cachedBazSelector: {nil},
},
}})
state := traceState{}
res, err := rule1.resolveEgressPolicy(td.testPolicyContext, fromBar, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
}
func TestMergeL7PolicyIngress(t *testing.T) {
td := newTestData()
toBar := &SearchContext{To: labels.ParseSelectLabelArray("bar")}
toFoo := &SearchContext{To: labels.ParseSelectLabelArray("foo")}
fooSelectorSlice := []api.EndpointSelector{
fooSelector,
}
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
// Note that this allows all on 80, so the result should wildcard HTTP
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: fooSelectorSlice,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
},
isRedirect: true,
},
td.cachedFooSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state := traceState{}
res, err := rule1.resolveIngressPolicy(td.testPolicyContext, toBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = rule1.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
rule2 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: fooSelectorSlice,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
},
}
l7rules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
}
l7map := L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: l7rules,
isRedirect: true,
},
td.cachedFooSelector: &PerSelectorPolicy{
L7Rules: l7rules,
isRedirect: true,
},
}
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "kafka", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state = traceState{}
res, err = rule2.resolveIngressPolicy(td.testPolicyContext, toBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = rule2.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Resolve rule1's policy, then try to add rule2.
res, err = rule1.resolveIngressPolicy(td.testPolicyContext, toBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
state = traceState{}
_, err = rule2.resolveIngressPolicy(td.testPolicyContext, toBar, &state, res, nil, nil)
require.NotNil(t, err)
res.Detach(td.sc)
// Similar to 'rule2', but with different topics for the l3-dependent
// rule and the l4-only rule.
rule3 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: fooSelectorSlice,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "bar"},
},
},
}},
},
},
},
}
fooRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
}
barRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "bar"}},
}
// The L3-dependent L7 rules are not merged together.
l7map = L7DataMap{
td.cachedFooSelector: &PerSelectorPolicy{
L7Rules: fooRules,
isRedirect: true,
},
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: barRules,
isRedirect: true,
},
}
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "kafka", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state = traceState{}
res, err = rule3.resolveIngressPolicy(td.testPolicyContext, toBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
}
func TestMergeL7PolicyEgress(t *testing.T) {
td := newTestData()
fromBar := &SearchContext{From: labels.ParseSelectLabelArray("bar")}
fromFoo := &SearchContext{From: labels.ParseSelectLabelArray("foo")}
fooSelector := []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("foo")),
}
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
// Note that this allows all on 80, so the result should wildcard HTTP
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/public"},
},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: fooSelector,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/private"},
},
},
}},
},
},
},
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/public", Method: "GET"}, {}},
},
isRedirect: true,
},
td.cachedFooSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/private", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.wildcardCachedSelector: {nil},
td.cachedFooSelector: {nil},
},
}})
state := traceState{}
res, err := rule1.resolveEgressPolicy(td.testPolicyContext, fromBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = rule1.resolveEgressPolicy(td.testPolicyContext, fromFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
rule2 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
// Note that this allows all on 9092, so the result should wildcard Kafka
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: fooSelector,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
},
}
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"9092/TCP": {
Port: 9092, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeKafka,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}, {}},
},
isRedirect: true,
},
td.cachedFooSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state = traceState{}
res, err = rule2.resolveEgressPolicy(td.testPolicyContext, fromBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = rule2.resolveEgressPolicy(td.testPolicyContext, fromFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Resolve rule1's policy, then try to add rule2.
res, err = rule1.resolveEgressPolicy(td.testPolicyContext, fromBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
res.Detach(td.sc)
// Similar to 'rule2', but with different topics for the l3-dependent
// rule and the l4-only rule.
rule3 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: fooSelector,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "bar"},
},
},
}},
},
},
},
}
fooRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
}
barRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "bar"}},
}
// The l3-dependent l7 rules are not merged together.
l7map := L7DataMap{
td.cachedFooSelector: &PerSelectorPolicy{
L7Rules: fooRules,
isRedirect: true,
},
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: barRules,
isRedirect: true,
},
}
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "kafka", PerSelectorPolicies: l7map, Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.wildcardCachedSelector: {nil},
},
}})
state = traceState{}
res, err = rule3.resolveEgressPolicy(td.testPolicyContext, fromBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
}
func TestRuleWithNoEndpointSelector(t *testing.T) {
apiRule1 := api.Rule{
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{
"10.0.1.0/24",
"192.168.2.0",
"10.0.3.1",
"2001:db8::1/48",
"2001:db9::",
},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{
"10.1.0.0/16",
"2001:dbf::/64",
},
},
}, {
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: []api.CIDRRule{{Cidr: api.CIDR("10.0.0.0/8"), ExceptCIDRs: []api.CIDR{"10.96.0.0/12"}}},
},
},
},
}
err := apiRule1.Sanitize()
require.NotNil(t, err)
}
func TestL3Policy(t *testing.T) {
apiRule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{
"10.0.1.0/24",
"192.168.2.0",
"10.0.3.1",
"2001:db8::1/48",
"2001:db9::",
},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{
"10.1.0.0/16",
"2001:dbf::/64",
},
},
}, {
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: []api.CIDRRule{{Cidr: api.CIDR("10.0.0.0/8"), ExceptCIDRs: []api.CIDR{"10.96.0.0/12"}}},
},
},
},
}
err := apiRule1.Sanitize()
require.NoError(t, err)
rule1 := &rule{Rule: apiRule1}
err = rule1.Sanitize()
require.NoError(t, err)
// Must be parsable, make sure Validate fails when not.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1..0/24"},
},
}},
}).Sanitize()
require.NotNil(t, err)
// Test CIDRRule with no provided CIDR or ExceptionCIDR.
// Should fail as CIDR is required.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "", ExceptCIDRs: nil}},
},
}},
}).Sanitize()
require.NotNil(t, err)
// Test CIDRRule with only CIDR provided; should not fail, as ExceptionCIDR
// is optional.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.1.0/24", ExceptCIDRs: nil}},
},
}},
}).Sanitize()
require.NoError(t, err)
// Cannot provide just an IP to a CIDRRule; Cidr must be of format
// <IP>/<prefix>.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.1.32", ExceptCIDRs: nil}},
},
}},
}).Sanitize()
require.NotNil(t, err)
// Cannot exclude a range that is not part of the CIDR.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.0.0/10", ExceptCIDRs: []api.CIDR{"10.64.0.0/11"}}},
},
}},
}).Sanitize()
require.NotNil(t, err)
// Must have a contiguous mask, make sure Validate fails when not.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1.0/128.0.0.128"},
},
}},
}).Sanitize()
require.NotNil(t, err)
// Prefix length must be in range for the address, make sure
// Validate fails if given prefix length is out of range.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1.0/34"},
},
}},
}).Sanitize()
require.NotNil(t, err)
}
func TestICMPPolicy(t *testing.T) {
td := newTestData()
var err error
toBar := &SearchContext{To: labels.ParseSelectLabelArray("bar")}
fromBar := &SearchContext{From: labels.ParseSelectLabelArray("bar")}
// A rule for ICMP
icmpV4Type := intstr.FromInt(8)
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &icmpV4Type,
}},
}},
},
},
Egress: []api.EgressRule{
{
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &icmpV4Type,
}},
}},
},
},
},
}
expected := NewL4Policy(0)
expected.Ingress.PortRules.Upsert("8", 0, "ICMP", &L4Filter{
Port: 8,
Protocol: api.ProtoICMP,
U8Proto: u8proto.ProtoIDs["icmp"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Egress.PortRules.Upsert("8", 0, "ICMP", &L4Filter{
Port: 8,
Protocol: api.ProtoICMP,
U8Proto: u8proto.ProtoIDs["icmp"],
Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
ingressState := traceState{}
egressState := traceState{}
res := NewL4Policy(0)
res.Ingress.PortRules, err =
rule1.resolveIngressPolicy(td.testPolicyContext, toBar, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Ingress)
res.Egress.PortRules, err =
rule1.resolveEgressPolicy(td.testPolicyContext, fromBar, &egressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Egress)
require.Equal(t, &expected, &res)
require.Equal(t, 1, ingressState.selectedRules)
require.Equal(t, 1, ingressState.matchedRules)
require.Equal(t, 1, egressState.selectedRules)
require.Equal(t, 1, egressState.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
// A rule for Ports and ICMP
rule2 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &icmpV4Type,
}},
}},
},
},
},
}
expected = NewL4Policy(0)
expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: u8proto.ProtoIDs["tcp"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
expected.Ingress.PortRules.Upsert("8", 0, "ICMP", &L4Filter{
Port: 8,
Protocol: api.ProtoICMP,
U8Proto: u8proto.ProtoIDs["icmp"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
ingressState = traceState{}
res = NewL4Policy(0)
res.Ingress.PortRules, err =
rule2.resolveIngressPolicy(td.testPolicyContext, toBar, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Ingress)
require.Equal(t, &expected, &res)
require.Equal(t, 1, ingressState.selectedRules)
require.Equal(t, 1, ingressState.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
// A rule for ICMPv6
icmpV6Type := intstr.FromInt(128)
rule3 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Family: "IPv6",
Type: &icmpV6Type,
}},
}},
},
},
},
}
expected = NewL4Policy(0)
expected.Ingress.PortRules.Upsert("128", 0, "ICMPV6", &L4Filter{
Port: 128,
Protocol: api.ProtoICMPv6,
U8Proto: u8proto.ProtoIDs["icmpv6"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}},
})
ingressState = traceState{}
res = NewL4Policy(0)
res.Ingress.PortRules, err =
rule3.resolveIngressPolicy(td.testPolicyContext, toBar, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res.Ingress)
require.Equal(t, &expected, &res)
require.Equal(t, 1, ingressState.selectedRules)
require.Equal(t, 1, ingressState.matchedRules)
}
// Tests the restrictions of combining certain label-based L3 and L4 policies.
// This ensures that the user is informed of policy combinations that are not
// implemented in the datapath.
func TestEgressRuleRestrictions(t *testing.T) {
fooSelector := []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("foo")),
}
// Cannot combine ToEndpoints and ToCIDR
apiRule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{
"10.1.0.0/16",
"2001:dbf::/64",
},
ToEndpoints: fooSelector,
},
},
},
}
err := apiRule1.Sanitize()
require.NotNil(t, err)
}
func TestPolicyEntityValidationEgress(t *testing.T) {
r := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld},
},
},
},
}
require.Nil(t, r.Sanitize())
require.Equal(t, 1, len(r.Egress[0].ToEntities))
r.Egress[0].ToEntities = []api.Entity{api.EntityHost}
require.Nil(t, r.Sanitize())
require.Equal(t, 1, len(r.Egress[0].ToEntities))
r.Egress[0].ToEntities = []api.Entity{"trololo"}
require.NotNil(t, r.Sanitize())
}
func TestPolicyEntityValidationIngress(t *testing.T) {
r := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityWorld},
},
},
},
}
require.Nil(t, r.Sanitize())
require.Equal(t, 1, len(r.Ingress[0].FromEntities))
r.Ingress[0].FromEntities = []api.Entity{api.EntityHost}
require.Nil(t, r.Sanitize())
require.Equal(t, 1, len(r.Ingress[0].FromEntities))
r.Ingress[0].FromEntities = []api.Entity{"trololo"}
require.NotNil(t, r.Sanitize())
}
func TestPolicyEntityValidationEntitySelectorsFill(t *testing.T) {
r := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityWorld, api.EntityHost},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld, api.EntityHost},
},
},
},
}
require.Nil(t, r.Sanitize())
require.Equal(t, 2, len(r.Ingress[0].FromEntities))
require.Equal(t, 2, len(r.Egress[0].ToEntities))
}
func TestL3RuleLabels(t *testing.T) {
td := newTestData()
ruleLabels := map[string]labels.LabelArray{
"rule0": labels.ParseLabelArray("name=apiRule0"),
"rule1": labels.ParseLabelArray("name=apiRule1"),
"rule2": labels.ParseLabelArray("name=apiRule2"),
}
rules := map[string]api.Rule{
"rule0": {
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Labels: ruleLabels["rule0"],
Ingress: []api.IngressRule{},
Egress: []api.EgressRule{},
},
"rule1": {
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Labels: ruleLabels["rule1"],
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1.0/32"},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{"10.1.0.0/32"},
},
},
},
},
"rule2": {
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Labels: ruleLabels["rule2"],
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.2.0/32"},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{"10.2.0.0/32"},
},
},
},
},
}
testCases := []struct {
description string // the description to print in asserts
rulesToApply []string // the rules from the rules map to resolve, in order
expectedIngressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, per CIDR prefix
expectedEgressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, per CIDR prefix
}{
{
description: "Empty rule that matches. Should not apply labels",
rulesToApply: []string{"rule0"},
expectedIngressLabels: nil,
expectedEgressLabels: nil,
}, {
description: "A rule that matches. Should apply labels",
rulesToApply: []string{"rule1"},
expectedIngressLabels: map[string]labels.LabelArrayList{"10.0.1.0/32": {ruleLabels["rule1"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{"10.1.0.0/32": {ruleLabels["rule1"]}},
}, {
description: "Multiple matching rules. Should apply labels from all that have rule entries",
rulesToApply: []string{"rule0", "rule1", "rule2"},
expectedIngressLabels: map[string]labels.LabelArrayList{
"10.0.1.0/32": {ruleLabels["rule1"]},
"10.0.2.0/32": {ruleLabels["rule2"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{
"10.1.0.0/32": {ruleLabels["rule1"]},
"10.2.0.0/32": {ruleLabels["rule2"]}},
}}
// endpoint selector for all tests
toBar := &SearchContext{To: labels.ParseSelectLabelArray("bar"), Trace: TRACE_VERBOSE}
fromBar := &SearchContext{From: labels.ParseSelectLabelArray("bar"), Trace: TRACE_VERBOSE}
for _, test := range testCases {
finalPolicy := NewL4Policy(0)
for _, r := range test.rulesToApply {
apiRule := rules[r]
err := apiRule.Sanitize()
require.NoError(t, err, "Cannot sanitize Rule: %+v", apiRule)
rule := &rule{Rule: apiRule}
_, err = rule.resolveIngressPolicy(td.testPolicyContext, toBar, &traceState{}, finalPolicy.Ingress.PortRules, nil, nil)
require.NoError(t, err)
_, err = rule.resolveEgressPolicy(td.testPolicyContext, fromBar, &traceState{}, finalPolicy.Egress.PortRules, nil, nil)
require.NoError(t, err)
}
// For debugging the test:
//require.EqualValues(t, NewL4PolicyMap(), finalPolicy.Ingress)
type expectedResult map[string]labels.LabelArrayList
mapDirectionalResultsToExpectedOutput := map[*L4Filter]expectedResult{
finalPolicy.Ingress.PortRules.ExactLookup("0", 0, "ANY"): test.expectedIngressLabels,
finalPolicy.Egress.PortRules.ExactLookup("0", 0, "ANY"): test.expectedEgressLabels,
}
for filter, exp := range mapDirectionalResultsToExpectedOutput {
if len(exp) > 0 {
for cidr, rule := range exp {
matches := false
for _, origin := range filter.RuleOrigin {
if origin.Equals(rule) {
matches = true
break
}
}
require.True(t, matches, fmt.Sprintf("%s: expected filter %+v to be derived from rule %s", test.description, filter, rule))
matches = false
for sel := range filter.PerSelectorPolicies {
cidrLabels := labels.ParseLabelArray("cidr:" + cidr)
t.Logf("Testing %+v", cidrLabels)
if matches = sel.(*identitySelector).source.(*labelIdentitySelector).xxxMatches(cidrLabels); matches {
break
}
}
require.True(t, matches, fmt.Sprintf("%s: expected cidr %s to match filter %+v", test.description, cidr, filter))
}
}
}
}
}
func TestL4RuleLabels(t *testing.T) {
td := newTestData()
ruleLabels := map[string]labels.LabelArray{
"rule0": labels.ParseLabelArray("name=apiRule0"),
"rule1": labels.ParseLabelArray("name=apiRule1"),
"rule2": labels.ParseLabelArray("name=apiRule2"),
}
rules := map[string]api.Rule{
"rule0": {
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Labels: ruleLabels["rule0"],
Ingress: []api.IngressRule{},
Egress: []api.EgressRule{},
},
"rule1": {
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Labels: ruleLabels["rule1"],
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1010", Protocol: api.ProtoTCP}},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1100", Protocol: api.ProtoTCP}},
}},
},
},
},
"rule2": {
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Labels: ruleLabels["rule2"],
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1020", Protocol: api.ProtoTCP}},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1200", Protocol: api.ProtoTCP}},
}},
},
},
},
}
testCases := []struct {
description string // the description to print in asserts
rulesToApply []string // the rules from the rules map to resolve, in order
expectedIngressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, in order
expectedEgressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, in order
}{
{
description: "Empty rule that matches. Should not apply labels",
rulesToApply: []string{"rule0"},
expectedIngressLabels: map[string]labels.LabelArrayList{},
expectedEgressLabels: map[string]labels.LabelArrayList{},
},
{
description: "A rule that matches. Should apply labels",
rulesToApply: []string{"rule1"},
expectedIngressLabels: map[string]labels.LabelArrayList{"1010/TCP": {ruleLabels["rule1"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{"1100/TCP": {ruleLabels["rule1"]}},
}, {
description: "Multiple matching rules. Should apply labels from all that have rule entries",
rulesToApply: []string{"rule0", "rule1", "rule2"},
expectedIngressLabels: map[string]labels.LabelArrayList{
"1010/TCP": {ruleLabels["rule1"]},
"1020/TCP": {ruleLabels["rule2"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{
"1100/TCP": {ruleLabels["rule1"]},
"1200/TCP": {ruleLabels["rule2"]}},
}}
// endpoint selector for all tests
toBar := &SearchContext{To: labels.ParseSelectLabelArray("bar")}
fromBar := &SearchContext{From: labels.ParseSelectLabelArray("bar")}
for _, test := range testCases {
finalPolicy := NewL4Policy(0)
for _, r := range test.rulesToApply {
apiRule := rules[r]
err := apiRule.Sanitize()
require.NoError(t, err, "Cannot sanitize api.Rule: %+v", apiRule)
rule := &rule{Rule: apiRule}
rule.resolveIngressPolicy(td.testPolicyContext, toBar, &traceState{}, finalPolicy.Ingress.PortRules, nil, nil)
rule.resolveEgressPolicy(td.testPolicyContext, fromBar, &traceState{}, finalPolicy.Egress.PortRules, nil, nil)
}
require.Equal(t, len(test.expectedIngressLabels), finalPolicy.Ingress.PortRules.Len(), test.description)
for portProto := range test.expectedIngressLabels {
portProtoSlice := strings.Split(portProto, "/")
out := finalPolicy.Ingress.PortRules.ExactLookup(portProtoSlice[0], 0, portProtoSlice[1])
require.NotNil(t, out, test.description)
require.Equal(t, 1, len(out.RuleOrigin), test.description)
require.EqualValues(t, test.expectedIngressLabels[portProto], out.RuleOrigin[out.wildcard], test.description)
}
require.Equal(t, len(test.expectedEgressLabels), finalPolicy.Egress.PortRules.Len(), test.description)
for portProto := range test.expectedEgressLabels {
portProtoSlice := strings.Split(portProto, "/")
out := finalPolicy.Egress.PortRules.ExactLookup(portProtoSlice[0], 0, portProtoSlice[1])
require.NotNil(t, out, test.description)
require.Equal(t, 1, len(out.RuleOrigin), test.description)
require.EqualValues(t, test.expectedEgressLabels[portProto], out.RuleOrigin[out.wildcard], test.description)
}
finalPolicy.Detach(td.sc)
}
}
var (
labelsA = labels.LabelArray{
labels.NewLabel("id", "a", labels.LabelSourceK8s),
}
endpointSelectorA = api.NewESFromLabels(labels.ParseSelectLabel("id=a"))
labelsB = labels.LabelArray{
labels.NewLabel("id1", "b", labels.LabelSourceK8s),
labels.NewLabel("id2", "t", labels.LabelSourceK8s),
}
labelsC = labels.LabelArray{
labels.NewLabel("id", "t", labels.LabelSourceK8s),
}
endpointSelectorC = api.NewESFromLabels(labels.ParseSelectLabel("id=t"))
ctxAToB = SearchContext{From: labelsA, To: labelsB, Trace: TRACE_VERBOSE}
ctxAToC = SearchContext{From: labelsA, To: labelsC, Trace: TRACE_VERBOSE}
)
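// expectResult asserts that the obtained policy decision matches the expected
// one, dumping the collected policy trace on mismatch.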
func expectResult(t *testing.T, expected, obtained api.Decision, buffer *bytes.Buffer) {
if obtained != expected {
t.Errorf("Unexpected result: obtained=%v, expected=%v", obtained, expected)
t.Log(buffer)
}
}
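// checkIngress resolves the ingress verdict for ctx against the repository
// (under a read lock) and asserts that it matches the given verdict.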
func checkIngress(t *testing.T, repo *Repository, ctx *SearchContext, verdict api.Decision) {
repo.Mutex.RLock()
defer repo.Mutex.RUnlock()
buffer := new(bytes.Buffer)
ctx.Logging = stdlog.New(buffer, "", 0)
expectResult(t, verdict, repo.AllowsIngressRLocked(ctx), buffer)
}
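// checkEgress resolves the egress verdict for ctx against the repository
// (under a read lock) and asserts that it matches the given verdict.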
func checkEgress(t *testing.T, repo *Repository, ctx *SearchContext, verdict api.Decision) {
repo.Mutex.RLock()
defer repo.Mutex.RUnlock()
buffer := new(bytes.Buffer)
ctx.Logging = stdlog.New(buffer, "", 0)
expectResult(t, verdict, repo.AllowsEgressRLocked(ctx), buffer)
}
func TestIngressAllowAll(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
// Allow all L3&L4 ingress rule
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
},
},
})
checkIngress(t, repo, &ctxAToB, api.Denied)
checkIngress(t, repo, &ctxAToC, api.Allowed)
ctxAToC80 := ctxAToC
ctxAToC80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC80, api.Allowed)
ctxAToC90 := ctxAToC
ctxAToC90.DPorts = []*models.Port{{Name: "port-90", Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC90, api.Allowed)
}
func TestIngressAllowAllL4Overlap(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
// Allow all L3&L4 ingress rule
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
{
// This rule is a subset of the above
// rule and should *NOT* restrict to
// port 80 only
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
ctxAToC80 := ctxAToC
ctxAToC80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC80, api.Allowed)
ctxAToC90 := ctxAToC
ctxAToC90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC90, api.Allowed)
}
func TestIngressAllowAllL4OverlapNamedPort(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
// Allow all L3&L4 ingress rule
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
{
// This rule is a subset of the above
// rule and should *NOT* restrict to
// port 80 only
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "port-80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
ctxAToC80 := ctxAToC
ctxAToC80.DPorts = []*models.Port{{Name: "port-80", Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC80, api.Allowed)
ctxAToC90 := ctxAToC
ctxAToC90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC90, api.Allowed)
}
func TestIngressL4AllowAll(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
ctxAToC80 := ctxAToC
ctxAToC80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC80, api.Allowed)
ctxAToC90 := ctxAToC
ctxAToC90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC90, api.Denied)
ctxAToCNamed90 := ctxAToC
ctxAToCNamed90.DPorts = []*models.Port{{Name: "port-90", Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToCNamed90, api.Denied)
l4IngressPolicy, err := repo.ResolveL4IngressPolicy(&ctxAToC80)
require.NoError(t, err)
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, 1, len(filter.PerSelectorPolicies))
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
l4IngressPolicy.Detach(repo.GetSelectorCache())
}
func TestIngressL4AllowAllNamedPort(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "port-80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
ctxAToCNamed80 := ctxAToC
ctxAToCNamed80.DPorts = []*models.Port{{Name: "port-80", Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToCNamed80, api.Allowed)
ctxAToC80 := ctxAToC
ctxAToC80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC80, api.Denied)
ctxAToC90 := ctxAToC
ctxAToC90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToC90, api.Denied)
ctxAToCNamed90 := ctxAToC
ctxAToCNamed90.DPorts = []*models.Port{{Name: "port-90", Protocol: models.PortProtocolTCP}}
checkIngress(t, repo, &ctxAToCNamed90, api.Denied)
l4IngressPolicy, err := repo.ResolveL4IngressPolicy(&ctxAToCNamed80)
require.NoError(t, err)
filter := l4IngressPolicy.ExactLookup("port-80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(0), filter.Port)
require.Equal(t, "port-80", filter.PortName)
require.True(t, filter.Ingress)
require.Equal(t, 1, len(filter.PerSelectorPolicies))
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
l4IngressPolicy.Detach(repo.GetSelectorCache())
}
func TestEgressAllowAll(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
},
},
})
checkEgress(t, repo, &ctxAToB, api.Allowed)
checkEgress(t, repo, &ctxAToC, api.Allowed)
ctxAToC80 := ctxAToC
ctxAToC80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToC80, api.Allowed)
ctxAToC90 := ctxAToC
ctxAToC90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToC90, api.Allowed)
}
func TestEgressL4AllowAll(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
ctxAToC80 := ctxAToC
ctxAToC80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToC80, api.Allowed)
ctxAToC90 := ctxAToC
ctxAToC90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToC90, api.Denied)
buffer := new(bytes.Buffer)
ctx := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4EgressPolicy, err := repo.ResolveL4EgressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4EgressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.Equal(t, false, filter.Ingress)
require.Equal(t, 1, len(filter.PerSelectorPolicies))
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
l4EgressPolicy.Detach(repo.GetSelectorCache())
}
func TestEgressL4AllowWorld(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
worldLabel := labels.ParseSelectLabelArray("reserved:world")
ctxAToWorld80 := SearchContext{From: labelsA, To: worldLabel, Trace: TRACE_VERBOSE}
ctxAToWorld80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld80, api.Allowed)
ctxAToWorld90 := ctxAToWorld80
ctxAToWorld90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld90, api.Denied)
// Pod-to-pod traffic must be denied on ports 80 and 90; only world was whitelisted.
fooLabel := labels.ParseSelectLabelArray("k8s:app=foo")
ctxAToFoo := SearchContext{From: labelsA, To: fooLabel, Trace: TRACE_VERBOSE,
DPorts: []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}}
checkEgress(t, repo, &ctxAToFoo, api.Denied)
ctxAToFoo90 := ctxAToFoo
ctxAToFoo90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToFoo90, api.Denied)
buffer := new(bytes.Buffer)
ctx := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4EgressPolicy, err := repo.ResolveL4EgressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4EgressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.Equal(t, false, filter.Ingress)
require.Equal(t, 3, len(filter.PerSelectorPolicies))
l4EgressPolicy.Detach(repo.GetSelectorCache())
}
func TestEgressL4AllowAllEntity(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityAll},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
worldLabel := labels.ParseSelectLabelArray("reserved:world")
ctxAToWorld80 := SearchContext{From: labelsA, To: worldLabel, Trace: TRACE_VERBOSE}
ctxAToWorld80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld80, api.Allowed)
ctxAToWorld90 := ctxAToWorld80
ctxAToWorld90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld90, api.Denied)
// Pod to pod must be allowed on port 80, denied on port 90 (all identity)
fooLabel := labels.ParseSelectLabelArray("k8s:app=foo")
ctxAToFoo := SearchContext{From: labelsA, To: fooLabel, Trace: TRACE_VERBOSE,
DPorts: []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}}
checkEgress(t, repo, &ctxAToFoo, api.Allowed)
ctxAToFoo90 := ctxAToFoo
ctxAToFoo90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToFoo90, api.Denied)
buffer := new(bytes.Buffer)
ctx := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4EgressPolicy, err := repo.ResolveL4EgressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4EgressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.Equal(t, false, filter.Ingress)
require.Equal(t, 1, len(filter.PerSelectorPolicies))
l4EgressPolicy.Detach(repo.GetSelectorCache())
}
func TestEgressL3AllowWorld(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld},
},
},
},
},
})
worldLabel := labels.ParseSelectLabelArray("reserved:world")
ctxAToWorld80 := SearchContext{From: labelsA, To: worldLabel, Trace: TRACE_VERBOSE}
ctxAToWorld80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld80, api.Allowed)
ctxAToWorld90 := ctxAToWorld80
ctxAToWorld90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld90, api.Allowed)
// Pod-to-pod traffic must be denied on ports 80 and 90; only world was whitelisted.
fooLabel := labels.ParseSelectLabelArray("k8s:app=foo")
ctxAToFoo := SearchContext{From: labelsA, To: fooLabel, Trace: TRACE_VERBOSE,
DPorts: []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}}
checkEgress(t, repo, &ctxAToFoo, api.Denied)
ctxAToFoo90 := ctxAToFoo
ctxAToFoo90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToFoo90, api.Denied)
buffer := new(bytes.Buffer)
ctx := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
}
func TestEgressL3AllowAllEntity(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityAll},
},
},
},
},
})
worldLabel := labels.ParseSelectLabelArray("reserved:world")
ctxAToWorld80 := SearchContext{From: labelsA, To: worldLabel, Trace: TRACE_VERBOSE}
ctxAToWorld80.DPorts = []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld80, api.Allowed)
ctxAToWorld90 := ctxAToWorld80
ctxAToWorld90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToWorld90, api.Allowed)
// Pod-to-pod traffic must be allowed on both ports 80 and 90 (L3-only rule).
fooLabel := labels.ParseSelectLabelArray("k8s:app=foo")
ctxAToFoo := SearchContext{From: labelsA, To: fooLabel, Trace: TRACE_VERBOSE,
DPorts: []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}}}
checkEgress(t, repo, &ctxAToFoo, api.Allowed)
ctxAToFoo90 := ctxAToFoo
ctxAToFoo90.DPorts = []*models.Port{{Port: 90, Protocol: models.PortProtocolTCP}}
checkEgress(t, repo, &ctxAToFoo90, api.Allowed)
buffer := new(bytes.Buffer)
ctx := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
}
func TestL4WildcardMerge(t *testing.T) {
// First, test the implicit case.
//
// Test the case where two rules select the same endpoint on the same
// port-protocol tuple, one applying at L4 only and the other at both L4 and
// L7: the L4-only rule shadows the L4-L7 rule. This is because the L4-only
// rule implicitly allows all traffic at L7, so the L7-related parts of the
// L4-L7 rule are redundant.
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testparser",
L7: []api.PortRuleL7{
{"Key": "Value"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
}},
},
},
}})
expected := &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorC: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorC: {nil},
td.wildcardCachedSelector: {nil},
},
}
buffer := new(bytes.Buffer)
ctx := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err := repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, 2, len(filter.PerSelectorPolicies))
require.NotNil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.EqualValues(t, expected, filter)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
expectedL7 := &L4Filter{
Port: 7000, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: "testparser",
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: &PerSelectorPolicy{
L7Rules: api.L7Rules{
L7Proto: "testparser",
L7: []api.PortRuleL7{{"Key": "Value"}, {}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{td.cachedSelectorC: {nil}},
}
filterL7 := l4IngressPolicy.ExactLookup("7000", 0, "TCP")
require.NotNil(t, filterL7)
require.Equal(t, uint16(7000), filterL7.Port)
require.True(t, filterL7.Ingress)
require.Equal(t, 1, len(filterL7.PerSelectorPolicies))
require.NotNil(t, filterL7.PerSelectorPolicies[td.cachedSelectorC])
require.Nil(t, filterL7.PerSelectorPolicies[td.wildcardCachedSelector])
require.EqualValues(t, expectedL7, filterL7)
require.Equal(t, L7ParserType("testparser"), filterL7.L7Parser)
l4IngressPolicy.Detach(repo.GetSelectorCache())
// Test the reverse order as well; ensure that we check both conditions:
// whether an L4-only policy is already in the L4Filter for the same
// port-protocol tuple, and whether L7 metadata exists in the L4Filter we are
// adding. Expect to resolve to an L4-only policy without any L7 metadata.
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testparser",
L7: []api.PortRuleL7{
{"Key": "Value"},
},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err = repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, 2, len(filter.PerSelectorPolicies))
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.NotNil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
require.EqualValues(t, expected, filter)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
filterL7 = l4IngressPolicy.ExactLookup("7000", 0, "TCP")
require.NotNil(t, filterL7)
require.Equal(t, uint16(7000), filterL7.Port)
require.True(t, filterL7.Ingress)
require.Equal(t, 1, len(filterL7.PerSelectorPolicies))
require.NotNil(t, filterL7.PerSelectorPolicies[td.cachedSelectorC])
require.Nil(t, filterL7.PerSelectorPolicies[td.wildcardCachedSelector])
require.EqualValues(t, expectedL7, filterL7)
require.Equal(t, L7ParserType("testparser"), filterL7.L7Parser)
// Second, test the explicit allow at L3.
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err = repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Equal(t, 2, len(filter.PerSelectorPolicies))
require.EqualValues(t, expected, filter)
// Test the reverse order as well; ensure that we check both conditions:
// whether an L4-only policy is already in the L4Filter for the same
// port-protocol tuple, and whether L7 metadata exists in the L4Filter we are
// adding. Expect to resolve to an L4-only policy without any L7 metadata.
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err = repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Equal(t, 2, len(filter.PerSelectorPolicies))
require.EqualValues(t, expected, filter)
}
func TestL3L4L7Merge(t *testing.T) {
// The first rule allows ingress from all endpoints to port 80, but only for
// GET to "/". However, the second rule allows all traffic on port 80, but
// only from a specific endpoint. When these rules are merged, it equates to
// allowing all traffic on port 80 from any endpoint.
//
// TODO: This comment can't be correct; the resulting policy should allow
// all traffic on port 80 only from endpoint C, while traffic from all other
// endpoints should still allow only GET on "/".
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer := new(bytes.Buffer)
ctx := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err := repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, 2, len(filter.PerSelectorPolicies))
require.NotNil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.Nil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Equal(t, 2, len(filter.PerSelectorPolicies))
require.Equal(t, &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: nil,
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorC: {nil},
td.wildcardCachedSelector: {nil},
},
}, filter)
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err = repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Equal(t, 2, len(filter.PerSelectorPolicies))
require.NotNil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.Nil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
require.Equal(t, &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: nil,
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorC: {nil},
td.wildcardCachedSelector: {nil},
},
}, filter)
}
func TestMatches(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
},
},
},
&api.Rule{
NodeSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
},
},
},
})
epRule := repo.rules[ruleKey{idx: 0}]
hostRule := repo.rules[ruleKey{idx: 1}]
selectedEpLabels := labels.ParseSelectLabel("id=a")
selectedIdentity := identity.NewIdentity(54321, labels.Labels{selectedEpLabels.Key: selectedEpLabels})
td.addIdentity(selectedIdentity)
notSelectedEpLabels := labels.ParseSelectLabel("id=b")
notSelectedIdentity := identity.NewIdentity(9876, labels.Labels{notSelectedEpLabels.Key: notSelectedEpLabels})
td.addIdentity(notSelectedIdentity)
hostLabels := labels.Labels{selectedEpLabels.Key: selectedEpLabels}
hostLabels.MergeLabels(labels.LabelHost)
hostIdentity := identity.NewIdentity(identity.ReservedIdentityHost, hostLabels)
td.addIdentity(hostIdentity)
// notSelectedEndpoint is not selected by the rule, so it shouldn't be added
// to EndpointsSelected.
require.False(t, epRule.matchesSubject(notSelectedIdentity))
// selectedEndpoint is selected by the rule, so it should be added to
// EndpointsSelected.
require.True(t, epRule.matchesSubject(selectedIdentity))
// Test again to check that caching works correctly.
require.True(t, epRule.matchesSubject(selectedIdentity))
// Possible scenario where an endpoint is deleted, and soon after another
// endpoint is added with the same ID, but with a different identity. Matching
// needs to handle this case correctly.
require.False(t, epRule.matchesSubject(notSelectedIdentity))
// The host endpoint is not selected by the rule, so it shouldn't be added to EndpointsSelected.
require.False(t, epRule.matchesSubject(hostIdentity))
// selectedEndpoint is not selected by the host rule, so it shouldn't be added to EndpointsSelected.
require.False(t, hostRule.matchesSubject(selectedIdentity))
// host endpoint is selected by rule, but host labels are mutable, so don't cache them
require.True(t, hostRule.matchesSubject(hostIdentity))
// Assert that mutable host identities are handled
// First, add an additional label, ensure that match succeeds
hostLabels.MergeLabels(labels.NewLabelsFromModel([]string{"foo=bar"}))
hostIdentity = identity.NewIdentity(identity.ReservedIdentityHost, hostLabels)
td.addIdentity(hostIdentity)
require.True(t, hostRule.matchesSubject(hostIdentity))
// Then, change host to id=c, which is not selected, and ensure match is correct
hostIdentity = identity.NewIdentity(identity.ReservedIdentityHost, labels.NewLabelsFromModel([]string{"id=c"}))
td.addIdentity(hostIdentity)
require.False(t, hostRule.matchesSubject(hostIdentity))
}
func BenchmarkRuleString(b *testing.B) {
r := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
{Port: "8080", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "3000", Protocol: api.ProtoAny},
},
}},
},
},
},
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = r.String()
}
}
// Test merging of L7 rules when the same rules apply to multiple selectors.
// This was added to prevent regression of a bug where the merging of l7 rules for "foo"
// also affected the rules for "baz".
func TestMergeL7PolicyEgressWithMultipleSelectors(t *testing.T) {
td := newTestData()
fromBar := &SearchContext{From: labels.ParseSelectLabelArray("bar")}
fromFoo := &SearchContext{From: labels.ParseSelectLabelArray("foo")}
fooSelector := []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("foo")),
}
foobazSelector := []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("foo")),
api.NewESFromLabels(labels.ParseSelectLabel("baz")),
}
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: fooSelector,
},
// Note that this allows all on 80, so the result should wildcard HTTP to "foo"
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: foobazSelector,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET"},
},
},
}},
},
},
},
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedFooSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Method: "GET"}, {}},
},
isRedirect: true,
},
td.cachedBazSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: map[CachedSelector]labels.LabelArrayList{
td.cachedBazSelector: {nil},
td.cachedFooSelector: {nil},
},
}})
state := traceState{}
res, err := rule1.resolveEgressPolicy(td.testPolicyContext, fromBar, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = rule1.resolveEgressPolicy(td.testPolicyContext, fromFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
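// TestMergeListenerReference exercises mergeListenerReference: an undefined
// (zero) priority yields to any defined priority, the lowest priority value
// otherwise wins, and merging two different listeners with equal priority is
// an error.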
func TestMergeListenerReference(t *testing.T) {
// Merging no listener with no listener still yields no listener
ps := &PerSelectorPolicy{}
err := ps.mergeListenerReference(ps)
require.NoError(t, err)
require.Equal(t, "", ps.Listener)
require.Equal(t, uint16(0), ps.Priority)
// Listener reference remains when the other has none
ps0 := &PerSelectorPolicy{Listener: "listener0"}
err = ps0.mergeListenerReference(ps)
require.NoError(t, err)
require.Equal(t, "listener0", ps0.Listener)
require.Equal(t, uint16(0), ps0.Priority)
// Listener reference is propagated when there is none to begin with
err = ps.mergeListenerReference(ps0)
require.NoError(t, err)
require.Equal(t, "listener0", ps.Listener)
require.Equal(t, uint16(0), ps.Priority)
// A listener is not changed when there is no change
err = ps0.mergeListenerReference(ps0)
require.NoError(t, err)
require.Equal(t, "listener0", ps0.Listener)
require.Equal(t, uint16(0), ps0.Priority)
// Cannot merge two different listeners with the default (zero) priority
ps0a := &PerSelectorPolicy{Listener: "listener0a"}
err = ps0.mergeListenerReference(ps0a)
require.NotNil(t, err)
err = ps0a.mergeListenerReference(ps0)
require.NotNil(t, err)
// Listener with a defined (non-zero) priority takes precedence over
// a listener with an undefined (zero) priority
ps1 := &PerSelectorPolicy{Listener: "listener1", Priority: 1}
err = ps1.mergeListenerReference(ps0)
require.NoError(t, err)
require.Equal(t, "listener1", ps1.Listener)
require.Equal(t, uint16(1), ps1.Priority)
err = ps0.mergeListenerReference(ps1)
require.NoError(t, err)
require.Equal(t, "listener1", ps0.Listener)
require.Equal(t, uint16(1), ps0.Priority)
// Listener with the lower priority value takes precedence
ps2 := &PerSelectorPolicy{Listener: "listener2", Priority: 2}
err = ps1.mergeListenerReference(ps2)
require.NoError(t, err)
require.Equal(t, "listener1", ps1.Listener)
require.Equal(t, uint16(1), ps1.Priority)
err = ps2.mergeListenerReference(ps1)
require.NoError(t, err)
require.Equal(t, "listener1", ps2.Listener)
require.Equal(t, uint16(1), ps2.Priority)
// Cannot merge two different listeners with the same priority
ps12 := &PerSelectorPolicy{Listener: "listener1", Priority: 2}
ps2 = &PerSelectorPolicy{Listener: "listener2", Priority: 2}
err = ps12.mergeListenerReference(ps2)
require.NotNil(t, err)
err = ps2.mergeListenerReference(ps12)
require.NotNil(t, err)
// The lower priority value is propagated even when the listeners are the same
ps23 := &PerSelectorPolicy{Listener: "listener2", Priority: 3}
err = ps2.mergeListenerReference(ps23)
require.NoError(t, err)
require.Equal(t, "listener2", ps2.Listener)
require.Equal(t, uint16(2), ps2.Priority)
err = ps23.mergeListenerReference(ps2)
require.NoError(t, err)
require.Equal(t, "listener2", ps23.Listener)
require.Equal(t, uint16(2), ps23.Priority)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"github.com/cilium/cilium/pkg/identity"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
policyapi "github.com/cilium/cilium/pkg/policy/api"
)
// ruleSlice is a wrapper around a slice of *rule, which allows for functions
// to be written with []*rule as a receiver.
type ruleSlice []*rule
func (rules ruleSlice) resolveL4IngressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) {
result := NewL4PolicyMap()
ctx.PolicyTrace("\n")
ctx.PolicyTrace("Resolving ingress policy for %+v\n", ctx.To)
state := traceState{}
var matchedRules ruleSlice
var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement
// Iterate over all FromRequires which select ctx.To. These requirements
// will be appended to each EndpointSelector's MatchExpressions in
// each FromEndpoints for all ingress rules. This ensures that FromRequires
// is taken into account when evaluating policy at L4.
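// For example, if any matched rule specifies FromRequires: [id=frontend],
// every FromEndpoints selector in every matched ingress rule is evaluated as
// if it also required the id=frontend label.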
for _, r := range rules {
if ctx.rulesSelect || r.getSelector().Matches(ctx.To) {
matchedRules = append(matchedRules, r)
for _, ingressRule := range r.Ingress {
for _, requirement := range ingressRule.FromRequires {
requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
for _, ingressRule := range r.IngressDeny {
for _, requirement := range ingressRule.FromRequires {
requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
}
}
// Only dealing with matching rules from now on. Mark it in the ctx
oldRulesSelect := ctx.rulesSelect
ctx.rulesSelect = true
for _, r := range matchedRules {
_, err := r.resolveIngressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny)
if err != nil {
return nil, err
}
state.ruleID++
}
state.trace(len(rules), ctx)
// Restore ctx in case caller uses it again.
ctx.rulesSelect = oldRulesSelect
return result, nil
}
func (rules ruleSlice) resolveL4EgressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) {
result := NewL4PolicyMap()
ctx.PolicyTrace("\n")
ctx.PolicyTrace("Resolving egress policy for %+v\n", ctx.From)
state := traceState{}
var matchedRules ruleSlice
var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement
// Iterate over all ToRequires which select ctx.To. These requirements will
// be appended to each EndpointSelector's MatchExpressions in each
// ToEndpoints for all egress rules. This ensures that ToRequires is
// taken into account when evaluating policy at L4.
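// The mechanism mirrors the FromRequires handling above, with ToRequires
// constraining the ToEndpoints selectors of all matched egress rules.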
for _, r := range rules {
if ctx.rulesSelect || r.getSelector().Matches(ctx.From) {
matchedRules = append(matchedRules, r)
for _, egressRule := range r.Egress {
for _, requirement := range egressRule.ToRequires {
requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
for _, egressRule := range r.EgressDeny {
for _, requirement := range egressRule.ToRequires {
requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
}
}
// Only dealing with matching rules from now on. Mark it in the ctx
oldRulesSelect := ctx.rulesSelect
ctx.rulesSelect = true
for i, r := range matchedRules {
state.ruleID = i
_, err := r.resolveEgressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny)
if err != nil {
return nil, err
}
state.ruleID++
}
state.trace(len(rules), ctx)
// Restore ctx in case caller uses it again.
ctx.rulesSelect = oldRulesSelect
return result, nil
}
// matchesSubject determines whether any rule in a set of rules selects the given
// security identity as a subject (i.e. non-peer).
func (rules ruleSlice) matchesSubject(securityIdentity *identity.Identity) bool {
for _, r := range rules {
if r.matchesSubject(securityIdentity) {
return true
}
}
return false
}
// AsPolicyRules returns the internal policyapi.Rule objects as a policyapi.Rules object
func (rules ruleSlice) AsPolicyRules() policyapi.Rules {
policyRules := make(policyapi.Rules, 0, len(rules))
for _, r := range rules {
policyRules = append(policyRules, &r.Rule)
}
return policyRules
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"net/netip"
"strings"
"sync"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/policy/api"
)
// scIdentity is the information we need about an identity that rules can select
type scIdentity struct {
NID identity.NumericIdentity
lbls labels.LabelArray
namespace string // value of the namespace label, or ""
}
// scIdentityCache is a cache of Identities keyed by the numeric identity
type scIdentityCache map[identity.NumericIdentity]scIdentity
func newIdentity(nid identity.NumericIdentity, lbls labels.LabelArray) scIdentity {
return scIdentity{
NID: nid,
lbls: lbls,
namespace: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
}
// getLocalScopePrefix returns the most specific CIDR for a local scope identity.
// WORLD IDs are not considered here.
func getLocalScopePrefix(id identity.NumericIdentity, lbls labels.LabelArray) netip.Prefix {
var mostSpecificCidr netip.Prefix
if id.HasLocalScope() {
maskSize := -1 // allow for 0-length prefix (e.g., "0.0.0.0/0")
for _, lbl := range lbls {
if lbl.Source == labels.LabelSourceCIDR {
// Reverse the transformation done in labels.maskedIPToLabel():
// since ':' is not allowed within a k8s label, colons are represented
// with '-'.
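// For example, a label key of the form "fd00--1/128" would parse back to
// the prefix "fd00::1/128".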
cidr := strings.ReplaceAll(lbl.Key, "-", ":")
prefix, err := netip.ParsePrefix(cidr)
if err == nil {
if n := prefix.Bits(); n > maskSize {
mostSpecificCidr = prefix.Masked()
maskSize = n
}
} else {
log.WithError(err).WithField(logfields.Prefix, lbl.Key).Error("getLocalScopePrefix: netip.ParsePrefix failed")
}
}
}
}
return mostSpecificCidr
}
// userNotification stores the information needed to call
// IdentitySelectionUpdated callbacks to notify users of a selector's
// identity changes. These are queued so that the callbacks can be called
// in FIFO order while not holding any locks.
type userNotification struct {
user CachedSelectionUser
selector CachedSelector
added []identity.NumericIdentity
deleted []identity.NumericIdentity
wg *sync.WaitGroup
}
// SelectorCache caches identities, identity selectors, and the
// subsets of identities each selector selects.
type SelectorCache struct {
prefixMap lock.Map[identity.NumericIdentity, netip.Prefix]
mutex lock.RWMutex
// idCache contains all known identities as informed by the
// kv-store and the local identity facility via our
// UpdateIdentities() function.
idCache scIdentityCache
// map key is the string representation of the selector being cached.
selectors map[string]*identitySelector
localIdentityNotifier identityNotifier
// userCond is a condition variable for receiving signals
// about addition of new elements in userNotes
userCond *sync.Cond
// userMutex protects userNotes and is linked to userCond
userMutex lock.Mutex
// userNotes holds a FIFO list of user notifications to be made
userNotes []userNotification
// used to lazily start the handler for user notifications.
startNotificationsHandlerOnce sync.Once
}
// GetModel returns the API model of the SelectorCache.
func (sc *SelectorCache) GetModel() models.SelectorCache {
sc.mutex.RLock()
defer sc.mutex.RUnlock()
selCacheMdl := make(models.SelectorCache, 0, len(sc.selectors))
for selector, idSel := range sc.selectors {
selections := idSel.GetSelections()
ids := make([]int64, 0, len(selections))
for i := range selections {
ids = append(ids, int64(selections[i]))
}
selMdl := &models.SelectorIdentityMapping{
Selector: selector,
Identities: ids,
Users: int64(idSel.numUsers()),
Labels: labelArrayToModel(idSel.GetMetadataLabels()),
}
selCacheMdl = append(selCacheMdl, selMdl)
}
return selCacheMdl
}
func labelArrayToModel(arr labels.LabelArray) models.LabelArray {
lbls := make(models.LabelArray, 0, len(arr))
for _, l := range arr {
lbls = append(lbls, &models.Label{
Key: l.Key,
Value: l.Value,
Source: l.Source,
})
}
return lbls
}
func (sc *SelectorCache) handleUserNotifications() {
for {
sc.userMutex.Lock()
for len(sc.userNotes) == 0 {
sc.userCond.Wait()
}
// get the current batch of notifications and release the lock so that other
// SelectorCache operations are not blocked on userMutex while we call the
// IdentitySelectionUpdated callbacks below.
notifications := sc.userNotes
sc.userNotes = nil
sc.userMutex.Unlock()
for _, n := range notifications {
n.user.IdentitySelectionUpdated(n.selector, n.added, n.deleted)
n.wg.Done()
}
}
}
func (sc *SelectorCache) queueUserNotification(user CachedSelectionUser, selector CachedSelector, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
sc.startNotificationsHandlerOnce.Do(func() {
go sc.handleUserNotifications()
})
wg.Add(1)
sc.userMutex.Lock()
sc.userNotes = append(sc.userNotes, userNotification{
user: user,
selector: selector,
added: added,
deleted: deleted,
wg: wg,
})
sc.userMutex.Unlock()
sc.userCond.Signal()
}
// NewSelectorCache creates a new SelectorCache with the given identities.
func NewSelectorCache(ids identity.IdentityMap) *SelectorCache {
sc := &SelectorCache{
idCache: make(map[identity.NumericIdentity]scIdentity, len(ids)),
selectors: make(map[string]*identitySelector),
}
sc.userCond = sync.NewCond(&sc.userMutex)
for nid, lbls := range ids {
// store prefix into prefix map
prefix := getLocalScopePrefix(nid, lbls)
if prefix.IsValid() {
sc.prefixMap.Store(nid, prefix)
}
sc.idCache[nid] = newIdentity(nid, lbls)
}
return sc
}
// SetLocalIdentityNotifier injects the provided identityNotifier into the
// SelectorCache. Currently, this is used to inject the FQDN subsystem into
// the SelectorCache so the SelectorCache can notify the FQDN subsystem when
// it should be aware of a given FQDNSelector for which CIDR identities need
// to be provided upon DNS lookups which correspond to said FQDNSelector.
func (sc *SelectorCache) SetLocalIdentityNotifier(pop identityNotifier) {
sc.localIdentityNotifier = pop
}
var (
// Empty slice of numeric identities used for all selectors that select nothing
emptySelection identity.NumericIdentitySlice
// wildcardSelectorKey is used to compare if a key is for a wildcard
wildcardSelectorKey = api.WildcardEndpointSelector.LabelSelector.String()
// noneSelectorKey is used to compare if a key is for "reserved:none"
noneSelectorKey = api.EndpointSelectorNone.LabelSelector.String()
)
// identityNotifier provides a means for other subsystems to be made aware of a
// given FQDNSelector (currently pkg/fqdn) so that said subsystems can notify
// the IPCache about IPs which correspond to said FQDNSelector.
// This is necessary as there is nothing intrinsic about an IP that says that
// it corresponds to a given FQDNSelector; this relationship is contained only
// via DNS responses, which are handled externally.
type identityNotifier interface {
// RegisterFQDNSelector exposes this FQDNSelector so that the identity labels
// of IPs contained in a DNS response that matches said selector can be
// associated with that selector.
RegisterFQDNSelector(selector api.FQDNSelector)
// UnregisterFQDNSelector removes this FQDNSelector from the set of
// IPs which are being tracked by the identityNotifier. The result
// of this is that an IP may be evicted from IPCache if it is no longer
// selected by any other FQDN selector.
// This occurs when there are no more users of a given FQDNSelector for the
// SelectorCache.
UnregisterFQDNSelector(selector api.FQDNSelector)
}
// AddFQDNSelector adds the given api.FQDNSelector in to the selector cache. If
// an identical EndpointSelector has already been cached, the corresponding
// CachedSelector is returned, otherwise one is created and added to the cache.
func (sc *SelectorCache) AddFQDNSelector(user CachedSelectionUser, lbls labels.LabelArray, fqdnSelec api.FQDNSelector) (cachedSelector CachedSelector, added bool) {
key := fqdnSelec.String()
sc.mutex.Lock()
defer sc.mutex.Unlock()
// If the selector already exists, use it.
idSel, exists := sc.selectors[key]
if exists {
return idSel, idSel.addUser(user)
}
source := &fqdnSelector{
selector: fqdnSelec,
}
// Make the FQDN subsystem aware of this selector
sc.localIdentityNotifier.RegisterFQDNSelector(source.selector)
return sc.addSelector(user, lbls, key, source)
}
func (sc *SelectorCache) addSelector(user CachedSelectionUser, lbls labels.LabelArray, key string, source selectorSource) (CachedSelector, bool) {
idSel := &identitySelector{
key: key,
users: make(map[CachedSelectionUser]struct{}),
cachedSelections: make(map[identity.NumericIdentity]struct{}),
source: source,
metadataLbls: lbls,
}
sc.selectors[key] = idSel
// Scan the cached set of IDs to determine any new matchers
for nid, identity := range sc.idCache {
if idSel.source.matches(identity) {
idSel.cachedSelections[nid] = struct{}{}
}
}
// Note: No notifications are sent for the existing
// identities. Caller must use GetSelections() to get the
// current selections after adding a selector. This way the
// behavior is the same between the two cases here (selector
// is already cached, or is a new one).
// Create the immutable slice representation of the selected
// numeric identities
idSel.updateSelections()
return idSel, idSel.addUser(user)
}
// FindCachedIdentitySelector finds the given api.EndpointSelector in the
// selector cache, returning nil if one can not be found.
func (sc *SelectorCache) FindCachedIdentitySelector(selector api.EndpointSelector) CachedSelector {
key := selector.CachedString()
sc.mutex.Lock()
idSel := sc.selectors[key]
sc.mutex.Unlock()
return idSel
}
// AddIdentitySelector adds the given api.EndpointSelector in to the
// selector cache. If an identical EndpointSelector has already been
// cached, the corresponding CachedSelector is returned, otherwise one
// is created and added to the cache.
func (sc *SelectorCache) AddIdentitySelector(user CachedSelectionUser, lbls labels.LabelArray, selector api.EndpointSelector) (cachedSelector CachedSelector, added bool) {
// The key returned here may be different for equivalent
// labelselectors, if the selector's requirements are stored
// in different orders. When this happens we'll be tracking
// essentially two copies of the same selector.
key := selector.CachedString()
sc.mutex.Lock()
defer sc.mutex.Unlock()
idSel, exists := sc.selectors[key]
if exists {
return idSel, idSel.addUser(user)
}
// Selectors are never modified once a rule is placed in the policy repository,
// so no need to deep copy.
source := &labelIdentitySelector{
selector: selector,
}
// check if the selector has a namespace match or requirement
if namespaces, ok := selector.GetMatch(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel); ok {
source.namespaces = namespaces
}
return sc.addSelector(user, lbls, key, source)
}
// lock must be held
func (sc *SelectorCache) removeSelectorLocked(selector CachedSelector, user CachedSelectionUser) {
key := selector.String()
sel, exists := sc.selectors[key]
if exists {
if sel.removeUser(user) {
sel.source.remove(sc.localIdentityNotifier)
delete(sc.selectors, key)
}
}
}
// RemoveSelector removes CachedSelector for the user.
func (sc *SelectorCache) RemoveSelector(selector CachedSelector, user CachedSelectionUser) {
sc.mutex.Lock()
sc.removeSelectorLocked(selector, user)
sc.mutex.Unlock()
}
// RemoveSelectors removes CachedSelectorSlice for the user.
func (sc *SelectorCache) RemoveSelectors(selectors CachedSelectorSlice, user CachedSelectionUser) {
sc.mutex.Lock()
for _, selector := range selectors {
sc.removeSelectorLocked(selector, user)
}
sc.mutex.Unlock()
}
// ChangeUser changes the CachedSelectionUser that gets updates for the
// cached selector.
func (sc *SelectorCache) ChangeUser(selector CachedSelector, from, to CachedSelectionUser) {
key := selector.String()
sc.mutex.Lock()
idSel, exists := sc.selectors[key]
if exists {
// Add before remove so that the count does not dip to zero in between,
// as this causes FQDN unregistration (if applicable).
idSel.addUser(to)
// ignoring the return value as we have just added a user above
idSel.removeUser(from)
}
sc.mutex.Unlock()
}
// UpdateIdentities propagates identity updates to selectors
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
//
// Caller should Wait() on the returned sync.WaitGroup before triggering any
// policy updates. Policy updates may need Endpoint locks, so this Wait() can
// deadlock if the caller is holding any endpoint locks.
func (sc *SelectorCache) UpdateIdentities(added, deleted identity.IdentityMap, wg *sync.WaitGroup) {
sc.mutex.Lock()
defer sc.mutex.Unlock()
// Update idCache so that newly added selectors get
// prepopulated with all matching numeric identities.
for numericID := range deleted {
if old, exists := sc.idCache[numericID]; exists {
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
logfields.Labels: old.lbls,
}).Debug("UpdateIdentities: Deleting identity")
delete(sc.idCache, numericID)
sc.prefixMap.Delete(numericID)
} else {
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
}).Warning("UpdateIdentities: Skipping Delete of a non-existing identity")
delete(deleted, numericID)
}
}
for numericID, lbls := range added {
if old, exists := sc.idCache[numericID]; exists {
// Skip if no change. Not skipping if label
// order is different, but identity labels are
// sorted for the kv-store, so there should
// not be too many false negatives.
if lbls.Equals(old.lbls) {
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
}).Debug("UpdateIdentities: Skipping add of an existing identical identity")
delete(added, numericID)
continue
}
scopedLog := log.WithFields(logrus.Fields{
logfields.Identity: numericID,
logfields.Labels: old.lbls,
logfields.Labels + "(new)": lbls},
)
msg := "UpdateIdentities: Updating an existing identity"
// Warn if any other ID has their labels change, besides local
// host. The local host can have its labels change at runtime if
// the kube-apiserver is running on the local host, see
// ipcache.TriggerLabelInjection().
if numericID == identity.ReservedIdentityHost {
scopedLog.Debug(msg)
} else {
scopedLog.Warning(msg)
}
} else {
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
logfields.Labels: lbls,
}).Debug("UpdateIdentities: Adding a new identity")
}
// store prefix into prefix map
prefix := getLocalScopePrefix(numericID, lbls)
if prefix.IsValid() {
sc.prefixMap.Store(numericID, prefix)
}
sc.idCache[numericID] = newIdentity(numericID, lbls)
}
if len(deleted)+len(added) > 0 {
// Iterate through all locally used identity selectors and
// update the cached numeric identities as required.
for _, idSel := range sc.selectors {
var adds, dels []identity.NumericIdentity
for numericID := range deleted {
if _, exists := idSel.cachedSelections[numericID]; exists {
dels = append(dels, numericID)
delete(idSel.cachedSelections, numericID)
}
}
for numericID := range added {
matches := idSel.source.matches(sc.idCache[numericID])
_, exists := idSel.cachedSelections[numericID]
if matches && !exists {
adds = append(adds, numericID)
idSel.cachedSelections[numericID] = struct{}{}
} else if !matches && exists {
// identity was mutated and no longer matches
dels = append(dels, numericID)
delete(idSel.cachedSelections, numericID)
}
}
if len(dels)+len(adds) > 0 {
idSel.updateSelections()
idSel.notifyUsers(sc, adds, dels, wg)
}
}
}
}
// GetPrefix returns the most specific CIDR for an identity, if any.
func (sc *SelectorCache) GetPrefix(id identity.NumericIdentity) netip.Prefix {
p, _ := sc.prefixMap.Load(id)
return p
}
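// Illustrative sketch (not part of the original source): a typical selector
// lifecycle, assuming 'user' implements CachedSelectionUser, 'notifier'
// implements identityNotifier, and 'added'/'deleted' are identity.IdentityMap
// values describing identity churn:
//
//	sc := NewSelectorCache(identity.IdentityMap{})
//	sc.SetLocalIdentityNotifier(notifier)
//	sel := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s))
//	cached, _ := sc.AddIdentitySelector(user, nil, sel)
//	ids := cached.GetSelections() // snapshot of the currently selected numeric identities
//	var wg sync.WaitGroup
//	sc.UpdateIdentities(added, deleted, &wg) // queues IdentitySelectionUpdated callbacks
//	wg.Wait()                                // wait for notifications before recalculating policy
//	sc.RemoveSelector(cached, user)
//	_ = ids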
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"encoding/json"
"sort"
"strings"
"sync"
"sync/atomic"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
)
// CachedSelector represents an identity selector owned by the selector cache
type CachedSelector interface {
// GetSelections returns the cached set of numeric identities
// selected by the CachedSelector. The returned slice must NOT
// be modified, as it is shared among multiple users.
GetSelections() identity.NumericIdentitySlice
// GetMetadataLabels returns metadata labels for additional context
// surrounding the selector. These are typically the labels associated with
// Cilium rules.
GetMetadataLabels() labels.LabelArray
// Selects return 'true' if the CachedSelector selects the given
// numeric identity.
Selects(nid identity.NumericIdentity) bool
// IsWildcard returns true if the endpoint selector selects
// all endpoints.
IsWildcard() bool
// IsNone returns true if the selector never selects anything
IsNone() bool
// String returns the string representation of this selector.
// Used as a map key.
String() string
}
// CachedSelectorSlice is a slice of CachedSelectors that can be sorted.
type CachedSelectorSlice []CachedSelector
// MarshalJSON returns the CachedSelectors as JSON formatted buffer
func (s CachedSelectorSlice) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString("[")
for i, selector := range s {
buf, err := json.Marshal(selector.String())
if err != nil {
return nil, err
}
buffer.Write(buf)
if i < len(s)-1 {
buffer.WriteString(",")
}
}
buffer.WriteString("]")
return buffer.Bytes(), nil
}
func (s CachedSelectorSlice) Len() int { return len(s) }
func (s CachedSelectorSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s CachedSelectorSlice) Less(i, j int) bool {
return strings.Compare(s[i].String(), s[j].String()) < 0
}
// SelectsAllEndpoints returns whether the CachedSelectorSlice selects all
// endpoints, which is true if the wildcard endpoint selector is present in the
// slice.
func (s CachedSelectorSlice) SelectsAllEndpoints() bool {
for _, selector := range s {
if selector.IsWildcard() {
return true
}
}
return false
}
// CachedSelectionUser inserts selectors into the cache and gets update
// callbacks whenever the set of selected numeric identities change for
// the CachedSelectors pushed by it.
type CachedSelectionUser interface {
// IdentitySelectionUpdated implementations MUST NOT call back
// to the name manager or the selector cache while executing this function!
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
IdentitySelectionUpdated(selector CachedSelector, added, deleted []identity.NumericIdentity)
}
// identitySelector is the internal type for all selectors in the
// selector cache.
//
// identitySelector represents the mapping of an EndpointSelector
// to a slice of identities. These mappings are updated via two
// different processes:
//
// 1. When policy rules are changed these are added and/or deleted
// depending on what selectors the rules contain. Cached selections of
// new identitySelectors are pre-populated from the set of currently
// known identities.
//
// 2. When reachable identities appear or disappear, either via local
// allocation (CIDRs), or via the KV-store (remote endpoints). In this
// case all existing identitySelectors are walked through and their
// cached selections are updated as necessary.
//
// In both of the above cases the set of existing identitySelectors is
// write locked.
//
// To minimize the upkeep the identity selectors are shared across
// all IdentityPolicies, so that only one copy exists for each
// identitySelector. Users of the SelectorCache take care of creating
// identitySelectors as needed by identity policies. The set of
// identitySelectors is read locked during an IdentityPolicy update so
// that the policy is always updated using a coherent set of
// cached selections.
//
// identitySelector is used as a map key, so it must not be implemented by a
// map, slice, or a func, or a runtime panic will be triggered. In all
// cases below identitySelector is being implemented by structs.
//
// identitySelector is used in the policy engine as a map key,
// so it must always be given to the user as a pointer to the actual type.
// (The public methods only expose the CachedSelector interface.)
type identitySelector struct {
source selectorSource
key string
selections atomic.Pointer[identity.NumericIdentitySlice]
users map[CachedSelectionUser]struct{}
cachedSelections map[identity.NumericIdentity]struct{}
metadataLbls labels.LabelArray
}
// identitySelector implements CachedSelector
var _ CachedSelector = (*identitySelector)(nil)
type selectorSource interface {
matches(scIdentity) bool
remove(identityNotifier)
}
// fqdnSelector implements the selectorSource for a FQDNSelector. A fqdnSelector
// matches an identity if the identity has a `fqdn:` label matching the FQDN
// selector string.
// In addition, the remove implementation calls back into the DNS name manager
// to unregister the FQDN selector.
type fqdnSelector struct {
selector api.FQDNSelector
}
func (f *fqdnSelector) remove(dnsProxy identityNotifier) {
dnsProxy.UnregisterFQDNSelector(f.selector)
}
// matches returns true if the identity contains at least one label
// that matches the FQDNSelector's IdentityLabel string
func (f *fqdnSelector) matches(identity scIdentity) bool {
return identity.lbls.Intersects(labels.LabelArray{f.selector.IdentityLabel()})
}
type labelIdentitySelector struct {
selector api.EndpointSelector
namespaces []string // allowed namespaces, or ""
}
// xxxMatches returns true if the CachedSelector matches given labels.
// This is slow, but only used for policy tracing, so it's OK.
func (l *labelIdentitySelector) xxxMatches(labels labels.LabelArray) bool {
return l.selector.Matches(labels)
}
func (l *labelIdentitySelector) matchesNamespace(ns string) bool {
if len(l.namespaces) > 0 {
if ns != "" {
for i := range l.namespaces {
if ns == l.namespaces[i] {
return true
}
}
}
// namespace required, but no match
return false
}
// no namespace required, match
return true
}
func (l *labelIdentitySelector) matches(identity scIdentity) bool {
return l.matchesNamespace(identity.namespace) && l.selector.Matches(identity.lbls)
}
func (l *labelIdentitySelector) remove(_ identityNotifier) {
// only useful for fqdn selectors
}
// lock must be held
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
func (i *identitySelector) notifyUsers(sc *SelectorCache, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
for user := range i.users {
// queue a notification for each user of this selector
sc.queueUserNotification(user, i, added, deleted, wg)
}
}
// Equal is used by checker.Equals, and only considers the identity of the selector,
// ignoring the internal state!
func (i *identitySelector) Equal(b *identitySelector) bool {
return i.key == b.key
}
//
// CachedSelector implementation (== Public API)
//
// No locking needed.
//
// GetSelections returns the set of numeric identities currently
// selected. The cached selections can be concurrently updated. In
// that case GetSelections() will return either the old or new version
// of the selections. If the old version is returned, the user is
// guaranteed to receive a notification including the update.
func (i *identitySelector) GetSelections() identity.NumericIdentitySlice {
selections := i.selections.Load()
if selections == nil {
return emptySelection
}
return *selections
}
func (i *identitySelector) GetMetadataLabels() labels.LabelArray {
return i.metadataLbls
}
// Selects return 'true' if the CachedSelector selects the given
// numeric identity.
func (i *identitySelector) Selects(nid identity.NumericIdentity) bool {
if i.IsWildcard() {
return true
}
nids := i.GetSelections()
idx := sort.Search(len(nids), func(i int) bool { return nids[i] >= nid })
return idx < len(nids) && nids[idx] == nid
}
// IsWildcard returns true if the endpoint selector selects all
// endpoints.
func (i *identitySelector) IsWildcard() bool {
return i.key == wildcardSelectorKey
}
// IsNone returns true if the endpoint selector never selects anything.
func (i *identitySelector) IsNone() bool {
return i.key == noneSelectorKey
}
// String returns the map key for this selector
func (i *identitySelector) String() string {
return i.key
}
//
// identitySelector implementation (== internal API)
//
// lock must be held
func (i *identitySelector) addUser(user CachedSelectionUser) (added bool) {
if _, exists := i.users[user]; exists {
return false
}
i.users[user] = struct{}{}
return true
}
// locks must be held for the dnsProxy and the SelectorCache (if the selector is a FQDN selector)
func (i *identitySelector) removeUser(user CachedSelectionUser) (last bool) {
delete(i.users, user)
return len(i.users) == 0
}
// lock must be held
func (i *identitySelector) numUsers() int {
return len(i.users)
}
// updateSelections updates the immutable slice representation of the
// cached selections after the cached selections have been changed.
//
// lock must be held
func (i *identitySelector) updateSelections() {
selections := make(identity.NumericIdentitySlice, len(i.cachedSelections))
idx := 0
for nid := range i.cachedSelections {
selections[idx] = nid
idx++
}
// Sort the numeric identities so that the map iteration order
// does not matter. This makes testing easier and may also make
// identifying changes easier.
sort.Slice(selections, func(i, j int) bool {
return selections[i] < selections[j]
})
i.setSelections(&selections)
}
func (i *identitySelector) setSelections(selections *identity.NumericIdentitySlice) {
if len(*selections) > 0 {
i.selections.Store(selections)
} else {
i.selections.Store(&emptySelection)
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"net/netip"
"sync"
"testing"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/policy/api"
testidentity "github.com/cilium/cilium/pkg/testutils/identity"
)
type DummySelectorCacheUser struct{}
func (d *DummySelectorCacheUser) IdentitySelectionUpdated(selector CachedSelector, added, deleted []identity.NumericIdentity) {
}
type cachedSelectionUser struct {
t *testing.T
sc *SelectorCache
name string
updateMutex lock.Mutex
updateCond *sync.Cond
selections map[CachedSelector][]identity.NumericIdentity
notifications int
adds int
deletes int
}
func (sc *SelectorCache) haveUserNotifications() bool {
sc.userMutex.Lock()
defer sc.userMutex.Unlock()
return len(sc.userNotes) > 0
}
func newUser(t *testing.T, name string, sc *SelectorCache) *cachedSelectionUser {
csu := &cachedSelectionUser{
t: t,
sc: sc,
name: name,
selections: make(map[CachedSelector][]identity.NumericIdentity),
}
csu.updateCond = sync.NewCond(&csu.updateMutex)
return csu
}
func haveNid(nid identity.NumericIdentity, selections []identity.NumericIdentity) bool {
for i := range selections {
if selections[i] == nid {
return true
}
}
return false
}
func (csu *cachedSelectionUser) AddIdentitySelector(sel api.EndpointSelector) CachedSelector {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
cached, added := csu.sc.AddIdentitySelector(csu, nil, sel)
require.NotEqual(csu.t, nil, cached)
_, exists := csu.selections[cached]
// Not added if already exists for this user
require.Equal(csu.t, !exists, added)
csu.selections[cached] = cached.GetSelections()
// Pre-existing selections are not notified as updates
require.False(csu.t, csu.sc.haveUserNotifications())
return cached
}
func (csu *cachedSelectionUser) AddFQDNSelector(sel api.FQDNSelector) CachedSelector {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
cached, added := csu.sc.AddFQDNSelector(csu, nil, sel)
require.NotEqual(csu.t, nil, cached)
_, exists := csu.selections[cached]
// Not added if already exists for this user
require.Equal(csu.t, !exists, added)
csu.selections[cached] = cached.GetSelections()
// Pre-existing selections are not notified as updates
require.False(csu.t, csu.sc.haveUserNotifications())
return cached
}
func (csu *cachedSelectionUser) RemoveSelector(sel CachedSelector) {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
csu.sc.RemoveSelector(sel, csu)
delete(csu.selections, sel)
// No notifications for a removed selector
require.False(csu.t, csu.sc.haveUserNotifications())
}
func (csu *cachedSelectionUser) Reset() {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
csu.notifications = 0
}
func (csu *cachedSelectionUser) WaitForUpdate() (adds, deletes int) {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
for csu.notifications == 0 {
csu.updateCond.Wait()
}
return csu.adds, csu.deletes
}
func (csu *cachedSelectionUser) IdentitySelectionUpdated(selector CachedSelector, added, deleted []identity.NumericIdentity) {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
csu.notifications++
csu.adds += len(added)
csu.deletes += len(deleted)
selections := selector.GetSelections()
// Validate added & deleted against the selections
for _, add := range added {
require.True(csu.t, haveNid(add, selections))
}
for _, del := range deleted {
require.False(csu.t, haveNid(del, selections))
}
// update selections
csu.selections[selector] = selections
csu.updateCond.Signal()
}
// Mock CachedSelector for unit testing.
type testCachedSelector struct {
name string
wildcard bool
selections []identity.NumericIdentity
}
func newTestCachedSelector(name string, wildcard bool, selections ...int) *testCachedSelector {
cs := &testCachedSelector{
name: name,
wildcard: wildcard,
selections: make([]identity.NumericIdentity, 0, len(selections)),
}
cs.addSelections(selections...)
return cs
}
// addSelections records the given ids as selected and returns them as []identity.NumericIdentity
func (cs *testCachedSelector) addSelections(selections ...int) (adds []identity.NumericIdentity) {
for _, id := range selections {
nid := identity.NumericIdentity(id)
adds = append(adds, nid)
if cs == nil {
continue
}
if !cs.Selects(nid) {
cs.selections = append(cs.selections, nid)
}
}
return adds
}
// deleteSelections removes the given ids from the selections and returns them as []identity.NumericIdentity
func (cs *testCachedSelector) deleteSelections(selections ...int) (deletes []identity.NumericIdentity) {
for _, id := range selections {
nid := identity.NumericIdentity(id)
deletes = append(deletes, nid)
if cs == nil {
continue
}
for i := 0; i < len(cs.selections); i++ {
if nid == cs.selections[i] {
cs.selections = append(cs.selections[:i], cs.selections[i+1:]...)
i--
}
}
}
return deletes
}
// CachedSelector interface
func (cs *testCachedSelector) GetSelections() identity.NumericIdentitySlice {
return cs.selections
}
func (cs *testCachedSelector) GetMetadataLabels() labels.LabelArray {
return nil
}
func (cs *testCachedSelector) Selects(nid identity.NumericIdentity) bool {
for _, id := range cs.selections {
if id == nid {
return true
}
}
return false
}
func (cs *testCachedSelector) IsWildcard() bool {
return cs.wildcard
}
func (cs *testCachedSelector) IsNone() bool {
return false
}
func (cs *testCachedSelector) String() string {
return cs.name
}
func TestAddRemoveSelector(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s),
k8sConst.PodNamespaceLabel: labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections()
require.Equal(t, 1, len(selections))
require.Equal(t, identity.NumericIdentity(1234), selections[0])
// Try add the same selector from the same user the second time
testSelector = api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
cached2 := user1.AddIdentitySelector(testSelector)
require.Equal(t, cached, cached2)
// Add the same selector from a different user
testSelector = api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
user2 := newUser(t, "user2", sc)
cached3 := user2.AddIdentitySelector(testSelector)
// Same old CachedSelector is returned, nothing new is cached
require.Equal(t, cached, cached3)
// Removing the first user does not remove the cached selector
user1.RemoveSelector(cached)
// Remove is idempotent
user1.RemoveSelector(cached)
// Removing the last user removes the cached selector
user2.RemoveSelector(cached3)
// Remove is idempotent
user2.RemoveSelector(cached3)
// All identities removed
require.Equal(t, 0, len(sc.selectors))
}
func TestMultipleIdentitySelectors(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
li1 := identity.IdentityScopeLocal
li2 := li1 + 1
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
li1: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.1/32")).LabelArray(),
li2: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/8")).LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceAny))
test2Selector := api.NewESFromLabels(labels.NewLabel("app", "test2", labels.LabelSourceAny))
// Test both exact and broader CIDR selectors
cidr32Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.1/32", "", labels.LabelSourceCIDR))
cidr24Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/24", "", labels.LabelSourceCIDR))
cidr8Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/8", "", labels.LabelSourceCIDR))
cidr7Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/7", "", labels.LabelSourceCIDR))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections()
require.Equal(t, 1, len(selections))
require.Equal(t, identity.NumericIdentity(1234), selections[0])
// Add another selector from the same user
cached2 := user1.AddIdentitySelector(test2Selector)
require.NotEqual(t, cached, cached2)
// Current selections contain the numeric identities of existing identities that match
selections2 := cached2.GetSelections()
require.Equal(t, 1, len(selections2))
require.Equal(t, identity.NumericIdentity(2345), selections2[0])
shouldSelect := func(sel api.EndpointSelector, wantIDs ...identity.NumericIdentity) {
csel := user1.AddIdentitySelector(sel)
selections := csel.GetSelections()
require.EqualValues(t, identity.NumericIdentitySlice(wantIDs), selections)
user1.RemoveSelector(csel)
}
shouldSelect(cidr32Selector, li1)
shouldSelect(cidr24Selector, li1)
shouldSelect(cidr8Selector, li1, li2)
shouldSelect(cidr7Selector, li1, li2)
user1.RemoveSelector(cached)
user1.RemoveSelector(cached2)
// All identities removed
require.Equal(t, 0, len(sc.selectors))
}
func TestIdentityUpdates(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceAny))
test2Selector := api.NewESFromLabels(labels.NewLabel("app", "test2", labels.LabelSourceAny))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections()
require.Equal(t, 1, len(selections))
require.Equal(t, identity.NumericIdentity(1234), selections[0])
// Add another selector from the same user
cached2 := user1.AddIdentitySelector(test2Selector)
require.NotEqual(t, cached, cached2)
// Current selections contain the numeric identities of existing identities that match
selections2 := cached2.GetSelections()
require.Equal(t, 1, len(selections2))
require.Equal(t, identity.NumericIdentity(2345), selections2[0])
user1.Reset()
// Add some identities to the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
12345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
adds, deletes := user1.WaitForUpdate()
require.Equal(t, 1, adds)
require.Equal(t, 0, deletes)
// Current selections contain the numeric identities of existing identities that match
selections = cached.GetSelections()
require.Equal(t, 2, len(selections))
require.Equal(t, identity.NumericIdentity(1234), selections[0])
require.Equal(t, identity.NumericIdentity(12345), selections[1])
user1.Reset()
// Remove some identities from the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(nil, identity.IdentityMap{
12345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
}, wg)
wg.Wait()
adds, deletes = user1.WaitForUpdate()
require.Equal(t, 1, adds)
require.Equal(t, 1, deletes)
// Current selections contain the numeric identities of existing identities that match
selections = cached.GetSelections()
require.Equal(t, 1, len(selections))
require.Equal(t, identity.NumericIdentity(1234), selections[0])
user1.RemoveSelector(cached)
user1.RemoveSelector(cached2)
// All identities removed
require.Equal(t, 0, len(sc.selectors))
}
func TestIdentityUpdatesMultipleUsers(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Add same selector from a different user
user2 := newUser(t, "user2", sc)
cached2 := user2.AddIdentitySelector(testSelector)
require.Equal(t, cached, cached2)
user1.Reset()
user2.Reset()
// Add some identities to the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
123: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
234: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
adds, deletes := user1.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 0, deletes)
adds, deletes = user2.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 0, deletes)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections()
require.Equal(t, 3, len(selections))
require.Equal(t, identity.NumericIdentity(123), selections[0])
require.Equal(t, identity.NumericIdentity(345), selections[1])
require.Equal(t, identity.NumericIdentity(1234), selections[2])
require.EqualValues(t, cached2.GetSelections(), cached.GetSelections())
user1.Reset()
user2.Reset()
// Remove some identities from the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(nil, identity.IdentityMap{
123: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
234: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, wg)
wg.Wait()
adds, deletes = user1.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 1, deletes)
adds, deletes = user2.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 1, deletes)
// Current selections contain the numeric identities of existing identities that match
selections = cached.GetSelections()
require.Equal(t, 2, len(selections))
require.Equal(t, identity.NumericIdentity(345), selections[0])
require.Equal(t, identity.NumericIdentity(1234), selections[1])
require.EqualValues(t, cached2.GetSelections(), cached.GetSelections())
user1.RemoveSelector(cached)
user2.RemoveSelector(cached2)
// All identities removed
require.Equal(t, 0, len(sc.selectors))
}
func TestSelectorManagerCanGetBeforeSet(t *testing.T) {
defer func() {
r := recover()
require.Equal(t, nil, r)
}()
idSel := identitySelector{
key: "test",
users: make(map[CachedSelectionUser]struct{}),
}
selections := idSel.GetSelections()
require.NotEqual(t, nil, selections)
require.Equal(t, 0, len(selections))
}
func testNewSelectorCache(ids identity.IdentityMap) *SelectorCache {
sc := NewSelectorCache(ids)
sc.SetLocalIdentityNotifier(testidentity.NewDummyIdentityNotifier())
return sc
}
func Test_getLocalScopePrefix(t *testing.T) {
prefix := getLocalScopePrefix(identity.ReservedIdentityWorld, nil)
require.False(t, prefix.IsValid())
prefix = getLocalScopePrefix(identity.ReservedIdentityWorld, labels.LabelArray{labels.Label{Source: labels.LabelSourceCIDR, Key: "0.0.0.0/0"}})
require.False(t, prefix.IsValid())
prefix = getLocalScopePrefix(identity.IdentityScopeLocal, labels.LabelArray{labels.Label{Source: labels.LabelSourceCIDR, Key: "0.0.0.0/0"}})
require.True(t, prefix.IsValid())
require.Equal(t, "0.0.0.0/0", prefix.String())
prefix = getLocalScopePrefix(identity.IdentityScopeLocal, labels.LabelArray{labels.Label{Source: labels.LabelSourceCIDR, Key: "::/0"}})
require.True(t, prefix.IsValid())
require.Equal(t, "::/0", prefix.String())
prefix = getLocalScopePrefix(identity.IdentityScopeLocal, labels.LabelArray{labels.Label{Source: labels.LabelSourceCIDR, Key: "--/0"}})
require.True(t, prefix.IsValid())
require.Equal(t, "::/0", prefix.String())
prefix = getLocalScopePrefix(identity.IdentityScopeLocal, labels.LabelArray{
labels.Label{Source: labels.LabelSourceCIDR, Key: "ff--/8"},
labels.Label{Source: labels.LabelSourceCIDR, Key: "--/0"},
labels.Label{Source: labels.LabelSourceCIDR, Key: "--1/128"},
})
require.True(t, prefix.IsValid())
require.Equal(t, "::1/128", prefix.String())
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"strings"
"sync"
"github.com/cilium/cilium/pkg/endpoint/regeneration"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/trigger"
)
// TriggerPolicyUpdates triggers the policy update trigger.
//
// To follow what the trigger does, see NewUpdater.
func (u *Updater) TriggerPolicyUpdates(force bool, reason string) {
if force {
log.Debugf("Artificially increasing policy revision to enforce policy recalculation")
u.repo.BumpRevision()
}
u.TriggerWithReason(reason)
}
// NewUpdater returns a new Updater instance to handle triggering policy
// updates ready for use.
func NewUpdater(r *Repository, regen regenerator) *Updater {
t, err := trigger.NewTrigger(trigger.Parameters{
Name: "policy_update",
MetricsObserver: &TriggerMetrics{},
MinInterval: option.Config.PolicyTriggerInterval,
// Triggers policy updates for every local endpoint.
// This may be called in a variety of situations: after policy changes,
// changes in agent configuration, changes in endpoint labels, and
// change of security identities.
TriggerFunc: func(reasons []string) {
log.Debug("Regenerating all endpoints")
reason := strings.Join(reasons, ", ")
regenerationMetadata := &regeneration.ExternalRegenerationMetadata{
Reason: reason,
RegenerationLevel: regeneration.RegenerateWithoutDatapath,
}
regen.RegenerateAllEndpoints(regenerationMetadata)
},
})
if err != nil {
panic(err) // unreachable, only occurs if TriggerFunc is nil
}
return &Updater{
Trigger: t,
repo: r,
}
}
// Updater is responsible for triggering policy updates, in order to perform
// policy recalculation.
type Updater struct {
*trigger.Trigger
repo *Repository
}
type regenerator interface {
// RegenerateAllEndpoints should trigger a regeneration of all endpoints.
RegenerateAllEndpoints(*regeneration.ExternalRegenerationMetadata) *sync.WaitGroup
}
// TriggerMetrics handles the metrics for trigger policy recalculations.
type TriggerMetrics struct{}
func (p *TriggerMetrics) QueueEvent(reason string) {
if metrics.TriggerPolicyUpdateTotal.IsEnabled() {
metrics.TriggerPolicyUpdateTotal.WithLabelValues(reason).Inc()
}
}
func (p *TriggerMetrics) PostRun(duration, latency time.Duration, folds int) {
if metrics.TriggerPolicyUpdateCallDuration.IsEnabled() {
metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("duration").Observe(duration.Seconds())
metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("latency").Observe(latency.Seconds())
}
if metrics.TriggerPolicyUpdateFolds.IsEnabled() {
metrics.TriggerPolicyUpdateFolds.Set(float64(folds))
}
}
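// Illustrative sketch (not part of the original source): wiring up the
// Updater, assuming 'repo' is the policy *Repository and 'mgr' implements the
// regenerator interface (e.g. the endpoint manager):
//
//	u := NewUpdater(repo, mgr)
//	u.TriggerPolicyUpdates(false, "policy rules changed")
//	u.TriggerPolicyUpdates(true, "agent config changed") // also bumps the policy revision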
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import "github.com/cilium/cilium/pkg/labels"
// JoinPath returns a joined path from a and b.
func JoinPath(a, b string) string {
return a + labels.PathDelimiter + b
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"regexp"
"strconv"
"strings"
ciliumio "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/u8proto"
)
var (
singleAnnotationRegex = "<(Ingress|Egress)/([1-9][0-9]{1,5})/(TCP|UDP|SCTP|ANY)/([A-Za-z]{3,32})>"
annotationRegex = regexp.MustCompile(fmt.Sprintf(`^((%s)(,(%s))*)$`, singleAnnotationRegex, singleAnnotationRegex))
)
func validateL7ProtocolWithDirection(dir string, proto L7ParserType) error {
switch proto {
case ParserTypeHTTP:
return nil
case ParserTypeDNS:
if dir == "Egress" {
return nil
}
case ParserTypeKafka:
return nil
default:
return fmt.Errorf("unsupported parser type %s", proto)
}
return fmt.Errorf("%s not allowed with direction %s", proto, dir)
}
// NewVisibilityPolicy generates the VisibilityPolicy that is encoded in the
// annotation parameter.
// Returns an error:
// - if the annotation does not correspond to the expected
// format for a visibility annotation.
// - if there is a conflict between the state encoded in the annotation (e.g.,
// different L7 protocols for the same L4 port / protocol / traffic direction).
func NewVisibilityPolicy(anno, namespace, pod string) (*VisibilityPolicy, error) {
if !annotationRegex.MatchString(anno) {
return nil, fmt.Errorf("annotation for proxy visibility did not match expected format %s", annotationRegex.String())
}
nvp := &VisibilityPolicy{
Ingress: make(DirectionalVisibilityPolicy),
Egress: make(DirectionalVisibilityPolicy),
}
// TODO: look into using regex groups.
anSplit := strings.Split(anno, ",")
for i := range anSplit {
proxyAnnoSplit := strings.Split(anSplit[i], "/")
if len(proxyAnnoSplit) != 4 {
err := fmt.Errorf("invalid number of fields (%d) in annotation", len(proxyAnnoSplit))
return nil, err
}
// <Ingress|Egress --> Ingress|Egress
// Don't need to validate the content itself, regex already did that.
direction := proxyAnnoSplit[0][1:]
port := proxyAnnoSplit[1]
portInt, err := strconv.ParseUint(port, 10, 16)
if err != nil {
return nil, fmt.Errorf("unable to parse port: %w", err)
}
// Don't need to validate, regex already did that.
l4Proto := proxyAnnoSplit[2]
u8Prot, err := u8proto.ParseProtocol(l4Proto)
if err != nil {
return nil, fmt.Errorf("invalid L4 protocol %s", l4Proto)
}
// ANY equates to TCP, UDP, and SCTP in the datapath; the datapath itself does
// not support the 'ANY' protocol paired with a port at L4.
var protos []u8proto.U8proto
if u8Prot == u8proto.ANY {
protos = append(protos, u8proto.TCP)
protos = append(protos, u8proto.UDP)
protos = append(protos, u8proto.SCTP)
} else {
protos = append(protos, u8Prot)
}
// Remove trailing '>'.
l7Protocol := L7ParserType(strings.ToLower(proxyAnnoSplit[3][:len(proxyAnnoSplit[3])-1]))
if err := validateL7ProtocolWithDirection(direction, l7Protocol); err != nil {
return nil, err
}
var dvp DirectionalVisibilityPolicy
var ingress bool
if direction == "Ingress" {
dvp = nvp.Ingress
ingress = true
} else {
dvp = nvp.Egress
ingress = false
}
for _, prot := range protos {
pp := strconv.FormatUint(portInt, 10) + "/" + prot.String()
if res, ok := dvp[pp]; ok {
if res.Parser != l7Protocol {
return nil, fmt.Errorf("duplicate annotations with different L7 protocols %s and %s for %s", res.Parser, l7Protocol, pp)
}
}
l7Meta := generateL7AllowAllRules(l7Protocol, namespace, pod)
dvp[pp] = &VisibilityMetadata{
Parser: l7Protocol,
Port: uint16(portInt),
Proto: prot,
Ingress: ingress,
L7Metadata: l7Meta,
}
}
}
return nvp, nil
}
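// Illustrative sketch (not part of the original source): parsing a visibility
// annotation in the format accepted by annotationRegex. "ANY" expands into
// TCP, UDP, and SCTP entries for the same port:
//
//	vp, err := NewVisibilityPolicy("<Ingress/80/TCP/HTTP>,<Egress/53/ANY/DNS>", "default", "mypod")
//	if err == nil {
//		_ = vp.Ingress["80/TCP"] // HTTP visibility at ingress port 80
//		_ = vp.Egress["53/UDP"]  // DNS visibility; "53/TCP" and "53/SCTP" are populated as well
//	}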
func generateL7AllowAllRules(parser L7ParserType, namespace, pod string) L7DataMap {
var m L7DataMap
switch parser {
case ParserTypeDNS:
m = L7DataMap{}
// Create an entry to explicitly allow all at L7 for DNS.
emptyL3Selector := &identitySelector{source: &labelIdentitySelector{selector: api.WildcardEndpointSelector}, key: wildcardSelectorKey}
emptyL3Selector.metadataLbls = labels.LabelArray{
labels.NewLabel(ciliumio.PolicyLabelDerivedFrom, "PodVisibilityAnnotation", labels.LabelSourceK8s),
}
if namespace != "" {
emptyL3Selector.metadataLbls = append(emptyL3Selector.metadataLbls, labels.NewLabel(ciliumio.PodNamespaceLabel, namespace, labels.LabelSourceK8s))
}
if pod != "" {
emptyL3Selector.metadataLbls = append(emptyL3Selector.metadataLbls, labels.NewLabel(ciliumio.PodNameLabel, pod, labels.LabelSourceK8s))
}
m[emptyL3Selector] = &PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{
{
MatchPattern: "*",
},
},
},
}
}
return m
}
// VisibilityMetadata encodes state about what type of traffic should be
// redirected to an L7Proxy. Implements the ProxyPolicy interface.
// TODO: an L4Filter could be composed of this type.
type VisibilityMetadata struct {
// Parser represents the proxy to which traffic should be redirected.
Parser L7ParserType
// Port, in tandem with Proto, signifies which L4 port for which traffic
// should be redirected.
Port uint16
// Proto, in tandem with port, signifies which L4 protocol for which traffic
// should be redirected.
Proto u8proto.U8proto
// Ingress specifies whether ingress traffic at the given L4 port / protocol
// should be redirected to the proxy.
Ingress bool
// L7Metadata encodes optional information about what is allowed at L7 for
// visibility purposes. Some protocol parsers do not need this to be set in
// order to allow traffic (e.g., HTTP), but some do (e.g., DNS).
L7Metadata L7DataMap
}
// DirectionalVisibilityPolicy is a mapping of VisibilityMetadata keyed by
// L4 Port / L4 Protocol (e.g., 80/TCP) for a given traffic direction (e.g.,
// ingress or egress). This encodes at which L4 Port / L4 Protocol traffic
// should be redirected to a given L7 proxy. An empty instance of this type
// indicates that no traffic should be redirected.
type DirectionalVisibilityPolicy map[string]*VisibilityMetadata
// VisibilityPolicy represents for both ingress and egress which types of
// traffic should be redirected to a given L7 proxy.
type VisibilityPolicy struct {
Ingress DirectionalVisibilityPolicy
Egress DirectionalVisibilityPolicy
Error error
}
// CopyL7RulesPerEndpoint returns a shallow copy of the L7Metadata of the
// L4Filter.
func (v *VisibilityMetadata) CopyL7RulesPerEndpoint() L7DataMap {
if v.L7Metadata != nil {
return v.L7Metadata.ShallowCopy()
}
return nil
}
// GetL7Parser returns the L7ParserType for this VisibilityMetadata.
func (v *VisibilityMetadata) GetL7Parser() L7ParserType {
return v.Parser
}
// GetIngress returns whether the VisibilityMetadata applies at ingress or
// egress.
func (v *VisibilityMetadata) GetIngress() bool {
return v.Ingress
}
// GetPort returns at which port the VisibilityMetadata applies.
func (v *VisibilityMetadata) GetPort() uint16 {
return v.Port
}
// GetProtocol returns the protocol where the VisibilityMetadata applies.
func (v *VisibilityMetadata) GetProtocol() uint8 {
return uint8(v.Proto)
}
// GetListener returns the optional listener name.
func (v *VisibilityMetadata) GetListener() string {
return ""
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package safeio
import (
"fmt"
"io"
)
// ErrLimitReached indicates that ReadAllLimit has
// reached its limit before completing a full read
// of the io.Reader.
var ErrLimitReached = fmt.Errorf("read limit reached")
// ByteSize expresses the size of bytes
type ByteSize float64
const (
_ = iota // ignore first value by assigning to blank identifier
// KB is a Kilobyte
KB ByteSize = 1 << (10 * iota)
// MB is a Megabyte
MB
// GB is a Gigabyte
GB
// TB is a Terabyte
TB
// PB is a Petabyte
PB
// EB is an Exabyte
EB
// ZB is a Zettabyte
ZB
// YB is a Yottabyte
YB
)
// String converts a ByteSize to a string
func (b ByteSize) String() string {
switch {
case b >= YB:
return fmt.Sprintf("%.1fYB", b/YB)
case b >= ZB:
return fmt.Sprintf("%.1fZB", b/ZB)
case b >= EB:
return fmt.Sprintf("%.1fEB", b/EB)
case b >= PB:
return fmt.Sprintf("%.1fPB", b/PB)
case b >= TB:
return fmt.Sprintf("%.1fTB", b/TB)
case b >= GB:
return fmt.Sprintf("%.1fGB", b/GB)
case b >= MB:
return fmt.Sprintf("%.1fMB", b/MB)
case b >= KB:
return fmt.Sprintf("%.1fKB", b/KB)
}
return fmt.Sprintf("%.1fB", b)
}
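// Illustrative sketch (not part of the original source): String picks the
// largest unit that fits the value:
//
//	ByteSize(1536).String() // "1.5KB"
//	(2 * MB).String()       // "2.0MB"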
// ReadAllLimit reads from r until an error, EOF, or after n bytes and returns
// the data it read. A successful call returns err == nil, not err == EOF.
// Because ReadAllLimit is defined to read from r until EOF, it does not
// treat an EOF from Read as an error to be reported. If the limit is reached
// ReadAllLimit will return ErrLimitReached as an error.
func ReadAllLimit(r io.Reader, n ByteSize) ([]byte, error) {
limit := int(n + 1)
buf, err := io.ReadAll(io.LimitReader(r, int64(limit)))
if err != nil {
return buf, err
}
if len(buf) >= limit {
return buf[:limit-1], ErrLimitReached
}
return buf, nil
}
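// Illustrative sketch (not part of the original source): capping reads from an
// untrusted source. 'body' stands in for any io.Reader, e.g. an HTTP response
// body:
//
//	data, err := ReadAllLimit(strings.NewReader("hello"), KB)
//	// err == nil, data == []byte("hello")
//
//	data, err = ReadAllLimit(body, MB)
//	if errors.Is(err, ErrLimitReached) {
//		// more than 1 MB was available; data holds exactly the first 1 MB
//	}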
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package slices
import (
"slices"
"sort"
"golang.org/x/exp/constraints"
)
// Unique deduplicates the elements in the input slice, preserving their ordering and
// modifying the slice in place.
// Unique relies on a map to find multiple occurrences of the same elements.
// For slices with a size less than 192 elements, a simpler O(N^2) search algorithm
// that does not allocate memory is used instead.
// The limit of 192 has been derived experimentally (see BenchmarkUnique for more information).
func Unique[S ~[]T, T comparable](s S) S {
if len(s) < 2 {
return s
}
last := 0
if len(s) < 192 {
Loop:
for i := 0; i < len(s); i++ {
for j := 0; j < last; j++ {
if s[i] == s[j] {
continue Loop
}
}
s[last] = s[i]
last++
}
} else {
set := make(map[T]struct{}, len(s))
for i := 0; i < len(s); i++ {
if _, ok := set[s[i]]; ok {
continue
}
set[s[i]] = struct{}{}
s[last] = s[i]
last++
}
}
return s[:last]
}
// UniqueFunc deduplicates the elements in the input slice like Unique, but takes a
// function to extract the comparable "key" to compare T. This is slower than Unique,
// but can be used with non-comparable elements.
func UniqueFunc[S ~[]T, T any, K comparable](s S, key func(i int) K) S {
if len(s) < 2 {
return s
}
last := 0
set := make(map[K]struct{}, len(s))
for i := 0; i < len(s); i++ {
if _, ok := set[key(i)]; ok {
continue
}
set[key(i)] = struct{}{}
s[last] = s[i]
last++
}
return s[:last]
}
// SortedUnique sorts and deduplicates the input slice in place.
// It uses the < operator to compare the elements in the slice and thus requires
// the elements to satisfy constraints.Ordered.
func SortedUnique[S ~[]T, T constraints.Ordered](s S) S {
if len(s) < 2 {
return s
}
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
return slices.Compact(s)
}
// SortedUniqueFunc is like SortedUnique but allows the user to specify custom functions
// for ordering (less function) and comparing (eq function) the elements in the slice.
// This is useful in all the cases where SortedUnique cannot be used:
// - for types that do not satisfy constraints.Ordered (e.g: composite types)
// - when the user wants to customize how elements are compared (e.g: user wants to enforce reverse ordering)
func SortedUniqueFunc[S ~[]T, T any](
s S,
less func(i, j int) bool,
eq func(a, b T) bool,
) S {
if len(s) < 2 {
return s
}
sort.Slice(s, less)
return slices.CompactFunc(s, eq)
}
// Diff returns a slice of elements which is the difference of a and b.
// The returned slice keeps the elements in the same order found in the "a" slice.
// Both input slices are considered as sets, that is, all elements are considered as
// unique when computing the difference.
func Diff[S ~[]T, T comparable](a, b S) []T {
if len(a) == 0 {
return nil
}
if len(b) == 0 {
return a
}
var diff []T
setB := make(map[T]struct{}, len(b))
for _, v := range b {
setB[v] = struct{}{}
}
setA := make(map[T]struct{}, len(a))
for _, v := range a {
// v is in b, too
if _, ok := setB[v]; ok {
continue
}
// v has been already added to diff
if _, ok := setA[v]; ok {
continue
}
diff = append(diff, v)
setA[v] = struct{}{}
}
return diff
}
// SubsetOf returns a boolean that indicates if slice a is a subset of slice b.
// In case it is not, the returned slice contains all the unique elements that are in a but not in b.
func SubsetOf[S ~[]T, T comparable](a, b S) (bool, []T) {
d := Diff(a, b)
return len(d) == 0, d
}
// XorNil returns true if one of the two slices is nil while the other is not.
func XorNil[T any](s1, s2 []T) bool {
return s1 == nil && s2 != nil ||
s1 != nil && s2 == nil
}
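// Illustrative sketch (not part of the original source): the helpers above
// treat their inputs as sets, and Unique/SortedUnique modify them in place:
//
//	a := Unique([]int{3, 1, 3, 2, 1})       // []int{3, 1, 2}, first-occurrence order kept
//	b := SortedUnique([]int{3, 1, 3, 2, 1}) // []int{1, 2, 3}
//	d := Diff([]int{1, 2, 3}, []int{2})     // []int{1, 3}
//	ok, extra := SubsetOf([]int{1, 2}, []int{1, 2, 3}) // true, nil
//	_, _, _, _, _ = a, b, d, ok, extra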
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package time is a wrapper for the stdlib time library that aliases most
// underlying types, but allows overrides for testing purposes.
//
// Synced to go-1.20.7.
package time
import (
"time"
)
const (
Layout = time.Layout
ANSIC = time.ANSIC
UnixDate = time.UnixDate
RubyDate = time.RubyDate
RFC822 = time.RFC822
RFC822Z = time.RFC822Z
RFC850 = time.RFC850
RFC1123 = time.RFC1123
RFC1123Z = time.RFC1123Z
RFC3339 = time.RFC3339
RFC3339Nano = time.RFC3339Nano
Kitchen = time.Kitchen
Stamp = time.Stamp
StampMilli = time.StampMilli
StampMicro = time.StampMicro
StampNano = time.StampNano
DateTime = time.DateTime
DateOnly = time.DateOnly
TimeOnly = time.TimeOnly
Nanosecond = time.Nanosecond
Microsecond = time.Microsecond
Millisecond = time.Millisecond
Second = time.Second
Minute = time.Minute
Hour = time.Hour
)
var (
ParseDuration = time.ParseDuration
Since = time.Since
Until = time.Until
FixedZone = time.FixedZone
LoadLocation = time.LoadLocation
LoadLocationFromTZData = time.LoadLocationFromTZData
Date = time.Date
Now = time.Now
Parse = time.Parse
ParseInLocation = time.ParseInLocation
UTC = time.UTC
Unix = time.Unix
UnixMicro = time.UnixMicro
UnixMilli = time.UnixMilli
)
type (
Duration = time.Duration
Location = time.Location
Month = time.Month
ParseError = time.ParseError
Ticker = time.Ticker
Time = time.Time
Timer = time.Timer
Weekday = time.Weekday
)
var (
MaxInternalTimerDelay time.Duration
)
// After overrides the stdlib time.After to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func After(d Duration) <-chan Time {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.After(d)
}
// Sleep overrides the stdlib time.Sleep to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func Sleep(d time.Duration) {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
time.Sleep(d)
}
// Tick overrides the stdlib time.Tick to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func Tick(d Duration) <-chan time.Time {
return NewTicker(d).C
}
// NewTicker overrides the stdlib time.NewTicker to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func NewTicker(d Duration) *time.Ticker {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.NewTicker(d)
}
// NewTimer overrides the stdlib time.NewTimer to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func NewTimer(d Duration) *time.Timer {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.NewTimer(d)
}
// NewTimerWithoutMaxDelay returns a time.NewTimer without enforcing maximum
// sleepiness. This function should only be used in cases where the timer firing
// early impacts correctness. If in doubt, you probably should use NewTimer.
func NewTimerWithoutMaxDelay(d Duration) *time.Timer {
return time.NewTimer(d)
}
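// Illustrative sketch (hypothetical helper, not part of the original package):
// with MaxInternalTimerDelay set (e.g. in tests), a long wait through this
// package fires no later than the configured cap, even though a full hour is
// requested below.
func waitWithCappedTimer() {
	t := NewTimer(Hour)
	defer t.Stop()
	<-t.C // fires after min(Hour, MaxInternalTimerDelay) when the cap is set
}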
// AfterFunc overrides the stdlib time.AfterFunc to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func AfterFunc(d Duration, f func()) *time.Timer {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.AfterFunc(d, f)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package version
import (
"encoding/base64"
"encoding/json"
"fmt"
"runtime"
"strings"
)
// CiliumVersion provides a minimal structure for the version string
type CiliumVersion struct {
// Version is the semantic version of Cilium
Version string
// Revision is the short SHA from the last commit
Revision string
// GoRuntimeVersion is the Go version used to run Cilium
GoRuntimeVersion string
// Arch is the architecture where Cilium was compiled
Arch string
// AuthorDate is the git author time reference, stored as an ISO 8601 formatted string
AuthorDate string
}
// ciliumVersion is set to Cilium's version, revision and git author time reference during build.
var ciliumVersion string
// Version is the complete Cilium version string including Go version.
var Version string
func init() {
// Mimic the output of `go version` and append it to ciliumVersion.
// Report GOOS/GOARCH of the actual binary, not the system it was built on, in case it was
// cross-compiled. See #13122
Version = fmt.Sprintf("%s go version %s %s/%s", ciliumVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
}
// FromString converts a version string into a CiliumVersion struct
func FromString(versionString string) CiliumVersion {
// string to parse: "0.13.90 a722bdb 2018-01-09T22:32:37+01:00 go version go1.9 linux/amd64"
fields := strings.Split(versionString, " ")
if len(fields) != 7 {
return CiliumVersion{}
}
cver := CiliumVersion{
Version: fields[0],
Revision: fields[1],
AuthorDate: fields[2],
GoRuntimeVersion: fields[5],
Arch: fields[6],
}
return cver
}
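// Illustrative sketch (hypothetical helper, not part of the original package):
// the mapping performed by FromString for the example string in the comment above.
func exampleFromString() CiliumVersion {
	// Yields Version "0.13.90", Revision "a722bdb",
	// AuthorDate "2018-01-09T22:32:37+01:00", GoRuntimeVersion "go1.9"
	// and Arch "linux/amd64".
	return FromString("0.13.90 a722bdb 2018-01-09T22:32:37+01:00 go version go1.9 linux/amd64")
}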
// GetCiliumVersion returns an initialized CiliumVersion structure
func GetCiliumVersion() CiliumVersion {
return FromString(Version)
}
// Base64 returns the version in a base64 format.
func Base64() (string, error) {
jsonBytes, err := json.Marshal(Version)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(jsonBytes), nil
}
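// Illustrative sketch (hypothetical helper, not part of the original package):
// decoding the value produced by Base64 back into the plain version string.
func decodeBase64Version(encoded string) (string, error) {
	jsonBytes, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return "", err
	}
	var v string
	err = json.Unmarshal(jsonBytes, &v)
	return v, err
}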
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !windows
package version
import (
"fmt"
"regexp"
"strings"
"github.com/blang/semver/v4"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/versioncheck"
)
func parseKernelVersion(ver string) (semver.Version, error) {
verStrs := strings.Split(ver, ".")
// We are assuming the kernel version will be one of the following:
// 4.9.17-040917-generic or 4.9-040917-generic or 4-generic
// So as observed, the kernel value is N.N.N-m, N.N-m, or N-m,
// which implies that len(verStrs) should be between 1 and 3
if len(verStrs) < 1 || len(verStrs) > 3 {
return semver.Version{}, fmt.Errorf("unable to get kernel version from %q", ver)
}
// Given the observations, we use a regular expression to extract
// the patch number from the last element of the verStrs array and
// append "0" to the verStrs array until its length is 3, since in all
// cases we want to return Major.Minor.PatchNumber from this function.
patch := regexp.MustCompilePOSIX(`^[0-9]+`).FindString(verStrs[len(verStrs)-1])
if patch == "" {
verStrs[len(verStrs)-1] = "0"
} else {
verStrs[len(verStrs)-1] = patch
}
for len(verStrs) < 3 {
verStrs = append(verStrs, "0")
}
return versioncheck.Version(strings.Join(verStrs[:3], "."))
}
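// Illustrative sketch (hypothetical helper, not part of the original package):
// the normalization performed by parseKernelVersion for the release string
// shapes listed in the comment above.
func exampleParseKernelVersion() {
	v1, _ := parseKernelVersion("4.9.17-040917-generic") // -> 4.9.17
	v2, _ := parseKernelVersion("4.9-040917-generic")    // -> 4.9.0
	v3, _ := parseKernelVersion("4-generic")             // -> 4.0.0
	_, _, _ = v1, v2, v3
}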
// GetKernelVersion returns the version of the Linux kernel running on this host.
func GetKernelVersion() (semver.Version, error) {
var unameBuf unix.Utsname
if err := unix.Uname(&unameBuf); err != nil {
return semver.Version{}, err
}
return parseKernelVersion(string(unameBuf.Release[:]))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package versioncheck provides utility wrappers for go-version, allowing the
// constraints to be used as global variables.
package versioncheck
import (
"fmt"
"strconv"
"strings"
"github.com/blang/semver/v4"
)
// MustCompile wraps Compile, panicking when an error is returned (this
// occurs when the constraint cannot be parsed).
// It is intended to be used like regexp.MustCompile, to ensure unparseable
// constraints are caught in testing.
func MustCompile(constraint string) semver.Range {
verCheck, err := Compile(constraint)
if err != nil {
panic(fmt.Errorf("cannot compile go-version constraint '%s': %w", constraint, err))
}
return verCheck
}
// Compile trivially wraps semver.ParseRange, returning the resulting range
// and error.
func Compile(constraint string) (semver.Range, error) {
return semver.ParseRange(constraint)
}
// MustVersion wraps Version, panicking when an error is returned (this
// occurs when the version cannot be parsed).
func MustVersion(version string) semver.Version {
ver, err := Version(version)
if err != nil {
panic(fmt.Errorf("cannot compile go-version version '%s': %w", version, err))
}
return ver
}
// Version wraps semver.ParseTolerant, returning an error when the version
// cannot be parsed. Pre-release identifiers other than rc, beta, alpha and
// snapshot are dropped, so such versions are compared on Major.Minor.Patch only.
func Version(version string) (semver.Version, error) {
ver, err := semver.ParseTolerant(version)
if err != nil {
return ver, err
}
if len(ver.Pre) == 0 {
return ver, nil
}
for _, pre := range ver.Pre {
if strings.Contains(pre.VersionStr, "rc") ||
strings.Contains(pre.VersionStr, "beta") ||
strings.Contains(pre.VersionStr, "alpha") ||
strings.Contains(pre.VersionStr, "snapshot") {
return ver, nil
}
}
strSegments := make([]string, 3)
strSegments[0] = strconv.Itoa(int(ver.Major))
strSegments[1] = strconv.Itoa(int(ver.Minor))
strSegments[2] = strconv.Itoa(int(ver.Patch))
verStr := strings.Join(strSegments, ".")
return semver.ParseTolerant(verStr)
}
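// Illustrative sketch (hypothetical variable and helper names, not part of the
// original package): a constraint compiled once at package level and applied
// to parsed versions. Version drops unrecognized pre-release identifiers, so
// "5.10.0-custom" below compares as "5.10.0".
var isAtLeast419 = MustCompile(">=4.19.0")

func exampleVersionCheck() bool {
	return isAtLeast419(MustVersion("5.10.0-custom"))
}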
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build gofuzz
package fuzz
import (
"github.com/cilium/cilium/pkg/labels"
)
func Fuzz(data []byte) int {
label := labels.NewLabel("test", "label", "1")
err := label.UnmarshalJSON(data)
if err != nil {
return 0
}
return 1
}