/src/openvswitch/lib/dpif-netlink.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2008-2018 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | |
19 | | #include "dpif-netlink.h" |
20 | | |
21 | | #include <ctype.h> |
22 | | #include <errno.h> |
23 | | #include <fcntl.h> |
24 | | #include <inttypes.h> |
25 | | #include <net/if.h> |
26 | | #include <linux/types.h> |
27 | | #include <linux/pkt_sched.h> |
28 | | #include <poll.h> |
29 | | #include <stdlib.h> |
30 | | #include <strings.h> |
31 | | #include <sys/epoll.h> |
32 | | #include <sys/stat.h> |
33 | | #include <unistd.h> |
34 | | |
35 | | #include "bitmap.h" |
36 | | #include "dpif-netlink-rtnl.h" |
37 | | #include "dpif-provider.h" |
38 | | #include "fat-rwlock.h" |
39 | | #include "flow.h" |
40 | | #include "netdev-linux.h" |
41 | | #include "netdev-offload.h" |
42 | | #include "netdev-provider.h" |
43 | | #include "netdev-vport.h" |
44 | | #include "netdev.h" |
45 | | #include "netlink-conntrack.h" |
46 | | #include "netlink-notifier.h" |
47 | | #include "netlink-socket.h" |
48 | | #include "netlink.h" |
49 | | #include "netnsid.h" |
50 | | #include "odp-util.h" |
51 | | #include "openvswitch/dynamic-string.h" |
52 | | #include "openvswitch/flow.h" |
53 | | #include "openvswitch/hmap.h" |
54 | | #include "openvswitch/match.h" |
55 | | #include "openvswitch/ofpbuf.h" |
56 | | #include "openvswitch/poll-loop.h" |
57 | | #include "openvswitch/shash.h" |
58 | | #include "openvswitch/thread.h" |
59 | | #include "openvswitch/usdt-probes.h" |
60 | | #include "openvswitch/vlog.h" |
61 | | #include "packets.h" |
62 | | #include "random.h" |
63 | | #include "sset.h" |
64 | | #include "timeval.h" |
65 | | #include "unaligned.h" |
66 | | #include "util.h" |
67 | | |
68 | | VLOG_DEFINE_THIS_MODULE(dpif_netlink); |
69 | | #ifdef _WIN32 |
70 | | #include "wmi.h" |
71 | | enum { WINDOWS = 1 }; |
72 | | #else |
73 | | enum { WINDOWS = 0 }; |
74 | | #endif |
75 | | enum { MAX_PORTS = USHRT_MAX }; |
76 | | |
77 | | /* This ethtool flag was introduced in Linux 2.6.24, so it might be |
78 | | * missing if we have old headers. */ |
79 | 0 | #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */ |
80 | | |
81 | | #define FLOW_DUMP_MAX_BATCH 50 |
82 | 0 | #define OPERATE_MAX_OPS 50 |
83 | | |
84 | | #ifndef EPOLLEXCLUSIVE |
85 | | #define EPOLLEXCLUSIVE (1u << 28) |
86 | | #endif |
87 | | |
/* Feature bit that no kernel supports.  It is requested during feature
 * negotiation to detect old kernels that silently accept unknown feature
 * flags (see dpif_netlink_open()).  Note: no trailing semicolon -- a
 * semicolon here would expand into every use site, adding stray empty
 * statements and breaking any use in expression context. */
#define OVS_DP_F_UNSUPPORTED (1u << 31)
89 | | |
90 | | /* This PID is not used by the kernel datapath when using dispatch per CPU, |
91 | | * but it is required to be set (not zero). */ |
92 | 0 | #define DPIF_NETLINK_PER_CPU_PID UINT32_MAX |
/* In-memory representation of an OVS_DP_* Generic Netlink message, used both
 * to build datapath requests and to hold parsed datapath replies. */
struct dpif_netlink_dp {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    int dp_ifindex;

    /* Attributes. */
    const char *name;                  /* OVS_DP_ATTR_NAME. */
    const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
    uint32_t user_features;            /* OVS_DP_ATTR_USER_FEATURES */
    uint32_t cache_size;               /* OVS_DP_ATTR_MASKS_CACHE_SIZE */
    const struct ovs_dp_stats *stats;  /* OVS_DP_ATTR_STATS. */
    const struct ovs_dp_megaflow_stats *megaflow_stats;
                                       /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
    const uint32_t *upcall_pids;       /* OVS_DP_ATTR_PER_CPU_PIDS */
    uint32_t n_upcall_pids;            /* Number of PIDs in 'upcall_pids'. */
};
111 | | |
112 | | static void dpif_netlink_dp_init(struct dpif_netlink_dp *); |
113 | | static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *, |
114 | | const struct ofpbuf *); |
115 | | static void dpif_netlink_dp_dump_start(struct nl_dump *); |
116 | | static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request, |
117 | | struct dpif_netlink_dp *reply, |
118 | | struct ofpbuf **bufp); |
119 | | static int dpif_netlink_dp_get(const struct dpif *, |
120 | | struct dpif_netlink_dp *reply, |
121 | | struct ofpbuf **bufp); |
122 | | static int |
123 | | dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features); |
124 | | |
125 | | static void |
126 | | dpif_netlink_unixctl_dispatch_mode(struct unixctl_conn *conn, int argc, |
127 | | const char *argv[], void *aux); |
128 | | |
/* In-memory representation of an OVS_FLOW_* Generic Netlink message, used
 * both to build flow requests and to hold parsed flow replies. */
struct dpif_netlink_flow {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    unsigned int nlmsg_flags;
    int dp_ifindex;

    /* Attributes.
     *
     * The 'stats' member points to 64-bit data that might only be aligned on
     * 32-bit boundaries, so get_unaligned_u64() should be used to access its
     * values.
     *
     * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
     * the Netlink version of the command, even if actions_len is zero. */
    const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
    size_t key_len;
    const struct nlattr *mask;          /* OVS_FLOW_ATTR_MASK. */
    size_t mask_len;
    const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
    size_t actions_len;
    ovs_u128 ufid;                      /* OVS_FLOW_ATTR_FLOW_ID. */
    bool ufid_present;                  /* Is there a UFID? */
    bool ufid_terse;                    /* Skip serializing key/mask/acts? */
    const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
    const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
    const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
    bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
    bool probe;                         /* OVS_FLOW_ATTR_PROBE. */
};
160 | | |
161 | | static void dpif_netlink_flow_init(struct dpif_netlink_flow *); |
162 | | static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *, |
163 | | const struct ofpbuf *); |
164 | | static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *, |
165 | | struct ofpbuf *); |
166 | | static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request, |
167 | | struct dpif_netlink_flow *reply, |
168 | | struct ofpbuf **bufp); |
169 | | static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *, |
170 | | struct dpif_flow_stats *); |
171 | | static void dpif_netlink_flow_to_dpif_flow(struct dpif_flow *, |
172 | | const struct dpif_netlink_flow *); |
173 | | |
/* One of the dpif channels between the kernel and userspace: a per-port
 * Netlink socket over which the kernel delivers upcalls in per-vport
 * dispatch mode. */
struct dpif_channel {
    struct nl_sock *sock;       /* Netlink socket. */
    long long int last_poll;    /* Last time this channel was polled. */
};
179 | | |
180 | | #ifdef _WIN32 |
181 | | #define VPORT_SOCK_POOL_SIZE 1 |
182 | | /* On Windows, there is no native support for epoll. There are equivalent |
183 | | * interfaces though, that are not used currently. For simpicity, a pool of |
184 | | * netlink sockets is used. Each socket is represented by 'struct |
185 | | * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be |
186 | | * sharing the same socket. In the future, we can add a reference count and |
187 | | * such fields. */ |
188 | | struct dpif_windows_vport_sock { |
189 | | struct nl_sock *nl_sock; /* netlink socket. */ |
190 | | }; |
191 | | #endif |
192 | | |
/* State for one upcall handler thread.  Which members are meaningful depends
 * on the upcall dispatch mode negotiated with the kernel. */
struct dpif_handler {
    /* per-vport dispatch mode. */
    struct epoll_event *epoll_events;
    int epoll_fd;                 /* epoll fd that includes channel socks. */
    int n_events;                 /* Num events returned by epoll_wait(). */
    int event_offset;             /* Offset into 'epoll_events'. */

    /* per-cpu dispatch mode. */
    struct nl_sock *sock;         /* Each handler thread holds one netlink
                                     socket. */

#ifdef _WIN32
    /* Pool of sockets. */
    struct dpif_windows_vport_sock *vport_sock_pool;
    size_t last_used_pool_idx; /* Index to aid in allocating a
                                  socket in the pool to a port. */
#endif
};
211 | | |
/* Datapath interface for the openvswitch Linux kernel module. */
struct dpif_netlink {
    struct dpif dpif;
    int dp_ifindex;                /* Datapath Netlink ifindex. */
    uint32_t user_features;        /* OVS_DP_F_* feature bits negotiated
                                    * with the kernel. */

    /* Upcall messages. */
    struct fat_rwlock upcall_lock; /* Protects the handler/channel state. */
    struct dpif_handler *handlers;
    uint32_t n_handlers;           /* Num of upcall handlers. */

    /* Per-vport dispatch mode. */
    struct dpif_channel *channels; /* Array of channels for each port. */
    int uc_array_size;             /* Size of 'handler->channels' and */
                                   /* 'handler->epoll_events'. */

    /* Change notification. */
    struct nl_sock *port_notifier; /* vport multicast group subscriber. */
    bool refresh_channels;         /* If true, dpif_netlink_run() rebuilds
                                    * the vport-dispatch handlers. */
};
232 | | |
233 | | static void report_loss(struct dpif_netlink *, struct dpif_channel *, |
234 | | uint32_t ch_idx, uint32_t handler_id); |
235 | | |
236 | | static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5); |
237 | | |
238 | | /* Generic Netlink family numbers for OVS. |
239 | | * |
240 | | * Initialized by dpif_netlink_init(). */ |
241 | | static int ovs_datapath_family; |
242 | | static int ovs_vport_family; |
243 | | static int ovs_flow_family; |
244 | | static int ovs_packet_family; |
245 | | static int ovs_meter_family; |
246 | | static int ovs_ct_limit_family; |
247 | | |
248 | | /* Generic Netlink multicast groups for OVS. |
249 | | * |
250 | | * Initialized by dpif_netlink_init(). */ |
251 | | static unsigned int ovs_vport_mcgroup; |
252 | | |
253 | | /* If true, tunnel devices are created using OVS compat/genetlink. |
254 | | * If false, tunnel devices are created with rtnetlink and using light weight |
255 | | * tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback |
256 | | * to using the compat interface. */ |
257 | | static bool ovs_tunnels_out_of_tree = true; |
258 | | |
259 | | static int dpif_netlink_init(void); |
260 | | static int open_dpif(const struct dpif_netlink_dp *, struct dpif **); |
261 | | static uint32_t dpif_netlink_port_get_pid(const struct dpif *, |
262 | | odp_port_t port_no); |
263 | | static void dpif_netlink_handler_uninit(struct dpif_handler *handler); |
264 | | static int dpif_netlink_refresh_handlers_vport_dispatch(struct dpif_netlink *, |
265 | | uint32_t n_handlers); |
266 | | static void destroy_all_channels(struct dpif_netlink *); |
267 | | static int dpif_netlink_refresh_handlers_cpu_dispatch(struct dpif_netlink *); |
268 | | static void destroy_all_handlers(struct dpif_netlink *); |
269 | | |
270 | | static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *, |
271 | | struct ofpbuf *); |
272 | | static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *, |
273 | | const struct ofpbuf *); |
274 | | static int dpif_netlink_port_query__(const struct dpif_netlink *dpif, |
275 | | odp_port_t port_no, const char *port_name, |
276 | | struct dpif_port *dpif_port); |
277 | | static void vport_del_channels(struct dpif_netlink *, odp_port_t); |
278 | | |
/* Obtains a Netlink socket for upcalls and stores it in '*sockp'.
 *
 * On Linux this creates a fresh NETLINK_GENERIC socket.  On Windows,
 * sockets are not created on demand; one is borrowed, round-robin, from the
 * pool allocated when the handler was initialized (so multiple ports may
 * share a socket).  Returns 0 if successful, otherwise a positive errno
 * value. */
static int
create_nl_sock(struct dpif_netlink *dpif OVS_UNUSED, struct nl_sock **sockp)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
#ifndef _WIN32
    return nl_sock_create(NETLINK_GENERIC, sockp);
#else
    /* Pick netlink sockets to use in a round-robin fashion from each
     * handler's pool of sockets. */
    struct dpif_handler *handler = &dpif->handlers[0];
    struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
    size_t index = handler->last_used_pool_idx;

    /* A pool of sockets is allocated when the handler is initialized. */
    if (sock_pool == NULL) {
        *sockp = NULL;
        return EINVAL;
    }

    ovs_assert(index < VPORT_SOCK_POOL_SIZE);
    *sockp = sock_pool[index].nl_sock;
    ovs_assert(*sockp);
    /* Advance to the next pool slot, wrapping at the end of the pool. */
    index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
    handler->last_used_pool_idx = index;
    return 0;
#endif
}
306 | | |
/* Releases a socket obtained from create_nl_sock().  On Windows this is a
 * no-op because the socket belongs to the handler's pool and must remain
 * alive with it. */
static void
close_nl_sock(struct nl_sock *sock)
{
#ifndef _WIN32
    nl_sock_destroy(sock);
#endif
}
314 | | |
/* Returns the 'struct dpif_netlink' that embeds 'dpif', asserting that
 * 'dpif' really belongs to the dpif-netlink class. */
static struct dpif_netlink *
dpif_netlink_cast(const struct dpif *dpif)
{
    dpif_assert_class(dpif, &dpif_netlink_class);
    return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
}
321 | | |
322 | | static inline bool |
323 | 0 | dpif_netlink_upcall_per_cpu(const struct dpif_netlink *dpif) { |
324 | 0 | return !!((dpif)->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU); |
325 | 0 | } |
326 | | |
/* Adds the name of every kernel datapath to 'all_dps' by walking the
 * OVS_DP_CMD_GET Netlink dump.  Returns 0 if successful, otherwise a
 * positive errno value from initialization or from finishing the dump. */
static int
dpif_netlink_enumerate(struct sset *all_dps,
                       const struct dpif_class *dpif_class OVS_UNUSED)
{
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf msg, buf;
    int error;

    error = dpif_netlink_init();
    if (error) {
        return error;
    }

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_netlink_dp_dump_start(&dump);
    while (nl_dump_next(&dump, &msg, &buf)) {
        struct dpif_netlink_dp dp;

        /* Only replies that parse cleanly as datapath messages count. */
        if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
            sset_add(all_dps, dp.name);
        }
    }
    ofpbuf_uninit(&buf);
    return nl_dump_done(&dump);
}
353 | | |
/* Opens (if 'create' is true, first creates) the kernel datapath named
 * 'name' and, on success, stores a dpif for it in '*dpifp'.
 *
 * User features are negotiated with the kernel as part of opening: a
 * feature bit known to be unsupported (OVS_DP_F_UNSUPPORTED) is requested
 * first.  A kernel that *accepts* it does not validate feature flags at
 * all, so only the standard feature set is used.  Otherwise per-CPU upcall
 * dispatch is requested first, falling back to per-vport dispatch if the
 * kernel answers EOPNOTSUPP.  Returns 0 if successful, otherwise a
 * positive errno value. */
static int
dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
                  bool create, struct dpif **dpifp)
{
    struct dpif_netlink_dp dp_request, dp;
    struct ofpbuf *buf;
    uint32_t upcall_pid;
    int error;

    error = dpif_netlink_init();
    if (error) {
        return error;
    }

    /* Create or look up datapath. */
    dpif_netlink_dp_init(&dp_request);
    upcall_pid = 0;
    dp_request.upcall_pid = &upcall_pid;
    dp_request.name = name;

    if (create) {
        dp_request.cmd = OVS_DP_CMD_NEW;
    } else {
        dp_request.cmd = OVS_DP_CMD_GET;

        /* Fetch the currently enabled features so the SET below starts
         * from the datapath's existing feature set. */
        error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
        if (error) {
            return error;
        }
        dp_request.user_features = dp.user_features;
        ofpbuf_delete(buf);

        /* Use OVS_DP_CMD_SET to report user features */
        dp_request.cmd = OVS_DP_CMD_SET;
    }

    /* Some older kernels will not reject unknown features. This will cause
     * 'ovs-vswitchd' to incorrectly assume a feature is supported. In order to
     * test for that, we attempt to set a feature that we know is not supported
     * by any kernel. If this feature is not rejected, we can assume we are
     * running on one of these older kernels.
     */
    dp_request.user_features |= OVS_DP_F_UNALIGNED;
    dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
    dp_request.user_features |= OVS_DP_F_UNSUPPORTED;
    error = dpif_netlink_dp_transact(&dp_request, NULL, NULL);
    if (error) {
        /* The Open vSwitch kernel module has two modes for dispatching
         * upcalls: per-vport and per-cpu.
         *
         * When dispatching upcalls per-vport, the kernel will
         * send the upcall via a Netlink socket that has been selected based on
         * the vport that received the packet that is causing the upcall.
         *
         * When dispatching upcall per-cpu, the kernel will send the upcall via
         * a Netlink socket that has been selected based on the cpu that
         * received the packet that is causing the upcall.
         *
         * First we test to see if the kernel module supports per-cpu
         * dispatching (the preferred method). If it does not support per-cpu
         * dispatching, we fall back to the per-vport dispatch mode.
         */
        dp_request.user_features &= ~OVS_DP_F_UNSUPPORTED;
        dp_request.user_features &= ~OVS_DP_F_VPORT_PIDS;
        dp_request.user_features |= OVS_DP_F_DISPATCH_UPCALL_PER_CPU;
        error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
        if (error == EOPNOTSUPP) {
            dp_request.user_features &= ~OVS_DP_F_DISPATCH_UPCALL_PER_CPU;
            dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
            error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
        }
        if (error) {
            return error;
        }

        error = open_dpif(&dp, dpifp);
        /* Try to enable TC recirc sharing; the return value is ignored
         * because the feature is optional. */
        dpif_netlink_set_features(*dpifp, OVS_DP_F_TC_RECIRC_SHARING);
    } else {
        /* The kernel accepted a feature flag that no kernel supports, so
         * feature negotiation cannot be trusted on this kernel. */
        VLOG_INFO("Kernel does not correctly support feature negotiation. "
                  "Using standard features.");
        dp_request.cmd = OVS_DP_CMD_SET;
        dp_request.user_features = 0;
        dp_request.user_features |= OVS_DP_F_UNALIGNED;
        dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
        error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
        if (error) {
            return error;
        }
        error = open_dpif(&dp, dpifp);
    }

    ofpbuf_delete(buf);

    if (create) {
        VLOG_INFO("Datapath dispatch mode: %s",
                  dpif_netlink_upcall_per_cpu(dpif_netlink_cast(*dpifp)) ?
                  "per-cpu" : "per-vport");
    }

    return error;
}
455 | | |
456 | | static int |
457 | | open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp) |
458 | 0 | { |
459 | 0 | struct dpif_netlink *dpif; |
460 | |
|
461 | 0 | dpif = xzalloc(sizeof *dpif); |
462 | 0 | dpif->port_notifier = NULL; |
463 | 0 | fat_rwlock_init(&dpif->upcall_lock); |
464 | |
|
465 | 0 | dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name, |
466 | 0 | dp->dp_ifindex, dp->dp_ifindex); |
467 | |
|
468 | 0 | dpif->dp_ifindex = dp->dp_ifindex; |
469 | 0 | dpif->user_features = dp->user_features; |
470 | 0 | *dpifp = &dpif->dpif; |
471 | |
|
472 | 0 | return 0; |
473 | 0 | } |
474 | | |
#ifdef _WIN32
/* Destroys every socket in 'handler''s pool and frees the pool itself. */
static void
vport_delete_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    if (handler->vport_sock_pool) {
        uint32_t i;
        struct dpif_windows_vport_sock *sock_pool =
            handler->vport_sock_pool;

        for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
            if (sock_pool[i].nl_sock) {
                nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
                nl_sock_destroy(sock_pool[i].nl_sock);
                sock_pool[i].nl_sock = NULL;
            }
        }

        free(handler->vport_sock_pool);
        handler->vport_sock_pool = NULL;
    }
}

/* Allocates 'handler''s pool of VPORT_SOCK_POOL_SIZE Netlink sockets, each
 * subscribed to receive packets.  On failure, tears down any partially
 * built pool and returns a positive errno value; returns 0 on success. */
static int
vport_create_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_windows_vport_sock *sock_pool;
    size_t i;
    int error = 0;

    sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
        if (error) {
            goto error;
        }

        /* Enable the netlink socket to receive packets. This is equivalent to
         * calling nl_sock_join_mcgroup() to receive events. */
        error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
        if (error) {
            goto error;
        }
    }

    handler->vport_sock_pool = sock_pool;
    handler->last_used_pool_idx = 0;
    return 0;

error:
    vport_delete_sock_pool(handler);
    return error;
}
#endif /* _WIN32 */
530 | | |
531 | | /* Given the port number 'port_idx', extracts the pid of netlink socket |
532 | | * associated to the port and assigns it to 'upcall_pid'. */ |
533 | | static bool |
534 | | vport_get_pid(struct dpif_netlink *dpif, uint32_t port_idx, |
535 | | uint32_t *upcall_pid) |
536 | 0 | { |
537 | | /* Since the nl_sock can only be assigned in either all |
538 | | * or none "dpif" channels, the following check |
539 | | * would suffice. */ |
540 | 0 | if (!dpif->channels[port_idx].sock) { |
541 | 0 | return false; |
542 | 0 | } |
543 | 0 | ovs_assert(!WINDOWS || dpif->n_handlers <= 1); |
544 | |
|
545 | 0 | *upcall_pid = nl_sock_pid(dpif->channels[port_idx].sock); |
546 | |
|
547 | 0 | return true; |
548 | 0 | } |
549 | | |
/* Installs 'sock' as the upcall channel for port 'port_no', registering it
 * with every handler's epoll set.  Grows the 'channels' and per-handler
 * 'epoll_events' arrays when 'port_no' is beyond their current size, and
 * replaces any channel previously bound to the port.  Takes ownership of
 * 'sock' on success (and destroys it when there are no handlers).  Returns
 * 0 if successful, otherwise a positive errno value. */
static int
vport_add_channel(struct dpif_netlink *dpif, odp_port_t port_no,
                  struct nl_sock *sock)
{
    struct epoll_event event;
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;
    int error;

    /* With no handlers there is nobody to poll the channel. */
    if (dpif->handlers == NULL) {
        close_nl_sock(sock);
        return 0;
    }

    /* We assume that the datapath densely chooses port numbers, which can
     * therefore be used as an index into 'channels' and 'epoll_events' of
     * 'dpif'. */
    if (port_idx >= dpif->uc_array_size) {
        uint32_t new_size = port_idx + 1;

        if (new_size > MAX_PORTS) {
            VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
                         dpif_name(&dpif->dpif), port_no);
            return EFBIG;
        }

        dpif->channels = xrealloc(dpif->channels,
                                  new_size * sizeof *dpif->channels);

        /* Newly added slots start out without a socket. */
        for (i = dpif->uc_array_size; i < new_size; i++) {
            dpif->channels[i].sock = NULL;
        }

        for (i = 0; i < dpif->n_handlers; i++) {
            struct dpif_handler *handler = &dpif->handlers[i];

            handler->epoll_events = xrealloc(handler->epoll_events,
                new_size * sizeof *handler->epoll_events);

        }
        dpif->uc_array_size = new_size;
    }

    /* Drop any channel previously bound to this port. */
    vport_del_channels(dpif, port_no);

    memset(&event, 0, sizeof event);
    event.events = EPOLLIN | EPOLLEXCLUSIVE;
    event.data.u32 = port_idx;

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

#ifndef _WIN32
        if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(sock),
                      &event) < 0) {
            error = errno;
            goto error;
        }
#endif
    }
    dpif->channels[port_idx].sock = sock;
    dpif->channels[port_idx].last_poll = LLONG_MIN;

    return 0;

error:
#ifndef _WIN32
    /* Unwind: unregister from the handlers that were already updated. */
    while (i--) {
        epoll_ctl(dpif->handlers[i].epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(sock), NULL);
    }
#endif
    dpif->channels[port_idx].sock = NULL;

    return error;
}
626 | | |
/* Removes the upcall channel for 'port_no', if any: unregisters its socket
 * from every handler's epoll set, discards each handler's cached epoll
 * events (they may reference this channel), and destroys the socket. */
static void
vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
{
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;

    if (!dpif->handlers || port_idx >= dpif->uc_array_size
        || !dpif->channels[port_idx].sock) {
        return;
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];
#ifndef _WIN32
        epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(dpif->channels[port_idx].sock), NULL);
#endif
        /* Invalidate events already returned by epoll_wait(). */
        handler->event_offset = handler->n_events = 0;
    }
#ifndef _WIN32
    nl_sock_destroy(dpif->channels[port_idx].sock);
#endif
    dpif->channels[port_idx].sock = NULL;
}
651 | | |
/* Tears down all per-vport dispatch state: asks the kernel to stop sending
 * upcalls for every port with a channel, deletes every channel, and frees
 * the channel and handler arrays. */
static void
destroy_all_channels(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned int i;

    if (!dpif->handlers) {
        return;
    }

    for (i = 0; i < dpif->uc_array_size; i++ ) {
        struct dpif_netlink_vport vport_request;
        uint32_t upcall_pids = 0;

        if (!dpif->channels[i].sock) {
            continue;
        }

        /* Turn off upcalls. */
        dpif_netlink_vport_init(&vport_request);
        vport_request.cmd = OVS_VPORT_CMD_SET;
        vport_request.dp_ifindex = dpif->dp_ifindex;
        vport_request.port_no = u32_to_odp(i);
        vport_request.n_upcall_pids = 1;
        vport_request.upcall_pids = &upcall_pids;
        /* Best effort: the result is ignored, the channel is deleted
         * regardless. */
        dpif_netlink_vport_transact(&vport_request, NULL, NULL);

        vport_del_channels(dpif, u32_to_odp(i));
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        dpif_netlink_handler_uninit(handler);
        free(handler->epoll_events);
    }
    free(dpif->channels);
    free(dpif->handlers);
    dpif->handlers = NULL;
    dpif->channels = NULL;
    dpif->n_handlers = 0;
    dpif->uc_array_size = 0;
}
695 | | |
696 | | static void |
697 | | destroy_all_handlers(struct dpif_netlink *dpif) |
698 | | OVS_REQ_WRLOCK(dpif->upcall_lock) |
699 | 0 | { |
700 | 0 | int i = 0; |
701 | |
|
702 | 0 | if (!dpif->handlers) { |
703 | 0 | return; |
704 | 0 | } |
705 | 0 | for (i = 0; i < dpif->n_handlers; i++) { |
706 | 0 | struct dpif_handler *handler = &dpif->handlers[i]; |
707 | 0 | close_nl_sock(handler->sock); |
708 | 0 | } |
709 | 0 | free(dpif->handlers); |
710 | 0 | dpif->handlers = NULL; |
711 | 0 | dpif->n_handlers = 0; |
712 | 0 | } |
713 | | |
/* Implements the dpif 'close' operation: releases the port notifier, the
 * upcall state (handlers or channels, depending on the negotiated dispatch
 * mode), the upcall lock, and finally 'dpif' itself. */
static void
dpif_netlink_close(struct dpif *dpif_)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    nl_sock_destroy(dpif->port_notifier);

    fat_rwlock_wrlock(&dpif->upcall_lock);
    if (dpif_netlink_upcall_per_cpu(dpif)) {
        destroy_all_handlers(dpif);
    } else {
        destroy_all_channels(dpif);
    }
    fat_rwlock_unlock(&dpif->upcall_lock);

    fat_rwlock_destroy(&dpif->upcall_lock);
    free(dpif);
}
732 | | |
733 | | static int |
734 | | dpif_netlink_destroy(struct dpif *dpif_) |
735 | 0 | { |
736 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
737 | 0 | struct dpif_netlink_dp dp; |
738 | |
|
739 | 0 | dpif_netlink_dp_init(&dp); |
740 | 0 | dp.cmd = OVS_DP_CMD_DEL; |
741 | 0 | dp.dp_ifindex = dpif->dp_ifindex; |
742 | 0 | return dpif_netlink_dp_transact(&dp, NULL, NULL); |
743 | 0 | } |
744 | | |
745 | | static bool |
746 | | dpif_netlink_run(struct dpif *dpif_) |
747 | 0 | { |
748 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
749 | |
|
750 | 0 | if (!dpif_netlink_upcall_per_cpu(dpif)) { |
751 | 0 | if (dpif->refresh_channels) { |
752 | 0 | dpif->refresh_channels = false; |
753 | 0 | fat_rwlock_wrlock(&dpif->upcall_lock); |
754 | 0 | dpif_netlink_refresh_handlers_vport_dispatch(dpif, |
755 | 0 | dpif->n_handlers); |
756 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
757 | 0 | } |
758 | 0 | } |
759 | 0 | return false; |
760 | 0 | } |
761 | | |
/* Implements the dpif 'get_stats' operation: fetches datapath statistics
 * from the kernel into 'stats'.  Counters the kernel does not report are
 * set to all-ones so callers can distinguish them from real zeros.
 * Returns 0 if successful, otherwise a positive errno value. */
static int
dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
{
    struct dpif_netlink_dp dp;
    struct ofpbuf *buf;
    int error;

    error = dpif_netlink_dp_get(dpif_, &dp, &buf);
    if (!error) {
        memset(stats, 0, sizeof *stats);

        if (dp.stats) {
            stats->n_hit    = get_32aligned_u64(&dp.stats->n_hit);
            stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
            stats->n_lost   = get_32aligned_u64(&dp.stats->n_lost);
            stats->n_flows  = get_32aligned_u64(&dp.stats->n_flows);
        }

        if (dp.megaflow_stats) {
            stats->n_masks = dp.megaflow_stats->n_masks;
            stats->n_mask_hit = get_32aligned_u64(
                &dp.megaflow_stats->n_mask_hit);
            stats->n_cache_hit = get_32aligned_u64(
                &dp.megaflow_stats->n_cache_hit);

            if (!stats->n_cache_hit) {
                /* Old kernels don't use this field and always
                 * report zero instead. Disable this stat. */
                stats->n_cache_hit = UINT64_MAX;
            }
        } else {
            /* No megaflow statistics in the reply; mark them unavailable. */
            stats->n_masks = UINT32_MAX;
            stats->n_mask_hit = UINT64_MAX;
            stats->n_cache_hit = UINT64_MAX;
        }
        ofpbuf_delete(buf);
    }
    return error;
}
801 | | |
/* Installs 'upcall_pids' (an array of 'n_upcall_pids' handler Netlink PIDs)
 * as the kernel's per-CPU upcall destinations.  The array sent to the
 * kernel has one entry per CPU core, assigning handler PIDs round-robin
 * when there are more cores than handlers.  Returns 0 if successful,
 * otherwise an errno value.  (NOTE(review): the failure to retain per-CPU
 * mode is reported as negative -EOPNOTSUPP, matching
 * dpif_netlink_set_features() — confirm callers expect the sign.) */
static int
dpif_netlink_set_handler_pids(struct dpif *dpif_, const uint32_t *upcall_pids,
                              uint32_t n_upcall_pids)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    int largest_cpu_id = ovs_numa_get_largest_core_id();
    struct dpif_netlink_dp request, reply;
    struct ofpbuf *bufp;

    uint32_t *corrected;
    int error, i, n_cores;

    if (largest_cpu_id == OVS_NUMA_UNSPEC) {
        largest_cpu_id = -1;
    }

    /* Some systems have non-continuous cpu core ids. count_total_cores()
     * would return an accurate number, however, this number cannot be used.
     * e.g. If the largest core_id of a system is cpu9, but the system only
     * has 4 cpus then the OVS kernel module would throw a "CPU mismatch"
     * warning. With the MAX() in place in this example we send an array of
     * size 10 and prevent the warning. This has no bearing on the number of
     * threads created.
     */
    n_cores = MAX(count_total_cores(), largest_cpu_id + 1);
    VLOG_DBG("Dispatch mode(per-cpu): Setting up handler PIDs for %d cores",
             n_cores);

    dpif_netlink_dp_init(&request);
    request.cmd = OVS_DP_CMD_SET;
    request.name = dpif_->base_name;
    request.dp_ifindex = dpif->dp_ifindex;
    request.user_features = dpif->user_features |
                            OVS_DP_F_DISPATCH_UPCALL_PER_CPU;

    corrected = xcalloc(n_cores, sizeof *corrected);

    /* Spread the handler PIDs across all cores, wrapping around when there
     * are fewer handlers than cores. */
    for (i = 0; i < n_cores; i++) {
        corrected[i] = upcall_pids[i % n_upcall_pids];
    }
    request.upcall_pids = corrected;
    request.n_upcall_pids = n_cores;

    error = dpif_netlink_dp_transact(&request, &reply, &bufp);
    if (!error) {
        dpif->user_features = reply.user_features;
        ofpbuf_delete(bufp);
        if (!dpif_netlink_upcall_per_cpu(dpif)) {
            /* The kernel dropped per-CPU dispatch from the feature set. */
            error = -EOPNOTSUPP;
        }
    }
    free(corrected);
    return error;
}
856 | | |
/* Enables 'new_features' (a bitmap of OVS_DP_F_* flags) on the kernel
 * datapath backing 'dpif_' via an OVS_DP_CMD_SET transaction.
 *
 * Returns 0 on success, a positive errno value if the transaction fails, or
 * -EOPNOTSUPP if the kernel accepted the request but did not actually turn
 * on all of the requested features. */
static int
dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_dp request, reply;
    struct ofpbuf *bufp;
    int error;

    dpif_netlink_dp_init(&request);
    request.cmd = OVS_DP_CMD_SET;
    request.name = dpif_->base_name;
    request.dp_ifindex = dpif->dp_ifindex;
    /* Keep all previously negotiated features while adding the new ones. */
    request.user_features = dpif->user_features | new_features;

    error = dpif_netlink_dp_transact(&request, &reply, &bufp);
    if (!error) {
        /* Cache what the kernel actually enabled. */
        dpif->user_features = reply.user_features;
        ofpbuf_delete(bufp);
        if (!(dpif->user_features & new_features)) {
            /* NOTE(review): returns a negative errno here, unlike most dpif
             * operations which use positive errno values -- confirm that
             * callers only test for non-zero. */
            return -EOPNOTSUPP;
        }
    }

    return error;
}
882 | | |
/* Returns the netdev type string (e.g. "system", "internal", "vxlan")
 * corresponding to the kernel vport type in 'vport', or "unknown" (with a
 * rate-limited warning) for vport types this function does not handle. */
static const char *
get_vport_type(const struct dpif_netlink_vport *vport)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

    switch (vport->type) {
    case OVS_VPORT_TYPE_NETDEV: {
        /* A plain netdev vport may correspond to a registered netdev type
         * (e.g. "tap"); fall back to "system" when the name is unknown. */
        const char *type = netdev_get_type_from_name(vport->name);

        return type ? type : "system";
    }

    case OVS_VPORT_TYPE_INTERNAL:
        return "internal";

    case OVS_VPORT_TYPE_GENEVE:
        return "geneve";

    case OVS_VPORT_TYPE_GRE:
        return "gre";

    case OVS_VPORT_TYPE_VXLAN:
        return "vxlan";

    case OVS_VPORT_TYPE_ERSPAN:
        return "erspan";

    case OVS_VPORT_TYPE_IP6ERSPAN:
        return "ip6erspan";

    case OVS_VPORT_TYPE_IP6GRE:
        return "ip6gre";

    case OVS_VPORT_TYPE_GTPU:
        return "gtpu";

    case OVS_VPORT_TYPE_SRV6:
        return "srv6";

    case OVS_VPORT_TYPE_BAREUDP:
        return "bareudp";

    case OVS_VPORT_TYPE_UNSPEC:
    case __OVS_VPORT_TYPE_MAX:
        break;
    }

    VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
                 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
    return "unknown";
}
934 | | |
935 | | enum ovs_vport_type |
936 | | netdev_to_ovs_vport_type(const char *type) |
937 | 0 | { |
938 | 0 | if (!strcmp(type, "tap") || !strcmp(type, "system")) { |
939 | 0 | return OVS_VPORT_TYPE_NETDEV; |
940 | 0 | } else if (!strcmp(type, "internal")) { |
941 | 0 | return OVS_VPORT_TYPE_INTERNAL; |
942 | 0 | } else if (!strcmp(type, "geneve")) { |
943 | 0 | return OVS_VPORT_TYPE_GENEVE; |
944 | 0 | } else if (!strcmp(type, "vxlan")) { |
945 | 0 | return OVS_VPORT_TYPE_VXLAN; |
946 | 0 | } else if (!strcmp(type, "erspan")) { |
947 | 0 | return OVS_VPORT_TYPE_ERSPAN; |
948 | 0 | } else if (!strcmp(type, "ip6erspan")) { |
949 | 0 | return OVS_VPORT_TYPE_IP6ERSPAN; |
950 | 0 | } else if (!strcmp(type, "ip6gre")) { |
951 | 0 | return OVS_VPORT_TYPE_IP6GRE; |
952 | 0 | } else if (!strcmp(type, "gre")) { |
953 | 0 | return OVS_VPORT_TYPE_GRE; |
954 | 0 | } else if (!strcmp(type, "gtpu")) { |
955 | 0 | return OVS_VPORT_TYPE_GTPU; |
956 | 0 | } else if (!strcmp(type, "srv6")) { |
957 | 0 | return OVS_VPORT_TYPE_SRV6; |
958 | 0 | } else if (!strcmp(type, "bareudp")) { |
959 | 0 | return OVS_VPORT_TYPE_BAREUDP; |
960 | 0 | } else { |
961 | 0 | return OVS_VPORT_TYPE_UNSPEC; |
962 | 0 | } |
963 | 0 | } |
964 | | |
/* Creates a vport named 'name' of kernel type 'type' in 'dpif''s datapath,
 * with optional Netlink-formatted tunnel 'options'.
 *
 * On entry '*port_nop' is the requested port number, or ODPP_NONE to let the
 * kernel choose; on successful return it holds the assigned port number.
 *
 * In per-vport dispatch mode this also creates an upcall Netlink socket for
 * the port and registers it as a channel, rolling the port creation back if
 * that fails; per-cpu dispatch mode needs neither.
 *
 * Returns 0 on success, otherwise a positive errno value. */
static int
dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
                        enum ovs_vport_type type,
                        struct ofpbuf *options,
                        odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport request, reply;
    struct ofpbuf *buf;
    struct nl_sock *sock = NULL;
    uint32_t upcall_pids = 0;
    int error = 0;

    /* per-cpu dispatch mode does not require a socket per vport. */
    if (!dpif_netlink_upcall_per_cpu(dpif)) {
        if (dpif->handlers) {
            error = create_nl_sock(dpif, &sock);
            if (error) {
                return error;
            }
        }
        if (sock) {
            upcall_pids = nl_sock_pid(sock);
        }
    }

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_NEW;
    request.dp_ifindex = dpif->dp_ifindex;
    request.type = type;
    request.name = name;

    request.port_no = *port_nop;
    /* 'upcall_pids' stays 0 when no socket was created -- presumably the
     * kernel then drops upcalls for this port; verify against the kernel
     * module. */
    request.n_upcall_pids = 1;
    request.upcall_pids = &upcall_pids;

    if (options) {
        request.options = options->data;
        request.options_len = options->size;
    }

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        *port_nop = reply.port_no;
    } else {
        if (error == EBUSY && *port_nop != ODPP_NONE) {
            VLOG_INFO("%s: requested port %"PRIu32" is in use",
                      dpif_name(&dpif->dpif), *port_nop);
        }

        close_nl_sock(sock);
        goto exit;
    }

    if (!dpif_netlink_upcall_per_cpu(dpif)) {
        error = vport_add_channel(dpif, *port_nop, sock);
        if (error) {
            VLOG_INFO("%s: could not add channel for port %s",
                      dpif_name(&dpif->dpif), name);

            /* Delete the port. */
            dpif_netlink_vport_init(&request);
            request.cmd = OVS_VPORT_CMD_DEL;
            request.dp_ifindex = dpif->dp_ifindex;
            request.port_no = *port_nop;
            dpif_netlink_vport_transact(&request, NULL, NULL);
            close_nl_sock(sock);
            goto exit;
        }
    }

exit:
    ofpbuf_delete(buf);

    return error;
}
1041 | | |
/* Adds 'netdev' to 'dpif' using the classic genetlink vport interface,
 * serializing any tunnel configuration (destination port, extensions) into
 * OVS_TUNNEL_ATTR_* options.
 *
 * Returns 0 on success, EINVAL if the netdev type has no kernel vport
 * equivalent, otherwise a positive errno value from the vport creation. */
static int
dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
                             odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    const struct netdev_tunnel_config *tnl_cfg;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *type = netdev_get_type(netdev);
    uint64_t options_stub[64 / 8];
    enum ovs_vport_type ovs_type;
    struct ofpbuf options;
    const char *name;

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    ovs_type = netdev_to_ovs_vport_type(netdev_get_type(netdev));
    if (ovs_type == OVS_VPORT_TYPE_UNSPEC) {
        VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
                     "unsupported type `%s'",
                     dpif_name(&dpif->dpif), name, type);
        return EINVAL;
    }

    if (ovs_type == OVS_VPORT_TYPE_NETDEV) {
#ifdef _WIN32
        /* XXX : Map appropriate Windows handle */
#else
        /* LRO is incompatible with forwarding; make sure it is off. */
        netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
#endif
    }

#ifdef _WIN32
    if (ovs_type == OVS_VPORT_TYPE_INTERNAL) {
        if (!create_wmi_port(name)){
            VLOG_ERR("Could not create wmi internal port with name:%s", name);
            return EINVAL;
        };
    }
#endif

    tnl_cfg = netdev_get_tunnel_config(netdev);
    if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
        /* Build the tunnel options in a stack buffer; 64 bytes is enough for
         * the destination port plus the extension flags. */
        ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
        if (tnl_cfg->dst_port) {
            nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
                           ntohs(tnl_cfg->dst_port));
        }
        if (tnl_cfg->exts) {
            size_t ext_ofs;
            int i;

            /* Each set bit in 'exts' becomes a flag attribute nested under
             * OVS_TUNNEL_ATTR_EXTENSION. */
            ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
            for (i = 0; i < 32; i++) {
                if (tnl_cfg->exts & (UINT32_C(1) << i)) {
                    nl_msg_put_flag(&options, i);
                }
            }
            nl_msg_end_nested(&options, ext_ofs);
        }
        return dpif_netlink_port_add__(dpif, name, ovs_type, &options,
                                       port_nop);
    } else {
        return dpif_netlink_port_add__(dpif, name, ovs_type, NULL, port_nop);
    }

}
1108 | | |
/* Creates the tunnel device for 'netdev' through rtnetlink and then attaches
 * it to 'dpif' as a plain OVS_VPORT_TYPE_NETDEV vport.  If attaching fails,
 * the rtnetlink device is destroyed again so no orphan is left behind.
 *
 * Returns 0 on success, EOPNOTSUPP if rtnetlink cannot create this device
 * type (the caller then falls back to the compat path), or another positive
 * errno value on failure. */
static int
dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink *dpif,
                                      struct netdev *netdev,
                                      odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *name;
    int error;

    error = dpif_netlink_rtnl_port_create(netdev);
    if (error) {
        /* EOPNOTSUPP is an expected fallback signal; only warn otherwise. */
        if (error != EOPNOTSUPP) {
            VLOG_WARN_RL(&rl, "Failed to create %s with rtnetlink: %s",
                         netdev_get_name(netdev), ovs_strerror(error));
        }
        return error;
    }

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    error = dpif_netlink_port_add__(dpif, name, OVS_VPORT_TYPE_NETDEV, NULL,
                                    port_nop);
    if (error) {
        /* Roll back the device we just created. */
        dpif_netlink_rtnl_port_destroy(name, netdev_get_type(netdev));
    }
    return error;
}
1137 | | |
1138 | | static int |
1139 | | dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev, |
1140 | | odp_port_t *port_nop) |
1141 | 0 | { |
1142 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1143 | 0 | int error = EOPNOTSUPP; |
1144 | |
|
1145 | 0 | fat_rwlock_wrlock(&dpif->upcall_lock); |
1146 | 0 | if (!ovs_tunnels_out_of_tree) { |
1147 | 0 | error = dpif_netlink_rtnl_port_create_and_add(dpif, netdev, port_nop); |
1148 | 0 | } |
1149 | 0 | if (error) { |
1150 | 0 | error = dpif_netlink_port_add_compat(dpif, netdev, port_nop); |
1151 | 0 | } |
1152 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
1153 | |
|
1154 | 0 | return error; |
1155 | 0 | } |
1156 | | |
/* Removes port 'port_no' from 'dpif': deletes the kernel vport, tears down
 * its upcall channels, and (for in-tree tunnels) destroys the backing
 * rtnetlink device.  Returns 0 on success or a positive errno value. */
static int
dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport vport;
    struct dpif_port dpif_port;
    int error;

    /* Fetch name/type first; they are needed below for the rtnetlink
     * teardown after the vport itself is gone. */
    error = dpif_netlink_port_query__(dpif, port_no, NULL, &dpif_port);
    if (error) {
        return error;
    }

    dpif_netlink_vport_init(&vport);
    vport.cmd = OVS_VPORT_CMD_DEL;
    vport.dp_ifindex = dpif->dp_ifindex;
    vport.port_no = port_no;
#ifdef _WIN32
    if (!strcmp(dpif_port.type, "internal")) {
        if (!delete_wmi_port(dpif_port.name)) {
            VLOG_ERR("Could not delete wmi port with name: %s",
                     dpif_port.name);
        };
    }
#endif
    error = dpif_netlink_vport_transact(&vport, NULL, NULL);

    /* Channels are removed even if the vport deletion failed. */
    vport_del_channels(dpif, port_no);

    if (!error && !ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_destroy(dpif_port.name, dpif_port.type);
        /* Non-rtnetlink ports report EOPNOTSUPP here; that is not an
         * error for the overall deletion. */
        if (error == EOPNOTSUPP) {
            error = 0;
        }
    }

    dpif_port_destroy(&dpif_port);

    return error;
}
1197 | | |
1198 | | static int |
1199 | | dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no) |
1200 | 0 | { |
1201 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1202 | 0 | int error; |
1203 | |
|
1204 | 0 | fat_rwlock_wrlock(&dpif->upcall_lock); |
1205 | 0 | error = dpif_netlink_port_del__(dpif, port_no); |
1206 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
1207 | |
|
1208 | 0 | return error; |
1209 | 0 | } |
1210 | | |
/* Queries one vport of 'dpif', either by 'port_no' or by 'port_name' (the
 * kernel resolves whichever is set).  On success, if 'dpif_port' is nonnull,
 * fills it with malloc'd name/type strings and the port number; the caller
 * owns them and must call dpif_port_destroy().
 *
 * Returns 0 on success, ENODEV if a by-name lookup resolved to a different
 * datapath, otherwise a positive errno value. */
static int
dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
                          const char *port_name, struct dpif_port *dpif_port)
{
    struct dpif_netlink_vport request;
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;
    request.port_no = port_no;
    request.name = port_name;

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        if (reply.dp_ifindex != request.dp_ifindex) {
            /* A query by name reported that 'port_name' is in some datapath
             * other than 'dpif', but the caller wants to know about 'dpif'. */
            error = ENODEV;
        } else if (dpif_port) {
            /* 'reply' points into 'buf', so copy before freeing it. */
            dpif_port->name = xstrdup(reply.name);
            dpif_port->type = xstrdup(get_vport_type(&reply));
            dpif_port->port_no = reply.port_no;
        }
        ofpbuf_delete(buf);
    }
    return error;
}
1241 | | |
1242 | | static int |
1243 | | dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no, |
1244 | | struct dpif_port *dpif_port) |
1245 | 0 | { |
1246 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1247 | |
|
1248 | 0 | return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port); |
1249 | 0 | } |
1250 | | |
/* dpif 'port_query_by_name' implementation: looks a port up by device name;
 * port number 0 is ignored by the kernel when a name is given. */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    return dpif_netlink_port_query__(dpif_netlink_cast(dpif_), 0, devname,
                                     dpif_port);
}
1259 | | |
/* Returns the Netlink PID of the upcall channel serving 'port_no' in
 * per-vport dispatch mode, or 0 if no handlers/channels exist.  Must be
 * called with the upcall lock held (at least for reading). */
static uint32_t
dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
                            odp_port_t port_no)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    uint32_t port_idx = odp_to_u32(port_no);
    uint32_t pid = 0;

    if (dpif->handlers && dpif->uc_array_size > 0) {
        /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
         * channel, since it is not heavily loaded. */
        uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;

        /* Needs to check in case the socket pointer is changed in between
         * the holding of upcall_lock. A known case happens when the main
         * thread deletes the vport while the handler thread is handling
         * the upcall from that port. */
        if (dpif->channels[idx].sock) {
            pid = nl_sock_pid(dpif->channels[idx].sock);
        }
    }

    return pid;
}
1284 | | |
1285 | | static uint32_t |
1286 | | dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no) |
1287 | 0 | { |
1288 | 0 | const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1289 | 0 | uint32_t ret; |
1290 | | |
1291 | | /* In per-cpu dispatch mode, vports do not have an associated PID */ |
1292 | 0 | if (dpif_netlink_upcall_per_cpu(dpif)) { |
1293 | | /* In per-cpu dispatch mode, this will be ignored as kernel space will |
1294 | | * select the PID before sending to user space. We set to |
1295 | | * DPIF_NETLINK_PER_CPU_PID as 0 is rejected by kernel space as an |
1296 | | * invalid PID. |
1297 | | */ |
1298 | 0 | return DPIF_NETLINK_PER_CPU_PID; |
1299 | 0 | } |
1300 | | |
1301 | 0 | fat_rwlock_rdlock(&dpif->upcall_lock); |
1302 | 0 | ret = dpif_netlink_port_get_pid__(dpif, port_no); |
1303 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
1304 | |
|
1305 | 0 | return ret; |
1306 | 0 | } |
1307 | | |
1308 | | static int |
1309 | | dpif_netlink_flow_flush(struct dpif *dpif_) |
1310 | 0 | { |
1311 | 0 | const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif_)); |
1312 | 0 | const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1313 | 0 | struct dpif_netlink_flow flow; |
1314 | |
|
1315 | 0 | dpif_netlink_flow_init(&flow); |
1316 | 0 | flow.cmd = OVS_FLOW_CMD_DEL; |
1317 | 0 | flow.dp_ifindex = dpif->dp_ifindex; |
1318 | |
|
1319 | 0 | if (netdev_is_flow_api_enabled()) { |
1320 | 0 | netdev_ports_flow_flush(dpif_type_str); |
1321 | 0 | } |
1322 | |
|
1323 | 0 | return dpif_netlink_flow_transact(&flow, NULL, NULL); |
1324 | 0 | } |
1325 | | |
/* State for an in-progress port dump; see dpif_netlink_port_dump_*(). */
struct dpif_netlink_port_state {
    struct nl_dump dump;    /* Ongoing OVS_VPORT_CMD_GET Netlink dump. */
    struct ofpbuf buf;      /* Reusable buffer for dump replies. */
};
1330 | | |
1331 | | static void |
1332 | | dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif, |
1333 | | struct nl_dump *dump) |
1334 | 0 | { |
1335 | 0 | struct dpif_netlink_vport request; |
1336 | 0 | struct ofpbuf *buf; |
1337 | |
|
1338 | 0 | dpif_netlink_vport_init(&request); |
1339 | 0 | request.cmd = OVS_VPORT_CMD_GET; |
1340 | 0 | request.dp_ifindex = dpif->dp_ifindex; |
1341 | |
|
1342 | 0 | buf = ofpbuf_new(1024); |
1343 | 0 | dpif_netlink_vport_to_ofpbuf(&request, buf); |
1344 | 0 | nl_dump_start(dump, NETLINK_GENERIC, buf); |
1345 | 0 | ofpbuf_delete(buf); |
1346 | 0 | } |
1347 | | |
1348 | | static int |
1349 | | dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep) |
1350 | 0 | { |
1351 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1352 | 0 | struct dpif_netlink_port_state *state; |
1353 | |
|
1354 | 0 | *statep = state = xmalloc(sizeof *state); |
1355 | 0 | dpif_netlink_port_dump_start__(dpif, &state->dump); |
1356 | |
|
1357 | 0 | ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE); |
1358 | 0 | return 0; |
1359 | 0 | } |
1360 | | |
1361 | | static int |
1362 | | dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif, |
1363 | | struct nl_dump *dump, |
1364 | | struct dpif_netlink_vport *vport, |
1365 | | struct ofpbuf *buffer) |
1366 | 0 | { |
1367 | 0 | struct ofpbuf buf; |
1368 | 0 | int error; |
1369 | |
|
1370 | 0 | if (!nl_dump_next(dump, &buf, buffer)) { |
1371 | 0 | return EOF; |
1372 | 0 | } |
1373 | | |
1374 | 0 | error = dpif_netlink_vport_from_ofpbuf(vport, &buf); |
1375 | 0 | if (error) { |
1376 | 0 | VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)", |
1377 | 0 | dpif_name(&dpif->dpif), ovs_strerror(error)); |
1378 | 0 | } |
1379 | 0 | return error; |
1380 | 0 | } |
1381 | | |
1382 | | static int |
1383 | | dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_, |
1384 | | struct dpif_port *dpif_port) |
1385 | 0 | { |
1386 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1387 | 0 | struct dpif_netlink_port_state *state = state_; |
1388 | 0 | struct dpif_netlink_vport vport; |
1389 | 0 | int error; |
1390 | |
|
1391 | 0 | error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport, |
1392 | 0 | &state->buf); |
1393 | 0 | if (error) { |
1394 | 0 | return error; |
1395 | 0 | } |
1396 | 0 | dpif_port->name = CONST_CAST(char *, vport.name); |
1397 | 0 | dpif_port->type = CONST_CAST(char *, get_vport_type(&vport)); |
1398 | 0 | dpif_port->port_no = vport.port_no; |
1399 | 0 | return 0; |
1400 | 0 | } |
1401 | | |
1402 | | static int |
1403 | | dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_) |
1404 | 0 | { |
1405 | 0 | struct dpif_netlink_port_state *state = state_; |
1406 | 0 | int error = nl_dump_done(&state->dump); |
1407 | |
|
1408 | 0 | ofpbuf_uninit(&state->buf); |
1409 | 0 | free(state); |
1410 | 0 | return error; |
1411 | 0 | } |
1412 | | |
/* dpif 'port_poll' implementation: reports the name of a port that has been
 * added, deleted, or changed in the kernel datapath.
 *
 * On the first call this lazily creates the vport-multicast notification
 * socket and returns ENOBUFS so the caller re-reads all ports.  Afterwards
 * it returns 0 with '*devnamep' set to a malloc'd device name, EAGAIN when
 * no notification is pending, or ENOBUFS after a receive error (the socket
 * is drained, so the caller must again re-read everything). */
static int
dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    /* Lazily create the Netlink socket to listen for notifications. */
    if (!dpif->port_notifier) {
        struct nl_sock *sock;
        int error;

        error = nl_sock_create(NETLINK_GENERIC, &sock);
        if (error) {
            return error;
        }

        error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
        if (error) {
            nl_sock_destroy(sock);
            return error;
        }
        dpif->port_notifier = sock;

        /* We have no idea of the current state so report that everything
         * changed. */
        return ENOBUFS;
    }

    /* Read notifications until one is relevant to this datapath or the
     * socket runs dry. */
    for (;;) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        uint64_t buf_stub[4096 / 8];
        struct ofpbuf buf;
        int error;

        ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
        error = nl_sock_recv(dpif->port_notifier, &buf, NULL, false);
        if (!error) {
            struct dpif_netlink_vport vport;

            error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
            if (!error) {
                /* Ignore notifications for other datapaths and commands
                 * that do not change port membership or configuration. */
                if (vport.dp_ifindex == dpif->dp_ifindex
                    && (vport.cmd == OVS_VPORT_CMD_NEW
                        || vport.cmd == OVS_VPORT_CMD_DEL
                        || vport.cmd == OVS_VPORT_CMD_SET)) {
                    VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
                             dpif->dpif.full_name, vport.name, vport.cmd);
                    if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
                        /* Upcall channels must be rebuilt for the deleted
                         * port. */
                        dpif->refresh_channels = true;
                    }
                    *devnamep = xstrdup(vport.name);
                    ofpbuf_uninit(&buf);
                    return 0;
                }
            }
        } else if (error != EAGAIN) {
            VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
                         ovs_strerror(error));
            /* Drop whatever is queued; the caller re-reads all ports on
             * ENOBUFS. */
            nl_sock_drain(dpif->port_notifier);
            error = ENOBUFS;
        }

        ofpbuf_uninit(&buf);
        if (error) {
            return error;
        }
    }
}
1480 | | |
1481 | | static void |
1482 | | dpif_netlink_port_poll_wait(const struct dpif *dpif_) |
1483 | 0 | { |
1484 | 0 | const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
1485 | |
|
1486 | 0 | if (dpif->port_notifier) { |
1487 | 0 | nl_sock_wait(dpif->port_notifier, POLLIN); |
1488 | 0 | } else { |
1489 | 0 | poll_immediate_wake(); |
1490 | 0 | } |
1491 | 0 | } |
1492 | | |
1493 | | static void |
1494 | | dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request, |
1495 | | const ovs_u128 *ufid, bool terse) |
1496 | 0 | { |
1497 | 0 | if (ufid) { |
1498 | 0 | request->ufid = *ufid; |
1499 | 0 | request->ufid_present = true; |
1500 | 0 | } else { |
1501 | 0 | request->ufid_present = false; |
1502 | 0 | } |
1503 | 0 | request->ufid_terse = terse; |
1504 | 0 | } |
1505 | | |
1506 | | static void |
1507 | | dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif, |
1508 | | const struct nlattr *key, size_t key_len, |
1509 | | const ovs_u128 *ufid, bool terse, |
1510 | | struct dpif_netlink_flow *request) |
1511 | 0 | { |
1512 | 0 | dpif_netlink_flow_init(request); |
1513 | 0 | request->cmd = OVS_FLOW_CMD_GET; |
1514 | 0 | request->dp_ifindex = dpif->dp_ifindex; |
1515 | 0 | request->key = key; |
1516 | 0 | request->key_len = key_len; |
1517 | 0 | dpif_netlink_flow_init_ufid(request, ufid, terse); |
1518 | 0 | } |
1519 | | |
1520 | | static void |
1521 | | dpif_netlink_init_flow_get(const struct dpif_netlink *dpif, |
1522 | | const struct dpif_flow_get *get, |
1523 | | struct dpif_netlink_flow *request) |
1524 | 0 | { |
1525 | 0 | dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid, |
1526 | 0 | false, request); |
1527 | 0 | } |
1528 | | |
1529 | | static int |
1530 | | dpif_netlink_flow_get__(const struct dpif_netlink *dpif, |
1531 | | const struct nlattr *key, size_t key_len, |
1532 | | const ovs_u128 *ufid, bool terse, |
1533 | | struct dpif_netlink_flow *reply, struct ofpbuf **bufp) |
1534 | 0 | { |
1535 | 0 | struct dpif_netlink_flow request; |
1536 | |
|
1537 | 0 | dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request); |
1538 | 0 | return dpif_netlink_flow_transact(&request, reply, bufp); |
1539 | 0 | } |
1540 | | |
1541 | | static int |
1542 | | dpif_netlink_flow_get(const struct dpif_netlink *dpif, |
1543 | | const struct dpif_netlink_flow *flow, |
1544 | | struct dpif_netlink_flow *reply, struct ofpbuf **bufp) |
1545 | 0 | { |
1546 | 0 | return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len, |
1547 | 0 | flow->ufid_present ? &flow->ufid : NULL, |
1548 | 0 | false, reply, bufp); |
1549 | 0 | } |
1550 | | |
/* Translates the generic flow-put description 'put' into a Netlink flow
 * request in 'request' (OVS_FLOW_CMD_NEW for creation, OVS_FLOW_CMD_SET for
 * modification), mapping the DPIF_FP_* flags onto the request fields. */
static void
dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
                           const struct dpif_flow_put *put,
                           struct dpif_netlink_flow *request)
{
    /* Zero-length attribute used as a stand-in when 'put' has no actions. */
    static const struct nlattr dummy_action;

    dpif_netlink_flow_init(request);
    request->cmd = (put->flags & DPIF_FP_CREATE
                    ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
    request->dp_ifindex = dpif->dp_ifindex;
    request->key = put->key;
    request->key_len = put->key_len;
    request->mask = put->mask;
    request->mask_len = put->mask_len;
    dpif_netlink_flow_init_ufid(request, put->ufid, false);

    /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
    request->actions = (put->actions
                        ? put->actions
                        : CONST_CAST(struct nlattr *, &dummy_action));
    request->actions_len = put->actions_len;
    if (put->flags & DPIF_FP_ZERO_STATS) {
        request->clear = true;
    }
    if (put->flags & DPIF_FP_PROBE) {
        request->probe = true;
    }
    /* NLM_F_CREATE lets a non-MODIFY put create the flow if it is absent. */
    request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
}
1581 | | |
1582 | | static void |
1583 | | dpif_netlink_init_flow_del__(struct dpif_netlink *dpif, |
1584 | | const struct nlattr *key, size_t key_len, |
1585 | | const ovs_u128 *ufid, bool terse, |
1586 | | struct dpif_netlink_flow *request) |
1587 | 0 | { |
1588 | 0 | dpif_netlink_flow_init(request); |
1589 | 0 | request->cmd = OVS_FLOW_CMD_DEL; |
1590 | 0 | request->dp_ifindex = dpif->dp_ifindex; |
1591 | 0 | request->key = key; |
1592 | 0 | request->key_len = key_len; |
1593 | 0 | dpif_netlink_flow_init_ufid(request, ufid, terse); |
1594 | 0 | } |
1595 | | |
1596 | | static void |
1597 | | dpif_netlink_init_flow_del(struct dpif_netlink *dpif, |
1598 | | const struct dpif_flow_del *del, |
1599 | | struct dpif_netlink_flow *request) |
1600 | 0 | { |
1601 | 0 | dpif_netlink_init_flow_del__(dpif, del->key, del->key_len, |
1602 | 0 | del->ufid, del->terse, request); |
1603 | 0 | } |
1604 | | |
/* A flow dump over the kernel datapath and, optionally, over offload-capable
 * netdevs.  Shared by all dump threads; see dpif_netlink_flow_dump_create(). */
struct dpif_netlink_flow_dump {
    struct dpif_flow_dump up;            /* Generic dpif dump header. */
    struct nl_dump nl_dump;              /* Kernel flow dump (if ovs_flows). */
    atomic_int status;                   /* First error seen by any thread. */
    struct netdev_flow_dump **netdev_dumps; /* Per-netdev offload dumps. */
    int netdev_dumps_num;                    /* Number of netdev_flow_dumps */
    struct ovs_mutex netdev_lock;            /* Guards the following. */
    int netdev_current_dump OVS_GUARDED;     /* Shared current dump */
    struct dpif_flow_dump_types types;       /* Type of dump */
};
1615 | | |
/* Returns the dpif_netlink_flow_dump embedding 'dump'. */
static struct dpif_netlink_flow_dump *
dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
}
1621 | | |
/* Prepares the netdev (hardware offload) portion of 'dump': creates one
 * netdev_flow_dump per offload-capable port, or none at all if netdev flows
 * were not requested. */
static void
start_netdev_dump(const struct dpif *dpif_,
                  struct dpif_netlink_flow_dump *dump)
{
    ovs_mutex_init(&dump->netdev_lock);

    if (!(dump->types.netdev_flows)) {
        dump->netdev_dumps_num = 0;
        dump->netdev_dumps = NULL;
        return;
    }

    /* The lock guards 'netdev_current_dump', which dump threads share. */
    ovs_mutex_lock(&dump->netdev_lock);
    dump->netdev_current_dump = 0;
    dump->netdev_dumps
        = netdev_ports_flow_dump_create(dpif_normalize_type(dpif_type(dpif_)),
                                        &dump->netdev_dumps_num,
                                        dump->up.terse);
    ovs_mutex_unlock(&dump->netdev_lock);
}
1642 | | |
1643 | | static void |
1644 | | dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump *dump, |
1645 | | struct dpif_flow_dump_types *types) |
1646 | 0 | { |
1647 | 0 | if (!types) { |
1648 | 0 | dump->types.ovs_flows = true; |
1649 | 0 | dump->types.netdev_flows = true; |
1650 | 0 | } else { |
1651 | 0 | memcpy(&dump->types, types, sizeof *types); |
1652 | 0 | } |
1653 | 0 | } |
1654 | | |
/* dpif 'flow_dump_create' implementation: allocates a dump over the flow
 * types selected by 'types' (or all types if null).  'terse' asks for
 * statistics-only records without key/mask/actions.  The returned dump is
 * freed by dpif_netlink_flow_dump_destroy(). */
static struct dpif_flow_dump *
dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse,
                              struct dpif_flow_dump_types *types)
{
    const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_flow_dump *dump;
    struct dpif_netlink_flow request;
    struct ofpbuf *buf;

    dump = xmalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);

    dpif_netlink_populate_flow_dump_types(dump, types);

    if (dump->types.ovs_flows) {
        /* Start the kernel-side flow dump. */
        dpif_netlink_flow_init(&request);
        request.cmd = OVS_FLOW_CMD_GET;
        request.dp_ifindex = dpif->dp_ifindex;
        request.ufid_present = false;
        request.ufid_terse = terse;

        buf = ofpbuf_new(1024);
        dpif_netlink_flow_to_ofpbuf(&request, buf);
        nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
        ofpbuf_delete(buf);
    }
    atomic_init(&dump->status, 0);
    dump->up.terse = terse;

    /* Start the hardware-offload (netdev) side, if requested. */
    start_netdev_dump(dpif_, dump);

    return &dump->up;
}
1688 | | |
/* dpif 'flow_dump_destroy' implementation: finishes the kernel and netdev
 * dumps and frees 'dump_'.  Returns the first error recorded by any dump
 * thread, or else the kernel dump's completion status. */
static int
dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
    unsigned int nl_status = 0;
    int dump_status;

    if (dump->types.ovs_flows) {
        nl_status = nl_dump_done(&dump->nl_dump);
    }

    for (int i = 0; i < dump->netdev_dumps_num; i++) {
        int err = netdev_flow_dump_destroy(dump->netdev_dumps[i]);

        /* EOPNOTSUPP simply means this netdev had nothing to dump. */
        if (err != 0 && err != EOPNOTSUPP) {
            VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err));
        }
    }

    free(dump->netdev_dumps);
    ovs_mutex_destroy(&dump->netdev_lock);

    /* No other thread has access to 'dump' at this point. */
    atomic_read_relaxed(&dump->status, &dump_status);
    free(dump);
    return dump_status ? dump_status : nl_status;
}
1716 | | |
/* Per-thread state for iterating a dpif_netlink_flow_dump.  Each handler
 * thread gets its own instance so threads can pull batches concurrently. */
struct dpif_netlink_flow_dump_thread {
    struct dpif_flow_dump_thread up;   /* Generic dpif thread header. */
    struct dpif_netlink_flow_dump *dump; /* Shared parent dump. */
    struct dpif_netlink_flow flow;     /* Most recently parsed flow. */
    struct dpif_flow_stats stats;      /* Stats of that flow. */
    struct ofpbuf nl_flows;     /* Always used to store flows. */
    struct ofpbuf *nl_actions;  /* Used if kernel does not supply actions. */
    int netdev_dump_idx;        /* This thread current netdev dump index */
    bool netdev_done;           /* If we are finished dumping netdevs */

    /* (Key/Mask/Actions) Buffers for netdev dumping */
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf actbuf[FLOW_DUMP_MAX_BATCH];
};
1732 | | |
1733 | | static struct dpif_netlink_flow_dump_thread * |
1734 | | dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread) |
1735 | 0 | { |
1736 | 0 | return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up); |
1737 | 0 | } |
1738 | | |
1739 | | static struct dpif_flow_dump_thread * |
1740 | | dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_) |
1741 | 0 | { |
1742 | 0 | struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_); |
1743 | 0 | struct dpif_netlink_flow_dump_thread *thread; |
1744 | |
|
1745 | 0 | thread = xmalloc(sizeof *thread); |
1746 | 0 | dpif_flow_dump_thread_init(&thread->up, &dump->up); |
1747 | 0 | thread->dump = dump; |
1748 | 0 | ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE); |
1749 | 0 | thread->nl_actions = NULL; |
1750 | 0 | thread->netdev_dump_idx = 0; |
1751 | 0 | thread->netdev_done = !(thread->netdev_dump_idx < dump->netdev_dumps_num); |
1752 | |
|
1753 | 0 | return &thread->up; |
1754 | 0 | } |
1755 | | |
1756 | | static void |
1757 | | dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_) |
1758 | 0 | { |
1759 | 0 | struct dpif_netlink_flow_dump_thread *thread |
1760 | 0 | = dpif_netlink_flow_dump_thread_cast(thread_); |
1761 | |
|
1762 | 0 | ofpbuf_uninit(&thread->nl_flows); |
1763 | 0 | ofpbuf_delete(thread->nl_actions); |
1764 | 0 | free(thread); |
1765 | 0 | } |
1766 | | |
1767 | | static void |
1768 | | dpif_netlink_flow_to_dpif_flow(struct dpif_flow *dpif_flow, |
1769 | | const struct dpif_netlink_flow *datapath_flow) |
1770 | 0 | { |
1771 | 0 | dpif_flow->key = datapath_flow->key; |
1772 | 0 | dpif_flow->key_len = datapath_flow->key_len; |
1773 | 0 | dpif_flow->mask = datapath_flow->mask; |
1774 | 0 | dpif_flow->mask_len = datapath_flow->mask_len; |
1775 | 0 | dpif_flow->actions = datapath_flow->actions; |
1776 | 0 | dpif_flow->actions_len = datapath_flow->actions_len; |
1777 | 0 | dpif_flow->ufid_present = datapath_flow->ufid_present; |
1778 | 0 | dpif_flow->pmd_id = PMD_ID_NULL; |
1779 | 0 | if (datapath_flow->ufid_present) { |
1780 | 0 | dpif_flow->ufid = datapath_flow->ufid; |
1781 | 0 | } else { |
1782 | 0 | ovs_assert(datapath_flow->key && datapath_flow->key_len); |
1783 | 0 | odp_flow_key_hash(datapath_flow->key, datapath_flow->key_len, |
1784 | 0 | &dpif_flow->ufid); |
1785 | 0 | } |
1786 | 0 | dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats); |
1787 | 0 | dpif_flow->attrs.offloaded = false; |
1788 | 0 | dpif_flow->attrs.dp_layer = "ovs"; |
1789 | 0 | dpif_flow->attrs.dp_extra_info = NULL; |
1790 | 0 | } |
1791 | | |
/* The design is such that all threads are working together on the first dump
 * to the last, in order (at first they are all on dump 0).
 * When the first thread finds that the given dump is finished,
 * they all move to the next.  If two or more threads find the same dump
 * is finished at the same time, the first one will advance the shared
 * netdev_current_dump and the others will catch up.
 *
 * All accesses to the shared cursor 'dump->netdev_current_dump' are
 * serialized by 'dump->netdev_lock'. */
static void
dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread *thread)
{
    struct dpif_netlink_flow_dump *dump = thread->dump;

    ovs_mutex_lock(&dump->netdev_lock);
    /* If we haven't finished (dumped everything). */
    if (dump->netdev_current_dump < dump->netdev_dumps_num) {
        /* If we are the first to find that the current dump is finished,
         * advance it. */
        if (thread->netdev_dump_idx == dump->netdev_current_dump) {
            thread->netdev_dump_idx = ++dump->netdev_current_dump;
            /* Did we just finish the last dump?  Then we are done. */
            if (dump->netdev_current_dump == dump->netdev_dumps_num) {
                thread->netdev_done = true;
            }
        } else {
            /* Otherwise, we are behind; catch up to the shared cursor. */
            thread->netdev_dump_idx = dump->netdev_current_dump;
        }
    } else {
        /* Some other thread already finished the last dump. */
        thread->netdev_done = true;
    }
    ovs_mutex_unlock(&dump->netdev_lock);
}
1824 | | |
/* Translates a 'match' obtained from a netdev (hardware offload) flow dump
 * into a generic 'flow'.  Unless 'terse', the odp-encoded key and mask are
 * appended to the caller-provided 'key_buf' and 'mask_buf', and 'flow'
 * aliases those buffers as well as 'actions' -- so all three must outlive
 * 'flow'.  Always fills in stats, UFID and attributes.  Returns 0. */
static int
dpif_netlink_netdev_match_to_dpif_flow(struct match *match,
                                       struct ofpbuf *key_buf,
                                       struct ofpbuf *mask_buf,
                                       struct nlattr *actions,
                                       struct dpif_flow_stats *stats,
                                       struct dpif_flow_attrs *attrs,
                                       ovs_u128 *ufid,
                                       struct dpif_flow *flow,
                                       bool terse)
{
    memset(flow, 0, sizeof *flow);

    if (!terse) {
        struct odp_flow_key_parms odp_parms = {
            .flow = &match->flow,
            .mask = &match->wc.masks,
            /* Feature support advertised for the key serialization.
             * NOTE(review): these constants mirror what the TC offload
             * path can express -- confirm when extending. */
            .support = {
                .max_vlan_headers = 2,
                .recirc = true,
                .ct_state = true,
                .ct_zone = true,
                .ct_mark = true,
                .ct_label = true,
            },
        };
        size_t offset;

        /* Key: serialize into 'key_buf' and point 'flow' at the bytes we
         * appended (tail before serialization, length = growth). */
        offset = key_buf->size;
        flow->key = ofpbuf_tail(key_buf);
        odp_flow_key_from_flow(&odp_parms, key_buf);
        flow->key_len = key_buf->size - offset;

        /* Mask: same scheme; 'key_buf' is passed so mask serialization can
         * reference the key just produced. */
        offset = mask_buf->size;
        flow->mask = ofpbuf_tail(mask_buf);
        odp_parms.key_buf = key_buf;
        odp_flow_key_from_mask(&odp_parms, mask_buf);
        flow->mask_len = mask_buf->size - offset;

        /* Actions: alias the payload of the caller's nlattr. */
        flow->actions = nl_attr_get(actions);
        flow->actions_len = nl_attr_get_size(actions);
    }

    /* Stats */
    memcpy(&flow->stats, stats, sizeof *stats);

    /* UFID: netdev-dumped flows always carry one. */
    flow->ufid_present = true;
    flow->ufid = *ufid;

    flow->pmd_id = PMD_ID_NULL;

    memcpy(&flow->attrs, attrs, sizeof *attrs);

    return 0;
}
1884 | | |
/* Fills up to 'max_flows' entries of 'flows' for one dump-thread iteration.
 * Phase 1 pulls from the per-netdev (offload) dumps until this thread is
 * done with them; phase 2 (skipped unless ovs_flows dumping was requested)
 * pulls from the shared kernel netlink dump.  Returns the number of flows
 * stored; 0 signals end of dump for this thread. */
static int
dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                            struct dpif_flow *flows, int max_flows)
{
    struct dpif_netlink_flow_dump_thread *thread
        = dpif_netlink_flow_dump_thread_cast(thread_);
    struct dpif_netlink_flow_dump *dump = thread->dump;
    struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
    int n_flows;

    /* Free any actions buffer left over from a previous call; flows
     * returned then are no longer referenced. */
    ofpbuf_delete(thread->nl_actions);
    thread->nl_actions = NULL;

    n_flows = 0;
    max_flows = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

    /* Phase 1: netdev (offloaded) flows.  Each produced flow borrows one
     * slot of the per-thread key/mask/act stack buffers, hence the
     * FLOW_DUMP_MAX_BATCH cap above. */
    while (!thread->netdev_done && n_flows < max_flows) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[n_flows];
        struct odputil_keybuf *keybuf = &thread->keybuf[n_flows];
        struct odputil_keybuf *actbuf = &thread->actbuf[n_flows];
        struct ofpbuf key, mask, act;
        struct dpif_flow *f = &flows[n_flows];
        int cur = thread->netdev_dump_idx;
        struct netdev_flow_dump *netdev_dump = dump->netdev_dumps[cur];
        struct match match;
        struct nlattr *actions;
        struct dpif_flow_stats stats;
        struct dpif_flow_attrs attrs;
        ovs_u128 ufid;
        bool has_next;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&act, actbuf, sizeof *actbuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        has_next = netdev_flow_dump_next(netdev_dump, &match,
                                         &actions, &stats, &attrs,
                                         &ufid,
                                         &thread->nl_flows,
                                         &act);
        if (has_next) {
            dpif_netlink_netdev_match_to_dpif_flow(&match,
                                                   &key, &mask,
                                                   actions,
                                                   &stats,
                                                   &attrs,
                                                   &ufid,
                                                   f,
                                                   dump->up.terse);
            n_flows++;
        } else {
            /* Current netdev dump is exhausted; move to the next one (or
             * catch up with other threads). */
            dpif_netlink_advance_netdev_dump(thread);
        }
    }

    /* Phase 2 only applies when kernel flows were requested. */
    if (!(dump->types.ovs_flows)) {
        return n_flows;
    }

    /* Keep pulling while the batch is empty, or while there is both room
     * and already-buffered netlink data to parse. */
    while (!n_flows
           || (n_flows < max_flows && thread->nl_flows.size)) {
        struct dpif_netlink_flow datapath_flow;
        struct ofpbuf nl_flow;
        int error;

        /* Try to grab another flow. */
        if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
            break;
        }

        /* Convert the flow to our output format. */
        error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
        if (error) {
            /* Record the error for dump_destroy() to report. */
            atomic_store_relaxed(&dump->status, error);
            break;
        }

        if (dump->up.terse || datapath_flow.actions) {
            /* Common case: we don't want actions, or the flow includes
             * actions. */
            dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
        } else {
            /* Rare case: the flow does not include actions.  Retrieve this
             * individual flow again to get the actions. */
            error = dpif_netlink_flow_get(dpif, &datapath_flow,
                                          &datapath_flow, &thread->nl_actions);
            if (error == ENOENT) {
                VLOG_DBG("dumped flow disappeared on get");
                continue;
            } else if (error) {
                VLOG_WARN("error fetching dumped flow: %s",
                          ovs_strerror(error));
                atomic_store_relaxed(&dump->status, error);
                break;
            }

            /* Save this flow.  Then exit, because we only have one buffer
             * ('thread->nl_actions') to handle this case. */
            dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
            break;
        }
    }
    return n_flows;
}
1988 | | |
/* Encodes 'd_exec' as an OVS_PACKET_CMD_EXECUTE generic netlink request
 * for datapath 'dp_ifindex', appending the message to 'buf'.  The message
 * carries the raw packet, a metadata key derived from the packet, the
 * actions to run, and the optional probe/MRU/hash/upcall-pid attributes. */
static void
dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                            struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve room up front: headers (64 is slack for genl/ovs headers and
     * attribute framing), packet data, metadata key, and actions. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + dp_packet_size(d_exec->packet)
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    /* Packet payload. */
    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      dp_packet_data(d_exec->packet),
                      dp_packet_size(d_exec->packet));

    /* Nested flow key built from the packet's metadata. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_dp_packet(buf, d_exec->packet);
    nl_msg_end_nested(buf, key_ofs);

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);

    /* Optional attributes, emitted only when meaningful/nonzero. */
    if (d_exec->probe) {
        nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
    }
    if (d_exec->mtu) {
        nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
    }

    if (d_exec->hash) {
        nl_msg_put_u64(buf, OVS_PACKET_ATTR_HASH, d_exec->hash);
    }

    if (d_exec->upcall_pid) {
        nl_msg_put_u32(buf, OVS_PACKET_ATTR_UPCALL_PID, d_exec->upcall_pid);
    }
}
2032 | | |
/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
 * Returns the number actually executed (at least 1, if 'n_ops' is
 * positive).
 *
 * Works in three passes: (1) encode each op into its own netlink request,
 * (2) send them all as one batched transaction, (3) decode replies back
 * into the ops.  An oversized execute packet truncates the batch at that
 * op so it becomes the first op of the next call. */
static size_t
dpif_netlink_operate__(struct dpif_netlink *dpif,
                       struct dpif_op **ops, size_t n_ops)
{
    struct op_auxdata {
        struct nl_transaction txn;

        /* Request/reply buffers with 1 KiB inline stubs to avoid heap
         * allocation for typical message sizes. */
        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[OPERATE_MAX_OPS];

    struct nl_transaction *txnsp[OPERATE_MAX_OPS];
    size_t i;

    n_ops = MIN(n_ops, OPERATE_MAX_OPS);

    /* Pass 1: encode each operation into a netlink request. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;
        struct dpif_netlink_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        /* txn.reply stays NULL unless the op needs a reply decoded. */
        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            dpif_netlink_init_flow_put(dpif, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO asks the kernel to echo the flow back so we
                 * can extract its stats. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);

            OVS_USDT_PROBE(dpif_netlink_operate__, op_flow_put,
                           dpif, put, &flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            dpif_netlink_init_flow_del(dpif, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);

            OVS_USDT_PROBE(dpif_netlink_operate__, op_flow_del,
                           dpif, del, &flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            /* Can't execute a packet that won't fit in a Netlink attribute. */
            if (OVS_UNLIKELY(nl_attr_oversized(
                                 dp_packet_size(op->execute.packet)))) {
                /* Report an error immediately if this is the first operation.
                 * Otherwise the easiest thing to do is to postpone to the next
                 * call (when this will be the first operation). */
                if (i == 0) {
                    VLOG_ERR_RL(&error_rl,
                                "dropping oversized %"PRIu32"-byte packet",
                                dp_packet_size(op->execute.packet));
                    op->error = ENOBUFS;
                    return 1;
                }
                n_ops = i;
            } else {
                dpif_netlink_encode_execute(dpif->dp_ifindex, &op->execute,
                                            &aux->request);

                OVS_USDT_PROBE(dpif_netlink_operate__, op_flow_execute,
                               dpif, &op->execute,
                               dp_packet_data(op->execute.packet),
                               dp_packet_size(op->execute.packet),
                               &aux->request);
            }
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            dpif_netlink_init_flow_get(dpif, get, &flow);
            /* GET replies go straight into the caller's buffer. */
            aux->txn.reply = get->buffer;
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);

            OVS_USDT_PROBE(dpif_netlink_operate__, op_flow_get,
                           dpif, get, &flow, &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Pass 2: issue all requests as one batched netlink transaction. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Pass 3: propagate per-transaction results and decode replies. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, put->stats);
                    }
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, del->stats);
                    }
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            /* Nothing to decode; txn->error already propagated. */
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            if (!op->error) {
                struct dpif_netlink_flow reply;

                op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
                if (!op->error) {
                    dpif_netlink_flow_to_dpif_flow(get->flow, &reply);
                }
            }
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }

    return n_ops;
}
2209 | | |
/* Attempts to satisfy a flow-GET from the netdev offload layer.  On
 * success translates the offloaded flow into 'get->flow', copying the
 * actions into 'get->buffer' so they outlive this call's stack buffers.
 * Returns 0 on success or the netdev layer's error (e.g. ENOENT when the
 * flow is not offloaded). */
static int
parse_flow_get(struct dpif_netlink *dpif, struct dpif_flow_get *get)
{
    const char *dpif_type_str = dpif_normalize_type(dpif_type(&dpif->dpif));
    struct dpif_flow *dpif_flow = get->flow;
    struct match match;
    struct nlattr *actions;
    struct dpif_flow_stats stats;
    struct dpif_flow_attrs attrs;
    struct ofpbuf buf;
    uint64_t act_buf[1024 / 8];
    struct odputil_keybuf maskbuf;
    struct odputil_keybuf keybuf;
    struct odputil_keybuf actbuf;
    struct ofpbuf key, mask, act;
    int err;

    ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
    err = netdev_ports_flow_get(dpif_type_str, &match, &actions, get->ufid,
                                &stats, &attrs, &buf);
    if (err) {
        return err;
    }

    VLOG_DBG("found flow from netdev, translating to dpif flow");

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    ofpbuf_use_stack(&act, &actbuf, sizeof actbuf);
    ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
    dpif_netlink_netdev_match_to_dpif_flow(&match, &key, &mask, actions,
                                           &stats, &attrs,
                                           (ovs_u128 *) get->ufid,
                                           dpif_flow,
                                           false);
    /* Re-point actions at 'get->buffer': the stack buffers above go out of
     * scope when we return, but the caller keeps 'get->buffer'. */
    ofpbuf_put(get->buffer, nl_attr_get(actions), nl_attr_get_size(actions));
    dpif_flow->actions = ofpbuf_at(get->buffer, 0, 0);
    dpif_flow->actions_len = nl_attr_get_size(actions);

    return 0;
}
2250 | | |
/* Attempts to offload a flow-PUT to the netdev (hardware offload) layer.
 * Returns 0 when the flow was offloaded, EOPNOTSUPP when it cannot be
 * (probe flows, unknown in/out port), or another errno on failure.  On a
 * successful offload of a DPIF_FP_MODIFY, the old kernel-datapath copy of
 * the flow is deleted so only the offloaded copy remains; conversely, on a
 * failed modify the offloaded copy is removed so the kernel copy can be
 * updated. */
static int
parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
{
    const char *dpif_type_str = dpif_normalize_type(dpif_type(&dpif->dpif));
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    struct match match;
    odp_port_t in_port;
    const struct nlattr *nla;
    size_t left;
    struct netdev *dev;
    struct offload_info info;
    int err;

    info.tc_modify_flow_deleted = false;
    if (put->flags & DPIF_FP_PROBE) {
        /* Probe flows are for feature detection only; don't offload. */
        return EOPNOTSUPP;
    }

    err = parse_key_and_mask_to_match(put->key, put->key_len, put->mask,
                                      put->mask_len, &match);
    if (err) {
        return err;
    }

    in_port = match.flow.in_port.odp_port;
    dev = netdev_ports_get(in_port, dpif_type_str);
    if (!dev) {
        /* Input port is not under offload management. */
        return EOPNOTSUPP;
    }

    /* Check the output port for a tunnel. */
    NL_ATTR_FOR_EACH(nla, left, put->actions, put->actions_len) {
        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
            struct netdev *outdev;
            odp_port_t out_port;

            out_port = nl_attr_get_odp_port(nla);
            outdev = netdev_ports_get(out_port, dpif_type_str);
            if (!outdev) {
                /* Output port unknown to the offload layer; give up. */
                err = EOPNOTSUPP;
                goto out;
            }
            netdev_close(outdev);
        }
    }

    info.recirc_id_shared_with_tc = (dpif->user_features
                                     & OVS_DP_F_TC_RECIRC_SHARING);
    err = netdev_flow_put(dev, &match,
                          CONST_CAST(struct nlattr *, put->actions),
                          put->actions_len,
                          CONST_CAST(ovs_u128 *, put->ufid),
                          &info, put->stats);

    if (!err) {
        if (put->flags & DPIF_FP_MODIFY) {
            /* Offload succeeded for a modify: remove the stale kernel
             * datapath copy of the flow. */
            struct dpif_op *opp;
            struct dpif_op op;

            op.type = DPIF_OP_FLOW_DEL;
            op.flow_del.key = put->key;
            op.flow_del.key_len = put->key_len;
            op.flow_del.ufid = put->ufid;
            op.flow_del.pmd_id = put->pmd_id;
            op.flow_del.stats = NULL;
            op.flow_del.terse = false;

            opp = &op;
            dpif_netlink_operate__(dpif, &opp, 1);
        }

        VLOG_DBG("added flow");
    } else if (err != EEXIST) {
        struct netdev *oor_netdev = NULL;
        enum vlog_level level;
        if (err == ENOSPC && netdev_is_offload_rebalance_policy_enabled()) {
            /*
             * We need to set OOR on the input netdev (i.e, 'dev') for the
             * flow. But if the flow has a tunnel attribute (i.e, decap action,
             * with a virtual device like a VxLAN interface as its in-port),
             * then lookup and set OOR on the underlying tunnel (real) netdev.
             */
            oor_netdev = flow_get_tunnel_netdev(&match.flow.tunnel);
            if (!oor_netdev) {
                /* Not a 'tunnel' flow */
                oor_netdev = dev;
            }
            netdev_set_hw_info(oor_netdev, HW_INFO_TYPE_OOR, true);
        }
        /* Out-of-space / unsupported are expected conditions, so log them
         * at debug level only. */
        level = (err == ENOSPC || err == EOPNOTSUPP) ? VLL_DBG : VLL_ERR;
        VLOG_RL(&rl, level, "failed to offload flow: %s: %s",
                ovs_strerror(err),
                (oor_netdev ? oor_netdev->name : dev->name));
    }

out:
    if (err && err != EEXIST && (put->flags & DPIF_FP_MODIFY)) {
        /* Modified rule can't be offloaded, try and delete from HW */
        int del_err = 0;

        if (!info.tc_modify_flow_deleted) {
            del_err = netdev_flow_del(dev, put->ufid, put->stats);
        }

        if (!del_err) {
            /* Delete from hw success, so old flow was offloaded.
             * Change flags to create the flow in kernel */
            put->flags &= ~DPIF_FP_MODIFY;
            put->flags |= DPIF_FP_CREATE;
        } else if (del_err != ENOENT) {
            VLOG_ERR_RL(&rl, "failed to delete offloaded flow: %s",
                        ovs_strerror(del_err));
            /* Stop processing the flow in kernel. */
            err = 0;
        }
    }

    netdev_close(dev);

    return err;
}
2372 | | |
2373 | | static int |
2374 | | try_send_to_netdev(struct dpif_netlink *dpif, struct dpif_op *op) |
2375 | 0 | { |
2376 | 0 | int err = EOPNOTSUPP; |
2377 | |
|
2378 | 0 | switch (op->type) { |
2379 | 0 | case DPIF_OP_FLOW_PUT: { |
2380 | 0 | struct dpif_flow_put *put = &op->flow_put; |
2381 | |
|
2382 | 0 | if (!put->ufid) { |
2383 | 0 | break; |
2384 | 0 | } |
2385 | | |
2386 | 0 | err = parse_flow_put(dpif, put); |
2387 | 0 | log_flow_put_message(&dpif->dpif, &this_module, put, 0); |
2388 | 0 | break; |
2389 | 0 | } |
2390 | 0 | case DPIF_OP_FLOW_DEL: { |
2391 | 0 | struct dpif_flow_del *del = &op->flow_del; |
2392 | |
|
2393 | 0 | if (!del->ufid) { |
2394 | 0 | break; |
2395 | 0 | } |
2396 | | |
2397 | 0 | err = netdev_ports_flow_del( |
2398 | 0 | dpif_normalize_type(dpif_type(&dpif->dpif)), |
2399 | 0 | del->ufid, |
2400 | 0 | del->stats); |
2401 | 0 | log_flow_del_message(&dpif->dpif, &this_module, del, 0); |
2402 | 0 | break; |
2403 | 0 | } |
2404 | 0 | case DPIF_OP_FLOW_GET: { |
2405 | 0 | struct dpif_flow_get *get = &op->flow_get; |
2406 | |
|
2407 | 0 | if (!op->flow_get.ufid) { |
2408 | 0 | break; |
2409 | 0 | } |
2410 | | |
2411 | 0 | err = parse_flow_get(dpif, get); |
2412 | 0 | log_flow_get_message(&dpif->dpif, &this_module, get, 0); |
2413 | 0 | break; |
2414 | 0 | } |
2415 | 0 | case DPIF_OP_EXECUTE: |
2416 | 0 | default: |
2417 | 0 | break; |
2418 | 0 | } |
2419 | | |
2420 | 0 | return err; |
2421 | 0 | } |
2422 | | |
2423 | | static void |
2424 | | dpif_netlink_operate_chunks(struct dpif_netlink *dpif, struct dpif_op **ops, |
2425 | | size_t n_ops) |
2426 | 0 | { |
2427 | 0 | while (n_ops > 0) { |
2428 | 0 | size_t chunk = dpif_netlink_operate__(dpif, ops, n_ops); |
2429 | |
|
2430 | 0 | ops += chunk; |
2431 | 0 | n_ops -= chunk; |
2432 | 0 | } |
2433 | 0 | } |
2434 | | |
/* dpif 'operate' entry point.  Depending on 'offload_type' and whether the
 * flow offload API is enabled, each op is first tried against the netdev
 * offload layer; ops that fail there (other than EEXIST) are collected and
 * replayed against the kernel datapath in OPERATE_MAX_OPS-sized chunks --
 * unless DPIF_OFFLOAD_ALWAYS forbids the kernel fallback, in which case the
 * first failure is copied into every remaining op and we return early. */
static void
dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops,
                     enum dpif_offload_type offload_type)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_op *new_ops[OPERATE_MAX_OPS];
    int count = 0;      /* Ops queued for kernel fallback in this chunk. */
    int i = 0;          /* Global index into 'ops'; never reset. */
    int err = 0;

    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
        /* Caller demands offload but offloading is disabled. */
        VLOG_DBG("Invalid offload_type: %d", offload_type);
        return;
    }

    if (offload_type != DPIF_OFFLOAD_NEVER && netdev_is_flow_api_enabled()) {
        while (n_ops > 0) {
            count = 0;

            while (n_ops > 0 && count < OPERATE_MAX_OPS) {
                struct dpif_op *op = ops[i++];

                err = try_send_to_netdev(dpif, op);
                if (err && err != EEXIST) {
                    if (offload_type == DPIF_OFFLOAD_ALWAYS) {
                        /* We got an error while offloading an op. Since
                         * OFFLOAD_ALWAYS is specified, we stop further
                         * processing and return to the caller without
                         * invoking kernel datapath as fallback. But the
                         * interface requires us to process all n_ops; so
                         * return the same error in the remaining ops too.
                         */
                        op->error = err;
                        n_ops--;
                        while (n_ops > 0) {
                            op = ops[i++];
                            op->error = err;
                            n_ops--;
                        }
                        return;
                    }
                    /* Offload failed; queue for the kernel datapath. */
                    new_ops[count++] = op;
                } else {
                    op->error = err;
                }

                n_ops--;
            }

            dpif_netlink_operate_chunks(dpif, new_ops, count);
        }
    } else if (offload_type != DPIF_OFFLOAD_ALWAYS) {
        /* Offload disabled or not requested: kernel datapath only. */
        dpif_netlink_operate_chunks(dpif, ops, n_ops);
    }
}
2490 | | |
/* Platform-specific per-handler setup/teardown.  On Windows the handler
 * uses a pool of vport sockets; elsewhere it uses an epoll fd to wait on
 * upcall sockets. */
#if _WIN32
static void
dpif_netlink_handler_uninit(struct dpif_handler *handler)
{
    vport_delete_sock_pool(handler);
}

static int
dpif_netlink_handler_init(struct dpif_handler *handler)
{
    return vport_create_sock_pool(handler);
}
#else

/* Creates the handler's epoll instance.  Returns 0 on success, errno on
 * failure.  (The size hint '10' is ignored by modern kernels.) */
static int
dpif_netlink_handler_init(struct dpif_handler *handler)
{
    handler->epoll_fd = epoll_create(10);
    return handler->epoll_fd < 0 ? errno : 0;
}

static void
dpif_netlink_handler_uninit(struct dpif_handler *handler)
{
    close(handler->epoll_fd);
}
#endif
2518 | | |
/* Returns true if 'num' is a prime number, otherwise returns false.
 *
 * Trial division by odd divisors only; the divisor is 64-bit so that
 * 'd * d' cannot overflow for any 32-bit 'num'. */
static bool
is_prime(uint32_t num)
{
    if (num < 2) {
        return false;
    }
    if (num % 2 == 0) {
        /* The only even prime is 2 itself. */
        return num == 2;
    }

    for (uint64_t d = 3; d * d <= num; d += 2) {
        if (num % d == 0) {
            return false;
        }
    }

    return true;
}
2545 | | |
/* Returns 'start' if 'start' is a prime number.  Otherwise returns the
 * next prime greater than 'start'.  The search is limited by UINT32_MAX.
 *
 * Returns 0 if no prime has been found between 'start' and UINT32_MAX. */
static uint32_t
next_prime(uint32_t start)
{
    uint32_t candidate;

    if (start <= 2) {
        return 2;
    }

    candidate = start;
    while (candidate < UINT32_MAX) {
        if (is_prime(candidate)) {
            return candidate;
        }
        candidate++;
    }

    return 0;
}
2566 | | |
/* Calculates and returns the number of handler threads needed based on
 * the following formula:
 *
 *    handlers_n = min(next_prime(active_cores + 1), total_cores)
 *
 * Always returns at least 1. */
static uint32_t
dpif_netlink_calculate_n_handlers(void)
{
    uint32_t total_cores = count_total_cores();
    uint32_t n_handlers = count_cpu_cores();

    /* If not all cores are available to OVS, create additional handler
     * threads to ensure more fair distribution of load between them. */
    if (total_cores > 2 && n_handlers < total_cores) {
        uint32_t candidate = next_prime(n_handlers + 1);

        n_handlers = MIN(candidate, total_cores);
    }

    return n_handlers ? n_handlers : 1;
}
2589 | | |
/* (Re)builds the per-CPU dispatch handler set when the desired handler
 * count changes: destroys the old handlers, creates one netlink socket per
 * handler, and tells the kernel datapath the new array of upcall PIDs.
 * Returns 0 on success or the last error encountered. */
static int
dpif_netlink_refresh_handlers_cpu_dispatch(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    int handler_id;
    int error = 0;
    uint32_t n_handlers;
    uint32_t *upcall_pids;

    n_handlers = dpif_netlink_calculate_n_handlers();
    if (dpif->n_handlers != n_handlers) {
        VLOG_DBG("Dispatch mode(per-cpu): initializing %d handlers",
                 n_handlers);
        destroy_all_handlers(dpif);
        /* xzalloc: a handler whose socket creation fails below keeps an
         * upcall PID of 0. */
        upcall_pids = xzalloc(n_handlers * sizeof *upcall_pids);
        dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
        for (handler_id = 0; handler_id < n_handlers; handler_id++) {
            struct dpif_handler *handler = &dpif->handlers[handler_id];
            error = create_nl_sock(dpif, &handler->sock);
            if (error) {
                /* Continue with the remaining handlers; this one's PID
                 * stays 0 in the array sent to the kernel. */
                VLOG_ERR("Dispatch mode(per-cpu): Cannot create socket for"
                         "handler %d", handler_id);
                continue;
            }
            upcall_pids[handler_id] = nl_sock_pid(handler->sock);
            VLOG_DBG("Dispatch mode(per-cpu): "
                     "handler %d has Netlink PID of %u",
                     handler_id, upcall_pids[handler_id]);
        }

        dpif->n_handlers = n_handlers;
        error = dpif_netlink_set_handler_pids(&dpif->dpif, upcall_pids,
                                              n_handlers);
        free(upcall_pids);
    }
    return error;
}
2627 | | |
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
 * currently in 'dpif' in the kernel, by adding a new set of channels for
 * any kernel vport that lacks one and deleting any channels that have no
 * backing kernel vports.
 *
 * Also resizes the handler array to 'n_handlers' when it differs from the
 * current count, destroying and re-creating every channel in that case.
 *
 * Returns 0 on success, otherwise a positive errno value. */
static int
dpif_netlink_refresh_handlers_vport_dispatch(struct dpif_netlink *dpif,
                                             uint32_t n_handlers)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned long int *keep_channels;
    struct dpif_netlink_vport vport;
    size_t keep_channels_nbits;
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf buf;
    int retval = 0;
    size_t i;

    /* Windows supports at most one handler thread. */
    ovs_assert(!WINDOWS || n_handlers <= 1);
    ovs_assert(!WINDOWS || dpif->n_handlers <= 1);

    if (dpif->n_handlers != n_handlers) {
        /* Handler count changed: tear down all channels and rebuild the
         * handler array from scratch. */
        destroy_all_channels(dpif);
        dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
        for (i = 0; i < n_handlers; i++) {
            int error;
            struct dpif_handler *handler = &dpif->handlers[i];

            error = dpif_netlink_handler_init(handler);
            if (error) {
                size_t j;

                /* Unwind the handlers initialized so far. */
                for (j = 0; j < i; j++) {
                    struct dpif_handler *tmp = &dpif->handlers[j];
                    dpif_netlink_handler_uninit(tmp);
                }
                free(dpif->handlers);
                dpif->handlers = NULL;

                return error;
            }
        }
        dpif->n_handlers = n_handlers;
    }

    /* Reset per-handler epoll event bookkeeping before re-scanning. */
    for (i = 0; i < n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        handler->event_offset = handler->n_events = 0;
    }

    /* Bitmap of port numbers whose existing channels should survive this
     * refresh; everything left unset is discarded at the end. */
    keep_channels_nbits = dpif->uc_array_size;
    keep_channels = bitmap_allocate(keep_channels_nbits);

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_netlink_port_dump_start__(dpif, &dump);
    while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
        uint32_t port_no = odp_to_u32(vport.port_no);
        uint32_t upcall_pid;
        int error;

        if (port_no >= dpif->uc_array_size
            || !vport_get_pid(dpif, port_no, &upcall_pid)) {
            /* Kernel vport with no channel yet: create a socket and add a
             * channel for it. */
            struct nl_sock *sock;
            error = create_nl_sock(dpif, &sock);

            if (error) {
                goto error;
            }

            error = vport_add_channel(dpif, vport.port_no, sock);
            if (error) {
                VLOG_INFO("%s: could not add channels for port %s",
                          dpif_name(&dpif->dpif), vport.name);
                nl_sock_destroy(sock);
                retval = error;
                goto error;
            }
            upcall_pid = nl_sock_pid(sock);
        }

        /* Configure the vport to deliver misses to 'sock'. */
        if (vport.upcall_pids[0] == 0
            || vport.n_upcall_pids != 1
            || upcall_pid != vport.upcall_pids[0]) {
            struct dpif_netlink_vport vport_request;

            dpif_netlink_vport_init(&vport_request);
            vport_request.cmd = OVS_VPORT_CMD_SET;
            vport_request.dp_ifindex = dpif->dp_ifindex;
            vport_request.port_no = vport.port_no;
            vport_request.n_upcall_pids = 1;
            vport_request.upcall_pids = &upcall_pid;
            error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
            if (error) {
                VLOG_WARN_RL(&error_rl,
                             "%s: failed to set upcall pid on port: %s",
                             dpif_name(&dpif->dpif), ovs_strerror(error));

                if (error != ENODEV && error != ENOENT) {
                    retval = error;
                } else {
                    /* The vport isn't really there, even though the dump says
                     * it is. Probably we just hit a race after a port
                     * disappeared. */
                }
                goto error;
            }
        }

        if (port_no < keep_channels_nbits) {
            bitmap_set1(keep_channels, port_no);
        }
        continue;

    error:
        /* Per-port failure: drop this port's channels and continue with the
         * next dumped vport. */
        vport_del_channels(dpif, vport.port_no);
    }
    nl_dump_done(&dump);
    ofpbuf_uninit(&buf);

    /* Discard any saved channels that we didn't reuse. */
    for (i = 0; i < keep_channels_nbits; i++) {
        if (!bitmap_is_set(keep_channels, i)) {
            vport_del_channels(dpif, u32_to_odp(i));
        }
    }
    free(keep_channels);

    return retval;
}
2759 | | |
2760 | | static int |
2761 | | dpif_netlink_recv_set_vport_dispatch(struct dpif_netlink *dpif, bool enable) |
2762 | | OVS_REQ_WRLOCK(dpif->upcall_lock) |
2763 | 0 | { |
2764 | 0 | if ((dpif->handlers != NULL) == enable) { |
2765 | 0 | return 0; |
2766 | 0 | } else if (!enable) { |
2767 | 0 | destroy_all_channels(dpif); |
2768 | 0 | return 0; |
2769 | 0 | } else { |
2770 | 0 | return dpif_netlink_refresh_handlers_vport_dispatch(dpif, 1); |
2771 | 0 | } |
2772 | 0 | } |
2773 | | |
2774 | | static int |
2775 | | dpif_netlink_recv_set_cpu_dispatch(struct dpif_netlink *dpif, bool enable) |
2776 | | OVS_REQ_WRLOCK(dpif->upcall_lock) |
2777 | 0 | { |
2778 | 0 | if ((dpif->handlers != NULL) == enable) { |
2779 | 0 | return 0; |
2780 | 0 | } else if (!enable) { |
2781 | 0 | destroy_all_handlers(dpif); |
2782 | 0 | return 0; |
2783 | 0 | } else { |
2784 | 0 | return dpif_netlink_refresh_handlers_cpu_dispatch(dpif); |
2785 | 0 | } |
2786 | 0 | } |
2787 | | |
2788 | | static int |
2789 | | dpif_netlink_recv_set(struct dpif *dpif_, bool enable) |
2790 | 0 | { |
2791 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
2792 | 0 | int error; |
2793 | |
|
2794 | 0 | fat_rwlock_wrlock(&dpif->upcall_lock); |
2795 | 0 | if (dpif_netlink_upcall_per_cpu(dpif)) { |
2796 | 0 | error = dpif_netlink_recv_set_cpu_dispatch(dpif, enable); |
2797 | 0 | } else { |
2798 | 0 | error = dpif_netlink_recv_set_vport_dispatch(dpif, enable); |
2799 | 0 | } |
2800 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
2801 | |
|
2802 | 0 | return error; |
2803 | 0 | } |
2804 | | |
2805 | | static int |
2806 | | dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers) |
2807 | 0 | { |
2808 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
2809 | 0 | int error = 0; |
2810 | |
|
2811 | | #ifdef _WIN32 |
2812 | | /* Multiple upcall handlers will be supported once kernel datapath supports |
2813 | | * it. */ |
2814 | | if (n_handlers > 1) { |
2815 | | return error; |
2816 | | } |
2817 | | #endif |
2818 | |
|
2819 | 0 | fat_rwlock_wrlock(&dpif->upcall_lock); |
2820 | 0 | if (dpif->handlers) { |
2821 | 0 | if (dpif_netlink_upcall_per_cpu(dpif)) { |
2822 | 0 | error = dpif_netlink_refresh_handlers_cpu_dispatch(dpif); |
2823 | 0 | } else { |
2824 | 0 | error = dpif_netlink_refresh_handlers_vport_dispatch(dpif, |
2825 | 0 | n_handlers); |
2826 | 0 | } |
2827 | 0 | } |
2828 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
2829 | |
|
2830 | 0 | return error; |
2831 | 0 | } |
2832 | | |
/* Reports the number of handler threads this datapath requires.  Only
 * per-CPU dispatch mode has an opinion; otherwise returns false and leaves
 * '*n_handlers' untouched. */
static bool
dpif_netlink_number_handlers_required(struct dpif *dpif_, uint32_t *n_handlers)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    if (!dpif_netlink_upcall_per_cpu(dpif)) {
        return false;
    }

    *n_handlers = dpif_netlink_calculate_n_handlers();
    return true;
}
2845 | | |
2846 | | static int |
2847 | | dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED, |
2848 | | uint32_t queue_id, uint32_t *priority) |
2849 | 0 | { |
2850 | 0 | if (queue_id < 0xf000) { |
2851 | 0 | *priority = TC_H_MAKE(1 << 16, queue_id + 1); |
2852 | 0 | return 0; |
2853 | 0 | } else { |
2854 | 0 | return EINVAL; |
2855 | 0 | } |
2856 | 0 | } |
2857 | | |
/* Parses the OVS_PACKET_CMD_MISS/ACTION Netlink message in 'buf' into
 * '*upcall' and stores the originating datapath ifindex in '*dp_ifindex'.
 * 'upcall->packet' aliases memory inside 'buf', so 'buf' must outlive
 * '*upcall'.
 *
 * Returns 0 on success, EINVAL if the message is malformed or of an
 * unexpected type. */
static int
parse_odp_packet(struct ofpbuf *buf, struct dpif_upcall *upcall,
                 int *dp_ifindex)
{
    static const struct nl_policy ovs_packet_policy[] = {
        /* Always present. */
        [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
                                     .min_len = ETH_HEADER_LEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },

        /* OVS_PACKET_CMD_ACTION only. */
        [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
        [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true },
        [OVS_PACKET_ATTR_HASH] = { .type = NL_A_U64, .optional = true }
    };

    /* Peel off the Netlink, genetlink, and OVS headers in order; any of
     * these pulls returning NULL means the message is truncated. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_packet_family
        || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
                            ARRAY_SIZE(ovs_packet_policy))) {
        return EINVAL;
    }

    /* Only MISS and ACTION upcalls are expected from the kernel. */
    int type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
                : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
                : -1);
    if (type < 0) {
        return EINVAL;
    }

    /* (Re)set ALL fields of '*upcall' on successful return. */
    upcall->type = type;
    upcall->key = CONST_CAST(struct nlattr *,
                             nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
    upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
    odp_flow_key_hash(upcall->key, upcall->key_len, &upcall->ufid);
    upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
    upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
    upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
    upcall->mru = a[OVS_PACKET_ATTR_MRU];
    upcall->hash = a[OVS_PACKET_ATTR_HASH];

    /* Allow overwriting the netlink attribute header without reallocating:
     * the stub starts one nlattr before the payload, then data/size are
     * adjusted so the packet proper is what callers see. */
    dp_packet_use_stub(&upcall->packet,
                       CONST_CAST(struct nlattr *,
                                  nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
                       nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
                       sizeof(struct nlattr));
    dp_packet_set_data(&upcall->packet,
                       (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
    dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));

    if (nl_attr_find__(upcall->key, upcall->key_len, OVS_KEY_ATTR_ETHERNET)) {
        /* Ethernet frame */
        upcall->packet.packet_type = htonl(PT_ETH);
    } else {
        /* Non-Ethernet packet. Get the Ethertype from the NL attributes */
        ovs_be16 ethertype = 0;
        const struct nlattr *et_nla = nl_attr_find__(upcall->key,
                                                     upcall->key_len,
                                                     OVS_KEY_ATTR_ETHERTYPE);
        if (et_nla) {
            ethertype = nl_attr_get_be16(et_nla);
        }
        upcall->packet.packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                                    ntohs(ethertype));
        /* L2 header is absent, so the payload starts at L3. */
        dp_packet_set_l3(&upcall->packet, dp_packet_data(&upcall->packet));
    }

    *dp_ifindex = ovs_header->dp_ifindex;

    return 0;
}
2939 | | |
2940 | | #ifdef _WIN32 |
2941 | | #define PACKET_RECV_BATCH_SIZE 50 |
/* Windows implementation of upcall reception: round-robins over the single
 * handler's vport socket pool, reading at most PACKET_RECV_BATCH_SIZE
 * messages total, and returns the first upcall addressed to this datapath.
 *
 * Returns 0 and fills '*upcall'/'buf' on success; EAGAIN when nothing is
 * pending or the batch limit was reached; another positive errno value on
 * hard failure. */
static int
dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
                          struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;
    struct dpif_windows_vport_sock *sock_pool;
    uint32_t i;

    if (!dpif->handlers) {
        return EAGAIN;
    }

    /* Only one handler is supported currently. */
    if (handler_id >= 1) {
        return EAGAIN;
    }

    if (handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    sock_pool = handler->vport_sock_pool;

    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound total work per call so a busy socket cannot starve
             * the caller. */
            if (++read_tries > PACKET_RECV_BATCH_SIZE) {
                return EAGAIN;
            }

            error = nl_sock_recv(sock_pool[i].nl_sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                /* XXX: report_loss(dpif, ch, idx, handler_id); */
                continue;
            }

            /* XXX: ch->last_poll = time_msec(); */
            if (error) {
                if (error == EAGAIN) {
                    /* This socket is drained; move on to the next one. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                upcall->pid = 0;
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
3007 | | #else |
3008 | | static int |
3009 | | dpif_netlink_recv_cpu_dispatch(struct dpif_netlink *dpif, uint32_t handler_id, |
3010 | | struct dpif_upcall *upcall, struct ofpbuf *buf) |
3011 | | OVS_REQ_RDLOCK(dpif->upcall_lock) |
3012 | 0 | { |
3013 | 0 | struct dpif_handler *handler; |
3014 | 0 | int read_tries = 0; |
3015 | |
|
3016 | 0 | if (!dpif->handlers || handler_id >= dpif->n_handlers) { |
3017 | 0 | return EAGAIN; |
3018 | 0 | } |
3019 | | |
3020 | 0 | handler = &dpif->handlers[handler_id]; |
3021 | |
|
3022 | 0 | for (;;) { |
3023 | 0 | int dp_ifindex; |
3024 | 0 | int error; |
3025 | |
|
3026 | 0 | if (++read_tries > 50) { |
3027 | 0 | return EAGAIN; |
3028 | 0 | } |
3029 | 0 | error = nl_sock_recv(handler->sock, buf, NULL, false); |
3030 | 0 | if (error == ENOBUFS) { |
3031 | | /* ENOBUFS typically means that we've received so many |
3032 | | * packets that the buffer overflowed. Try again |
3033 | | * immediately because there's almost certainly a packet |
3034 | | * waiting for us. */ |
3035 | 0 | report_loss(dpif, NULL, 0, handler_id); |
3036 | 0 | continue; |
3037 | 0 | } |
3038 | | |
3039 | 0 | if (error) { |
3040 | 0 | if (error == EAGAIN) { |
3041 | 0 | break; |
3042 | 0 | } |
3043 | 0 | return error; |
3044 | 0 | } |
3045 | | |
3046 | 0 | error = parse_odp_packet(buf, upcall, &dp_ifindex); |
3047 | 0 | if (!error && dp_ifindex == dpif->dp_ifindex) { |
3048 | 0 | upcall->pid = nl_sock_pid(handler->sock); |
3049 | 0 | return 0; |
3050 | 0 | } else if (error) { |
3051 | 0 | return error; |
3052 | 0 | } |
3053 | 0 | } |
3054 | | |
3055 | 0 | return EAGAIN; |
3056 | 0 | } |
3057 | | |
/* Receives one upcall for 'handler_id' in per-vport dispatch mode: refills
 * the handler's epoll event list if it is exhausted, then reads from each
 * ready channel until a packet destined for this datapath is found.
 *
 * Returns 0 and fills '*upcall'/'buf' on success; EAGAIN when nothing is
 * pending or the retry budget is spent; another positive errno value on
 * hard failure. */
static int
dpif_netlink_recv_vport_dispatch(struct dpif_netlink *dpif,
                                 uint32_t handler_id,
                                 struct dpif_upcall *upcall,
                                 struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;

    if (!dpif->handlers || handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    if (handler->event_offset >= handler->n_events) {
        int retval;

        /* Event list consumed: poll again (non-blocking; timeout 0). */
        handler->event_offset = handler->n_events = 0;

        do {
            retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
                                dpif->uc_array_size, 0);
        } while (retval < 0 && errno == EINTR);

        if (retval < 0) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
        } else if (retval > 0) {
            handler->n_events = retval;
        }
    }

    while (handler->event_offset < handler->n_events) {
        /* The epoll user data carries the channel index for this event. */
        int idx = handler->epoll_events[handler->event_offset].data.u32;
        struct dpif_channel *ch = &dpif->channels[idx];

        handler->event_offset++;

        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound total reads per call so one busy channel cannot
             * starve the caller. */
            if (++read_tries > 50) {
                return EAGAIN;
            }

            error = nl_sock_recv(ch->sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                report_loss(dpif, ch, idx, handler_id);
                continue;
            }

            ch->last_poll = time_msec();
            if (error) {
                if (error == EAGAIN) {
                    /* This channel is drained; advance to the next event. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                upcall->pid = nl_sock_pid(ch->sock);
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
3135 | | #endif |
3136 | | |
3137 | | static int |
3138 | | dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id, |
3139 | | struct dpif_upcall *upcall, struct ofpbuf *buf) |
3140 | 0 | { |
3141 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
3142 | 0 | int error; |
3143 | |
|
3144 | 0 | fat_rwlock_rdlock(&dpif->upcall_lock); |
3145 | | #ifdef _WIN32 |
3146 | | error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf); |
3147 | | #else |
3148 | 0 | if (dpif_netlink_upcall_per_cpu(dpif)) { |
3149 | 0 | error = dpif_netlink_recv_cpu_dispatch(dpif, handler_id, upcall, buf); |
3150 | 0 | } else { |
3151 | 0 | error = dpif_netlink_recv_vport_dispatch(dpif, |
3152 | 0 | handler_id, upcall, buf); |
3153 | 0 | } |
3154 | 0 | #endif |
3155 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
3156 | |
|
3157 | 0 | return error; |
3158 | 0 | } |
3159 | | |
3160 | | #ifdef _WIN32 |
3161 | | static void |
3162 | | dpif_netlink_recv_wait_windows(struct dpif_netlink *dpif, uint32_t handler_id) |
3163 | | OVS_REQ_RDLOCK(dpif->upcall_lock) |
3164 | | { |
3165 | | uint32_t i; |
3166 | | struct dpif_windows_vport_sock *sock_pool = |
3167 | | dpif->handlers[handler_id].vport_sock_pool; |
3168 | | |
3169 | | /* Only one handler is supported currently. */ |
3170 | | if (handler_id >= 1) { |
3171 | | return; |
3172 | | } |
3173 | | |
3174 | | for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) { |
3175 | | nl_sock_wait(sock_pool[i].nl_sock, POLLIN); |
3176 | | } |
3177 | | } |
3178 | | #else |
3179 | | |
3180 | | static void |
3181 | | dpif_netlink_recv_wait_vport_dispatch(struct dpif_netlink *dpif, |
3182 | | uint32_t handler_id) |
3183 | | OVS_REQ_RDLOCK(dpif->upcall_lock) |
3184 | 0 | { |
3185 | 0 | if (dpif->handlers && handler_id < dpif->n_handlers) { |
3186 | 0 | struct dpif_handler *handler = &dpif->handlers[handler_id]; |
3187 | |
|
3188 | 0 | poll_fd_wait(handler->epoll_fd, POLLIN); |
3189 | 0 | } |
3190 | 0 | } |
3191 | | |
3192 | | static void |
3193 | | dpif_netlink_recv_wait_cpu_dispatch(struct dpif_netlink *dpif, |
3194 | | uint32_t handler_id) |
3195 | | OVS_REQ_RDLOCK(dpif->upcall_lock) |
3196 | 0 | { |
3197 | 0 | if (dpif->handlers && handler_id < dpif->n_handlers) { |
3198 | 0 | struct dpif_handler *handler = &dpif->handlers[handler_id]; |
3199 | |
|
3200 | 0 | poll_fd_wait(nl_sock_fd(handler->sock), POLLIN); |
3201 | 0 | } |
3202 | 0 | } |
3203 | | #endif |
3204 | | |
3205 | | static void |
3206 | | dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id) |
3207 | 0 | { |
3208 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
3209 | |
|
3210 | 0 | fat_rwlock_rdlock(&dpif->upcall_lock); |
3211 | | #ifdef _WIN32 |
3212 | | dpif_netlink_recv_wait_windows(dpif, handler_id); |
3213 | | #else |
3214 | 0 | if (dpif_netlink_upcall_per_cpu(dpif)) { |
3215 | 0 | dpif_netlink_recv_wait_cpu_dispatch(dpif, handler_id); |
3216 | 0 | } else { |
3217 | 0 | dpif_netlink_recv_wait_vport_dispatch(dpif, handler_id); |
3218 | 0 | } |
3219 | 0 | #endif |
3220 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
3221 | 0 | } |
3222 | | |
/* Discards all queued upcall messages from every per-vport channel socket.
 * A channel slot with a NULL socket is skipped implicitly: if the first
 * channel has no socket, none do (channels are created in bulk). */
static void
dpif_netlink_recv_purge_vport_dispatch(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    if (dpif->handlers) {
        size_t i;

        /* NOTE(review): this assumes 'dpif->channels' is non-NULL whenever
         * 'dpif->handlers' is set; verify against the channel-allocation
         * path before relying on it with uc_array_size == 0. */
        if (!dpif->channels[0].sock) {
            return;
        }
        for (i = 0; i < dpif->uc_array_size; i++ ) {

            nl_sock_drain(dpif->channels[i].sock);
        }
    }
}
3239 | | |
3240 | | static void |
3241 | | dpif_netlink_recv_purge_cpu_dispatch(struct dpif_netlink *dpif) |
3242 | | OVS_REQ_WRLOCK(dpif->upcall_lock) |
3243 | 0 | { |
3244 | 0 | int handler_id; |
3245 | |
|
3246 | 0 | if (dpif->handlers) { |
3247 | 0 | for (handler_id = 0; handler_id < dpif->n_handlers; handler_id++) { |
3248 | 0 | struct dpif_handler *handler = &dpif->handlers[handler_id]; |
3249 | 0 | nl_sock_drain(handler->sock); |
3250 | 0 | } |
3251 | 0 | } |
3252 | 0 | } |
3253 | | |
3254 | | static void |
3255 | | dpif_netlink_recv_purge(struct dpif *dpif_) |
3256 | 0 | { |
3257 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
3258 | |
|
3259 | 0 | fat_rwlock_wrlock(&dpif->upcall_lock); |
3260 | 0 | if (dpif_netlink_upcall_per_cpu(dpif)) { |
3261 | 0 | dpif_netlink_recv_purge_cpu_dispatch(dpif); |
3262 | 0 | } else { |
3263 | 0 | dpif_netlink_recv_purge_vport_dispatch(dpif); |
3264 | 0 | } |
3265 | 0 | fat_rwlock_unlock(&dpif->upcall_lock); |
3266 | 0 | } |
3267 | | |
/* Returns the version string of the Linux openvswitch kernel module, read
 * from sysfs, as a heap-allocated string the caller must free.  Returns
 * NULL when it cannot be determined (non-Linux build, or the sysfs file is
 * absent/unreadable). */
static char *
dpif_netlink_get_datapath_version(void)
{
    char *version_str = NULL;

#ifdef __linux__

#define MAX_VERSION_STR_SIZE 80
#define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
    FILE *f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");

    if (f) {
        char version[MAX_VERSION_STR_SIZE];

        if (fgets(version, MAX_VERSION_STR_SIZE, f)) {
            /* Strip the trailing newline, if present, before copying. */
            char *newline = strchr(version, '\n');

            if (newline) {
                *newline = '\0';
            }
            version_str = xstrdup(version);
        }
        fclose(f);
    }
#endif

    return version_str;
}
3297 | | |
/* State for an in-progress conntrack entry dump.  Embeds the generic
 * ct_dpif dump state ('up') so callers can down-cast back to this struct
 * with INIT_CONTAINER/CONTAINER_OF. */
struct dpif_netlink_ct_dump_state {
    struct ct_dpif_dump_state up;       /* Generic dump state (must be first
                                         * member for container casts). */
    struct nl_ct_dump_state *nl_ct_dump; /* Underlying netlink-ct dump. */
};
3302 | | |
3303 | | static int |
3304 | | dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED, |
3305 | | struct ct_dpif_dump_state **dump_, |
3306 | | const uint16_t *zone, int *ptot_bkts) |
3307 | 0 | { |
3308 | 0 | struct dpif_netlink_ct_dump_state *dump; |
3309 | 0 | int err; |
3310 | |
|
3311 | 0 | dump = xzalloc(sizeof *dump); |
3312 | 0 | err = nl_ct_dump_start(&dump->nl_ct_dump, zone, ptot_bkts); |
3313 | 0 | if (err) { |
3314 | 0 | free(dump); |
3315 | 0 | return err; |
3316 | 0 | } |
3317 | | |
3318 | 0 | *dump_ = &dump->up; |
3319 | |
|
3320 | 0 | return 0; |
3321 | 0 | } |
3322 | | |
3323 | | static int |
3324 | | dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED, |
3325 | | struct ct_dpif_dump_state *dump_, |
3326 | | struct ct_dpif_entry *entry) |
3327 | 0 | { |
3328 | 0 | struct dpif_netlink_ct_dump_state *dump; |
3329 | |
|
3330 | 0 | INIT_CONTAINER(dump, dump_, up); |
3331 | |
|
3332 | 0 | return nl_ct_dump_next(dump->nl_ct_dump, entry); |
3333 | 0 | } |
3334 | | |
3335 | | static int |
3336 | | dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED, |
3337 | | struct ct_dpif_dump_state *dump_) |
3338 | 0 | { |
3339 | 0 | struct dpif_netlink_ct_dump_state *dump; |
3340 | |
|
3341 | 0 | INIT_CONTAINER(dump, dump_, up); |
3342 | |
|
3343 | 0 | int err = nl_ct_dump_done(dump->nl_ct_dump); |
3344 | 0 | free(dump); |
3345 | 0 | return err; |
3346 | 0 | } |
3347 | | |
3348 | | static int |
3349 | | dpif_netlink_ct_flush(struct dpif *dpif OVS_UNUSED, const uint16_t *zone, |
3350 | | const struct ct_dpif_tuple *tuple) |
3351 | 0 | { |
3352 | 0 | if (tuple) { |
3353 | 0 | return nl_ct_flush_tuple(tuple, zone ? *zone : 0); |
3354 | 0 | } else if (zone) { |
3355 | 0 | return nl_ct_flush_zone(*zone); |
3356 | 0 | } else { |
3357 | 0 | return nl_ct_flush(); |
3358 | 0 | } |
3359 | 0 | } |
3360 | | |
3361 | | static int |
3362 | | dpif_netlink_ct_set_limits(struct dpif *dpif OVS_UNUSED, |
3363 | | const struct ovs_list *zone_limits) |
3364 | 0 | { |
3365 | 0 | if (ovs_ct_limit_family < 0) { |
3366 | 0 | return EOPNOTSUPP; |
3367 | 0 | } |
3368 | | |
3369 | 0 | struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE); |
3370 | 0 | nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family, |
3371 | 0 | NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_SET, |
3372 | 0 | OVS_CT_LIMIT_VERSION); |
3373 | |
|
3374 | 0 | struct ovs_header *ovs_header; |
3375 | 0 | ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header); |
3376 | 0 | ovs_header->dp_ifindex = 0; |
3377 | |
|
3378 | 0 | size_t opt_offset; |
3379 | 0 | opt_offset = nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT); |
3380 | |
|
3381 | 0 | if (!ovs_list_is_empty(zone_limits)) { |
3382 | 0 | struct ct_dpif_zone_limit *zone_limit; |
3383 | |
|
3384 | 0 | LIST_FOR_EACH (zone_limit, node, zone_limits) { |
3385 | 0 | struct ovs_zone_limit req_zone_limit = { |
3386 | 0 | .zone_id = zone_limit->zone, |
3387 | 0 | .limit = zone_limit->limit, |
3388 | 0 | }; |
3389 | 0 | nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit); |
3390 | 0 | } |
3391 | 0 | } |
3392 | 0 | nl_msg_end_nested(request, opt_offset); |
3393 | |
|
3394 | 0 | int err = nl_transact(NETLINK_GENERIC, request, NULL); |
3395 | 0 | ofpbuf_delete(request); |
3396 | 0 | return err; |
3397 | 0 | } |
3398 | | |
3399 | | static int |
3400 | | dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf *buf, |
3401 | | struct ovs_list *zone_limits) |
3402 | 0 | { |
3403 | 0 | static const struct nl_policy ovs_ct_limit_policy[] = { |
3404 | 0 | [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NL_A_NESTED, |
3405 | 0 | .optional = true }, |
3406 | 0 | }; |
3407 | |
|
3408 | 0 | struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size); |
3409 | 0 | struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg); |
3410 | 0 | struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl); |
3411 | 0 | struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header); |
3412 | |
|
3413 | 0 | struct nlattr *attr[ARRAY_SIZE(ovs_ct_limit_policy)]; |
3414 | |
|
3415 | 0 | if (!nlmsg || !genl || !ovs_header |
3416 | 0 | || nlmsg->nlmsg_type != ovs_ct_limit_family |
3417 | 0 | || !nl_policy_parse(&b, 0, ovs_ct_limit_policy, attr, |
3418 | 0 | ARRAY_SIZE(ovs_ct_limit_policy))) { |
3419 | 0 | return EINVAL; |
3420 | 0 | } |
3421 | | |
3422 | | |
3423 | 0 | if (!attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) { |
3424 | 0 | return EINVAL; |
3425 | 0 | } |
3426 | | |
3427 | 0 | int rem = NLA_ALIGN( |
3428 | 0 | nl_attr_get_size(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT])); |
3429 | 0 | const struct ovs_zone_limit *zone_limit = |
3430 | 0 | nl_attr_get(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]); |
3431 | |
|
3432 | 0 | while (rem >= sizeof *zone_limit) { |
3433 | 0 | if (zone_limit->zone_id >= OVS_ZONE_LIMIT_DEFAULT_ZONE && |
3434 | 0 | zone_limit->zone_id <= UINT16_MAX) { |
3435 | 0 | ct_dpif_push_zone_limit(zone_limits, zone_limit->zone_id, |
3436 | 0 | zone_limit->limit, zone_limit->count); |
3437 | 0 | } |
3438 | 0 | rem -= NLA_ALIGN(sizeof *zone_limit); |
3439 | 0 | zone_limit = ALIGNED_CAST(struct ovs_zone_limit *, |
3440 | 0 | (unsigned char *) zone_limit + NLA_ALIGN(sizeof *zone_limit)); |
3441 | 0 | } |
3442 | 0 | return 0; |
3443 | 0 | } |
3444 | | |
3445 | | static int |
3446 | | dpif_netlink_ct_get_limits(struct dpif *dpif OVS_UNUSED, |
3447 | | const struct ovs_list *zone_limits_request, |
3448 | | struct ovs_list *zone_limits_reply) |
3449 | 0 | { |
3450 | 0 | if (ovs_ct_limit_family < 0) { |
3451 | 0 | return EOPNOTSUPP; |
3452 | 0 | } |
3453 | | |
3454 | 0 | struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE); |
3455 | 0 | nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family, |
3456 | 0 | NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_GET, |
3457 | 0 | OVS_CT_LIMIT_VERSION); |
3458 | |
|
3459 | 0 | struct ovs_header *ovs_header; |
3460 | 0 | ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header); |
3461 | 0 | ovs_header->dp_ifindex = 0; |
3462 | |
|
3463 | 0 | if (!ovs_list_is_empty(zone_limits_request)) { |
3464 | 0 | size_t opt_offset = nl_msg_start_nested(request, |
3465 | 0 | OVS_CT_LIMIT_ATTR_ZONE_LIMIT); |
3466 | |
|
3467 | 0 | struct ct_dpif_zone_limit *zone_limit; |
3468 | 0 | LIST_FOR_EACH (zone_limit, node, zone_limits_request) { |
3469 | 0 | struct ovs_zone_limit req_zone_limit = { |
3470 | 0 | .zone_id = zone_limit->zone, |
3471 | 0 | }; |
3472 | 0 | nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit); |
3473 | 0 | } |
3474 | |
|
3475 | 0 | nl_msg_end_nested(request, opt_offset); |
3476 | 0 | } |
3477 | |
|
3478 | 0 | struct ofpbuf *reply; |
3479 | 0 | int err = nl_transact(NETLINK_GENERIC, request, &reply); |
3480 | 0 | if (err) { |
3481 | 0 | goto out; |
3482 | 0 | } |
3483 | | |
3484 | 0 | err = dpif_netlink_zone_limits_from_ofpbuf(reply, zone_limits_reply); |
3485 | |
|
3486 | 0 | out: |
3487 | 0 | ofpbuf_delete(request); |
3488 | 0 | ofpbuf_delete(reply); |
3489 | 0 | return err; |
3490 | 0 | } |
3491 | | |
3492 | | static int |
3493 | | dpif_netlink_ct_del_limits(struct dpif *dpif OVS_UNUSED, |
3494 | | const struct ovs_list *zone_limits) |
3495 | 0 | { |
3496 | 0 | if (ovs_ct_limit_family < 0) { |
3497 | 0 | return EOPNOTSUPP; |
3498 | 0 | } |
3499 | | |
3500 | 0 | struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE); |
3501 | 0 | nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family, |
3502 | 0 | NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_DEL, |
3503 | 0 | OVS_CT_LIMIT_VERSION); |
3504 | |
|
3505 | 0 | struct ovs_header *ovs_header; |
3506 | 0 | ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header); |
3507 | 0 | ovs_header->dp_ifindex = 0; |
3508 | |
|
3509 | 0 | if (!ovs_list_is_empty(zone_limits)) { |
3510 | 0 | size_t opt_offset = |
3511 | 0 | nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT); |
3512 | |
|
3513 | 0 | struct ct_dpif_zone_limit *zone_limit; |
3514 | 0 | LIST_FOR_EACH (zone_limit, node, zone_limits) { |
3515 | 0 | struct ovs_zone_limit req_zone_limit = { |
3516 | 0 | .zone_id = zone_limit->zone, |
3517 | 0 | }; |
3518 | 0 | nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit); |
3519 | 0 | } |
3520 | 0 | nl_msg_end_nested(request, opt_offset); |
3521 | 0 | } |
3522 | |
|
3523 | 0 | int err = nl_transact(NETLINK_GENERIC, request, NULL); |
3524 | |
|
3525 | 0 | ofpbuf_delete(request); |
3526 | 0 | return err; |
3527 | 0 | } |
3528 | | |
3529 | 0 | #define NL_TP_NAME_PREFIX "ovs_tp_" |
3530 | | |
/* One (L3, L4) protocol pair for which a separate kernel conntrack timeout
 * policy must be installed. */
struct dpif_netlink_timeout_policy_protocol {
    uint16_t l3num;             /* Address family: AF_INET or AF_INET6. */
    uint8_t l4num;              /* IP protocol number, e.g. IPPROTO_TCP. */
};
3535 | | |
/* Index of each supported (L3, L4) protocol combination.  Used to index
 * 'tp_protos' and as a bit position in a bitmap of sub-policies that have
 * been seen while dumping timeout policies (see DPIF_NL_ALL_TP). */
enum OVS_PACKED_ENUM dpif_netlink_support_timeout_policy_protocol {
    DPIF_NL_TP_AF_INET_TCP,
    DPIF_NL_TP_AF_INET_UDP,
    DPIF_NL_TP_AF_INET_ICMP,
    DPIF_NL_TP_AF_INET6_TCP,
    DPIF_NL_TP_AF_INET6_UDP,
    DPIF_NL_TP_AF_INET6_ICMPV6,
    DPIF_NL_TP_MAX
};
3545 | | |
3546 | 0 | #define DPIF_NL_ALL_TP ((1UL << DPIF_NL_TP_MAX) - 1) |
3547 | | |
3548 | | |
/* Table of every (L3, L4) protocol pair that gets its own kernel timeout
 * policy, indexed by enum dpif_netlink_support_timeout_policy_protocol. */
static struct dpif_netlink_timeout_policy_protocol tp_protos[] = {
    [DPIF_NL_TP_AF_INET_TCP] = { .l3num = AF_INET, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET_UDP] = { .l3num = AF_INET, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET_ICMP] = { .l3num = AF_INET, .l4num = IPPROTO_ICMP },
    [DPIF_NL_TP_AF_INET6_TCP] = { .l3num = AF_INET6, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET6_UDP] = { .l3num = AF_INET6, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET6_ICMPV6] = { .l3num = AF_INET6,
                                     .l4num = IPPROTO_ICMPV6 },
};
3558 | | |
/* Composes the kernel conntrack timeout policy name for OVS timeout policy
 * 'id' and protocol pair ('l3num', 'l4num') into a malloc'd string stored
 * in '*tp_name'.  The caller must free it.
 *
 * The name has the form "ovs_tp_<id>_<proto>[4|6]", e.g. "ovs_tp_1_tcp4".
 * No family suffix is appended for ICMPv6 because the protocol name alone
 * already identifies the address family. */
static void
dpif_netlink_format_tp_name(uint32_t id, uint16_t l3num, uint8_t l4num,
                            char **tp_name)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    ds_put_format(&ds, "%s%"PRIu32"_", NL_TP_NAME_PREFIX, id);
    ct_dpif_format_ipproto(&ds, l4num);

    if (l3num == AF_INET) {
        ds_put_cstr(&ds, "4");
    } else if (l3num == AF_INET6 && l4num != IPPROTO_ICMPV6) {
        ds_put_cstr(&ds, "6");
    }

    /* The kernel limits conntrack timeout policy names to
     * CTNL_TIMEOUT_NAME_MAX bytes. */
    ovs_assert(ds.length < CTNL_TIMEOUT_NAME_MAX);

    *tp_name = ds_steal_cstr(&ds);
}
3577 | | |
3578 | | static int |
3579 | | dpif_netlink_ct_get_timeout_policy_name(struct dpif *dpif OVS_UNUSED, |
3580 | | uint32_t tp_id, uint16_t dl_type, |
3581 | | uint8_t nw_proto, char **tp_name, |
3582 | | bool *is_generic) |
3583 | 0 | { |
3584 | 0 | dpif_netlink_format_tp_name(tp_id, |
3585 | 0 | dl_type == ETH_TYPE_IP ? AF_INET : AF_INET6, |
3586 | 0 | nw_proto, tp_name); |
3587 | 0 | *is_generic = false; |
3588 | 0 | return 0; |
3589 | 0 | } |
3590 | | |
3591 | | static int |
3592 | | dpif_netlink_ct_get_features(struct dpif *dpif OVS_UNUSED, |
3593 | | enum ct_features *features) |
3594 | 0 | { |
3595 | 0 | if (features != NULL) { |
3596 | 0 | #ifndef _WIN32 |
3597 | 0 | *features = CONNTRACK_F_ZERO_SNAT; |
3598 | | #else |
3599 | | *features = 0; |
3600 | | #endif |
3601 | 0 | } |
3602 | 0 | return 0; |
3603 | 0 | } |
3604 | | |
/* Per-protocol lists mapping ct_dpif timeout attributes to the kernel's
 * CTA_TIMEOUT_* attributes.  Each list expands
 * CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2), which is defined
 * differently below depending on the direction of the conversion
 * (ct_dpif -> netlink or netlink -> ct_dpif). */
#define CT_DPIF_NL_TP_TCP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT, SYN_SENT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_RECV, SYN_RECV)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, ESTABLISHED, ESTABLISHED)       \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, FIN_WAIT, FIN_WAIT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE_WAIT, CLOSE_WAIT)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, LAST_ACK, LAST_ACK)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, TIME_WAIT, TIME_WAIT)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE, CLOSE)                   \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT2, SYN_SENT2)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, RETRANSMIT, RETRANS)            \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, UNACK, UNACK)

#define CT_DPIF_NL_TP_UDP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, SINGLE, UNREPLIED)              \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, MULTIPLE, REPLIED)

#define CT_DPIF_NL_TP_ICMP_MAPPINGS                                 \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMP, FIRST, TIMEOUT)

#define CT_DPIF_NL_TP_ICMPV6_MAPPINGS                               \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMPV6, FIRST, TIMEOUT)
3627 | | |
3628 | | |
/* Direction ct_dpif -> netlink: for each attribute present in 'tp', copy
 * its value into the matching CTA_TIMEOUT_* slot of 'nl_tp' and mark the
 * slot present. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)     \
if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {  \
    nl_tp->present |= 1 << CTA_TIMEOUT_##PROTO2##_##ATTR2;      \
    nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2] =              \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1];          \
}
3635 | | |
/* Copies the TCP timeout attributes present in 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_tcp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Copies the UDP timeout attributes present in 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_udp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Copies the ICMP timeout attributes present in 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_icmp_attrs(const struct ct_dpif_timeout_policy *tp,
                                  struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Copies the ICMPv6 timeout attributes present in 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_icmpv6_attrs(const struct ct_dpif_timeout_policy *tp,
                                    struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}
3663 | | |
3664 | | #undef CT_DPIF_NL_TP_MAPPING |
3665 | | |
3666 | | static void |
3667 | | dpif_netlink_get_nl_tp_attrs(const struct ct_dpif_timeout_policy *tp, |
3668 | | uint8_t l4num, struct nl_ct_timeout_policy *nl_tp) |
3669 | 0 | { |
3670 | 0 | nl_tp->present = 0; |
3671 | |
|
3672 | 0 | if (l4num == IPPROTO_TCP) { |
3673 | 0 | dpif_netlink_get_nl_tp_tcp_attrs(tp, nl_tp); |
3674 | 0 | } else if (l4num == IPPROTO_UDP) { |
3675 | 0 | dpif_netlink_get_nl_tp_udp_attrs(tp, nl_tp); |
3676 | 0 | } else if (l4num == IPPROTO_ICMP) { |
3677 | 0 | dpif_netlink_get_nl_tp_icmp_attrs(tp, nl_tp); |
3678 | 0 | } else if (l4num == IPPROTO_ICMPV6) { |
3679 | 0 | dpif_netlink_get_nl_tp_icmpv6_attrs(tp, nl_tp); |
3680 | 0 | } |
3681 | 0 | } |
3682 | | |
/* Direction netlink -> ct_dpif: for each attribute present in 'nl_tp',
 * either install it into 'tp' (if not yet present) or warn when the value
 * already stored in 'tp' disagrees, since all sub-policies of one OVS
 * timeout policy are expected to carry consistent values. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)             \
if (nl_tp->present & (1 << CTA_TIMEOUT_##PROTO2##_##ATTR2)) {           \
    if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {      \
        if (tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] !=            \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]) {             \
            VLOG_WARN_RL(&error_rl, "Inconsistent timeout policy %s "   \
                         "attribute %s=%"PRIu32" while %s=%"PRIu32,     \
                         nl_tp->name, "CTA_TIMEOUT_"#PROTO2"_"#ATTR2,   \
                         nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2],  \
                         "CT_DPIF_TP_ATTR_"#PROTO1"_"#ATTR1,            \
                         tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]); \
        }                                                               \
    } else {                                                            \
        tp->present |= 1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1;         \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] =                 \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2];               \
    }                                                                   \
}
3701 | | |
/* Merges the TCP timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_tcp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Merges the UDP timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_udp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Merges the ICMP timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_icmp_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Merges the ICMPv6 timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}
3731 | | |
3732 | | #undef CT_DPIF_NL_TP_MAPPING |
3733 | | |
3734 | | static void |
3735 | | dpif_netlink_set_ct_dpif_tp_attrs(const struct nl_ct_timeout_policy *nl_tp, |
3736 | | struct ct_dpif_timeout_policy *tp) |
3737 | 0 | { |
3738 | 0 | if (nl_tp->l4num == IPPROTO_TCP) { |
3739 | 0 | dpif_netlink_set_ct_dpif_tp_tcp_attrs(nl_tp, tp); |
3740 | 0 | } else if (nl_tp->l4num == IPPROTO_UDP) { |
3741 | 0 | dpif_netlink_set_ct_dpif_tp_udp_attrs(nl_tp, tp); |
3742 | 0 | } else if (nl_tp->l4num == IPPROTO_ICMP) { |
3743 | 0 | dpif_netlink_set_ct_dpif_tp_icmp_attrs(nl_tp, tp); |
3744 | 0 | } else if (nl_tp->l4num == IPPROTO_ICMPV6) { |
3745 | 0 | dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(nl_tp, tp); |
3746 | 0 | } |
3747 | 0 | } |
3748 | | |
3749 | | #ifdef _WIN32 |
/* Conntrack timeout policies are not supported on Windows; every
 * operation simply reports EOPNOTSUPP. */
static int
dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   const struct ct_dpif_timeout_policy *tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   uint32_t tp_id,
                                   struct ct_dpif_timeout_policy *tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   uint32_t tp_id)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
                                          void **statep)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
                                         void *state,
                                         struct ct_dpif_timeout_policy **tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
                                         void *state)
{
    return EOPNOTSUPP;
}
3793 | | #else |
3794 | | static int |
3795 | | dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED, |
3796 | | const struct ct_dpif_timeout_policy *tp) |
3797 | 0 | { |
3798 | 0 | int err = 0; |
3799 | |
|
3800 | 0 | for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) { |
3801 | 0 | struct nl_ct_timeout_policy nl_tp; |
3802 | 0 | char *nl_tp_name; |
3803 | |
|
3804 | 0 | dpif_netlink_format_tp_name(tp->id, tp_protos[i].l3num, |
3805 | 0 | tp_protos[i].l4num, &nl_tp_name); |
3806 | 0 | ovs_strlcpy(nl_tp.name, nl_tp_name, sizeof nl_tp.name); |
3807 | 0 | free(nl_tp_name); |
3808 | |
|
3809 | 0 | nl_tp.l3num = tp_protos[i].l3num; |
3810 | 0 | nl_tp.l4num = tp_protos[i].l4num; |
3811 | 0 | dpif_netlink_get_nl_tp_attrs(tp, tp_protos[i].l4num, &nl_tp); |
3812 | 0 | err = nl_ct_set_timeout_policy(&nl_tp); |
3813 | 0 | if (err) { |
3814 | 0 | VLOG_WARN_RL(&error_rl, "failed to add timeout policy %s (%s)", |
3815 | 0 | nl_tp.name, ovs_strerror(err)); |
3816 | 0 | goto out; |
3817 | 0 | } |
3818 | 0 | } |
3819 | | |
3820 | 0 | out: |
3821 | 0 | return err; |
3822 | 0 | } |
3823 | | |
3824 | | static int |
3825 | | dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED, |
3826 | | uint32_t tp_id, |
3827 | | struct ct_dpif_timeout_policy *tp) |
3828 | 0 | { |
3829 | 0 | int err = 0; |
3830 | |
|
3831 | 0 | tp->id = tp_id; |
3832 | 0 | tp->present = 0; |
3833 | 0 | for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) { |
3834 | 0 | struct nl_ct_timeout_policy nl_tp; |
3835 | 0 | char *nl_tp_name; |
3836 | |
|
3837 | 0 | dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num, |
3838 | 0 | tp_protos[i].l4num, &nl_tp_name); |
3839 | 0 | err = nl_ct_get_timeout_policy(nl_tp_name, &nl_tp); |
3840 | |
|
3841 | 0 | if (err) { |
3842 | 0 | VLOG_WARN_RL(&error_rl, "failed to get timeout policy %s (%s)", |
3843 | 0 | nl_tp_name, ovs_strerror(err)); |
3844 | 0 | free(nl_tp_name); |
3845 | 0 | goto out; |
3846 | 0 | } |
3847 | 0 | free(nl_tp_name); |
3848 | 0 | dpif_netlink_set_ct_dpif_tp_attrs(&nl_tp, tp); |
3849 | 0 | } |
3850 | | |
3851 | 0 | out: |
3852 | 0 | return err; |
3853 | 0 | } |
3854 | | |
3855 | | /* Returns 0 if all the sub timeout policies are deleted or not exist in the |
3856 | | * kernel. Returns 1 if any sub timeout policy deletion failed. */ |
3857 | | static int |
3858 | | dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED, |
3859 | | uint32_t tp_id) |
3860 | 0 | { |
3861 | 0 | int ret = 0; |
3862 | |
|
3863 | 0 | for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) { |
3864 | 0 | char *nl_tp_name; |
3865 | 0 | dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num, |
3866 | 0 | tp_protos[i].l4num, &nl_tp_name); |
3867 | 0 | int err = nl_ct_del_timeout_policy(nl_tp_name); |
3868 | 0 | if (err == ENOENT) { |
3869 | 0 | err = 0; |
3870 | 0 | } |
3871 | 0 | if (err) { |
3872 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(6, 6); |
3873 | 0 | VLOG_INFO_RL(&rl, "failed to delete timeout policy %s (%s)", |
3874 | 0 | nl_tp_name, ovs_strerror(err)); |
3875 | 0 | ret = 1; |
3876 | 0 | } |
3877 | 0 | free(nl_tp_name); |
3878 | 0 | } |
3879 | |
|
3880 | 0 | return ret; |
3881 | 0 | } |
3882 | | |
/* State for an in-progress timeout policy dump. */
struct dpif_netlink_ct_timeout_policy_dump_state {
    struct nl_ct_timeout_policy_dump_state *nl_dump_state; /* Kernel dump. */
    struct hmap tp_dump_map;    /* Partially assembled policies, keyed by
                                 * hash_int(tp id, 0). */
};
3887 | | |
/* Aggregates the per-protocol sub-policies of one OVS timeout policy as
 * they are encountered during a dump. */
struct dpif_netlink_tp_dump_node {
    struct hmap_node hmap_node;      /* node in tp_dump_map. */
    struct ct_dpif_timeout_policy *tp;  /* Policy being assembled. */
    uint32_t l3_l4_present;          /* Bitmap of sub-policies seen so far,
                                      * bit positions from the
                                      * DPIF_NL_TP_* enum. */
};
3893 | | |
3894 | | static struct dpif_netlink_tp_dump_node * |
3895 | | get_dpif_netlink_tp_dump_node_by_tp_id(uint32_t tp_id, |
3896 | | struct hmap *tp_dump_map) |
3897 | 0 | { |
3898 | 0 | struct dpif_netlink_tp_dump_node *tp_dump_node; |
3899 | |
|
3900 | 0 | HMAP_FOR_EACH_WITH_HASH (tp_dump_node, hmap_node, hash_int(tp_id, 0), |
3901 | 0 | tp_dump_map) { |
3902 | 0 | if (tp_dump_node->tp->id == tp_id) { |
3903 | 0 | return tp_dump_node; |
3904 | 0 | } |
3905 | 0 | } |
3906 | 0 | return NULL; |
3907 | 0 | } |
3908 | | |
3909 | | static void |
3910 | | update_dpif_netlink_tp_dump_node( |
3911 | | const struct nl_ct_timeout_policy *nl_tp, |
3912 | | struct dpif_netlink_tp_dump_node *tp_dump_node) |
3913 | 0 | { |
3914 | 0 | dpif_netlink_set_ct_dpif_tp_attrs(nl_tp, tp_dump_node->tp); |
3915 | 0 | for (int i = 0; i < DPIF_NL_TP_MAX; ++i) { |
3916 | 0 | if (nl_tp->l3num == tp_protos[i].l3num && |
3917 | 0 | nl_tp->l4num == tp_protos[i].l4num) { |
3918 | 0 | tp_dump_node->l3_l4_present |= 1 << i; |
3919 | 0 | break; |
3920 | 0 | } |
3921 | 0 | } |
3922 | 0 | } |
3923 | | |
3924 | | static int |
3925 | | dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED, |
3926 | | void **statep) |
3927 | 0 | { |
3928 | 0 | struct dpif_netlink_ct_timeout_policy_dump_state *dump_state; |
3929 | |
|
3930 | 0 | *statep = dump_state = xzalloc(sizeof *dump_state); |
3931 | 0 | int err = nl_ct_timeout_policy_dump_start(&dump_state->nl_dump_state); |
3932 | 0 | if (err) { |
3933 | 0 | free(dump_state); |
3934 | 0 | return err; |
3935 | 0 | } |
3936 | 0 | hmap_init(&dump_state->tp_dump_map); |
3937 | 0 | return 0; |
3938 | 0 | } |
3939 | | |
3940 | | static void |
3941 | | get_and_cleanup_tp_dump_node(struct hmap *hmap, |
3942 | | struct dpif_netlink_tp_dump_node *tp_dump_node, |
3943 | | struct ct_dpif_timeout_policy *tp) |
3944 | 0 | { |
3945 | 0 | hmap_remove(hmap, &tp_dump_node->hmap_node); |
3946 | 0 | *tp = *tp_dump_node->tp; |
3947 | 0 | free(tp_dump_node->tp); |
3948 | 0 | free(tp_dump_node); |
3949 | 0 | } |
3950 | | |
3951 | | static int |
3952 | | dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED, |
3953 | | void *state, |
3954 | | struct ct_dpif_timeout_policy *tp) |
3955 | 0 | { |
3956 | 0 | struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state; |
3957 | 0 | struct dpif_netlink_tp_dump_node *tp_dump_node; |
3958 | 0 | int err; |
3959 | | |
3960 | | /* Dumps all the timeout policies in the kernel. */ |
3961 | 0 | do { |
3962 | 0 | struct nl_ct_timeout_policy nl_tp; |
3963 | 0 | uint32_t tp_id; |
3964 | |
|
3965 | 0 | err = nl_ct_timeout_policy_dump_next(dump_state->nl_dump_state, |
3966 | 0 | &nl_tp); |
3967 | 0 | if (err) { |
3968 | 0 | break; |
3969 | 0 | } |
3970 | | |
3971 | | /* We only interest in OVS installed timeout policies. */ |
3972 | 0 | if (!ovs_scan(nl_tp.name, NL_TP_NAME_PREFIX"%"PRIu32, &tp_id)) { |
3973 | 0 | continue; |
3974 | 0 | } |
3975 | | |
3976 | 0 | tp_dump_node = get_dpif_netlink_tp_dump_node_by_tp_id( |
3977 | 0 | tp_id, &dump_state->tp_dump_map); |
3978 | 0 | if (!tp_dump_node) { |
3979 | 0 | tp_dump_node = xzalloc(sizeof *tp_dump_node); |
3980 | 0 | tp_dump_node->tp = xzalloc(sizeof *tp_dump_node->tp); |
3981 | 0 | tp_dump_node->tp->id = tp_id; |
3982 | 0 | hmap_insert(&dump_state->tp_dump_map, &tp_dump_node->hmap_node, |
3983 | 0 | hash_int(tp_id, 0)); |
3984 | 0 | } |
3985 | |
|
3986 | 0 | update_dpif_netlink_tp_dump_node(&nl_tp, tp_dump_node); |
3987 | | |
3988 | | /* Returns one ct_dpif_timeout_policy if we gather all the L3/L4 |
3989 | | * sub-pieces. */ |
3990 | 0 | if (tp_dump_node->l3_l4_present == DPIF_NL_ALL_TP) { |
3991 | 0 | get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map, |
3992 | 0 | tp_dump_node, tp); |
3993 | 0 | break; |
3994 | 0 | } |
3995 | 0 | } while (true); |
3996 | | |
3997 | | /* Dump the incomplete timeout policies. */ |
3998 | 0 | if (err == EOF) { |
3999 | 0 | if (!hmap_is_empty(&dump_state->tp_dump_map)) { |
4000 | 0 | struct hmap_node *hmap_node = hmap_first(&dump_state->tp_dump_map); |
4001 | 0 | tp_dump_node = CONTAINER_OF(hmap_node, |
4002 | 0 | struct dpif_netlink_tp_dump_node, |
4003 | 0 | hmap_node); |
4004 | 0 | get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map, |
4005 | 0 | tp_dump_node, tp); |
4006 | 0 | return 0; |
4007 | 0 | } |
4008 | 0 | } |
4009 | | |
4010 | 0 | return err; |
4011 | 0 | } |
4012 | | |
4013 | | static int |
4014 | | dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED, |
4015 | | void *state) |
4016 | 0 | { |
4017 | 0 | struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state; |
4018 | 0 | struct dpif_netlink_tp_dump_node *tp_dump_node; |
4019 | |
|
4020 | 0 | int err = nl_ct_timeout_policy_dump_done(dump_state->nl_dump_state); |
4021 | 0 | HMAP_FOR_EACH_POP (tp_dump_node, hmap_node, &dump_state->tp_dump_map) { |
4022 | 0 | free(tp_dump_node->tp); |
4023 | 0 | free(tp_dump_node); |
4024 | 0 | } |
4025 | 0 | hmap_destroy(&dump_state->tp_dump_map); |
4026 | 0 | free(dump_state); |
4027 | 0 | return err; |
4028 | 0 | } |
4029 | | #endif |
4030 | | |
4031 | | |
4032 | | /* Meters */ |
4033 | | |
4034 | | /* Set of supported meter flags */ |
4035 | | #define DP_SUPPORTED_METER_FLAGS_MASK \ |
4036 | 0 | (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST) |
4037 | | |
4038 | | /* Meter support was introduced in Linux 4.15. In some versions of |
4039 | | * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id |
4040 | | * when the meter was created, so all meters essentially had an id of |
4041 | | * zero. Check for that condition and disable meters on those kernels. */ |
4042 | | static bool probe_broken_meters(struct dpif *); |
4043 | | |
4044 | | static void |
4045 | | dpif_netlink_meter_init(struct dpif_netlink *dpif, struct ofpbuf *buf, |
4046 | | void *stub, size_t size, uint32_t command) |
4047 | 0 | { |
4048 | 0 | ofpbuf_use_stub(buf, stub, size); |
4049 | |
|
4050 | 0 | nl_msg_put_genlmsghdr(buf, 0, ovs_meter_family, NLM_F_REQUEST | NLM_F_ECHO, |
4051 | 0 | command, OVS_METER_VERSION); |
4052 | |
|
4053 | 0 | struct ovs_header *ovs_header; |
4054 | 0 | ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header); |
4055 | 0 | ovs_header->dp_ifindex = dpif->dp_ifindex; |
4056 | 0 | } |
4057 | | |
4058 | | /* Execute meter 'request' in the kernel datapath. If the command |
4059 | | * fails, returns a positive errno value. Otherwise, stores the reply |
4060 | | * in '*replyp', parses the policy according to 'reply_policy' into the |
4061 | | * array of Netlink attribute in 'a', and returns 0. On success, the |
4062 | | * caller is responsible for calling ofpbuf_delete() on '*replyp' |
4063 | | * ('replyp' will contain pointers into 'a'). */ |
4064 | | static int |
4065 | | dpif_netlink_meter_transact(struct ofpbuf *request, struct ofpbuf **replyp, |
4066 | | const struct nl_policy *reply_policy, |
4067 | | struct nlattr **a, size_t size_a) |
4068 | 0 | { |
4069 | 0 | int error = nl_transact(NETLINK_GENERIC, request, replyp); |
4070 | 0 | ofpbuf_uninit(request); |
4071 | |
|
4072 | 0 | if (error) { |
4073 | 0 | return error; |
4074 | 0 | } |
4075 | | |
4076 | 0 | struct nlmsghdr *nlmsg = ofpbuf_try_pull(*replyp, sizeof *nlmsg); |
4077 | 0 | struct genlmsghdr *genl = ofpbuf_try_pull(*replyp, sizeof *genl); |
4078 | 0 | struct ovs_header *ovs_header = ofpbuf_try_pull(*replyp, |
4079 | 0 | sizeof *ovs_header); |
4080 | 0 | if (!nlmsg || !genl || !ovs_header |
4081 | 0 | || nlmsg->nlmsg_type != ovs_meter_family |
4082 | 0 | || !nl_policy_parse(*replyp, 0, reply_policy, a, size_a)) { |
4083 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
4084 | 0 | VLOG_DBG_RL(&rl, |
4085 | 0 | "Kernel module response to meter tranaction is invalid"); |
4086 | 0 | ofpbuf_delete(*replyp); |
4087 | 0 | return EINVAL; |
4088 | 0 | } |
4089 | 0 | return 0; |
4090 | 0 | } |
4091 | | |
4092 | | static void |
4093 | | dpif_netlink_meter_get_features(const struct dpif *dpif_, |
4094 | | struct ofputil_meter_features *features) |
4095 | 0 | { |
4096 | 0 | if (probe_broken_meters(CONST_CAST(struct dpif *, dpif_))) { |
4097 | 0 | return; |
4098 | 0 | } |
4099 | | |
4100 | 0 | struct ofpbuf buf, *msg; |
4101 | 0 | uint64_t stub[1024 / 8]; |
4102 | |
|
4103 | 0 | static const struct nl_policy ovs_meter_features_policy[] = { |
4104 | 0 | [OVS_METER_ATTR_MAX_METERS] = { .type = NL_A_U32 }, |
4105 | 0 | [OVS_METER_ATTR_MAX_BANDS] = { .type = NL_A_U32 }, |
4106 | 0 | [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true }, |
4107 | 0 | }; |
4108 | 0 | struct nlattr *a[ARRAY_SIZE(ovs_meter_features_policy)]; |
4109 | |
|
4110 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
4111 | 0 | dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, |
4112 | 0 | OVS_METER_CMD_FEATURES); |
4113 | 0 | if (dpif_netlink_meter_transact(&buf, &msg, ovs_meter_features_policy, a, |
4114 | 0 | ARRAY_SIZE(ovs_meter_features_policy))) { |
4115 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
4116 | 0 | VLOG_INFO_RL(&rl, |
4117 | 0 | "dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed"); |
4118 | 0 | return; |
4119 | 0 | } |
4120 | | |
4121 | 0 | features->max_meters = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_METERS]); |
4122 | 0 | features->max_bands = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_BANDS]); |
4123 | | |
4124 | | /* Bands is a nested attribute of zero or more nested |
4125 | | * band attributes. */ |
4126 | 0 | if (a[OVS_METER_ATTR_BANDS]) { |
4127 | 0 | const struct nlattr *nla; |
4128 | 0 | size_t left; |
4129 | |
|
4130 | 0 | NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) { |
4131 | 0 | const struct nlattr *band_nla; |
4132 | 0 | size_t band_left; |
4133 | |
|
4134 | 0 | NL_NESTED_FOR_EACH (band_nla, band_left, nla) { |
4135 | 0 | if (nl_attr_type(band_nla) == OVS_BAND_ATTR_TYPE) { |
4136 | 0 | if (nl_attr_get_size(band_nla) == sizeof(uint32_t)) { |
4137 | 0 | switch (nl_attr_get_u32(band_nla)) { |
4138 | 0 | case OVS_METER_BAND_TYPE_DROP: |
4139 | 0 | features->band_types |= 1 << OFPMBT13_DROP; |
4140 | 0 | break; |
4141 | 0 | } |
4142 | 0 | } |
4143 | 0 | } |
4144 | 0 | } |
4145 | 0 | } |
4146 | 0 | } |
4147 | 0 | features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK; |
4148 | |
|
4149 | 0 | ofpbuf_delete(msg); |
4150 | 0 | } |
4151 | | |
4152 | | static int |
4153 | | dpif_netlink_meter_set__(struct dpif *dpif_, ofproto_meter_id meter_id, |
4154 | | struct ofputil_meter_config *config) |
4155 | 0 | { |
4156 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
4157 | 0 | struct ofpbuf buf, *msg; |
4158 | 0 | uint64_t stub[1024 / 8]; |
4159 | |
|
4160 | 0 | static const struct nl_policy ovs_meter_set_response_policy[] = { |
4161 | 0 | [OVS_METER_ATTR_ID] = { .type = NL_A_U32 }, |
4162 | 0 | }; |
4163 | 0 | struct nlattr *a[ARRAY_SIZE(ovs_meter_set_response_policy)]; |
4164 | |
|
4165 | 0 | if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK) { |
4166 | 0 | return EBADF; /* Unsupported flags set */ |
4167 | 0 | } |
4168 | | |
4169 | 0 | for (size_t i = 0; i < config->n_bands; i++) { |
4170 | 0 | switch (config->bands[i].type) { |
4171 | 0 | case OFPMBT13_DROP: |
4172 | 0 | break; |
4173 | 0 | default: |
4174 | 0 | return ENODEV; /* Unsupported band type */ |
4175 | 0 | } |
4176 | 0 | } |
4177 | | |
4178 | 0 | dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, OVS_METER_CMD_SET); |
4179 | |
|
4180 | 0 | nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32); |
4181 | |
|
4182 | 0 | if (config->flags & OFPMF13_KBPS) { |
4183 | 0 | nl_msg_put_flag(&buf, OVS_METER_ATTR_KBPS); |
4184 | 0 | } |
4185 | |
|
4186 | 0 | size_t bands_offset = nl_msg_start_nested(&buf, OVS_METER_ATTR_BANDS); |
4187 | | /* Bands */ |
4188 | 0 | for (size_t i = 0; i < config->n_bands; ++i) { |
4189 | 0 | struct ofputil_meter_band * band = &config->bands[i]; |
4190 | 0 | uint32_t band_type; |
4191 | |
|
4192 | 0 | size_t band_offset = nl_msg_start_nested(&buf, OVS_BAND_ATTR_UNSPEC); |
4193 | |
|
4194 | 0 | switch (band->type) { |
4195 | 0 | case OFPMBT13_DROP: |
4196 | 0 | band_type = OVS_METER_BAND_TYPE_DROP; |
4197 | 0 | break; |
4198 | 0 | default: |
4199 | 0 | band_type = OVS_METER_BAND_TYPE_UNSPEC; |
4200 | 0 | } |
4201 | 0 | nl_msg_put_u32(&buf, OVS_BAND_ATTR_TYPE, band_type); |
4202 | 0 | nl_msg_put_u32(&buf, OVS_BAND_ATTR_RATE, band->rate); |
4203 | 0 | nl_msg_put_u32(&buf, OVS_BAND_ATTR_BURST, |
4204 | 0 | config->flags & OFPMF13_BURST ? |
4205 | 0 | band->burst_size : band->rate); |
4206 | 0 | nl_msg_end_nested(&buf, band_offset); |
4207 | 0 | } |
4208 | 0 | nl_msg_end_nested(&buf, bands_offset); |
4209 | |
|
4210 | 0 | int error = dpif_netlink_meter_transact(&buf, &msg, |
4211 | 0 | ovs_meter_set_response_policy, a, |
4212 | 0 | ARRAY_SIZE(ovs_meter_set_response_policy)); |
4213 | 0 | if (error) { |
4214 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
4215 | 0 | VLOG_INFO_RL(&rl, |
4216 | 0 | "dpif_netlink_meter_transact OVS_METER_CMD_SET failed"); |
4217 | 0 | return error; |
4218 | 0 | } |
4219 | | |
4220 | 0 | if (nl_attr_get_u32(a[OVS_METER_ATTR_ID]) != meter_id.uint32) { |
4221 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
4222 | 0 | VLOG_INFO_RL(&rl, |
4223 | 0 | "Kernel returned a different meter id than requested"); |
4224 | 0 | } |
4225 | 0 | ofpbuf_delete(msg); |
4226 | 0 | return 0; |
4227 | 0 | } |
4228 | | |
4229 | | static int |
4230 | | dpif_netlink_meter_set(struct dpif *dpif_, ofproto_meter_id meter_id, |
4231 | | struct ofputil_meter_config *config) |
4232 | 0 | { |
4233 | 0 | int err; |
4234 | |
|
4235 | 0 | if (probe_broken_meters(dpif_)) { |
4236 | 0 | return ENOMEM; |
4237 | 0 | } |
4238 | | |
4239 | 0 | err = dpif_netlink_meter_set__(dpif_, meter_id, config); |
4240 | 0 | if (!err && netdev_is_flow_api_enabled()) { |
4241 | 0 | meter_offload_set(meter_id, config); |
4242 | 0 | } |
4243 | |
|
4244 | 0 | return err; |
4245 | 0 | } |
4246 | | |
4247 | | /* Retrieve statistics and/or delete meter 'meter_id'. Statistics are |
4248 | | * stored in 'stats', if it is not null. If 'command' is |
4249 | | * OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally |
4250 | | * retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are |
4251 | | * simply retrieved. */ |
4252 | | static int |
4253 | | dpif_netlink_meter_get_stats(const struct dpif *dpif_, |
4254 | | ofproto_meter_id meter_id, |
4255 | | struct ofputil_meter_stats *stats, |
4256 | | uint16_t max_bands, |
4257 | | enum ovs_meter_cmd command) |
4258 | 0 | { |
4259 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
4260 | 0 | struct ofpbuf buf, *msg; |
4261 | 0 | uint64_t stub[1024 / 8]; |
4262 | |
|
4263 | 0 | static const struct nl_policy ovs_meter_stats_policy[] = { |
4264 | 0 | [OVS_METER_ATTR_ID] = { .type = NL_A_U32, .optional = true}, |
4265 | 0 | [OVS_METER_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats), |
4266 | 0 | .optional = true}, |
4267 | 0 | [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true }, |
4268 | 0 | }; |
4269 | 0 | struct nlattr *a[ARRAY_SIZE(ovs_meter_stats_policy)]; |
4270 | |
|
4271 | 0 | dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, command); |
4272 | |
|
4273 | 0 | nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32); |
4274 | |
|
4275 | 0 | int error = dpif_netlink_meter_transact(&buf, &msg, |
4276 | 0 | ovs_meter_stats_policy, a, |
4277 | 0 | ARRAY_SIZE(ovs_meter_stats_policy)); |
4278 | 0 | if (error) { |
4279 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
4280 | 0 | VLOG_RL(&rl, error == ENOENT ? VLL_DBG : VLL_WARN, |
4281 | 0 | "dpif_netlink_meter_transact %s failed: %s", |
4282 | 0 | command == OVS_METER_CMD_GET ? "get" : "del", |
4283 | 0 | ovs_strerror(error)); |
4284 | 0 | return error; |
4285 | 0 | } |
4286 | | |
4287 | 0 | if (a[OVS_METER_ATTR_ID] |
4288 | 0 | && nl_attr_get_u32(a[OVS_METER_ATTR_ID]) != meter_id.uint32) { |
4289 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
4290 | 0 | VLOG_INFO_RL(&rl, |
4291 | 0 | "Kernel returned a different meter id than requested"); |
4292 | 0 | ofpbuf_delete(msg); |
4293 | 0 | return EINVAL; |
4294 | 0 | } |
4295 | | |
4296 | 0 | if (stats && a[OVS_METER_ATTR_STATS]) { |
4297 | | /* return stats */ |
4298 | 0 | const struct ovs_flow_stats *stat; |
4299 | 0 | const struct nlattr *nla; |
4300 | 0 | size_t left; |
4301 | |
|
4302 | 0 | stat = nl_attr_get(a[OVS_METER_ATTR_STATS]); |
4303 | 0 | stats->packet_in_count = get_32aligned_u64(&stat->n_packets); |
4304 | 0 | stats->byte_in_count = get_32aligned_u64(&stat->n_bytes); |
4305 | |
|
4306 | 0 | if (a[OVS_METER_ATTR_BANDS]) { |
4307 | 0 | size_t n_bands = 0; |
4308 | 0 | NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) { |
4309 | 0 | const struct nlattr *band_nla; |
4310 | 0 | band_nla = nl_attr_find_nested(nla, OVS_BAND_ATTR_STATS); |
4311 | 0 | if (band_nla && nl_attr_get_size(band_nla) \ |
4312 | 0 | == sizeof(struct ovs_flow_stats)) { |
4313 | 0 | stat = nl_attr_get(band_nla); |
4314 | |
|
4315 | 0 | if (n_bands < max_bands) { |
4316 | 0 | stats->bands[n_bands].packet_count |
4317 | 0 | = get_32aligned_u64(&stat->n_packets); |
4318 | 0 | stats->bands[n_bands].byte_count |
4319 | 0 | = get_32aligned_u64(&stat->n_bytes); |
4320 | 0 | ++n_bands; |
4321 | 0 | } |
4322 | 0 | } else { |
4323 | 0 | stats->bands[n_bands].packet_count = 0; |
4324 | 0 | stats->bands[n_bands].byte_count = 0; |
4325 | 0 | ++n_bands; |
4326 | 0 | } |
4327 | 0 | } |
4328 | 0 | stats->n_bands = n_bands; |
4329 | 0 | } else { |
4330 | | /* For a non-existent meter, return 0 stats. */ |
4331 | 0 | stats->n_bands = 0; |
4332 | 0 | } |
4333 | 0 | } |
4334 | |
|
4335 | 0 | ofpbuf_delete(msg); |
4336 | 0 | return error; |
4337 | 0 | } |
4338 | | |
4339 | | static int |
4340 | | dpif_netlink_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id, |
4341 | | struct ofputil_meter_stats *stats, uint16_t max_bands) |
4342 | 0 | { |
4343 | 0 | int err; |
4344 | |
|
4345 | 0 | err = dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands, |
4346 | 0 | OVS_METER_CMD_GET); |
4347 | 0 | if (!err && netdev_is_flow_api_enabled()) { |
4348 | 0 | meter_offload_get(meter_id, stats); |
4349 | 0 | } |
4350 | |
|
4351 | 0 | return err; |
4352 | 0 | } |
4353 | | |
4354 | | static int |
4355 | | dpif_netlink_meter_del(struct dpif *dpif, ofproto_meter_id meter_id, |
4356 | | struct ofputil_meter_stats *stats, uint16_t max_bands) |
4357 | 0 | { |
4358 | 0 | int err; |
4359 | |
|
4360 | 0 | err = dpif_netlink_meter_get_stats(dpif, meter_id, stats, |
4361 | 0 | max_bands, OVS_METER_CMD_DEL); |
4362 | 0 | if (!err && netdev_is_flow_api_enabled()) { |
4363 | 0 | meter_offload_del(meter_id, stats); |
4364 | 0 | } |
4365 | |
|
4366 | 0 | return err; |
4367 | 0 | } |
4368 | | |
/* Actively probes whether the kernel datapath's meter implementation is
 * broken (meters can be created but cannot be looked up afterwards).
 * Returns true if the implementation appears broken, false otherwise.
 * On a healthy kernel, the two probe meters are deleted again before
 * returning; on a broken one they are deliberately left in place. */
static bool
probe_broken_meters__(struct dpif *dpif)
{
    /* This test is destructive if a probe occurs while ovs-vswitchd is
     * running (e.g., an ovs-dpctl meter command is called), so choose a
     * high meter id to make this less likely to occur.
     *
     * In Linux kernel v5.10+ meters are stored in a table that is not
     * a real hash table. It's just an array with 'meter_id % size' used
     * as an index. The numbers are chosen to fit into the minimal table
     * size (1024) without wrapping, so these IDs are guaranteed to be
     * found under normal conditions in the meter table, if such meters
     * exist. It's possible to break this check by creating some meters
     * in the kernel manually with different IDs that map onto the same
     * indexes, but that should not be a big problem since ovs-vswitchd
     * always allocates densely packed meter IDs with an id-pool.
     *
     * These IDs will also work in cases where the table in the kernel is
     * a proper hash table. */
    ofproto_meter_id id1 = { 1021 };
    ofproto_meter_id id2 = { 1022 };
    struct ofputil_meter_band band = {OFPMBT13_DROP, 0, 1, 0};
    struct ofputil_meter_config config1 = { 1, OFPMF13_KBPS, 1, &band};
    struct ofputil_meter_config config2 = { 2, OFPMF13_KBPS, 1, &band};

    /* First check if these meters are already in the kernel. If we get
     * a proper response from the kernel with all the good meter IDs, then
     * meters are likely supported correctly. */
    if (!dpif_netlink_meter_get(dpif, id1, NULL, 0)
        || !dpif_netlink_meter_get(dpif, id2, NULL, 0)) {
        return false;
    }

    /* Try adding two meters and make sure that they both come back with
     * the proper meter id. Use the "__" version so that we don't cause
     * a recursive deadlock. */
    dpif_netlink_meter_set__(dpif, id1, &config1);
    dpif_netlink_meter_set__(dpif, id2, &config2);

    /* A broken kernel created the meters but cannot return them. */
    if (dpif_netlink_meter_get(dpif, id1, NULL, 0)
        || dpif_netlink_meter_get(dpif, id2, NULL, 0)) {
        VLOG_INFO("The kernel module has a broken meter implementation.");
        return true;
    }

    dpif_netlink_meter_del(dpif, id1, NULL, 0);
    dpif_netlink_meter_del(dpif, id2, NULL, 0);

    return false;
}
4419 | | |
/* Returns true if the kernel datapath's meter implementation is broken,
 * running probe_broken_meters__() at most once per process and caching
 * the result for subsequent calls. */
static bool
probe_broken_meters(struct dpif *dpif)
{
    /* This is a once-only test because currently OVS only has at most a single
     * Netlink capable datapath on any given platform. */
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* Written only inside the once-block; read-only afterwards. */
    static bool broken_meters = false;
    if (ovsthread_once_start(&once)) {
        broken_meters = probe_broken_meters__(dpif);
        ovsthread_once_done(&once);
    }
    return broken_meters;
}
4434 | | |
4435 | | |
4436 | | static int |
4437 | | dpif_netlink_cache_get_supported_levels(struct dpif *dpif_, uint32_t *levels) |
4438 | 0 | { |
4439 | 0 | struct dpif_netlink_dp dp; |
4440 | 0 | struct ofpbuf *buf; |
4441 | 0 | int error; |
4442 | | |
4443 | | /* If available, in the kernel we support one level of cache. |
4444 | | * Unfortunately, there is no way to detect if the older kernel module has |
4445 | | * the cache feature. For now, we only report the cache information if the |
4446 | | * kernel module reports the OVS_DP_ATTR_MASKS_CACHE_SIZE attribute. */ |
4447 | |
|
4448 | 0 | *levels = 0; |
4449 | 0 | error = dpif_netlink_dp_get(dpif_, &dp, &buf); |
4450 | 0 | if (!error) { |
4451 | |
|
4452 | 0 | if (dp.cache_size != UINT32_MAX) { |
4453 | 0 | *levels = 1; |
4454 | 0 | } |
4455 | 0 | ofpbuf_delete(buf); |
4456 | 0 | } |
4457 | |
|
4458 | 0 | return error; |
4459 | 0 | } |
4460 | | |
4461 | | static int |
4462 | | dpif_netlink_cache_get_name(struct dpif *dpif_ OVS_UNUSED, uint32_t level, |
4463 | | const char **name) |
4464 | 0 | { |
4465 | 0 | if (level != 0) { |
4466 | 0 | return EINVAL; |
4467 | 0 | } |
4468 | | |
4469 | 0 | *name = "masks-cache"; |
4470 | 0 | return 0; |
4471 | 0 | } |
4472 | | |
4473 | | static int |
4474 | | dpif_netlink_cache_get_size(struct dpif *dpif_, uint32_t level, uint32_t *size) |
4475 | 0 | { |
4476 | 0 | struct dpif_netlink_dp dp; |
4477 | 0 | struct ofpbuf *buf; |
4478 | 0 | int error; |
4479 | |
|
4480 | 0 | if (level != 0) { |
4481 | 0 | return EINVAL; |
4482 | 0 | } |
4483 | | |
4484 | 0 | error = dpif_netlink_dp_get(dpif_, &dp, &buf); |
4485 | 0 | if (!error) { |
4486 | |
|
4487 | 0 | ofpbuf_delete(buf); |
4488 | |
|
4489 | 0 | if (dp.cache_size == UINT32_MAX) { |
4490 | 0 | return EOPNOTSUPP; |
4491 | 0 | } |
4492 | 0 | *size = dp.cache_size; |
4493 | 0 | } |
4494 | 0 | return error; |
4495 | 0 | } |
4496 | | |
4497 | | static int |
4498 | | dpif_netlink_cache_set_size(struct dpif *dpif_, uint32_t level, uint32_t size) |
4499 | 0 | { |
4500 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
4501 | 0 | struct dpif_netlink_dp request, reply; |
4502 | 0 | struct ofpbuf *bufp; |
4503 | 0 | int error; |
4504 | |
|
4505 | 0 | size = ROUND_UP_POW2(size); |
4506 | |
|
4507 | 0 | if (level != 0) { |
4508 | 0 | return EINVAL; |
4509 | 0 | } |
4510 | | |
4511 | 0 | dpif_netlink_dp_init(&request); |
4512 | 0 | request.cmd = OVS_DP_CMD_SET; |
4513 | 0 | request.name = dpif_->base_name; |
4514 | 0 | request.dp_ifindex = dpif->dp_ifindex; |
4515 | 0 | request.cache_size = size; |
4516 | | /* We need to set the dpif user_features, as the kernel module assumes the |
4517 | | * OVS_DP_ATTR_USER_FEATURES attribute is always present. If not, it will |
4518 | | * reset all the features. */ |
4519 | 0 | request.user_features = dpif->user_features; |
4520 | |
|
4521 | 0 | error = dpif_netlink_dp_transact(&request, &reply, &bufp); |
4522 | 0 | if (!error) { |
4523 | 0 | ofpbuf_delete(bufp); |
4524 | 0 | if (reply.cache_size != size) { |
4525 | 0 | return EINVAL; |
4526 | 0 | } |
4527 | 0 | } |
4528 | | |
4529 | 0 | return error; |
4530 | 0 | } |
4531 | | |
4532 | | |
/* Datapath interface backed by the Linux kernel openvswitch module,
 * registered as "system".  Entries are positional per 'struct dpif_class';
 * NULL marks a callback this implementation does not provide. */
const struct dpif_class dpif_netlink_class = {
    "system",
    false,                      /* cleanup_required */
    false,                      /* synced_dp_layers */
    NULL,                       /* init */
    dpif_netlink_enumerate,
    NULL,
    dpif_netlink_open,
    dpif_netlink_close,
    dpif_netlink_destroy,
    dpif_netlink_run,
    NULL,                       /* wait */
    dpif_netlink_get_stats,
    dpif_netlink_set_features,
    dpif_netlink_port_add,
    dpif_netlink_port_del,
    NULL,                       /* port_set_config */
    dpif_netlink_port_query_by_number,
    dpif_netlink_port_query_by_name,
    dpif_netlink_port_get_pid,
    dpif_netlink_port_dump_start,
    dpif_netlink_port_dump_next,
    dpif_netlink_port_dump_done,
    dpif_netlink_port_poll,
    dpif_netlink_port_poll_wait,
    dpif_netlink_flow_flush,
    dpif_netlink_flow_dump_create,
    dpif_netlink_flow_dump_destroy,
    dpif_netlink_flow_dump_thread_create,
    dpif_netlink_flow_dump_thread_destroy,
    dpif_netlink_flow_dump_next,
    dpif_netlink_operate,
    NULL,                       /* offload_stats_get */
    dpif_netlink_recv_set,
    dpif_netlink_handlers_set,
    dpif_netlink_number_handlers_required,
    NULL,                       /* set_config */
    dpif_netlink_queue_to_priority,
    dpif_netlink_recv,
    dpif_netlink_recv_wait,
    dpif_netlink_recv_purge,
    NULL,                       /* register_dp_purge_cb */
    NULL,                       /* register_upcall_cb */
    NULL,                       /* enable_upcall */
    NULL,                       /* disable_upcall */
    dpif_netlink_get_datapath_version, /* get_datapath_version */
    dpif_netlink_ct_dump_start,
    dpif_netlink_ct_dump_next,
    dpif_netlink_ct_dump_done,
    NULL,                       /* ct_exp_dump_start */
    NULL,                       /* ct_exp_dump_next */
    NULL,                       /* ct_exp_dump_done */
    dpif_netlink_ct_flush,
    NULL,                       /* ct_set_maxconns */
    NULL,                       /* ct_get_maxconns */
    NULL,                       /* ct_get_nconns */
    NULL,                       /* ct_set_tcp_seq_chk */
    NULL,                       /* ct_get_tcp_seq_chk */
    NULL,                       /* ct_set_sweep_interval */
    NULL,                       /* ct_get_sweep_interval */
    dpif_netlink_ct_set_limits,
    dpif_netlink_ct_get_limits,
    dpif_netlink_ct_del_limits,
    dpif_netlink_ct_set_timeout_policy,
    dpif_netlink_ct_get_timeout_policy,
    dpif_netlink_ct_del_timeout_policy,
    dpif_netlink_ct_timeout_policy_dump_start,
    dpif_netlink_ct_timeout_policy_dump_next,
    dpif_netlink_ct_timeout_policy_dump_done,
    dpif_netlink_ct_get_timeout_policy_name,
    dpif_netlink_ct_get_features,
    NULL,                       /* ipf_set_enabled */
    NULL,                       /* ipf_set_min_frag */
    NULL,                       /* ipf_set_max_nfrags */
    NULL,                       /* ipf_get_status */
    NULL,                       /* ipf_dump_start */
    NULL,                       /* ipf_dump_next */
    NULL,                       /* ipf_dump_done */
    dpif_netlink_meter_get_features,
    dpif_netlink_meter_set,
    dpif_netlink_meter_get,
    dpif_netlink_meter_del,
    NULL,                       /* bond_add */
    NULL,                       /* bond_del */
    NULL,                       /* bond_stats_get */
    dpif_netlink_cache_get_supported_levels,
    dpif_netlink_cache_get_name,
    dpif_netlink_cache_get_size,
    dpif_netlink_cache_set_size,
};
4623 | | |
4624 | | static int |
4625 | | dpif_netlink_init(void) |
4626 | 0 | { |
4627 | 0 | static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; |
4628 | 0 | static int error; |
4629 | |
|
4630 | 0 | if (ovsthread_once_start(&once)) { |
4631 | 0 | error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY, |
4632 | 0 | &ovs_datapath_family); |
4633 | 0 | if (error) { |
4634 | 0 | VLOG_INFO("Generic Netlink family '%s' does not exist. " |
4635 | 0 | "The Open vSwitch kernel module is probably not loaded.", |
4636 | 0 | OVS_DATAPATH_FAMILY); |
4637 | 0 | } |
4638 | 0 | if (!error) { |
4639 | 0 | error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family); |
4640 | 0 | } |
4641 | 0 | if (!error) { |
4642 | 0 | error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family); |
4643 | 0 | } |
4644 | 0 | if (!error) { |
4645 | 0 | error = nl_lookup_genl_family(OVS_PACKET_FAMILY, |
4646 | 0 | &ovs_packet_family); |
4647 | 0 | } |
4648 | 0 | if (!error) { |
4649 | 0 | error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP, |
4650 | 0 | &ovs_vport_mcgroup); |
4651 | 0 | } |
4652 | 0 | if (!error) { |
4653 | 0 | if (nl_lookup_genl_family(OVS_METER_FAMILY, &ovs_meter_family)) { |
4654 | 0 | VLOG_INFO("The kernel module does not support meters."); |
4655 | 0 | } |
4656 | 0 | } |
4657 | 0 | if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY, |
4658 | 0 | &ovs_ct_limit_family) < 0) { |
4659 | 0 | VLOG_INFO("Generic Netlink family '%s' does not exist. " |
4660 | 0 | "Please update the Open vSwitch kernel module to enable " |
4661 | 0 | "the conntrack limit feature.", OVS_CT_LIMIT_FAMILY); |
4662 | 0 | } |
4663 | |
|
4664 | 0 | ovs_tunnels_out_of_tree = dpif_netlink_rtnl_probe_oot_tunnels(); |
4665 | |
|
4666 | 0 | unixctl_command_register("dpif-netlink/dispatch-mode", "", 0, 0, |
4667 | 0 | dpif_netlink_unixctl_dispatch_mode, NULL); |
4668 | |
|
4669 | 0 | ovsthread_once_done(&once); |
4670 | 0 | } |
4671 | |
|
4672 | 0 | return error; |
4673 | 0 | } |
4674 | | |
/* Returns true if the kernel vport named 'name' exists and is of type
 * OVS_VPORT_TYPE_INTERNAL, false otherwise (including when no such vport
 * exists or the query fails). */
bool
dpif_netlink_is_internal_device(const char *name)
{
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    error = dpif_netlink_vport_get(name, &reply, &buf);
    if (!error) {
        ofpbuf_delete(buf);
    } else if (error != ENODEV && error != ENOENT) {
        /* ENODEV/ENOENT just mean "no such device"; anything else is
         * unexpected, so log it (rate-limited). */
        VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
                     name, ovs_strerror(error));
    }

    /* On failure, dpif_netlink_vport_transact() re-initializes 'reply' to
     * all-zeros, so 'reply.type' is not OVS_VPORT_TYPE_INTERNAL and this
     * correctly returns false. */
    return reply.type == OVS_VPORT_TYPE_INTERNAL;
}
4692 | | |
4693 | | /* Parses the contents of 'buf', which contains a "struct ovs_header" followed |
4694 | | * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a |
4695 | | * positive errno value. |
4696 | | * |
4697 | | * 'vport' will contain pointers into 'buf', so the caller should not free |
4698 | | * 'buf' while 'vport' is still in use. */ |
static int
dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
                               const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_vport_policy[] = {
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
        [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
                                   .optional = true },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_VPORT_ATTR_NETNSID] = { .type = NL_A_U32, .optional = true },
        [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NL_A_NESTED,
                                          .optional = true },
    };

    dpif_netlink_vport_init(vport);

    /* Peel off the fixed-size Netlink, Generic Netlink, and OVS headers
     * before parsing the attributes that follow. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_vport_family
        || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
                            ARRAY_SIZE(ovs_vport_policy))) {
        return EINVAL;
    }

    vport->cmd = genl->cmd;
    vport->dp_ifindex = ovs_header->dp_ifindex;
    vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
    vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
    vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
    if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
        /* The attribute payload is a flat array of PIDs; derive the count
         * from the attribute size. */
        vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
                               / (sizeof *vport->upcall_pids);
        vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);

    }
    if (a[OVS_VPORT_ATTR_STATS]) {
        vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
    }
    if (a[OVS_VPORT_ATTR_UPCALL_STATS]) {
        const struct nlattr *nla;
        size_t left;

        NL_NESTED_FOR_EACH (nla, left, a[OVS_VPORT_ATTR_UPCALL_STATS]) {
            if (nl_attr_type(nla) == OVS_VPORT_UPCALL_ATTR_SUCCESS) {
                vport->upcall_success = nl_attr_get_u64(nla);
            } else if (nl_attr_type(nla) == OVS_VPORT_UPCALL_ATTR_FAIL) {
                vport->upcall_fail = nl_attr_get_u64(nla);
            }
        }
    } else {
        /* UINT64_MAX marks "kernel did not report upcall statistics". */
        vport->upcall_success = UINT64_MAX;
        vport->upcall_fail = UINT64_MAX;
    }
    if (a[OVS_VPORT_ATTR_OPTIONS]) {
        vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
        vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
    }
    if (a[OVS_VPORT_ATTR_NETNSID]) {
        netnsid_set(&vport->netnsid,
                    nl_attr_get_u32(a[OVS_VPORT_ATTR_NETNSID]));
    } else {
        /* No netnsid attribute means the vport is in the local netns. */
        netnsid_set_local(&vport->netnsid);
    }
    return 0;
}
4772 | | |
4773 | | /* Appends to 'buf' (which must initially be empty) a "struct ovs_header" |
4774 | | * followed by Netlink attributes corresponding to 'vport'. */ |
static void
dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
                             struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
                          vport->cmd, OVS_VPORT_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = vport->dp_ifindex;

    /* Each attribute below is emitted only when 'vport' carries a value
     * for it; fields left at their "empty" defaults are omitted. */
    if (vport->port_no != ODPP_NONE) {
        nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
    }

    if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
        nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
    }

    if (vport->name) {
        nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
    }

    if (vport->upcall_pids) {
        /* PIDs are serialized as a flat array; the receiver recovers the
         * count from the attribute size. */
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
                          vport->upcall_pids,
                          vport->n_upcall_pids * sizeof *vport->upcall_pids);
    }

    if (vport->stats) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
                          vport->stats, sizeof *vport->stats);
    }

    if (vport->options) {
        nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
                          vport->options, vport->options_len);
    }
}
4815 | | |
/* Clears 'vport' to "empty" values: all zeros, except that 'port_no' is set
 * to the ODPP_NONE sentinel so that serialization knows no port number was
 * supplied. */
void
dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
{
    memset(vport, 0, sizeof *vport);
    vport->port_no = ODPP_NONE;
}
4823 | | |
4824 | | /* Executes 'request' in the kernel datapath. If the command fails, returns a |
4825 | | * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0 |
4826 | | * without doing anything else. If 'reply' and 'bufp' are nonnull, then the |
4827 | | * result of the command is expected to be an ovs_vport also, which is decoded |
4828 | | * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the |
4829 | | * reply is no longer needed ('reply' will contain pointers into '*bufp'). */ |
4830 | | int |
4831 | | dpif_netlink_vport_transact(const struct dpif_netlink_vport *request, |
4832 | | struct dpif_netlink_vport *reply, |
4833 | | struct ofpbuf **bufp) |
4834 | 0 | { |
4835 | 0 | struct ofpbuf *request_buf; |
4836 | 0 | int error; |
4837 | |
|
4838 | 0 | ovs_assert((reply != NULL) == (bufp != NULL)); |
4839 | |
|
4840 | 0 | error = dpif_netlink_init(); |
4841 | 0 | if (error) { |
4842 | 0 | if (reply) { |
4843 | 0 | *bufp = NULL; |
4844 | 0 | dpif_netlink_vport_init(reply); |
4845 | 0 | } |
4846 | 0 | return error; |
4847 | 0 | } |
4848 | | |
4849 | 0 | request_buf = ofpbuf_new(1024); |
4850 | 0 | dpif_netlink_vport_to_ofpbuf(request, request_buf); |
4851 | 0 | error = nl_transact(NETLINK_GENERIC, request_buf, bufp); |
4852 | 0 | ofpbuf_delete(request_buf); |
4853 | |
|
4854 | 0 | if (reply) { |
4855 | 0 | if (!error) { |
4856 | 0 | error = dpif_netlink_vport_from_ofpbuf(reply, *bufp); |
4857 | 0 | } |
4858 | 0 | if (error) { |
4859 | 0 | dpif_netlink_vport_init(reply); |
4860 | 0 | ofpbuf_delete(*bufp); |
4861 | 0 | *bufp = NULL; |
4862 | 0 | } |
4863 | 0 | } |
4864 | 0 | return error; |
4865 | 0 | } |
4866 | | |
4867 | | /* Obtains information about the kernel vport named 'name' and stores it into |
4868 | | * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no |
4869 | | * longer needed ('reply' will contain pointers into '*bufp'). */ |
4870 | | int |
4871 | | dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply, |
4872 | | struct ofpbuf **bufp) |
4873 | 0 | { |
4874 | 0 | struct dpif_netlink_vport request; |
4875 | |
|
4876 | 0 | dpif_netlink_vport_init(&request); |
4877 | 0 | request.cmd = OVS_VPORT_CMD_GET; |
4878 | 0 | request.name = name; |
4879 | |
|
4880 | 0 | return dpif_netlink_vport_transact(&request, reply, bufp); |
4881 | 0 | } |
4882 | | |
4883 | | /* Parses the contents of 'buf', which contains a "struct ovs_header" followed |
4884 | | * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a |
4885 | | * positive errno value. |
4886 | | * |
4887 | | * 'dp' will contain pointers into 'buf', so the caller should not free 'buf' |
4888 | | * while 'dp' is still in use. */ |
static int
dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_datapath_policy[] = {
        [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
                                .optional = true },
        [OVS_DP_ATTR_MEGAFLOW_STATS] = {
                        NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
                        .optional = true },
        [OVS_DP_ATTR_USER_FEATURES] = {
                        .type = NL_A_U32,
                        .optional = true },
        [OVS_DP_ATTR_MASKS_CACHE_SIZE] = {
                        .type = NL_A_U32,
                        .optional = true },
    };

    dpif_netlink_dp_init(dp);

    /* Peel off the fixed-size Netlink, Generic Netlink, and OVS headers
     * before parsing the attributes that follow. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_datapath_family
        || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
                            ARRAY_SIZE(ovs_datapath_policy))) {
        return EINVAL;
    }

    dp->cmd = genl->cmd;
    dp->dp_ifindex = ovs_header->dp_ifindex;
    dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
    if (a[OVS_DP_ATTR_STATS]) {
        dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
    }

    if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
        dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
    }

    if (a[OVS_DP_ATTR_USER_FEATURES]) {
        dp->user_features = nl_attr_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
    }

    if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
        dp->cache_size = nl_attr_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
    } else {
        /* UINT32_MAX is the sentinel for "kernel did not report a
         * masks-cache size", i.e. the feature is unavailable. */
        dp->cache_size = UINT32_MAX;
    }

    return 0;
}
4945 | | |
4946 | | /* Appends to 'buf' the Generic Netlink message described by 'dp'. */ |
static void
dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
                          NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
                          OVS_DATAPATH_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = dp->dp_ifindex;

    /* Optional attributes below are emitted only when 'dp' carries a
     * value for them. */
    if (dp->name) {
        nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
    }

    if (dp->upcall_pid) {
        nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
    }

    /* Note: a zero feature set is omitted entirely rather than sent as 0. */
    if (dp->user_features) {
        nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
    }

    if (dp->upcall_pids) {
        nl_msg_put_unspec(buf, OVS_DP_ATTR_PER_CPU_PIDS, dp->upcall_pids,
                          sizeof *dp->upcall_pids * dp->n_upcall_pids);
    }

    /* UINT32_MAX is the "not set" sentinel from dpif_netlink_dp_init(). */
    if (dp->cache_size != UINT32_MAX) {
        nl_msg_put_u32(buf, OVS_DP_ATTR_MASKS_CACHE_SIZE, dp->cache_size);
    }

    /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
}
4982 | | |
/* Clears 'dp' to "empty" values.  'cache_size' is set to UINT32_MAX, the
 * sentinel meaning "not present", so it is omitted during serialization. */
static void
dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
{
    memset(dp, 0, sizeof *dp);
    dp->cache_size = UINT32_MAX;
}
4990 | | |
4991 | | static void |
4992 | | dpif_netlink_dp_dump_start(struct nl_dump *dump) |
4993 | 0 | { |
4994 | 0 | struct dpif_netlink_dp request; |
4995 | 0 | struct ofpbuf *buf; |
4996 | |
|
4997 | 0 | dpif_netlink_dp_init(&request); |
4998 | 0 | request.cmd = OVS_DP_CMD_GET; |
4999 | |
|
5000 | 0 | buf = ofpbuf_new(1024); |
5001 | 0 | dpif_netlink_dp_to_ofpbuf(&request, buf); |
5002 | 0 | nl_dump_start(dump, NETLINK_GENERIC, buf); |
5003 | 0 | ofpbuf_delete(buf); |
5004 | 0 | } |
5005 | | |
5006 | | /* Executes 'request' in the kernel datapath. If the command fails, returns a |
5007 | | * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0 |
5008 | | * without doing anything else. If 'reply' and 'bufp' are nonnull, then the |
5009 | | * result of the command is expected to be of the same form, which is decoded |
5010 | | * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the |
5011 | | * reply is no longer needed ('reply' will contain pointers into '*bufp'). */ |
5012 | | static int |
5013 | | dpif_netlink_dp_transact(const struct dpif_netlink_dp *request, |
5014 | | struct dpif_netlink_dp *reply, struct ofpbuf **bufp) |
5015 | 0 | { |
5016 | 0 | struct ofpbuf *request_buf; |
5017 | 0 | int error; |
5018 | |
|
5019 | 0 | ovs_assert((reply != NULL) == (bufp != NULL)); |
5020 | |
|
5021 | 0 | request_buf = ofpbuf_new(1024); |
5022 | 0 | dpif_netlink_dp_to_ofpbuf(request, request_buf); |
5023 | 0 | error = nl_transact(NETLINK_GENERIC, request_buf, bufp); |
5024 | 0 | ofpbuf_delete(request_buf); |
5025 | |
|
5026 | 0 | if (reply) { |
5027 | 0 | dpif_netlink_dp_init(reply); |
5028 | 0 | if (!error) { |
5029 | 0 | error = dpif_netlink_dp_from_ofpbuf(reply, *bufp); |
5030 | 0 | } |
5031 | 0 | if (error) { |
5032 | 0 | ofpbuf_delete(*bufp); |
5033 | 0 | *bufp = NULL; |
5034 | 0 | } |
5035 | 0 | } |
5036 | 0 | return error; |
5037 | 0 | } |
5038 | | |
5039 | | /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'. |
5040 | | * The caller must free '*bufp' when the reply is no longer needed ('reply' |
5041 | | * will contain pointers into '*bufp'). */ |
5042 | | static int |
5043 | | dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply, |
5044 | | struct ofpbuf **bufp) |
5045 | 0 | { |
5046 | 0 | struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); |
5047 | 0 | struct dpif_netlink_dp request; |
5048 | |
|
5049 | 0 | dpif_netlink_dp_init(&request); |
5050 | 0 | request.cmd = OVS_DP_CMD_GET; |
5051 | 0 | request.dp_ifindex = dpif->dp_ifindex; |
5052 | |
|
5053 | 0 | return dpif_netlink_dp_transact(&request, reply, bufp); |
5054 | 0 | } |
5055 | | |
5056 | | /* Parses the contents of 'buf', which contains a "struct ovs_header" followed |
5057 | | * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a |
5058 | | * positive errno value. |
5059 | | * |
5060 | | * 'flow' will contain pointers into 'buf', so the caller should not free 'buf' |
5061 | | * while 'flow' is still in use. */ |
static int
dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
                              const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
                                  .optional = true },
        [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
        [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
        [OVS_FLOW_ATTR_UFID] = { .type = NL_A_U128, .optional = true },
        /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
        /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
        /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
    };

    dpif_netlink_flow_init(flow);

    /* Peel off the fixed-size Netlink, Generic Netlink, and OVS headers
     * before parsing the attributes that follow. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_flow_family
        || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
                            ARRAY_SIZE(ovs_flow_policy))) {
        return EINVAL;
    }
    /* A usable flow must be identified by at least its key or its UFID. */
    if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
        return EINVAL;
    }

    flow->nlmsg_flags = nlmsg->nlmsg_flags;
    flow->dp_ifindex = ovs_header->dp_ifindex;
    if (a[OVS_FLOW_ATTR_KEY]) {
        flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
        flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
    }

    if (a[OVS_FLOW_ATTR_UFID]) {
        flow->ufid = nl_attr_get_u128(a[OVS_FLOW_ATTR_UFID]);
        flow->ufid_present = true;
    }
    if (a[OVS_FLOW_ATTR_MASK]) {
        flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
        flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
    }
    if (a[OVS_FLOW_ATTR_ACTIONS]) {
        flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
        flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
    }
    if (a[OVS_FLOW_ATTR_STATS]) {
        flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
    }
    if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
        flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
    }
    if (a[OVS_FLOW_ATTR_USED]) {
        flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
    }
    return 0;
}
5128 | | |
5129 | | |
/*
 * Appends 'data' (a sequence of OVS key Netlink attributes, 'data_len' bytes
 * long) to 'buf' as a nested attribute of the given 'type', filtering out any
 * OVS_KEY_ATTR_PACKET_TYPE attribute present in 'data'.  If the flow is not
 * Ethernet (no OVS_KEY_ATTR_ETHERNET attribute), the packet type's namespace
 * type is written as OVS_KEY_ATTR_ETHERTYPE instead, so the kernel still sees
 * a usable Ethertype.
 */
static void
put_exclude_packet_type(struct ofpbuf *buf, uint16_t type,
                        const struct nlattr *data, uint16_t data_len)
{
    const struct nlattr *packet_type;

    packet_type = nl_attr_find__(data, data_len, OVS_KEY_ATTR_PACKET_TYPE);

    if (packet_type) {
        /* exclude PACKET_TYPE Netlink attribute. */
        ovs_assert(NLA_ALIGN(packet_type->nla_len) == NL_A_U32_SIZE);
        size_t packet_type_len = NL_A_U32_SIZE;
        /* Split 'data' into the bytes before the PACKET_TYPE attribute and
         * the bytes after it, and copy the two chunks so that the attribute
         * itself is dropped. */
        size_t first_chunk_size = (uint8_t *)packet_type - (uint8_t *)data;
        size_t second_chunk_size = data_len - first_chunk_size
                                   - packet_type_len;
        struct nlattr *next_attr = nl_attr_next(packet_type);
        size_t ofs;

        ofs = nl_msg_start_nested(buf, type);
        nl_msg_put(buf, data, first_chunk_size);
        nl_msg_put(buf, next_attr, second_chunk_size);
        if (!nl_attr_find__(data, data_len, OVS_KEY_ATTR_ETHERNET)) {
            /* Non-Ethernet flow: carry the packet type's namespace type as
             * the Ethertype instead. */
            ovs_be16 pt = pt_ns_type_be(nl_attr_get_be32(packet_type));
            const struct nlattr *nla;

            /* Search only within the nested attribute just started, i.e.
             * starting past its header at 'ofs'. */
            nla = nl_attr_find(buf, ofs + NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
            if (nla) {
                ovs_be16 *ethertype;

                /* Overwrite the already-copied ETHERTYPE payload in place. */
                ethertype = CONST_CAST(ovs_be16 *, nl_attr_get(nla));
                *ethertype = pt;
            } else {
                nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, pt);
            }
        }
        nl_msg_end_nested(buf, ofs);
    } else {
        /* Nothing to filter: copy 'data' verbatim as a nested attribute. */
        nl_msg_put_unspec(buf, type, data, data_len);
    }
}
5175 | | |
5176 | | /* Appends to 'buf' (which must initially be empty) a "struct ovs_header" |
5177 | | * followed by Netlink attributes corresponding to 'flow'. */ |
5178 | | static void |
5179 | | dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow, |
5180 | | struct ofpbuf *buf) |
5181 | 0 | { |
5182 | 0 | struct ovs_header *ovs_header; |
5183 | |
|
5184 | 0 | nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family, |
5185 | 0 | NLM_F_REQUEST | flow->nlmsg_flags, |
5186 | 0 | flow->cmd, OVS_FLOW_VERSION); |
5187 | |
|
5188 | 0 | ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header); |
5189 | 0 | ovs_header->dp_ifindex = flow->dp_ifindex; |
5190 | |
|
5191 | 0 | if (flow->ufid_present) { |
5192 | 0 | nl_msg_put_u128(buf, OVS_FLOW_ATTR_UFID, flow->ufid); |
5193 | 0 | } |
5194 | 0 | if (flow->ufid_terse) { |
5195 | 0 | nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS, |
5196 | 0 | OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK |
5197 | 0 | | OVS_UFID_F_OMIT_ACTIONS); |
5198 | 0 | } |
5199 | 0 | if (!flow->ufid_terse || !flow->ufid_present) { |
5200 | 0 | if (flow->key_len) { |
5201 | 0 | put_exclude_packet_type(buf, OVS_FLOW_ATTR_KEY, flow->key, |
5202 | 0 | flow->key_len); |
5203 | 0 | } |
5204 | 0 | if (flow->mask_len) { |
5205 | 0 | put_exclude_packet_type(buf, OVS_FLOW_ATTR_MASK, flow->mask, |
5206 | 0 | flow->mask_len); |
5207 | 0 | } |
5208 | 0 | if (flow->actions || flow->actions_len) { |
5209 | 0 | nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS, |
5210 | 0 | flow->actions, flow->actions_len); |
5211 | 0 | } |
5212 | 0 | } |
5213 | | |
5214 | | /* We never need to send these to the kernel. */ |
5215 | 0 | ovs_assert(!flow->stats); |
5216 | 0 | ovs_assert(!flow->tcp_flags); |
5217 | 0 | ovs_assert(!flow->used); |
5218 | |
|
5219 | 0 | if (flow->clear) { |
5220 | 0 | nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR); |
5221 | 0 | } |
5222 | 0 | if (flow->probe) { |
5223 | 0 | nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE); |
5224 | 0 | } |
5225 | 0 | } |
5226 | | |
5227 | | /* Clears 'flow' to "empty" values. */ |
5228 | | static void |
5229 | | dpif_netlink_flow_init(struct dpif_netlink_flow *flow) |
5230 | 0 | { |
5231 | 0 | memset(flow, 0, sizeof *flow); |
5232 | 0 | } |
5233 | | |
5234 | | /* Executes 'request' in the kernel datapath. If the command fails, returns a |
5235 | | * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0 |
5236 | | * without doing anything else. If 'reply' and 'bufp' are nonnull, then the |
5237 | | * result of the command is expected to be a flow also, which is decoded and |
5238 | | * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply |
5239 | | * is no longer needed ('reply' will contain pointers into '*bufp'). */ |
5240 | | static int |
5241 | | dpif_netlink_flow_transact(struct dpif_netlink_flow *request, |
5242 | | struct dpif_netlink_flow *reply, |
5243 | | struct ofpbuf **bufp) |
5244 | 0 | { |
5245 | 0 | struct ofpbuf *request_buf; |
5246 | 0 | int error; |
5247 | |
|
5248 | 0 | ovs_assert((reply != NULL) == (bufp != NULL)); |
5249 | |
|
5250 | 0 | if (reply) { |
5251 | 0 | request->nlmsg_flags |= NLM_F_ECHO; |
5252 | 0 | } |
5253 | |
|
5254 | 0 | request_buf = ofpbuf_new(1024); |
5255 | 0 | dpif_netlink_flow_to_ofpbuf(request, request_buf); |
5256 | 0 | error = nl_transact(NETLINK_GENERIC, request_buf, bufp); |
5257 | 0 | ofpbuf_delete(request_buf); |
5258 | |
|
5259 | 0 | if (reply) { |
5260 | 0 | if (!error) { |
5261 | 0 | error = dpif_netlink_flow_from_ofpbuf(reply, *bufp); |
5262 | 0 | } |
5263 | 0 | if (error) { |
5264 | 0 | dpif_netlink_flow_init(reply); |
5265 | 0 | ofpbuf_delete(*bufp); |
5266 | 0 | *bufp = NULL; |
5267 | 0 | } |
5268 | 0 | } |
5269 | 0 | return error; |
5270 | 0 | } |
5271 | | |
5272 | | static void |
5273 | | dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow, |
5274 | | struct dpif_flow_stats *stats) |
5275 | 0 | { |
5276 | 0 | if (flow->stats) { |
5277 | 0 | stats->n_packets = get_32aligned_u64(&flow->stats->n_packets); |
5278 | 0 | stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes); |
5279 | 0 | } else { |
5280 | 0 | stats->n_packets = 0; |
5281 | 0 | stats->n_bytes = 0; |
5282 | 0 | } |
5283 | 0 | stats->used = flow->used ? get_32aligned_u64(flow->used) : 0; |
5284 | 0 | stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0; |
5285 | 0 | } |
5286 | | |
5287 | | /* Logs information about a packet that was recently lost in 'ch' (in |
5288 | | * 'dpif_'). */ |
5289 | | static void |
5290 | | report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx, |
5291 | | uint32_t handler_id) |
5292 | 0 | { |
5293 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); |
5294 | 0 | struct ds s; |
5295 | |
|
5296 | 0 | if (VLOG_DROP_WARN(&rl)) { |
5297 | 0 | return; |
5298 | 0 | } |
5299 | | |
5300 | 0 | if (dpif_netlink_upcall_per_cpu(dpif)) { |
5301 | 0 | VLOG_WARN("%s: lost packet on handler %u", |
5302 | 0 | dpif_name(&dpif->dpif), handler_id); |
5303 | 0 | } else { |
5304 | 0 | ds_init(&s); |
5305 | 0 | if (ch->last_poll != LLONG_MIN) { |
5306 | 0 | ds_put_format(&s, " (last polled %lld ms ago)", |
5307 | 0 | time_msec() - ch->last_poll); |
5308 | 0 | } |
5309 | |
|
5310 | 0 | VLOG_WARN("%s: lost packet on port channel %u of handler %u%s", |
5311 | 0 | dpif_name(&dpif->dpif), ch_idx, handler_id, ds_cstr(&s)); |
5312 | 0 | ds_destroy(&s); |
5313 | 0 | } |
5314 | 0 | } |
5315 | | |
5316 | | static void |
5317 | | dpif_netlink_unixctl_dispatch_mode(struct unixctl_conn *conn, |
5318 | | int argc OVS_UNUSED, |
5319 | | const char *argv[] OVS_UNUSED, |
5320 | | void *aux OVS_UNUSED) |
5321 | 0 | { |
5322 | 0 | struct ds reply = DS_EMPTY_INITIALIZER; |
5323 | 0 | struct nl_dump dump; |
5324 | 0 | uint64_t reply_stub[NL_DUMP_BUFSIZE / 8]; |
5325 | 0 | struct ofpbuf msg, buf; |
5326 | 0 | int error; |
5327 | |
|
5328 | 0 | error = dpif_netlink_init(); |
5329 | 0 | if (error) { |
5330 | 0 | return; |
5331 | 0 | } |
5332 | | |
5333 | 0 | ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub); |
5334 | 0 | dpif_netlink_dp_dump_start(&dump); |
5335 | 0 | while (nl_dump_next(&dump, &msg, &buf)) { |
5336 | 0 | struct dpif_netlink_dp dp; |
5337 | 0 | if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) { |
5338 | 0 | ds_put_format(&reply, "%s: ", dp.name); |
5339 | 0 | if (dp.user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU) { |
5340 | 0 | ds_put_format(&reply, "per-cpu dispatch mode"); |
5341 | 0 | } else { |
5342 | 0 | ds_put_format(&reply, "per-vport dispatch mode"); |
5343 | 0 | } |
5344 | 0 | ds_put_format(&reply, "\n"); |
5345 | 0 | } |
5346 | 0 | } |
5347 | 0 | ofpbuf_uninit(&buf); |
5348 | 0 | error = nl_dump_done(&dump); |
5349 | 0 | if (!error) { |
5350 | 0 | unixctl_command_reply(conn, ds_cstr(&reply)); |
5351 | 0 | } |
5352 | |
|
5353 | 0 | ds_destroy(&reply); |
5354 | 0 | } |