/src/openvswitch/lib/netdev-offload.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2008 - 2014, 2016, 2017 Nicira, Inc. |
3 | | * Copyright (c) 2019 Samsung Electronics Co.,Ltd. |
4 | | * |
5 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
6 | | * you may not use this file except in compliance with the License. |
7 | | * You may obtain a copy of the License at: |
8 | | * |
9 | | * http://www.apache.org/licenses/LICENSE-2.0 |
10 | | * |
11 | | * Unless required by applicable law or agreed to in writing, software |
12 | | * distributed under the License is distributed on an "AS IS" BASIS, |
13 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
14 | | * See the License for the specific language governing permissions and |
15 | | * limitations under the License. |
16 | | */ |
17 | | |
18 | | #include <config.h> |
19 | | #include "netdev-offload.h" |
20 | | |
21 | | #include <errno.h> |
22 | | #include <inttypes.h> |
23 | | #include <sys/types.h> |
24 | | #include <netinet/in.h> |
25 | | #include <stdlib.h> |
26 | | #include <string.h> |
27 | | #include <unistd.h> |
28 | | |
29 | | #include "cmap.h" |
30 | | #include "coverage.h" |
31 | | #include "dpif.h" |
32 | | #include "dp-packet.h" |
33 | | #include "openvswitch/dynamic-string.h" |
34 | | #include "fatal-signal.h" |
35 | | #include "hash.h" |
36 | | #include "openvswitch/list.h" |
37 | | #include "netdev-offload-provider.h" |
38 | | #include "netdev-provider.h" |
39 | | #include "netdev-vport.h" |
40 | | #include "odp-netlink.h" |
41 | | #include "openflow/openflow.h" |
42 | | #include "packets.h" |
43 | | #include "openvswitch/ofp-print.h" |
44 | | #include "openvswitch/poll-loop.h" |
45 | | #include "seq.h" |
46 | | #include "openvswitch/shash.h" |
47 | | #include "smap.h" |
48 | | #include "socket-util.h" |
49 | | #include "sset.h" |
50 | | #include "svec.h" |
51 | | #include "openvswitch/vlog.h" |
52 | | #include "flow.h" |
53 | | #include "util.h" |
54 | | #ifdef __linux__ |
55 | | #include "tc.h" |
56 | | #endif |
57 | | |
58 | | VLOG_DEFINE_THIS_MODULE(netdev_offload); |
59 | | |
60 | | |
61 | | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
62 | | static bool netdev_flow_api_enabled = false; |
63 | | |
64 | 0 | #define DEFAULT_OFFLOAD_THREAD_NB 1 |
65 | 0 | #define MAX_OFFLOAD_THREAD_NB 10 |
66 | | |
67 | | static unsigned int offload_thread_nb = DEFAULT_OFFLOAD_THREAD_NB; |
68 | | DEFINE_EXTERN_PER_THREAD_DATA(netdev_offload_thread_id, OVSTHREAD_ID_UNSET); |
69 | | |
70 | | /* Protects 'netdev_flow_apis'. */ |
71 | | static struct ovs_mutex netdev_flow_api_provider_mutex = OVS_MUTEX_INITIALIZER; |
72 | | |
73 | | /* Contains 'struct netdev_registered_flow_api's. */ |
74 | | static struct cmap netdev_flow_apis = CMAP_INITIALIZER; |
75 | | |
76 | | struct netdev_registered_flow_api { |
77 | | struct cmap_node cmap_node; /* In 'netdev_flow_apis', by flow_api->type. */ |
78 | | const struct netdev_flow_api *flow_api; |
79 | | |
80 | | /* Number of references: one for the flow_api itself and one for every |
81 | | * instance of the netdev that uses it. */ |
82 | | struct ovs_refcount refcnt; |
83 | | }; |
84 | | |
85 | | static struct netdev_registered_flow_api * |
86 | | netdev_lookup_flow_api(const char *type) |
87 | 0 | { |
88 | 0 | struct netdev_registered_flow_api *rfa; |
89 | 0 | CMAP_FOR_EACH_WITH_HASH (rfa, cmap_node, hash_string(type, 0), |
90 | 0 | &netdev_flow_apis) { |
91 | 0 | if (!strcmp(type, rfa->flow_api->type)) { |
92 | 0 | return rfa; |
93 | 0 | } |
94 | 0 | } |
95 | 0 | return NULL; |
96 | 0 | } |
97 | | |
98 | | /* Registers a new netdev flow api provider. */ |
99 | | int |
100 | | netdev_register_flow_api_provider(const struct netdev_flow_api *new_flow_api) |
101 | | OVS_EXCLUDED(netdev_flow_api_provider_mutex) |
102 | 0 | { |
103 | 0 | int error = 0; |
104 | |
105 | 0 | if (!new_flow_api->init_flow_api) { |
106 | 0 | VLOG_WARN("attempted to register invalid flow api provider: %s", |
107 | 0 | new_flow_api->type); |
108 | 0 | error = EINVAL; |
109 | 0 | } |
110 | |
111 | 0 | ovs_mutex_lock(&netdev_flow_api_provider_mutex); |
112 | 0 | if (netdev_lookup_flow_api(new_flow_api->type)) { |
113 | 0 | VLOG_WARN("attempted to register duplicate flow api provider: %s", |
114 | 0 | new_flow_api->type); |
115 | 0 | error = EEXIST; |
116 | 0 | } else { |
117 | 0 | struct netdev_registered_flow_api *rfa; |
118 | |
119 | 0 | rfa = xmalloc(sizeof *rfa); |
120 | 0 | cmap_insert(&netdev_flow_apis, &rfa->cmap_node, |
121 | 0 | hash_string(new_flow_api->type, 0)); |
122 | 0 | rfa->flow_api = new_flow_api; |
123 | 0 | ovs_refcount_init(&rfa->refcnt); |
124 | 0 | VLOG_DBG("netdev: flow API '%s' registered.", new_flow_api->type); |
125 | 0 | } |
126 | 0 | ovs_mutex_unlock(&netdev_flow_api_provider_mutex); |
127 | |
128 | 0 | return error; |
129 | 0 | } |
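/* Editor's sketch (annotation, not part of the original file): a minimal
 * provider registration.  "my-offload" and my_init_flow_api() are
 * hypothetical names; per the check above, only '.type' and
 * '.init_flow_api' are mandatory, and the generic wrappers below return
 * EOPNOTSUPP for any callback left NULL.
 *
 *     static int
 *     my_init_flow_api(struct netdev *netdev OVS_UNUSED)
 *     {
 *         return 0;                // claim support for every netdev
 *     }
 *
 *     static const struct netdev_flow_api my_flow_api = {
 *         .type = "my-offload",
 *         .init_flow_api = my_init_flow_api,
 *     };
 *
 *     netdev_register_flow_api_provider(&my_flow_api);   // e.g. at startup
 */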
130 | | |
131 | | /* Unregisters a netdev flow api provider.  'type' must have been previously |
132 | | * registered and must not currently be in use by any netdev.  After |
133 | | * unregistration, a netdev flow api of that type cannot be used for netdevs. |
134 | | * (However, the provider may still be accessible from other threads until the |
135 | | * next RCU grace period, so the caller must not free or re-register the same |
136 | | * netdev_flow_api until that has passed.) */ |
137 | | int |
138 | | netdev_unregister_flow_api_provider(const char *type) |
139 | | OVS_EXCLUDED(netdev_flow_api_provider_mutex) |
140 | 0 | { |
141 | 0 | struct netdev_registered_flow_api *rfa; |
142 | 0 | int error; |
143 | |
144 | 0 | ovs_mutex_lock(&netdev_flow_api_provider_mutex); |
145 | 0 | rfa = netdev_lookup_flow_api(type); |
146 | 0 | if (!rfa) { |
147 | 0 | VLOG_WARN("attempted to unregister a flow api provider that is not " |
148 | 0 | "registered: %s", type); |
149 | 0 | error = EAFNOSUPPORT; |
150 | 0 | } else if (ovs_refcount_unref(&rfa->refcnt) != 1) { |
151 | 0 | ovs_refcount_ref(&rfa->refcnt); |
152 | 0 | VLOG_WARN("attempted to unregister in use flow api provider: %s", |
153 | 0 | type); |
154 | 0 | error = EBUSY; |
155 | 0 | } else { |
156 | 0 | cmap_remove(&netdev_flow_apis, &rfa->cmap_node, |
157 | 0 | hash_string(rfa->flow_api->type, 0)); |
158 | 0 | ovsrcu_postpone(free, rfa); |
159 | 0 | error = 0; |
160 | 0 | } |
161 | 0 | ovs_mutex_unlock(&netdev_flow_api_provider_mutex); |
162 | |
163 | 0 | return error; |
164 | 0 | } |
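/* Editor's sketch: tearing down the provider registered above.  The
 * function frees its own bookkeeping via RCU; as the comment above notes,
 * a caller that owns the 'struct netdev_flow_api' must also let a grace
 * period pass (e.g. with ovsrcu_synchronize()) before reusing or freeing
 * that storage.
 *
 *     if (!netdev_unregister_flow_api_provider("my-offload")) {
 *         ovsrcu_synchronize();
 *         // 'my_flow_api' storage may now be reused or freed
 *     }
 */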
165 | | |
166 | | bool |
167 | | netdev_flow_api_equals(const struct netdev *netdev1, |
168 | | const struct netdev *netdev2) |
169 | 0 | { |
170 | 0 | const struct netdev_flow_api *netdev_flow_api1 = |
171 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev1->flow_api); |
172 | 0 | const struct netdev_flow_api *netdev_flow_api2 = |
173 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev2->flow_api); |
174 | |
175 | 0 | return netdev_flow_api1 == netdev_flow_api2; |
176 | 0 | } |
177 | | |
178 | | static int |
179 | | netdev_assign_flow_api(struct netdev *netdev) |
180 | 0 | { |
181 | 0 | struct netdev_registered_flow_api *rfa; |
182 | |
183 | 0 | CMAP_FOR_EACH (rfa, cmap_node, &netdev_flow_apis) { |
184 | 0 | if (!rfa->flow_api->init_flow_api(netdev)) { |
185 | 0 | ovs_refcount_ref(&rfa->refcnt); |
186 | 0 | atomic_store_relaxed(&netdev->hw_info.miss_api_supported, true); |
187 | 0 | ovsrcu_set(&netdev->flow_api, rfa->flow_api); |
188 | 0 | VLOG_INFO("%s: Assigned flow API '%s'.", |
189 | 0 | netdev_get_name(netdev), rfa->flow_api->type); |
190 | 0 | return 0; |
191 | 0 | } |
192 | 0 | VLOG_DBG("%s: flow API '%s' is not suitable.", |
193 | 0 | netdev_get_name(netdev), rfa->flow_api->type); |
194 | 0 | } |
195 | 0 | atomic_store_relaxed(&netdev->hw_info.miss_api_supported, false); |
196 | 0 | VLOG_INFO("%s: No suitable flow API found.", netdev_get_name(netdev)); |
197 | |
198 | 0 | return -1; |
199 | 0 | } |
200 | | |
201 | | void |
202 | | meter_offload_set(ofproto_meter_id meter_id, |
203 | | struct ofputil_meter_config *config) |
204 | 0 | { |
205 | 0 | struct netdev_registered_flow_api *rfa; |
206 | |
207 | 0 | CMAP_FOR_EACH (rfa, cmap_node, &netdev_flow_apis) { |
208 | 0 | if (rfa->flow_api->meter_set) { |
209 | 0 | int ret = rfa->flow_api->meter_set(meter_id, config); |
210 | 0 | if (ret) { |
211 | 0 | VLOG_DBG_RL(&rl, "Failed setting meter %u for flow api %s, " |
212 | 0 | "error %d", meter_id.uint32, rfa->flow_api->type, |
213 | 0 | ret); |
214 | 0 | } |
215 | 0 | } |
216 | 0 | } |
217 | | /* Offload APIs may fail, for example because a given offload is not |
218 | | * supported; that is fine, since each offload provider handles it. */ |
219 | 0 | } |
220 | | |
221 | | int |
222 | | meter_offload_get(ofproto_meter_id meter_id, struct ofputil_meter_stats *stats) |
223 | 0 | { |
224 | 0 | struct netdev_registered_flow_api *rfa; |
225 | |
226 | 0 | CMAP_FOR_EACH (rfa, cmap_node, &netdev_flow_apis) { |
227 | 0 | if (rfa->flow_api->meter_get) { |
228 | 0 | int ret = rfa->flow_api->meter_get(meter_id, stats); |
229 | 0 | if (ret) { |
230 | 0 | VLOG_DBG_RL(&rl, "Failed getting meter %u for flow api %s, " |
231 | 0 | "error %d", meter_id.uint32, rfa->flow_api->type, |
232 | 0 | ret); |
233 | 0 | } |
234 | 0 | } |
235 | 0 | } |
236 | |
237 | 0 | return 0; |
238 | 0 | } |
239 | | |
240 | | int |
241 | | meter_offload_del(ofproto_meter_id meter_id, struct ofputil_meter_stats *stats) |
242 | 0 | { |
243 | 0 | struct netdev_registered_flow_api *rfa; |
244 | |
245 | 0 | CMAP_FOR_EACH (rfa, cmap_node, &netdev_flow_apis) { |
246 | 0 | if (rfa->flow_api->meter_del) { |
247 | 0 | int ret = rfa->flow_api->meter_del(meter_id, stats); |
248 | 0 | if (ret) { |
249 | 0 | VLOG_DBG_RL(&rl, "Failed deleting meter %u for flow api %s, " |
250 | 0 | "error %d", meter_id.uint32, rfa->flow_api->type, |
251 | 0 | ret); |
252 | 0 | } |
253 | 0 | } |
254 | 0 | } |
255 | |
256 | 0 | return 0; |
257 | 0 | } |
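/* Editor's note: unlike the per-netdev wrappers below, the three meter
 * calls above fan out to every registered provider, since a meter may be
 * referenced by flows on any offload-capable port.  Failures are only
 * logged (rate-limited); a provider without meter support simply leaves
 * the callbacks NULL and is skipped. */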
258 | | |
259 | | int |
260 | | netdev_flow_flush(struct netdev *netdev) |
261 | 0 | { |
262 | 0 | const struct netdev_flow_api *flow_api = |
263 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
264 | |
265 | 0 | return (flow_api && flow_api->flow_flush) |
266 | 0 | ? flow_api->flow_flush(netdev) |
267 | 0 | : EOPNOTSUPP; |
268 | 0 | } |
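/* Editor's note: netdev_flow_flush() above and the wrappers below share
 * one dispatch pattern: read the RCU-protected provider pointer once,
 * call the hook if both it and the callback are set, otherwise report
 * EOPNOTSUPP (or false, for flow_dump_next).  A hedged caller sketch:
 *
 *     int err = netdev_flow_flush(netdev);
 *     if (err && err != EOPNOTSUPP) {
 *         VLOG_WARN("%s: flush failed (%s)",
 *                   netdev_get_name(netdev), ovs_strerror(err));
 *     }
 */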
269 | | |
270 | | int |
271 | | netdev_flow_dump_create(struct netdev *netdev, struct netdev_flow_dump **dump, |
272 | | bool terse) |
273 | 0 | { |
274 | 0 | const struct netdev_flow_api *flow_api = |
275 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
276 | |
277 | 0 | return (flow_api && flow_api->flow_dump_create) |
278 | 0 | ? flow_api->flow_dump_create(netdev, dump, terse) |
279 | 0 | : EOPNOTSUPP; |
280 | 0 | } |
281 | | |
282 | | int |
283 | | netdev_flow_dump_destroy(struct netdev_flow_dump *dump) |
284 | 0 | { |
285 | 0 | const struct netdev_flow_api *flow_api = |
286 | 0 | ovsrcu_get(const struct netdev_flow_api *, &dump->netdev->flow_api); |
287 | |
288 | 0 | return (flow_api && flow_api->flow_dump_destroy) |
289 | 0 | ? flow_api->flow_dump_destroy(dump) |
290 | 0 | : EOPNOTSUPP; |
291 | 0 | } |
292 | | |
293 | | bool |
294 | | netdev_flow_dump_next(struct netdev_flow_dump *dump, struct match *match, |
295 | | struct nlattr **actions, struct dpif_flow_stats *stats, |
296 | | struct dpif_flow_attrs *attrs, ovs_u128 *ufid, |
297 | | struct ofpbuf *rbuffer, struct ofpbuf *wbuffer) |
298 | 0 | { |
299 | 0 | const struct netdev_flow_api *flow_api = |
300 | 0 | ovsrcu_get(const struct netdev_flow_api *, &dump->netdev->flow_api); |
301 | |
302 | 0 | return (flow_api && flow_api->flow_dump_next) |
303 | 0 | ? flow_api->flow_dump_next(dump, match, actions, stats, attrs, |
304 | 0 | ufid, rbuffer, wbuffer) |
305 | 0 | : false; |
306 | 0 | } |
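/* Editor's sketch of driving one dump to completion.  Buffer sizing and
 * the lack of locking are illustrative assumptions, not requirements
 * defined in this file:
 *
 *     uint64_t rstub[4096 / 8], wstub[4096 / 8];
 *     struct ofpbuf rbuffer, wbuffer;
 *     struct netdev_flow_dump *dump;
 *     struct dpif_flow_stats stats;
 *     struct dpif_flow_attrs attrs;
 *     struct nlattr *actions;
 *     struct match match;
 *     ovs_u128 ufid;
 *
 *     ofpbuf_use_stub(&rbuffer, rstub, sizeof rstub);
 *     ofpbuf_use_stub(&wbuffer, wstub, sizeof wstub);
 *     if (!netdev_flow_dump_create(netdev, &dump, false)) {
 *         while (netdev_flow_dump_next(dump, &match, &actions, &stats,
 *                                      &attrs, &ufid, &rbuffer,
 *                                      &wbuffer)) {
 *             // one offloaded flow; match/actions/stats are valid here
 *         }
 *         netdev_flow_dump_destroy(dump);
 *     }
 *     ofpbuf_uninit(&rbuffer);
 *     ofpbuf_uninit(&wbuffer);
 */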
307 | | |
308 | | int |
309 | | netdev_flow_put(struct netdev *netdev, struct match *match, |
310 | | struct nlattr *actions, size_t act_len, |
311 | | const ovs_u128 *ufid, struct offload_info *info, |
312 | | struct dpif_flow_stats *stats) |
313 | 0 | { |
314 | 0 | const struct netdev_flow_api *flow_api = |
315 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
316 | |
317 | 0 | return (flow_api && flow_api->flow_put) |
318 | 0 | ? flow_api->flow_put(netdev, match, actions, act_len, ufid, |
319 | 0 | info, stats) |
320 | 0 | : EOPNOTSUPP; |
321 | 0 | } |
322 | | |
323 | | int |
324 | | netdev_hw_miss_packet_recover(struct netdev *netdev, |
325 | | struct dp_packet *packet) |
326 | 0 | { |
327 | 0 | const struct netdev_flow_api *flow_api; |
328 | 0 | bool miss_api_supported; |
329 | 0 | int rv; |
330 | |
331 | 0 | atomic_read_relaxed(&netdev->hw_info.miss_api_supported, |
332 | 0 | &miss_api_supported); |
333 | 0 | if (!miss_api_supported) { |
334 | 0 | return EOPNOTSUPP; |
335 | 0 | } |
336 | |
337 | 0 | flow_api = ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
338 | 0 | if (!flow_api || !flow_api->hw_miss_packet_recover) { |
339 | 0 | return EOPNOTSUPP; |
340 | 0 | } |
341 | |
342 | 0 | rv = flow_api->hw_miss_packet_recover(netdev, packet); |
343 | 0 | if (rv == EOPNOTSUPP) { |
344 | | /* API unsupported by the port; avoid subsequent calls. */ |
345 | 0 | atomic_store_relaxed(&netdev->hw_info.miss_api_supported, false); |
346 | 0 | } |
347 | |
348 | 0 | return rv; |
349 | 0 | } |
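/* Editor's sketch: a typical call site in a userspace datapath receive
 * path (simplified relative to dpif-netdev's real logic):
 *
 *     if (netdev_is_flow_api_enabled()
 *         && !netdev_hw_miss_packet_recover(netdev, packet)) {
 *         // hardware restored the packet's offload state, e.g. its
 *         // recirculation context
 *     }
 *
 * The relaxed-atomic latch above turns the first EOPNOTSUPP into a cheap
 * flag check on every later packet, without any locking. */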
350 | | |
351 | | int |
352 | | netdev_flow_get(struct netdev *netdev, struct match *match, |
353 | | struct nlattr **actions, const ovs_u128 *ufid, |
354 | | struct dpif_flow_stats *stats, |
355 | | struct dpif_flow_attrs *attrs, struct ofpbuf *buf) |
356 | 0 | { |
357 | 0 | const struct netdev_flow_api *flow_api = |
358 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
359 | |
360 | 0 | return (flow_api && flow_api->flow_get) |
361 | 0 | ? flow_api->flow_get(netdev, match, actions, ufid, |
362 | 0 | stats, attrs, buf) |
363 | 0 | : EOPNOTSUPP; |
364 | 0 | } |
365 | | |
366 | | int |
367 | | netdev_flow_del(struct netdev *netdev, const ovs_u128 *ufid, |
368 | | struct dpif_flow_stats *stats) |
369 | 0 | { |
370 | 0 | const struct netdev_flow_api *flow_api = |
371 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
372 | |
373 | 0 | return (flow_api && flow_api->flow_del) |
374 | 0 | ? flow_api->flow_del(netdev, ufid, stats) |
375 | 0 | : EOPNOTSUPP; |
376 | 0 | } |
377 | | |
378 | | int |
379 | | netdev_flow_get_n_flows(struct netdev *netdev, uint64_t *n_flows) |
380 | 0 | { |
381 | 0 | const struct netdev_flow_api *flow_api = |
382 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
383 | |
384 | 0 | return (flow_api && flow_api->flow_get_n_flows) |
385 | 0 | ? flow_api->flow_get_n_flows(netdev, n_flows) |
386 | 0 | : EOPNOTSUPP; |
387 | 0 | } |
388 | | |
389 | | int |
390 | | netdev_init_flow_api(struct netdev *netdev) |
391 | 0 | { |
392 | 0 | if (!netdev_is_flow_api_enabled()) { |
393 | 0 | return EOPNOTSUPP; |
394 | 0 | } |
395 | |
396 | 0 | if (ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api)) { |
397 | 0 | return 0; |
398 | 0 | } |
399 | |
400 | 0 | if (netdev_assign_flow_api(netdev)) { |
401 | 0 | return EOPNOTSUPP; |
402 | 0 | } |
403 | |
404 | 0 | return 0; |
405 | 0 | } |
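/* Editor's note: netdev_init_flow_api() is idempotent, so callers may
 * probe it repeatedly:
 *
 *     if (!netdev_init_flow_api(netdev)) {
 *         // a provider is assigned (possibly from an earlier call)
 *     }
 *
 * netdev_uninit_flow_api() below releases the provider reference taken
 * in netdev_assign_flow_api(). */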
406 | | |
407 | | void |
408 | | netdev_uninit_flow_api(struct netdev *netdev) |
409 | 0 | { |
410 | 0 | struct netdev_registered_flow_api *rfa; |
411 | 0 | const struct netdev_flow_api *flow_api = |
412 | 0 | ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api); |
413 | |
414 | 0 | if (!flow_api) { |
415 | 0 | return; |
416 | 0 | } |
417 | |
418 | 0 | if (flow_api->uninit_flow_api) { |
419 | 0 | flow_api->uninit_flow_api(netdev); |
420 | 0 | } |
421 | |
422 | 0 | ovsrcu_set(&netdev->flow_api, NULL); |
423 | 0 | rfa = netdev_lookup_flow_api(flow_api->type); |
424 | 0 | ovs_refcount_unref(&rfa->refcnt); |
425 | 0 | } |
426 | | |
427 | | uint32_t |
428 | | netdev_get_block_id(struct netdev *netdev) |
429 | 0 | { |
430 | 0 | const struct netdev_class *class = netdev->netdev_class; |
431 | |
432 | 0 | return (class->get_block_id |
433 | 0 | ? class->get_block_id(netdev) |
434 | 0 | : 0); |
435 | 0 | } |
436 | | |
437 | | /* |
438 | | * Get the value of the hw info parameter specified by type. |
439 | | * Returns the value on success (>= 0). Returns -1 on failure. |
440 | | */ |
441 | | int |
442 | | netdev_get_hw_info(struct netdev *netdev, int type) |
443 | 0 | { |
444 | 0 | int val = -1; |
445 | |
446 | 0 | switch (type) { |
447 | 0 | case HW_INFO_TYPE_OOR: |
448 | 0 | val = netdev->hw_info.oor; |
449 | 0 | break; |
450 | 0 | case HW_INFO_TYPE_PEND_COUNT: |
451 | 0 | val = netdev->hw_info.pending_count; |
452 | 0 | break; |
453 | 0 | case HW_INFO_TYPE_OFFL_COUNT: |
454 | 0 | val = netdev->hw_info.offload_count; |
455 | 0 | break; |
456 | 0 | default: |
457 | 0 | break; |
458 | 0 | } |
459 | | |
460 | 0 | return val; |
461 | 0 | } |
462 | | |
463 | | /* |
464 | | * Set the value of the hw info parameter specified by type. |
465 | | */ |
466 | | void |
467 | | netdev_set_hw_info(struct netdev *netdev, int type, int val) |
468 | 0 | { |
469 | 0 | switch (type) { |
470 | 0 | case HW_INFO_TYPE_OOR: |
471 | 0 | if (val == 0) { |
472 | 0 | VLOG_DBG("Offload rebalance: netdev: %s is not OOR", netdev->name); |
473 | 0 | } |
474 | 0 | netdev->hw_info.oor = val; |
475 | 0 | break; |
476 | 0 | case HW_INFO_TYPE_PEND_COUNT: |
477 | 0 | netdev->hw_info.pending_count = val; |
478 | 0 | break; |
479 | 0 | case HW_INFO_TYPE_OFFL_COUNT: |
480 | 0 | netdev->hw_info.offload_count = val; |
481 | 0 | break; |
482 | 0 | default: |
483 | 0 | break; |
484 | 0 | } |
485 | 0 | } |
486 | | |
487 | | /* Protects below port hashmaps. */ |
488 | | static struct ovs_rwlock netdev_hmap_rwlock = OVS_RWLOCK_INITIALIZER; |
489 | | |
490 | | static struct hmap port_to_netdev OVS_GUARDED_BY(netdev_hmap_rwlock) |
491 | | = HMAP_INITIALIZER(&port_to_netdev); |
492 | | static struct hmap ifindex_to_port OVS_GUARDED_BY(netdev_hmap_rwlock) |
493 | | = HMAP_INITIALIZER(&ifindex_to_port); |
494 | | |
495 | | struct port_to_netdev_data { |
496 | | struct hmap_node portno_node; /* By (dpif_type, dpif_port.port_no). */ |
497 | | struct hmap_node ifindex_node; /* By (dpif_type, ifindex). */ |
498 | | struct netdev *netdev; |
499 | | struct dpif_port dpif_port; |
500 | | int ifindex; |
501 | | }; |
502 | | |
503 | | /* |
504 | | * Check whether any netdev is in the out-of-resources (OOR) state. |
505 | | * Returns true if at least one netdev is OOR; otherwise false. |
506 | | */ |
507 | | bool |
508 | | netdev_any_oor(void) |
509 | | OVS_EXCLUDED(netdev_hmap_rwlock) |
510 | 0 | { |
511 | 0 | struct port_to_netdev_data *data; |
512 | 0 | bool oor = false; |
513 | |
514 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
515 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
516 | 0 | struct netdev *dev = data->netdev; |
517 | |
518 | 0 | if (dev->hw_info.oor) { |
519 | 0 | oor = true; |
520 | 0 | break; |
521 | 0 | } |
522 | 0 | } |
523 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
524 | |
525 | 0 | return oor; |
526 | 0 | } |
527 | | |
528 | | bool |
529 | | netdev_is_flow_api_enabled(void) |
530 | 0 | { |
531 | 0 | return netdev_flow_api_enabled; |
532 | 0 | } |
533 | | |
534 | | unsigned int |
535 | | netdev_offload_thread_nb(void) |
536 | 0 | { |
537 | 0 | return offload_thread_nb; |
538 | 0 | } |
539 | | |
540 | | unsigned int |
541 | | netdev_offload_ufid_to_thread_id(const ovs_u128 ufid) |
542 | 0 | { |
543 | 0 | uint32_t ufid_hash; |
544 | |
545 | 0 | if (netdev_offload_thread_nb() == 1) { |
546 | 0 | return 0; |
547 | 0 | } |
548 | | |
549 | 0 | ufid_hash = hash_words64_inline( |
550 | 0 | (const uint64_t [2]){ ufid.u64.lo, |
551 | 0 | ufid.u64.hi }, 2, 1); |
552 | 0 | return ufid_hash % netdev_offload_thread_nb(); |
553 | 0 | } |
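/* Editor's note: the mapping above is pure and stable, so every
 * operation on a given flow lands on the same offload thread.  Sketch,
 * with a hypothetical per-thread work-queue array and helper:
 *
 *     unsigned int tid = netdev_offload_ufid_to_thread_id(item->ufid);
 *     enqueue_offload_work(&queues[tid], item);    // hypothetical
 */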
554 | | |
555 | | unsigned int |
556 | | netdev_offload_thread_init(void) |
557 | 0 | { |
558 | 0 | static atomic_count next_id = ATOMIC_COUNT_INIT(0); |
559 | 0 | bool thread_is_hw_offload; |
560 | 0 | bool thread_is_rcu; |
561 | |
562 | 0 | thread_is_hw_offload = !strncmp(get_subprogram_name(), |
563 | 0 | "hw_offload", strlen("hw_offload")); |
564 | 0 | thread_is_rcu = !strncmp(get_subprogram_name(), "urcu", strlen("urcu")); |
565 | | |
566 | | /* Panic if any thread other than an offload or RCU thread tries |
567 | | * to initialize its thread ID. */ |
568 | 0 | ovs_assert(thread_is_hw_offload || thread_is_rcu); |
569 | |
570 | 0 | if (*netdev_offload_thread_id_get() == OVSTHREAD_ID_UNSET) { |
571 | 0 | unsigned int id; |
572 | |
573 | 0 | if (thread_is_rcu) { |
574 | | /* RCU will compete with other threads for shared object access. |
575 | | * Reclamation functions using a thread ID must be thread-safe. |
576 | | * To that end, and because RCU must consider all potential shared |
577 | | * objects anyway, its thread ID can be arbitrary, so use 0. |
578 | | */ |
579 | 0 | id = 0; |
580 | 0 | } else { |
581 | | /* Only the actual offload threads have their own ID. */ |
582 | 0 | id = atomic_count_inc(&next_id); |
583 | 0 | } |
584 | | /* Panic if any offload thread is getting a spurious ID. */ |
585 | 0 | ovs_assert(id < netdev_offload_thread_nb()); |
586 | 0 | return *netdev_offload_thread_id_get() = id; |
587 | 0 | } else { |
588 | 0 | return *netdev_offload_thread_id_get(); |
589 | 0 | } |
590 | 0 | } |
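/* Editor's note: the RCU thread deliberately shares ID 0 with the first
 * offload thread, so any per-thread state indexed by this ID must
 * tolerate access from both (see the comment inside the function). */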
591 | | |
592 | | void |
593 | | netdev_ports_flow_flush(const char *dpif_type) |
594 | 0 | { |
595 | 0 | struct port_to_netdev_data *data; |
596 | |
597 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
598 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
599 | 0 | if (netdev_get_dpif_type(data->netdev) == dpif_type) { |
600 | 0 | netdev_flow_flush(data->netdev); |
601 | 0 | } |
602 | 0 | } |
603 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
604 | 0 | } |
605 | | |
606 | | void |
607 | | netdev_ports_traverse(const char *dpif_type, |
608 | | bool (*cb)(struct netdev *, odp_port_t, void *), |
609 | | void *aux) |
610 | 0 | { |
611 | 0 | struct port_to_netdev_data *data; |
612 | |
613 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
614 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
615 | 0 | if (netdev_get_dpif_type(data->netdev) == dpif_type) { |
616 | 0 | if (cb(data->netdev, data->dpif_port.port_no, aux)) { |
617 | 0 | break; |
618 | 0 | } |
619 | 0 | } |
620 | 0 | } |
621 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
622 | 0 | } |
623 | | |
624 | | struct netdev_flow_dump ** |
625 | | netdev_ports_flow_dump_create(const char *dpif_type, int *ports, bool terse) |
626 | 0 | { |
627 | 0 | struct port_to_netdev_data *data; |
628 | 0 | struct netdev_flow_dump **dumps; |
629 | 0 | int count = 0; |
630 | 0 | int i = 0; |
631 | |
632 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
633 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
634 | 0 | if (netdev_get_dpif_type(data->netdev) == dpif_type) { |
635 | 0 | count++; |
636 | 0 | } |
637 | 0 | } |
638 | |
639 | 0 | dumps = count ? xzalloc(sizeof *dumps * count) : NULL; |
640 | |
641 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
642 | 0 | if (netdev_get_dpif_type(data->netdev) == dpif_type) { |
643 | 0 | if (netdev_flow_dump_create(data->netdev, &dumps[i], terse)) { |
644 | 0 | continue; |
645 | 0 | } |
646 | | |
647 | 0 | dumps[i]->port = data->dpif_port.port_no; |
648 | 0 | i++; |
649 | 0 | } |
650 | 0 | } |
651 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
652 | |
653 | 0 | *ports = i; |
654 | 0 | return dumps; |
655 | 0 | } |
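/* Editor's sketch of consuming the dump array; "system" (the kernel
 * datapath's dpif type) is used here only as an example:
 *
 *     int n_dumps = 0;
 *     struct netdev_flow_dump **dumps =
 *         netdev_ports_flow_dump_create("system", &n_dumps, false);
 *
 *     for (int j = 0; j < n_dumps; j++) {
 *         // ... netdev_flow_dump_next(dumps[j], ...) until false ...
 *         netdev_flow_dump_destroy(dumps[j]);
 *     }
 *     free(dumps);
 */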
656 | | |
657 | | int |
658 | | netdev_ports_flow_del(const char *dpif_type, const ovs_u128 *ufid, |
659 | | struct dpif_flow_stats *stats) |
660 | 0 | { |
661 | 0 | struct port_to_netdev_data *data; |
662 | |
663 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
664 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
665 | 0 | if (netdev_get_dpif_type(data->netdev) == dpif_type |
666 | 0 | && !netdev_flow_del(data->netdev, ufid, stats)) { |
667 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
668 | 0 | return 0; |
669 | 0 | } |
670 | 0 | } |
671 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
672 | |
673 | 0 | return ENOENT; |
674 | 0 | } |
675 | | |
676 | | int |
677 | | netdev_ports_flow_get(const char *dpif_type, struct match *match, |
678 | | struct nlattr **actions, const ovs_u128 *ufid, |
679 | | struct dpif_flow_stats *stats, |
680 | | struct dpif_flow_attrs *attrs, struct ofpbuf *buf) |
681 | 0 | { |
682 | 0 | struct port_to_netdev_data *data; |
683 | |
684 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
685 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
686 | 0 | if (netdev_get_dpif_type(data->netdev) == dpif_type |
687 | 0 | && !netdev_flow_get(data->netdev, match, actions, |
688 | 0 | ufid, stats, attrs, buf)) { |
689 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
690 | 0 | return 0; |
691 | 0 | } |
692 | 0 | } |
693 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
694 | 0 | return ENOENT; |
695 | 0 | } |
696 | | |
697 | | static uint32_t |
698 | | netdev_ports_hash(odp_port_t port, const char *dpif_type) |
699 | 0 | { |
700 | 0 | return hash_int(odp_to_u32(port), hash_pointer(dpif_type, 0)); |
701 | 0 | } |
702 | | |
703 | | static struct port_to_netdev_data * |
704 | | netdev_ports_lookup(odp_port_t port_no, const char *dpif_type) |
705 | | OVS_REQ_RDLOCK(netdev_hmap_rwlock) |
706 | 0 | { |
707 | 0 | struct port_to_netdev_data *data; |
708 | |
709 | 0 | HMAP_FOR_EACH_WITH_HASH (data, portno_node, |
710 | 0 | netdev_ports_hash(port_no, dpif_type), |
711 | 0 | &port_to_netdev) { |
712 | 0 | if (netdev_get_dpif_type(data->netdev) == dpif_type |
713 | 0 | && data->dpif_port.port_no == port_no) { |
714 | 0 | return data; |
715 | 0 | } |
716 | 0 | } |
717 | 0 | return NULL; |
718 | 0 | } |
719 | | |
720 | | int |
721 | | netdev_ports_insert(struct netdev *netdev, struct dpif_port *dpif_port) |
722 | 0 | { |
723 | 0 | const char *dpif_type = netdev_get_dpif_type(netdev); |
724 | 0 | struct port_to_netdev_data *data; |
725 | 0 | int ifindex = netdev_get_ifindex(netdev); |
726 | |
727 | 0 | ovs_assert(dpif_type); |
728 | |
729 | 0 | ovs_rwlock_wrlock(&netdev_hmap_rwlock); |
730 | 0 | if (netdev_ports_lookup(dpif_port->port_no, dpif_type)) { |
731 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
732 | 0 | return EEXIST; |
733 | 0 | } |
734 | |
735 | 0 | data = xzalloc(sizeof *data); |
736 | 0 | data->netdev = netdev_ref(netdev); |
737 | 0 | dpif_port_clone(&data->dpif_port, dpif_port); |
738 | |
739 | 0 | if (ifindex >= 0) { |
740 | 0 | data->ifindex = ifindex; |
741 | 0 | hmap_insert(&ifindex_to_port, &data->ifindex_node, ifindex); |
742 | 0 | } else { |
743 | 0 | data->ifindex = -1; |
744 | 0 | } |
745 | |
746 | 0 | hmap_insert(&port_to_netdev, &data->portno_node, |
747 | 0 | netdev_ports_hash(dpif_port->port_no, dpif_type)); |
748 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
749 | |
750 | 0 | netdev_init_flow_api(netdev); |
751 | |
752 | 0 | return 0; |
753 | 0 | } |
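/* Editor's sketch: the port-map lifecycle as a dpif backend might drive
 * it.  netdev_set_dpif_type() must have been called beforehand, since
 * netdev_ports_insert() asserts a non-null dpif type; note also that the
 * lookups above compare that string by pointer ('=='), relying on every
 * caller passing the same interned per-class type string:
 *
 *     netdev_ports_insert(netdev, &dpif_port);        // takes a ref
 *     ...
 *     struct netdev *dev = netdev_ports_get(port_no, dpif_type);
 *     if (dev) {
 *         // use dev, then drop the lookup reference:
 *         netdev_close(dev);
 *     }
 *     ...
 *     netdev_ports_remove(port_no, dpif_type);        // drops the ref
 */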
754 | | |
755 | | struct netdev * |
756 | | netdev_ports_get(odp_port_t port_no, const char *dpif_type) |
757 | 0 | { |
758 | 0 | struct port_to_netdev_data *data; |
759 | 0 | struct netdev *ret = NULL; |
760 | |
761 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
762 | 0 | data = netdev_ports_lookup(port_no, dpif_type); |
763 | 0 | if (data) { |
764 | 0 | ret = netdev_ref(data->netdev); |
765 | 0 | } |
766 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
767 | |
768 | 0 | return ret; |
769 | 0 | } |
770 | | |
771 | | int |
772 | | netdev_ports_remove(odp_port_t port_no, const char *dpif_type) |
773 | 0 | { |
774 | 0 | struct port_to_netdev_data *data; |
775 | 0 | int ret = ENOENT; |
776 | |
777 | 0 | ovs_rwlock_wrlock(&netdev_hmap_rwlock); |
778 | 0 | data = netdev_ports_lookup(port_no, dpif_type); |
779 | 0 | if (data) { |
780 | 0 | dpif_port_destroy(&data->dpif_port); |
781 | 0 | netdev_close(data->netdev); /* unref and possibly close */ |
782 | 0 | hmap_remove(&port_to_netdev, &data->portno_node); |
783 | 0 | if (data->ifindex >= 0) { |
784 | 0 | hmap_remove(&ifindex_to_port, &data->ifindex_node); |
785 | 0 | } |
786 | 0 | free(data); |
787 | 0 | ret = 0; |
788 | 0 | } |
789 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
790 | |
791 | 0 | return ret; |
792 | 0 | } |
793 | | |
794 | | int |
795 | | netdev_ports_get_n_flows(const char *dpif_type, odp_port_t port_no, |
796 | | uint64_t *n_flows) |
797 | 0 | { |
798 | 0 | struct port_to_netdev_data *data; |
799 | 0 | int ret = EOPNOTSUPP; |
800 | |
801 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
802 | 0 | data = netdev_ports_lookup(port_no, dpif_type); |
803 | 0 | if (data) { |
804 | 0 | uint64_t thread_n_flows[MAX_OFFLOAD_THREAD_NB] = {0}; |
805 | 0 | unsigned int tid; |
806 | |
807 | 0 | ret = netdev_flow_get_n_flows(data->netdev, thread_n_flows); |
808 | 0 | *n_flows = 0; |
809 | 0 | if (!ret) { |
810 | 0 | for (tid = 0; tid < netdev_offload_thread_nb(); tid++) { |
811 | 0 | *n_flows += thread_n_flows[tid]; |
812 | 0 | } |
813 | 0 | } |
814 | 0 | } |
815 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
816 | 0 | return ret; |
817 | 0 | } |
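/* Editor's note: providers report one counter per offload thread, which
 * is why a MAX_OFFLOAD_THREAD_NB-sized array is summed above.  Hedged
 * usage:
 *
 *     uint64_t n = 0;
 *     if (!netdev_ports_get_n_flows(dpif_type, port_no, &n)) {
 *         VLOG_INFO("port %"PRIu32": %"PRIu64" offloaded flows",
 *                   odp_to_u32(port_no), n);
 *     }
 */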
818 | | |
819 | | odp_port_t |
820 | | netdev_ifindex_to_odp_port(int ifindex) |
821 | 0 | { |
822 | 0 | struct port_to_netdev_data *data; |
823 | 0 | odp_port_t ret = 0; |
824 | |
825 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
826 | 0 | HMAP_FOR_EACH_WITH_HASH (data, ifindex_node, ifindex, &ifindex_to_port) { |
827 | 0 | if (data->ifindex == ifindex) { |
828 | 0 | ret = data->dpif_port.port_no; |
829 | 0 | break; |
830 | 0 | } |
831 | 0 | } |
832 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
833 | |
834 | 0 | return ret; |
835 | 0 | } |
836 | | |
837 | | static bool netdev_offload_rebalance_policy = false; |
838 | | |
839 | | bool |
840 | | netdev_is_offload_rebalance_policy_enabled(void) |
841 | 0 | { |
842 | 0 | return netdev_offload_rebalance_policy; |
843 | 0 | } |
844 | | |
845 | | static void |
846 | | netdev_ports_flow_init(void) |
847 | 0 | { |
848 | 0 | struct port_to_netdev_data *data; |
849 | |
850 | 0 | ovs_rwlock_rdlock(&netdev_hmap_rwlock); |
851 | 0 | HMAP_FOR_EACH (data, portno_node, &port_to_netdev) { |
852 | 0 | netdev_init_flow_api(data->netdev); |
853 | 0 | } |
854 | 0 | ovs_rwlock_unlock(&netdev_hmap_rwlock); |
855 | 0 | } |
856 | | |
857 | | void |
858 | | netdev_set_flow_api_enabled(const struct smap *ovs_other_config) |
859 | 0 | { |
860 | 0 | if (smap_get_bool(ovs_other_config, "hw-offload", false)) { |
861 | 0 | static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; |
862 | |
863 | 0 | if (ovsthread_once_start(&once)) { |
864 | 0 | netdev_flow_api_enabled = true; |
865 | |
866 | 0 | offload_thread_nb = smap_get_ullong(ovs_other_config, |
867 | 0 | "n-offload-threads", |
868 | 0 | DEFAULT_OFFLOAD_THREAD_NB); |
869 | 0 | if (offload_thread_nb > MAX_OFFLOAD_THREAD_NB) { |
870 | 0 | VLOG_WARN("netdev: Invalid number of threads requested: %u", |
871 | 0 | offload_thread_nb); |
872 | 0 | offload_thread_nb = DEFAULT_OFFLOAD_THREAD_NB; |
873 | 0 | } |
874 | |
875 | 0 | if (smap_get(ovs_other_config, "n-offload-threads")) { |
876 | 0 | VLOG_INFO("netdev: Flow API Enabled, using %u thread%s", |
877 | 0 | offload_thread_nb, |
878 | 0 | offload_thread_nb > 1 ? "s" : ""); |
879 | 0 | } else { |
880 | 0 | VLOG_INFO("netdev: Flow API Enabled"); |
881 | 0 | } |
882 | |
883 | 0 | #ifdef __linux__ |
884 | 0 | tc_set_policy(smap_get_def(ovs_other_config, "tc-policy", |
885 | 0 | TC_POLICY_DEFAULT)); |
886 | 0 | #endif |
887 | |
888 | 0 | if (smap_get_bool(ovs_other_config, "offload-rebalance", false)) { |
889 | 0 | netdev_offload_rebalance_policy = true; |
890 | 0 | } |
891 | |
892 | 0 | netdev_ports_flow_init(); |
893 | |
894 | 0 | ovsthread_once_done(&once); |
895 | 0 | } |
896 | 0 | } |
897 | 0 | } |
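/* Editor's note: the knobs read above correspond to ovsdb other_config
 * keys; for example (values illustrative):
 *
 *     ovs-vsctl set Open_vSwitch . other_config:hw-offload=true
 *     ovs-vsctl set Open_vSwitch . other_config:n-offload-threads=4
 *     ovs-vsctl set Open_vSwitch . other_config:tc-policy=skip_sw    (Linux)
 *     ovs-vsctl set Open_vSwitch . other_config:offload-rebalance=true
 *
 * Because of the OVSTHREAD_ONCE guard, these are latched the first time
 * hw-offload is enabled and are not re-read at runtime. */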