/src/openvswitch/lib/dpif-offload.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2025 Red Hat, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | #include <errno.h> |
19 | | |
20 | | #include "dpif-offload.h" |
21 | | #include "dpif-offload-provider.h" |
22 | | #include "dpif-provider.h" |
23 | | #include "netdev-provider.h" |
24 | | #include "unixctl.h" |
25 | | #include "util.h" |
26 | | #include "vswitch-idl.h" |
27 | | |
28 | | #include "openvswitch/dynamic-string.h" |
29 | | #include "openvswitch/shash.h" |
30 | | #include "openvswitch/vlog.h" |
31 | | |
VLOG_DEFINE_THIS_MODULE(dpif_offload);

/* Rate limiter for debug-level log messages in this module. */
static struct vlog_rate_limit rl_dbg = VLOG_RATE_LIMIT_INIT(100, 100);

/* Protects the two shash maps below. */
static struct ovs_mutex dpif_offload_mutex = OVS_MUTEX_INITIALIZER;
/* All registered offload classes, keyed by class->type. */
static struct shash dpif_offload_classes \
    OVS_GUARDED_BY(dpif_offload_mutex) = \
    SHASH_INITIALIZER(&dpif_offload_classes);
/* Per-dpif provider collections, keyed by dpif name. */
static struct shash dpif_offload_providers \
    OVS_GUARDED_BY(dpif_offload_mutex) = \
    SHASH_INITIALIZER(&dpif_offload_providers);

/* Offload classes compiled into this build; registered at module init. */
static const struct dpif_offload_class *base_dpif_offload_classes[] = {
#if defined(__linux__)
    &dpif_offload_tc_class,
#endif
#ifdef DPDK_NETDEV
    &dpif_offload_dpdk_class,
#endif
    /* While adding a new offload class to this structure make sure to also
     * update the dpif_offload_provider_priority_list below. */
    &dpif_offload_dummy_class,
    &dpif_offload_dummy_x_class,
};

/* Default provider ordering used when 'hw-offload-priority' is not set. */
#define DEFAULT_PROVIDER_PRIORITY_LIST "tc,dpdk,dummy,dummy_x"

/* Comma-separated provider priority list; fixed after startup. */
static char *dpif_offload_provider_priority_list = NULL;
/* True once 'other_config:hw-offload' has been seen set to true. */
static atomic_bool offload_global_enabled = false;
/* True when 'other_config:offload-rebalance' was enabled at startup. */
static atomic_bool offload_rebalance_policy = false;
/* Per-interface 'hw-offload-priority' values, keyed by interface name. */
static struct smap iface_order_cfg = SMAP_INITIALIZER(&iface_order_cfg);
63 | | |
/* Registers 'class' in 'dpif_offload_classes', invoking its optional
 * init() hook first.  Returns 0 on success, EEXIST if a class with the
 * same type string is already registered, EINVAL if the class lists no
 * supported dpif types, or the error returned by init(). */
static int
dpif_offload_register_provider__(const struct dpif_offload_class *class)
    OVS_REQUIRES(dpif_offload_mutex)
{
    int error;

    if (shash_find(&dpif_offload_classes, class->type)) {
        VLOG_WARN("attempted to register duplicate dpif offload class: %s",
                  class->type);
        return EEXIST;
    }

    if (!class->supported_dpif_types) {
        VLOG_WARN("attempted to register a dpif offload class without any "
                  "supported dpif types: %s", class->type);
        return EINVAL;
    }

    /* Only register the class once its init() callback, if any, succeeds. */
    error = class->init ? class->init() : 0;
    if (error) {
        VLOG_WARN("failed to initialize %s dpif offload class: %s",
                  class->type, ovs_strerror(error));
        return error;
    }

    shash_add(&dpif_offload_classes, class->type, class);
    return 0;
}
92 | | |
93 | | static int |
94 | | dpif_offload_register_provider(const struct dpif_offload_class *class) |
95 | 0 | { |
96 | 0 | int error; |
97 | |
|
98 | 0 | ovs_mutex_lock(&dpif_offload_mutex); |
99 | 0 | error = dpif_offload_register_provider__(class); |
100 | 0 | ovs_mutex_unlock(&dpif_offload_mutex); |
101 | |
|
102 | 0 | return error; |
103 | 0 | } |
104 | | |
105 | | static void |
106 | | dpif_offload_show_classes(struct unixctl_conn *conn, int argc OVS_UNUSED, |
107 | | const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) |
108 | 0 | { |
109 | 0 | const struct shash_node **list; |
110 | 0 | struct ds ds; |
111 | |
|
112 | 0 | ds_init(&ds); |
113 | 0 | ovs_mutex_lock(&dpif_offload_mutex); |
114 | |
|
115 | 0 | list = shash_sort(&dpif_offload_classes); |
116 | 0 | for (size_t i = 0; i < shash_count(&dpif_offload_classes); i++) { |
117 | 0 | const struct dpif_offload_class *class = list[i]->data; |
118 | |
|
119 | 0 | if (i == 0) { |
120 | 0 | ds_put_cstr(&ds, "Offload Class Supported dpif class(es)\n"); |
121 | 0 | ds_put_cstr(&ds, "---------------- ------------------------\n"); |
122 | 0 | } |
123 | |
|
124 | 0 | ds_put_format(&ds, "%-16s ", list[i]->name); |
125 | |
|
126 | 0 | for (size_t j = 0; class->supported_dpif_types[j] != NULL; j++) { |
127 | 0 | ds_put_format(&ds, "%*s%s\n", j == 0 ? 0 : 18, "", |
128 | 0 | class->supported_dpif_types[j]); |
129 | 0 | } |
130 | 0 | } |
131 | |
|
132 | 0 | ovs_mutex_unlock(&dpif_offload_mutex); |
133 | 0 | free(list); |
134 | |
|
135 | 0 | unixctl_command_reply(conn, ds_cstr(&ds)); |
136 | 0 | ds_destroy(&ds); |
137 | 0 | } |
138 | | |
/* One-time initialization of the dpif-offload layer: sets the default
 * provider priority list, registers the "dpif/offload/classes" unixctl
 * command, and registers all compiled-in offload classes.  Safe to call
 * multiple times; subsequent calls are no-ops. */
void
dpif_offload_module_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (!ovsthread_once_start(&once)) {
        return;
    }

    /* Keep any priority list already set (e.g. from configuration). */
    if (!dpif_offload_provider_priority_list) {
        dpif_offload_provider_priority_list =
            xstrdup(DEFAULT_PROVIDER_PRIORITY_LIST);
    }

    unixctl_command_register("dpif/offload/classes", NULL, 0, 0,
                             dpif_offload_show_classes, NULL);

    for (int i = 0; i < ARRAY_SIZE(base_dpif_offload_classes); i++) {
        /* These callbacks are mandatory for every offload class. */
        ovs_assert(base_dpif_offload_classes[i]->open
                   && base_dpif_offload_classes[i]->close
                   && base_dpif_offload_classes[i]->can_offload
                   && base_dpif_offload_classes[i]->port_add
                   && base_dpif_offload_classes[i]->port_del
                   && base_dpif_offload_classes[i]->get_netdev);

        /* The flow-dump callbacks must be implemented all-or-nothing. */
        ovs_assert((base_dpif_offload_classes[i]->flow_dump_create &&
                    base_dpif_offload_classes[i]->flow_dump_next &&
                    base_dpif_offload_classes[i]->flow_dump_destroy &&
                    base_dpif_offload_classes[i]->flow_dump_thread_create &&
                    base_dpif_offload_classes[i]->flow_dump_thread_destroy) ||
                   (!base_dpif_offload_classes[i]->flow_dump_create &&
                    !base_dpif_offload_classes[i]->flow_dump_next &&
                    !base_dpif_offload_classes[i]->flow_dump_destroy &&
                    !base_dpif_offload_classes[i]->flow_dump_thread_create &&
                    !base_dpif_offload_classes[i]->flow_dump_thread_destroy));

        dpif_offload_register_provider(base_dpif_offload_classes[i]);
    }

    ovsthread_once_done(&once);
}
180 | | |
/* Returns the provider collection currently attached to 'dpif' via RCU, or
 * NULL if none.  Takes no reference; the result is only guaranteed valid
 * within the caller's RCU read section. */
static struct dpif_offload_provider_collection*
dpif_get_offload_provider_collection(const struct dpif *dpif)
{
    return ovsrcu_get(struct dpif_offload_provider_collection *,
                      &dpif->offload_provider_collection);
}
187 | | |
/* Like dpif_get_offload_provider_collection(), but additionally takes a
 * reference on the returned collection.  Returns NULL if no collection is
 * attached or if its refcount has already dropped to zero.  The caller must
 * release the reference with dpif_offload_unref_collection(). */
static struct dpif_offload_provider_collection*
dpif_get_offload_provider_collection_with_ref(const struct dpif *dpif)
{
    struct dpif_offload_provider_collection *collection;

    /* Holding 'dpif_offload_mutex' prevents the collection from being torn
     * down (see dpif_offload_unref_collection()) between the ovsrcu_get()
     * and the try-ref below. */
    ovs_mutex_lock(&dpif_offload_mutex);

    collection = ovsrcu_get(struct dpif_offload_provider_collection *,
                            &dpif->offload_provider_collection);

    /* try_ref fails if the refcount already reached zero. */
    if (!collection || !ovs_refcount_try_ref_rcu(&collection->ref_cnt)) {
        collection = NULL;
    }

    ovs_mutex_unlock(&dpif_offload_mutex);

    return collection;
}
206 | | |
/* Attaches the existing 'collection' to 'dpif', taking a new reference on
 * it for the dpif. */
static void
dpif_attach_offload_provider_collection(
    struct dpif *dpif, struct dpif_offload_provider_collection *collection)
    OVS_REQUIRES(dpif_offload_mutex)
{
    /* When called, 'collection' should still have a refcount > 0, which is
     * guaranteed by holding the lock from the shash lookup up to this point.
     * If, for any reason, the refcount is not > 0, ovs_refcount_ref() will
     * assert. */
    ovs_refcount_ref(&collection->ref_cnt);
    ovsrcu_set(&dpif->offload_provider_collection, collection);
}
219 | | |
220 | | static int |
221 | | provider_collection_add(struct dpif_offload_provider_collection *collection, |
222 | | struct dpif_offload *offload) |
223 | 0 | { |
224 | 0 | struct ovs_list *providers_list = &collection->list; |
225 | 0 | struct dpif_offload *offload_entry; |
226 | |
|
227 | 0 | ovs_assert(collection); |
228 | |
|
229 | 0 | LIST_FOR_EACH (offload_entry, dpif_list_node, providers_list) { |
230 | 0 | if (offload_entry == offload || !strcmp(offload->name, |
231 | 0 | offload_entry->name)) { |
232 | 0 | return EEXIST; |
233 | 0 | } |
234 | 0 | } |
235 | | |
236 | 0 | ovs_list_push_back(providers_list, &offload->dpif_list_node); |
237 | 0 | return 0; |
238 | 0 | } |
239 | | |
/* Moves 'offload' from whatever list it currently sits on into
 * 'collection'.  If the add fails (duplicate provider or name), the
 * provider is closed instead, releasing it. */
static void
move_provider_to_collection(
    struct dpif_offload_provider_collection *collection,
    struct dpif_offload *offload)
    OVS_REQUIRES(dpif_offload_mutex)
{
    int error;

    ovs_list_remove(&offload->dpif_list_node);
    error = provider_collection_add(collection, offload);
    if (error) {
        VLOG_WARN("failed to add dpif offload provider %s to %s: %s",
                  dpif_offload_type(offload), collection->dpif_name,
                  ovs_strerror(error));

        offload->class->close(offload);
    }
}
258 | | |
/* Creates a new provider collection for 'dpif': opens every registered
 * offload class that supports this dpif type, orders the opened providers
 * according to 'dpif_offload_provider_priority_list' (providers not on the
 * list are appended afterwards in iteration order), and attaches the
 * collection to the dpif.  Always returns 0. */
static int
dpif_attach_new_offload_provider_collection(struct dpif *dpif)
    OVS_REQUIRES(dpif_offload_mutex)
{
    struct ovs_list provider_list = OVS_LIST_INITIALIZER(&provider_list);
    const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
    struct dpif_offload_provider_collection *collection;
    struct dpif_offload *offload;
    struct shash_node *node;
    char *tokens, *saveptr;

    /* Allocate and attach collection to dpif. */
    collection = xmalloc(sizeof *collection);
    collection->dpif_name = xstrdup(dpif_name(dpif));
    ovs_mutex_init_recursive(&collection->mutex);
    ovs_refcount_init(&collection->ref_cnt);
    ovs_list_init(&collection->list);
    shash_add(&dpif_offload_providers, collection->dpif_name, collection);

    /* Open all the providers supporting this dpif type.  Successfully
     * opened providers are parked on the temporary 'provider_list'. */
    SHASH_FOR_EACH (node, &dpif_offload_classes) {
        const struct dpif_offload_class *class = node->data;

        for (size_t i = 0; class->supported_dpif_types[i] != NULL; i++) {
            if (!strcmp(class->supported_dpif_types[i], dpif_type_str)) {
                int error = class->open(class, dpif, &offload);

                if (error) {
                    VLOG_WARN("failed to initialize dpif offload provider "
                              "%s for %s: %s",
                              class->type, dpif_name(dpif),
                              ovs_strerror(error));
                } else {
                    ovs_list_push_back(&provider_list,
                                       &offload->dpif_list_node);
                }
                break;
            }
        }
    }

    /* Attach all the providers based on the priority list. */
    tokens = xstrdup(dpif_offload_provider_priority_list);

    for (char *name = strtok_r(tokens, ",", &saveptr);
         name;
         name = strtok_r(NULL, ",", &saveptr)) {

        /* SAFE variant: move_provider_to_collection() unlinks the entry. */
        LIST_FOR_EACH_SAFE (offload, dpif_list_node, &provider_list) {
            if (strcmp(name, offload->class->type)) {
                continue;
            }

            move_provider_to_collection(collection, offload);
            break;
        }
    }
    free(tokens);

    /* Add remaining entries in order. */
    LIST_FOR_EACH_SAFE (offload, dpif_list_node, &provider_list) {
        move_provider_to_collection(collection, offload);
    }

    /* Attach the new collection to the dpif. */
    ovsrcu_set(&dpif->offload_provider_collection, collection);

    return 0;
}
328 | | |
/* Attaches a set of offload providers to 'dpif'.  Returns 0 if a new
 * provider collection was created and attached, or EEXIST if an existing
 * collection (matched by dpif name) was attached instead.
 * NOTE(review): no error path exists today; every return is 0 or EEXIST,
 * despite the historical claim of a negative error code. */
int
dpif_attach_offload_providers(struct dpif *dpif)
{
    struct dpif_offload_provider_collection *collection;
    int rc = EEXIST;

    ovs_mutex_lock(&dpif_offload_mutex);

    collection = shash_find_data(&dpif_offload_providers, dpif_name(dpif));
    if (collection) {
        dpif_attach_offload_provider_collection(dpif, collection);
    } else {
        rc = dpif_attach_new_offload_provider_collection(dpif);
    }

    ovs_mutex_unlock(&dpif_offload_mutex);
    return rc;
}
350 | | |
/* RCU-deferred destructor: closes every provider in 'collection' and frees
 * the collection itself.  Scheduled via ovsrcu_postpone(), so it runs only
 * after a grace period when no reader can still hold the pointer. */
static void
provider_collection_free_rcu(
    struct dpif_offload_provider_collection *collection)
{
    struct dpif_offload *offload_entry;

    /* We need to use the safe variant here as we removed the entry, and the
     * close API will free() it. */
    LIST_FOR_EACH_SAFE (offload_entry, dpif_list_node, &collection->list) {
        ovs_list_remove(&offload_entry->dpif_list_node);
        offload_entry->class->close(offload_entry);
    }

    /* Free remaining resources. */
    ovs_mutex_destroy(&collection->mutex);
    free(collection->dpif_name);
    free(collection);
}
369 | | |
/* Drops one reference on 'collection' (NULL is tolerated).  When the last
 * reference is released, the collection is removed from the global shash
 * and freed after an RCU grace period. */
static void
dpif_offload_unref_collection(
    struct dpif_offload_provider_collection *collection)
{
    if (!collection) {
        return;
    }

    /* Take dpif_offload_mutex so that, if collection->ref_cnt falls to
     * zero, we can't get a new reference to 'collection' through the
     * 'dpif_offload_providers' shash. */
    ovs_mutex_lock(&dpif_offload_mutex);
    if (ovs_refcount_unref_relaxed(&collection->ref_cnt) == 1) {
        shash_find_and_delete(&dpif_offload_providers,
                              collection->dpif_name);
        ovsrcu_postpone(provider_collection_free_rcu, collection);
    }
    ovs_mutex_unlock(&dpif_offload_mutex);
}
389 | | |
/* Detaches the provider collection from 'dpif': drops the dpif's reference
 * and clears the RCU pointer.  No-op if nothing is attached. */
void
dpif_detach_offload_providers(struct dpif *dpif)
{
    struct dpif_offload_provider_collection *collection;

    collection = dpif_get_offload_provider_collection(dpif);
    if (collection) {
        dpif_offload_unref_collection(collection);
        ovsrcu_set(&dpif->offload_provider_collection, NULL);
    }
}
401 | | |
402 | | void |
403 | | dpif_offload_set_config(struct dpif *dpif, const struct smap *other_cfg) |
404 | 0 | { |
405 | 0 | struct dpif_offload_provider_collection *collection; |
406 | 0 | struct dpif_offload *offload; |
407 | |
|
408 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
409 | |
|
410 | 0 | if (!collection) { |
411 | 0 | return; |
412 | 0 | } |
413 | | |
414 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
415 | 0 | if (offload->class->set_config) { |
416 | 0 | offload->class->set_config(offload, other_cfg); |
417 | 0 | } |
418 | 0 | } |
419 | 0 | } |
420 | | |
421 | | |
/* Initializes the provider-independent part of 'offload': records its
 * class, builds a "type[dpif-name]" display name, and creates the port
 * manager.  Called by providers from their open() implementation. */
void
dpif_offload_init(struct dpif_offload *offload,
                  const struct dpif_offload_class *class,
                  struct dpif *dpif)
{
    ovs_assert(offload && class && dpif);

    offload->class = class;
    offload->name = xasprintf("%s[%s]", class->type, dpif_name(dpif));
    offload->ports = dpif_offload_port_mgr_init();
}
433 | | |
/* Releases the resources allocated by dpif_offload_init() (port manager
 * and display name).  Does not free 'offload' itself. */
void
dpif_offload_destroy(struct dpif_offload *offload)
{
    ovs_assert(offload);

    dpif_offload_port_mgr_destroy(offload);
    free(offload->name);
}
442 | | |
/* Returns the display name of 'offload', of the form "type[dpif-name]". */
const char *
dpif_offload_name(const struct dpif_offload *offload)
{
    return offload->name;
}
448 | | |
/* Returns the class type string of 'offload', e.g. "tc" or "dummy". */
const char *
dpif_offload_type(const struct dpif_offload *offload)
{
    return offload->class->type;
}
454 | | |
455 | | bool |
456 | | dpif_offload_get_debug(const struct dpif_offload *offload, struct ds *ds, |
457 | | struct json *json) |
458 | 0 | { |
459 | 0 | if (!offload->class->get_debug) { |
460 | 0 | return false; |
461 | 0 | } |
462 | | |
463 | 0 | offload->class->get_debug(offload, ds, json); |
464 | 0 | return true; |
465 | 0 | } |
466 | | |
467 | | int |
468 | | dpif_offload_stats_get(struct dpif *dpif, struct netdev_custom_stats **stats_, |
469 | | size_t *n_stats) |
470 | 0 | { |
471 | 0 | struct dpif_offload_provider_collection *collection; |
472 | 0 | struct netdev_custom_stats *stats; |
473 | 0 | struct dpif_offload *offload; |
474 | 0 | size_t n_providers; |
475 | 0 | int error = 0; |
476 | |
|
477 | 0 | *n_stats = 0; |
478 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
479 | 0 | if (!collection || !dpif_offload_enabled()) { |
480 | 0 | *stats_ = NULL; |
481 | 0 | return 0; |
482 | 0 | } |
483 | | |
484 | 0 | n_providers = ovs_list_size(&collection->list); |
485 | 0 | stats = xcalloc(n_providers, sizeof(struct netdev_custom_stats)); |
486 | |
|
487 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
488 | 0 | if (!offload->class->get_global_stats) { |
489 | 0 | continue; |
490 | 0 | } |
491 | | |
492 | 0 | error = offload->class->get_global_stats(offload, &stats[(*n_stats)]); |
493 | 0 | if (error) { |
494 | 0 | for (int i = 0; i < *n_stats; i++) { |
495 | 0 | netdev_free_custom_stats_counters(&stats[i]); |
496 | 0 | } |
497 | 0 | *n_stats = 0; |
498 | 0 | free(stats); |
499 | 0 | stats = NULL; |
500 | 0 | break; |
501 | 0 | } |
502 | | |
503 | 0 | (*n_stats)++; |
504 | 0 | } |
505 | |
|
506 | 0 | *stats_ = stats; |
507 | 0 | return error; |
508 | 0 | } |
509 | | |
/* Returns true if flow hardware offload was globally enabled via
 * 'other_config:hw-offload' (see dpif_offload_set_global_cfg()). */
bool
dpif_offload_enabled(void)
{
    bool enabled;

    atomic_read_relaxed(&offload_global_enabled, &enabled);
    return enabled;
}
518 | | |
/* Returns true if the 'offload-rebalance' policy was enabled at the time
 * offloading was first turned on. */
bool
dpif_offload_rebalance_policy_enabled(void)
{
    bool enabled;

    atomic_read_relaxed(&offload_rebalance_policy, &enabled);
    return enabled;
}
527 | | |
/* Records in 'netdev' (via RCU) which offload provider currently owns it;
 * pass NULL 'offload' to clear the association. */
void
dpif_offload_set_netdev_offload(struct netdev *netdev,
                                struct dpif_offload *offload)
{
    ovsrcu_set(&netdev->dpif_offload, offload);
}
534 | | |
535 | | static bool |
536 | | dpif_offload_try_port_add(struct dpif_offload *offload, struct netdev *netdev, |
537 | | odp_port_t port_no) |
538 | 0 | { |
539 | 0 | if (offload->class->can_offload(offload, netdev)) { |
540 | 0 | int err = offload->class->port_add(offload, netdev, port_no); |
541 | |
|
542 | 0 | if (!err) { |
543 | 0 | VLOG_DBG("netdev %s added to dpif-offload provider %s", |
544 | 0 | netdev_get_name(netdev), dpif_offload_name(offload)); |
545 | 0 | return true; |
546 | 0 | } else { |
547 | 0 | VLOG_ERR("Failed adding netdev %s to dpif-offload provider " |
548 | 0 | "%s, error %s", |
549 | 0 | netdev_get_name(netdev), dpif_offload_name(offload), |
550 | 0 | ovs_strerror(err)); |
551 | 0 | } |
552 | 0 | } else { |
553 | 0 | VLOG_DBG("netdev %s failed can_offload for dpif-offload provider %s", |
554 | 0 | netdev_get_name(netdev), dpif_offload_name(offload)); |
555 | 0 | } |
556 | 0 | return false; |
557 | 0 | } |
558 | | |
/* Offers the new port 'netdev'/'port_no' to the attached providers.  If the
 * interface has a per-port 'hw-offload-priority' entry (cached in
 * 'iface_order_cfg'), only the providers named there are tried, in that
 * order; the special name "none" stops the search.  Otherwise providers are
 * tried in collection order.  The first provider that accepts the port
 * wins. */
void
dpif_offload_port_add(struct dpif *dpif, struct netdev *netdev,
                      odp_port_t port_no)
{
    struct dpif_offload_provider_collection *collection;
    const char *port_priority = smap_get(&iface_order_cfg,
                                         netdev_get_name(netdev));
    struct dpif_offload *offload;

    collection = dpif_get_offload_provider_collection(dpif);
    if (!collection) {
        return;
    }

    if (port_priority) {
        char *tokens = xstrdup(port_priority);
        char *saveptr;

        VLOG_DBG("for netdev %s using port priority %s",
                 netdev_get_name(netdev), port_priority);

        for (char *name = strtok_r(tokens, ",", &saveptr);
             name;
             name = strtok_r(NULL, ",", &saveptr)) {
            bool provider_added = false;

            /* "none" disables offloading for this port entirely. */
            if (!strcmp("none", name)) {
                break;
            }

            LIST_FOR_EACH (offload, dpif_list_node, &collection->list) {
                if (!strcmp(name, offload->class->type)) {
                    provider_added = dpif_offload_try_port_add(offload, netdev,
                                                               port_no);
                    break;
                }
            }

            if (provider_added) {
                break;
            }
        }
        free(tokens);
    } else {
        /* No per-port priority: first provider to accept the port wins. */
        LIST_FOR_EACH (offload, dpif_list_node, &collection->list) {
            if (dpif_offload_try_port_add(offload, netdev, port_no)) {
                break;
            }
        }
    }
}
610 | | |
611 | | void |
612 | | dpif_offload_port_del(struct dpif *dpif, odp_port_t port_no) |
613 | 0 | { |
614 | 0 | struct dpif_offload_provider_collection *collection; |
615 | 0 | struct dpif_offload *offload; |
616 | |
|
617 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
618 | 0 | if (!collection) { |
619 | 0 | return; |
620 | 0 | } |
621 | | |
622 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
623 | 0 | int err = offload->class->port_del(offload, port_no); |
624 | |
|
625 | 0 | if (err) { |
626 | 0 | VLOG_ERR("Failed deleting port_no %d from dpif-offload provider " |
627 | 0 | "%s, error %s", port_no, dpif_offload_name(offload), |
628 | 0 | ovs_strerror(err)); |
629 | 0 | } |
630 | 0 | } |
631 | 0 | } |
632 | | |
633 | | void |
634 | | dpif_offload_port_set_config(struct dpif *dpif, odp_port_t port_no, |
635 | | const struct smap *cfg) |
636 | 0 | { |
637 | 0 | struct dpif_offload_provider_collection *collection; |
638 | 0 | struct dpif_offload *offload; |
639 | |
|
640 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
641 | 0 | if (!collection) { |
642 | 0 | return; |
643 | 0 | } |
644 | | |
645 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
646 | 0 | if (offload->class->port_set_config) { |
647 | 0 | offload->class->port_set_config(offload, port_no, cfg); |
648 | 0 | } |
649 | 0 | } |
650 | 0 | } |
651 | | |
652 | | struct dpif_offload * |
653 | | dpif_offload_port_offloaded_by(const struct dpif *dpif, odp_port_t port_no) |
654 | 0 | { |
655 | 0 | struct dpif_offload_provider_collection *collection; |
656 | 0 | struct dpif_offload *offload, *offload_return = NULL; |
657 | |
|
658 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
659 | 0 | if (!collection || !dpif_offload_enabled()) { |
660 | 0 | return NULL; |
661 | 0 | } |
662 | | |
663 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
664 | 0 | if (offload->class->get_netdev(offload, port_no)) { |
665 | 0 | offload_return = offload; |
666 | 0 | break; |
667 | 0 | } |
668 | 0 | } |
669 | |
|
670 | 0 | return offload_return; |
671 | 0 | } |
672 | | |
/* Iterator state shared by dpif_offload_dump_start()/_next()/_done(). */
struct dpif_offload_dump_state {
    /* Referenced collection being iterated; NULL if none was attached. */
    const struct dpif_offload_provider_collection *collection;
    /* Last provider returned by _next(); NULL before the first call. */
    struct dpif_offload *entry;
    /* 0 while iterating; EOF after the list is exhausted; EIDRM if no
     * collection could be referenced at start. */
    int error;
};
678 | | |
/* Begins iterating over the providers attached to 'dpif'.  Stores iterator
 * state in '*statep' for dpif_offload_dump_next(); the caller must finish
 * with dpif_offload_dump_done().  Takes a reference on the collection so it
 * outlives the dump. */
void
dpif_offload_dump_start(const struct dpif *dpif, void **statep)
{
    struct dpif_offload_dump_state *state;

    state = xzalloc(sizeof *state);
    state->collection = dpif_get_offload_provider_collection_with_ref(dpif);
    if (!state->collection) {
        /* Nothing attached (or it is being torn down); surface via _done(). */
        state->error = EIDRM;
    }

    *statep = state;
}
692 | | |
/* Advances the dump: stores the next provider in '*offload' and returns
 * true, or returns false when the iteration is exhausted, a previous error
 * is pending, or the arguments are invalid. */
bool
dpif_offload_dump_next(void *state_, struct dpif_offload **offload)
{
    struct dpif_offload_dump_state *state = state_;

    if (!offload || !state || state->error || !state->collection) {
        return false;
    }

    if (state->entry) {
        /* Resume from the provider returned last time. */
        struct dpif_offload *entry = state->entry;

        LIST_FOR_EACH_CONTINUE (entry, dpif_list_node,
                                &state->collection->list) {
            state->entry = entry;
            *offload = entry;
            return true;
        }

        /* Walked past the end of the list. */
        state->error = EOF;
    } else {
        /* Get the first entry in the list. */
        struct dpif_offload *entry;

        LIST_FOR_EACH (entry, dpif_list_node, &state->collection->list) {
            break;
        }

        if (entry) {
            state->entry = entry;
            *offload = entry;
        } else {
            /* Empty provider list. */
            state->error = EOF;
        }
    }

    return !state->error;
}
731 | | |
/* Finishes a dump started by dpif_offload_dump_start(): releases the
 * collection reference and frees the state.  Returns 0 for a dump that ran
 * to completion (EOF is not an error), otherwise the pending error such as
 * EIDRM. */
int
dpif_offload_dump_done(void *state_)
{
    struct dpif_offload_dump_state *state = state_;
    int error;

    dpif_offload_unref_collection(
        CONST_CAST(struct dpif_offload_provider_collection *,
                   state->collection));
    error = state->error == EOF ? 0 : state->error;
    free(state);

    return error;
}
746 | | |
747 | | void |
748 | | dpif_offload_set_global_cfg(const struct ovsrec_open_vswitch *cfg) |
749 | 0 | { |
750 | 0 | static struct ovsthread_once init_once = OVSTHREAD_ONCE_INITIALIZER; |
751 | 0 | const struct smap *other_cfg = &cfg->other_config; |
752 | 0 | const char *priority; |
753 | | |
754 | | /* The 'hw-offload-priority' parameter can only be set at startup, |
755 | | * any successive change needs a restart. */ |
756 | 0 | priority = smap_get(other_cfg, "hw-offload-priority"); |
757 | |
|
758 | 0 | if (ovsthread_once_start(&init_once)) { |
759 | | /* Initialize the dpif-offload layer in case it's not yet initialized |
760 | | * at the first invocation of setting the configuration. */ |
761 | 0 | dpif_offload_module_init(); |
762 | | |
763 | | /* If priority is not set keep the default value. */ |
764 | 0 | if (priority) { |
765 | 0 | char *tokens = xstrdup(priority); |
766 | 0 | char *saveptr; |
767 | |
|
768 | 0 | free(dpif_offload_provider_priority_list); |
769 | 0 | dpif_offload_provider_priority_list = xstrdup(priority); |
770 | | |
771 | | /* Log a warning for unknown offload providers. */ |
772 | 0 | for (char *name = strtok_r(tokens, ",", &saveptr); |
773 | 0 | name; |
774 | 0 | name = strtok_r(NULL, ",", &saveptr)) { |
775 | |
|
776 | 0 | if (!shash_find(&dpif_offload_classes, name)) { |
777 | 0 | VLOG_WARN("hw-offload-priority configuration has an " |
778 | 0 | "unknown type; %s", name); |
779 | 0 | } |
780 | 0 | } |
781 | 0 | free(tokens); |
782 | 0 | } |
783 | 0 | ovsthread_once_done(&init_once); |
784 | 0 | } else { |
785 | 0 | if (priority && strcmp(priority, |
786 | 0 | dpif_offload_provider_priority_list)) { |
787 | 0 | VLOG_INFO_ONCE("hw-offload-priority configuration changed; " |
788 | 0 | "restart required"); |
789 | 0 | } |
790 | 0 | } |
791 | | |
792 | | /* Handle other global configuration settings. */ |
793 | 0 | if (smap_get_bool(other_cfg, "hw-offload", false)) { |
794 | 0 | static struct ovsthread_once once_enable = OVSTHREAD_ONCE_INITIALIZER; |
795 | |
|
796 | 0 | if (ovsthread_once_start(&once_enable)) { |
797 | 0 | atomic_store_relaxed(&offload_global_enabled, true); |
798 | 0 | VLOG_INFO("Flow HW offload is enabled"); |
799 | |
|
800 | 0 | if (smap_get_bool(other_cfg, "offload-rebalance", false)) { |
801 | 0 | atomic_store_relaxed(&offload_rebalance_policy, true); |
802 | 0 | } |
803 | |
|
804 | 0 | ovsthread_once_done(&once_enable); |
805 | 0 | } |
806 | 0 | } |
807 | | |
808 | | /* Filter out the 'hw-offload-priority' per interface setting. We need it |
809 | | * before ports are added, so we can assign the correct offload-provider. |
810 | | * Note that we can safely rebuild the map here, as we only access this |
811 | | * from the same (main) thread. */ |
812 | 0 | smap_clear(&iface_order_cfg); |
813 | 0 | for (int i = 0; i < cfg->n_bridges; i++) { |
814 | 0 | const struct ovsrec_bridge *br_cfg = cfg->bridges[i]; |
815 | |
|
816 | 0 | for (int j = 0; j < br_cfg->n_ports; j++) { |
817 | 0 | const struct ovsrec_port *port_cfg = br_cfg->ports[j]; |
818 | |
|
819 | 0 | for (int k = 0; k < port_cfg->n_interfaces; k++) { |
820 | 0 | const struct ovsrec_interface *iface_cfg; |
821 | |
|
822 | 0 | iface_cfg = port_cfg->interfaces[k]; |
823 | 0 | priority = smap_get(&iface_cfg->other_config, |
824 | 0 | "hw-offload-priority"); |
825 | 0 | if (priority) { |
826 | 0 | smap_add(&iface_order_cfg, iface_cfg->name, priority); |
827 | 0 | } |
828 | 0 | } |
829 | 0 | } |
830 | 0 | } |
831 | 0 | } |
832 | | |
/* Asks every attached provider implementing the optional flow_flush()
 * callback to flush its offloaded flows; failures are logged but not
 * propagated. */
void
dpif_offload_flow_flush(struct dpif *dpif)
{
    struct dpif_offload_provider_collection *collection;
    struct dpif_offload *offload;

    collection = dpif_get_offload_provider_collection(dpif);
    if (!collection) {
        return;
    }

    LIST_FOR_EACH (offload, dpif_list_node, &collection->list) {
        if (offload->class->flow_flush) {
            int err = offload->class->flow_flush(offload);

            if (err) {
                VLOG_ERR(
                    "Failed flow flush on dpif-offload provider %s, error %s",
                    dpif_offload_name(offload), ovs_strerror(err));
            }
        }
    }
}
856 | | |
/* Returns the implementation type declared by the class of 'offload'. */
enum dpif_offload_impl_type
dpif_offload_get_impl_type(const struct dpif_offload *offload)
{
    return offload->class->impl_type;
}
862 | | |
/* Looks up the registered offload class whose type string equals 'type' and
 * returns its implementation type, or DPIF_OFFLOAD_IMPL_NONE if no such
 * class is registered. */
enum dpif_offload_impl_type
dpif_offload_get_impl_type_by_class(const char *type)
{
    enum dpif_offload_impl_type impl_type = DPIF_OFFLOAD_IMPL_NONE;
    struct shash_node *node;

    ovs_mutex_lock(&dpif_offload_mutex);
    SHASH_FOR_EACH (node, &dpif_offload_classes) {
        const struct dpif_offload_class *class = node->data;

        if (!strcmp(type, class->type)) {
            impl_type = class->impl_type;
            break;
        }
    }
    ovs_mutex_unlock(&dpif_offload_mutex);

    return impl_type;
}
882 | | |
/* Returns the total number of offloaded flows summed over all attached
 * providers implementing the optional flow_count() callback, or 0 when
 * offloading is disabled or no collection is attached. */
uint64_t
dpif_offload_flow_count(const struct dpif *dpif)
{
    struct dpif_offload_provider_collection *collection;
    const struct dpif_offload *offload;
    uint64_t flow_count = 0;

    collection = dpif_get_offload_provider_collection(dpif);
    if (!collection || !dpif_offload_enabled()) {
        return 0;
    }

    LIST_FOR_EACH (offload, dpif_list_node, &collection->list) {
        if (offload->class->flow_count) {
            flow_count += offload->class->flow_count(offload);
        }
    }

    return flow_count;
}
903 | | |
904 | | uint64_t |
905 | | dpif_offload_flow_count_by_impl(const struct dpif *dpif, |
906 | | enum dpif_offload_impl_type type) |
907 | 0 | { |
908 | 0 | struct dpif_offload_provider_collection *collection; |
909 | 0 | const struct dpif_offload *offload; |
910 | 0 | uint64_t flow_count = 0; |
911 | |
|
912 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
913 | 0 | if (!collection || !dpif_offload_enabled()) { |
914 | 0 | return 0; |
915 | 0 | } |
916 | | |
917 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
918 | 0 | if (offload->class->flow_count |
919 | 0 | && type == dpif_offload_get_impl_type(offload)) { |
920 | 0 | flow_count += offload->class->flow_count(offload); |
921 | 0 | } |
922 | 0 | } |
923 | |
|
924 | 0 | return flow_count; |
925 | 0 | } |
926 | | |
927 | | void |
928 | | dpif_offload_meter_set(const struct dpif *dpif, ofproto_meter_id meter_id, |
929 | | struct ofputil_meter_config *config) |
930 | 0 | { |
931 | 0 | struct dpif_offload_provider_collection *collection; |
932 | 0 | struct dpif_offload *offload; |
933 | |
|
934 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
935 | 0 | if (!collection || !dpif_offload_enabled()) { |
936 | 0 | return; |
937 | 0 | } |
938 | | |
939 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
940 | 0 | if (offload->class->meter_set) { |
941 | 0 | int err = offload->class->meter_set(offload, meter_id, config); |
942 | |
|
943 | 0 | if (err) { |
944 | | /* Offload APIs could fail, for example, because the offload |
945 | | * is not supported. This is fine, as the offload API should |
946 | | * take care of this. */ |
947 | 0 | VLOG_DBG_RL(&rl_dbg, |
948 | 0 | "Failed setting meter %u on dpif-offload provider" |
949 | 0 | " %s, error %s", meter_id.uint32, |
950 | 0 | dpif_offload_name(offload), ovs_strerror(err)); |
951 | 0 | } |
952 | 0 | } |
953 | 0 | } |
954 | 0 | } |
955 | | |
956 | | void |
957 | | dpif_offload_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id, |
958 | | struct ofputil_meter_stats *stats) |
959 | 0 | { |
960 | 0 | struct dpif_offload_provider_collection *collection; |
961 | 0 | struct dpif_offload *offload; |
962 | |
|
963 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
964 | 0 | if (!collection || !dpif_offload_enabled()) { |
965 | 0 | return; |
966 | 0 | } |
967 | | |
968 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
969 | 0 | if (offload->class->meter_get) { |
970 | 0 | int err = offload->class->meter_get(offload, meter_id, stats); |
971 | 0 | if (err) { |
972 | 0 | VLOG_DBG_RL(&rl_dbg, |
973 | 0 | "Failed getting meter %u on dpif-offload provider" |
974 | 0 | " %s, error %s", meter_id.uint32, |
975 | 0 | dpif_offload_name(offload), ovs_strerror(err)); |
976 | 0 | } |
977 | 0 | } |
978 | 0 | } |
979 | 0 | } |
980 | | |
981 | | void |
982 | | dpif_offload_meter_del(const struct dpif *dpif, ofproto_meter_id meter_id, |
983 | | struct ofputil_meter_stats *stats) |
984 | 0 | { |
985 | 0 | struct dpif_offload_provider_collection *collection; |
986 | 0 | struct dpif_offload *offload; |
987 | |
|
988 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
989 | 0 | if (!collection || !dpif_offload_enabled()) { |
990 | 0 | return; |
991 | 0 | } |
992 | | |
993 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
994 | 0 | if (offload->class->meter_del) { |
995 | 0 | int err = offload->class->meter_del(offload, meter_id, stats); |
996 | 0 | if (err) { |
997 | 0 | VLOG_DBG_RL(&rl_dbg, |
998 | 0 | "Failed deleting meter %u on dpif-offload provider" |
999 | 0 | " %s, error %s", meter_id.uint32, |
1000 | 0 | dpif_offload_name(offload), ovs_strerror(err)); |
1001 | 0 | } |
1002 | 0 | } |
1003 | 0 | } |
1004 | 0 | } |
1005 | | |
1006 | | /* |
1007 | | * Further initializes a 'struct dpif_flow_dump' that was already initialized |
1008 | | * by dpif_flow_dump_create(), preparing it for use by |
1009 | | * dpif_offload_flow_dump_next(). |
1010 | | * |
1011 | | * For more details, see the documentation of dpif_flow_dump_create(). */ |
1012 | | void |
1013 | | dpif_offload_flow_dump_create(struct dpif_flow_dump *dump, |
1014 | | const struct dpif *dpif, bool terse) |
1015 | 0 | { |
1016 | 0 | struct dpif_offload_provider_collection *collection; |
1017 | 0 | const struct dpif_offload *offload; |
1018 | 0 | size_t n_providers = 0; |
1019 | 0 | int i = 0; |
1020 | |
|
1021 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
1022 | 0 | if (!dump || !dpif_offload_enabled() || !collection) { |
1023 | 0 | return; |
1024 | 0 | } |
1025 | | |
1026 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
1027 | 0 | if (offload->class->flow_dump_create) { |
1028 | 0 | n_providers++; |
1029 | 0 | } |
1030 | 0 | } |
1031 | |
|
1032 | 0 | if (!n_providers) { |
1033 | 0 | return; |
1034 | 0 | } |
1035 | | |
1036 | 0 | dump->offload_dumps = xmalloc(n_providers * sizeof( |
1037 | 0 | struct dpif_offload_flow_dump *)); |
1038 | |
|
1039 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
1040 | 0 | if (offload->class->flow_dump_create) { |
1041 | 0 | dump->offload_dumps[i++] = |
1042 | 0 | offload->class->flow_dump_create(offload, terse); |
1043 | 0 | } |
1044 | 0 | } |
1045 | 0 | dump->n_offload_dumps = i; |
1046 | 0 | dump->offload_dump_index = 0; |
1047 | 0 | } |
1048 | | |
1049 | | /* Destroys the 'dump' data associated with the offload flow dump. |
1050 | | * This function is called as part of the general dump cleanup |
1051 | | * by dpif_flow_dump_destroy(). |
1052 | | * |
1053 | | * Returns 0 if all individual dpif-offload dump operations complete |
1054 | | * without error. If one or more providers return an error, the error |
1055 | | * code from the first failing provider is returned as a positive errno |
1056 | | * value. */ |
1057 | | int |
1058 | | dpif_offload_flow_dump_destroy(struct dpif_flow_dump *dump) |
1059 | 0 | { |
1060 | 0 | int error = 0; |
1061 | |
|
1062 | 0 | for (int i = 0; i < dump->n_offload_dumps; i++) { |
1063 | 0 | struct dpif_offload_flow_dump *offload_dump = dump->offload_dumps[i]; |
1064 | 0 | const struct dpif_offload *offload = offload_dump->offload; |
1065 | 0 | int rc = offload->class->flow_dump_destroy(offload_dump); |
1066 | |
|
1067 | 0 | if (rc && rc != EOF) { |
1068 | 0 | VLOG_ERR("Failed flow dumping on dpif-offload provider " |
1069 | 0 | "%s, error %s", dpif_offload_name(offload), |
1070 | 0 | ovs_strerror(rc)); |
1071 | 0 | if (!error) { |
1072 | 0 | error = rc; |
1073 | 0 | } |
1074 | 0 | } |
1075 | 0 | } |
1076 | 0 | ovs_mutex_destroy(&dump->offload_dump_mutex); |
1077 | 0 | free(dump->offload_dumps); |
1078 | 0 | return error; |
1079 | 0 | } |
1080 | | |
/* Advances 'thread' to the next provider dump once the provider it is
 * currently dumping has been exhausted.  Several dump threads share one
 * 'struct dpif_flow_dump'; the shared 'offload_dump_index' (protected by
 * 'offload_dump_mutex') tracks the furthest provider any thread has
 * finished, and threads that fall behind catch up to it rather than
 * re-advancing it.  Sets 'thread->offload_dump_done' once all provider
 * dumps have been consumed. */
static void
dpif_offload_advance_provider_dump(struct dpif_flow_dump_thread *thread)
{
    struct dpif_flow_dump *dump = thread->dump;

    ovs_mutex_lock(&dump->offload_dump_mutex);

    /* If we haven't finished (dumped all providers). */
    if (dump->offload_dump_index < dump->n_offload_dumps) {
        /* If we are the first to find that current dump is finished
         * advance it. */
        if (thread->offload_dump_index == dump->offload_dump_index) {
            thread->offload_dump_index = ++dump->offload_dump_index;
            /* Did we just finish the last dump? If so we are done. */
            if (dump->offload_dump_index == dump->n_offload_dumps) {
                thread->offload_dump_done = true;
            }
        } else {
            /* otherwise, we are behind, catch up */
            thread->offload_dump_index = dump->offload_dump_index;
        }
    } else {
        /* Some other thread finished. */
        thread->offload_dump_done = true;
    }

    ovs_mutex_unlock(&dump->offload_dump_mutex);
}
1109 | | |
/* This function behaves exactly the same as dpif_flow_dump_next(),
 * so see its documentation for details.
 *
 * Pulls up to 'max_flows' flows from the current provider's dump into
 * 'flows'.  When a provider's dump is exhausted (returns <= 0), the thread
 * advances to the next provider via dpif_offload_advance_provider_dump()
 * and keeps going until some flows are produced or every provider has been
 * dumped.  Returns the number of flows stored in 'flows' (possibly 0). */
int
dpif_offload_flow_dump_next(struct dpif_flow_dump_thread *thread,
                            struct dpif_flow *flows, int max_flows)
{
    int n_flows = 0;

    ovs_assert(max_flows > 0);

    /* The logic here processes all registered offload providers and
     * dumps all related flows. If done (i.e., it returns 0), continue
     * with the next offload provider. */
    while (!thread->offload_dump_done) {
        struct dpif_offload_flow_dump_thread *offload_thread;

        ovs_assert(thread->offload_dump_index < thread->n_offload_threads);
        offload_thread = thread->offload_threads[thread->offload_dump_index];
        n_flows = offload_thread->dump->offload->class->flow_dump_next(
            offload_thread, flows, max_flows);

        if (n_flows > 0) {
            /* If we got some flows, we need to return due to the constraint
             * on returned flows, as explained in dpif_flow_dump_next(). */
            break;
        }
        dpif_offload_advance_provider_dump(thread);
    }
    /* Clamp to zero: a provider may have returned a negative value
     * (presumably an error/EOF indication; callers expect a count). */
    return MAX(n_flows, 0);
}
1140 | | |
1141 | | /* Further initializes a 'struct dpif_flow_dump_thread' that was already |
1142 | | * initialized by dpif_flow_dump_thread_create(), preparing it for use by |
1143 | | * dpif_offload_flow_dump_next(). */ |
1144 | | void |
1145 | | dpif_offload_flow_dump_thread_create(struct dpif_flow_dump_thread *thread, |
1146 | | struct dpif_flow_dump *dump) |
1147 | 0 | { |
1148 | 0 | if (!dpif_offload_enabled() || !dump || !dump->n_offload_dumps) { |
1149 | 0 | return; |
1150 | 0 | } |
1151 | | |
1152 | 0 | thread->n_offload_threads = dump->n_offload_dumps; |
1153 | 0 | thread->offload_dump_done = false; |
1154 | 0 | thread->offload_dump_index = 0; |
1155 | 0 | thread->offload_threads = |
1156 | 0 | xmalloc(thread->n_offload_threads * sizeof *thread->offload_threads); |
1157 | |
|
1158 | 0 | for (int i = 0; i < dump->n_offload_dumps; i++) { |
1159 | 0 | struct dpif_offload_flow_dump *offload_dump = dump->offload_dumps[i]; |
1160 | 0 | const struct dpif_offload *offload = offload_dump->offload; |
1161 | |
|
1162 | 0 | thread->offload_threads[i] = |
1163 | 0 | offload->class->flow_dump_thread_create(offload_dump); |
1164 | 0 | } |
1165 | 0 | } |
1166 | | |
1167 | | /* Destroys the 'thread' data associated with the offload flow dump. |
1168 | | * This function is called as part of the general thread cleanup |
1169 | | * by dpif_flow_dump_thread_destroy(). */ |
1170 | | void |
1171 | | dpif_offload_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread) |
1172 | 0 | { |
1173 | 0 | for (int i = 0; i < thread->n_offload_threads; i++) { |
1174 | 0 | struct dpif_offload_flow_dump_thread *offload_thread; |
1175 | 0 | const struct dpif_offload *offload; |
1176 | |
|
1177 | 0 | offload_thread = thread->offload_threads[i]; |
1178 | 0 | offload = offload_thread->dump->offload; |
1179 | 0 | offload->class->flow_dump_thread_destroy(offload_thread); |
1180 | 0 | } |
1181 | 0 | free(thread->offload_threads); |
1182 | 0 | } |
1183 | | |
1184 | | static struct netdev * |
1185 | | dpif_offload_get_netdev_by_port_id__( |
1186 | | struct dpif_offload_provider_collection *collection, |
1187 | | struct dpif_offload **offload, odp_port_t port_no) |
1188 | 0 | { |
1189 | 0 | struct dpif_offload *tmp_offload; |
1190 | 0 | struct netdev *netdev = NULL; |
1191 | |
|
1192 | 0 | LIST_FOR_EACH (tmp_offload, dpif_list_node, &collection->list) { |
1193 | 0 | netdev = tmp_offload->class->get_netdev(tmp_offload, port_no); |
1194 | 0 | if (netdev) { |
1195 | 0 | if (offload) { |
1196 | 0 | *offload = tmp_offload; |
1197 | 0 | } |
1198 | 0 | break; |
1199 | 0 | } |
1200 | 0 | } |
1201 | 0 | return netdev; |
1202 | 0 | } |
1203 | | |
1204 | | struct netdev * |
1205 | | dpif_offload_get_netdev_by_port_id(struct dpif *dpif, |
1206 | | struct dpif_offload **offload, |
1207 | | odp_port_t port_no) |
1208 | 0 | { |
1209 | 0 | struct dpif_offload_provider_collection *collection; |
1210 | |
|
1211 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
1212 | 0 | if (!collection || !dpif_offload_enabled()) { |
1213 | 0 | return NULL; |
1214 | 0 | } |
1215 | | |
1216 | 0 | return dpif_offload_get_netdev_by_port_id__(collection, offload, port_no); |
1217 | 0 | } |
1218 | | |
1219 | | bool |
1220 | | dpif_offload_netdevs_out_of_resources(struct dpif *dpif) |
1221 | 0 | { |
1222 | 0 | struct dpif_offload_provider_collection *collection; |
1223 | 0 | struct dpif_offload_port *port; |
1224 | 0 | struct dpif_offload *offload; |
1225 | |
|
1226 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
1227 | 0 | if (!collection || !dpif_offload_enabled()) { |
1228 | 0 | return false; |
1229 | 0 | } |
1230 | | |
1231 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
1232 | 0 | DPIF_OFFLOAD_PORT_FOR_EACH (port, offload) { |
1233 | 0 | if (port->netdev->hw_info.oor) { |
1234 | 0 | return true; |
1235 | 0 | } |
1236 | 0 | } |
1237 | 0 | } |
1238 | | |
1239 | 0 | return false; |
1240 | 0 | } |
1241 | | |
/* This function tries to offload the operations to the dpif-offload
 * providers. It will return the number of operations not handled, whose
 * pointers are re-arranged and available in **ops.
 *
 * Per-op result protocol: 'op->error' is first set to the sentinel -1
 * ("not processed").  Each capable provider's operate() callback overwrites
 * it with 0 (handled), EOPNOTSUPP (not supported here; reset to -1 so the
 * next provider gets a chance), or another errno.  The final loop then
 * decides, based on 'offload_type', which ops are left for the dpif layer. */
size_t
dpif_offload_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
                     enum dpif_offload_type offload_type)
{
    struct dpif_offload_provider_collection *collection;
    const struct dpif_offload *offload;
    size_t n_ops_left = 0;

    collection = dpif_get_offload_provider_collection(dpif);
    if (!collection || !dpif_offload_enabled()) {
        return n_ops;
    }

    /* Mark every operation "not processed" (see protocol above). */
    for (size_t i = 0; i < n_ops; i++) {
        ops[i]->error = -1;
    }

    /* Only providers that fully own their flows handle batched ops here. */
    LIST_FOR_EACH (offload, dpif_list_node, &collection->list) {
        if (offload->class->impl_type == DPIF_OFFLOAD_IMPL_FLOWS_PROVIDER_ONLY
            && offload->class->operate) {

            offload->class->operate(dpif, offload, ops, n_ops);

            for (size_t i = 0; i < n_ops; i++) {
                struct dpif_op *op = ops[i];

                if (op->error == EOPNOTSUPP) {
                    /* Not supported by this offload provider, try next one. */
                    op->error = -1;
                } else {
                    VLOG_DBG("Tried offloading %d to dpif-offload provider "
                             "%s, error %d",
                             op->type,
                             dpif_offload_name(offload), op->error);

                    /* Emit the per-op debug log matching the op type
                     * (DPIF_OP_EXECUTE has no flow message to log). */
                    switch (op->type) {
                    case DPIF_OP_FLOW_PUT:
                        log_flow_put_message(dpif, &this_module,
                                             &op->flow_put, 0);
                        break;
                    case DPIF_OP_FLOW_DEL:
                        log_flow_del_message(dpif, &this_module,
                                             &op->flow_del, 0);
                        break;
                    case DPIF_OP_FLOW_GET:
                        log_flow_get_message(dpif, &this_module,
                                             &op->flow_get, 0);
                        break;
                    case DPIF_OP_EXECUTE:
                        break;
                    }
                }
            }
        }
    }

    /* Decide which operations remain for the dpif layer, compacting their
     * pointers to the front of 'ops'. */
    for (size_t i = 0; i < n_ops; i++) {
        struct dpif_op *op = ops[i];

        if (offload_type == DPIF_OFFLOAD_ALWAYS) {
            /* For DPIF_OFFLOAD_ALWAYS, we should keep the error values,
             * and mark the unprocessed ones as EOPNOTSUPP. This way, they
             * will not be processed by the dpif layer. */
            if (op->error < 0) {
                op->error = EOPNOTSUPP;
            }
            continue;
        }

        /* For the other offload types, operations that were not handled or
         * failed to offload should be processed by the dpif layer. */
        if (op->error != 0 && op->error != EEXIST) {
            op->error = 0;
            ops[n_ops_left++] = op;
        }
    }

    return n_ops_left;
}
1324 | | |
1325 | | |
1326 | | bool |
1327 | | dpif_offload_netdev_same_offload(const struct netdev *a, |
1328 | | const struct netdev *b) |
1329 | 0 | { |
1330 | 0 | const struct dpif_offload *offload_a, *offload_b; |
1331 | |
|
1332 | 0 | offload_a = ovsrcu_get(const struct dpif_offload *, &a->dpif_offload); |
1333 | 0 | offload_b = ovsrcu_get(const struct dpif_offload *, &b->dpif_offload); |
1334 | |
|
1335 | 0 | return offload_a == offload_b; |
1336 | 0 | } |
1337 | | |
1338 | | int |
1339 | | dpif_offload_datapath_flow_put(const char *dpif_name, |
1340 | | struct dpif_offload_flow_put *put, |
1341 | | void **previous_flow_reference) |
1342 | 0 | { |
1343 | 0 | struct dpif_offload_provider_collection *collection; |
1344 | 0 | struct dpif_offload *offload; |
1345 | 0 | struct netdev *netdev; |
1346 | |
|
1347 | 0 | ovs_mutex_lock(&dpif_offload_mutex); |
1348 | | /* XXX: Implement a faster solution than the current dpif_name lookup. */ |
1349 | 0 | collection = shash_find_data(&dpif_offload_providers, dpif_name); |
1350 | 0 | ovs_mutex_unlock(&dpif_offload_mutex); |
1351 | |
|
1352 | 0 | if (OVS_UNLIKELY(!collection)) { |
1353 | 0 | *previous_flow_reference = NULL; |
1354 | 0 | return EOPNOTSUPP; |
1355 | 0 | } |
1356 | | |
1357 | 0 | netdev = dpif_offload_get_netdev_by_port_id__(collection, &offload, |
1358 | 0 | put->in_port); |
1359 | |
|
1360 | 0 | if (OVS_LIKELY(netdev && offload->class->netdev_flow_put)) { |
1361 | 0 | return offload->class->netdev_flow_put(offload, netdev, put, |
1362 | 0 | previous_flow_reference); |
1363 | 0 | } |
1364 | | |
1365 | 0 | *previous_flow_reference = NULL; |
1366 | 0 | return EOPNOTSUPP; |
1367 | 0 | } |
1368 | | |
1369 | | int |
1370 | | dpif_offload_datapath_flow_del(const char *dpif_name, |
1371 | | struct dpif_offload_flow_del *del) |
1372 | 0 | { |
1373 | 0 | struct dpif_offload_provider_collection *collection; |
1374 | 0 | struct dpif_offload *offload; |
1375 | 0 | struct netdev *netdev; |
1376 | |
|
1377 | 0 | ovs_mutex_lock(&dpif_offload_mutex); |
1378 | | /* XXX: Implement a faster solution than the current dpif_name lookup. */ |
1379 | 0 | collection = shash_find_data(&dpif_offload_providers, dpif_name); |
1380 | 0 | ovs_mutex_unlock(&dpif_offload_mutex); |
1381 | |
|
1382 | 0 | if (OVS_UNLIKELY(!collection)) { |
1383 | 0 | return EOPNOTSUPP; |
1384 | 0 | } |
1385 | | |
1386 | 0 | netdev = dpif_offload_get_netdev_by_port_id__(collection, &offload, |
1387 | 0 | del->in_port); |
1388 | |
|
1389 | 0 | if (OVS_LIKELY(netdev && offload->class->netdev_flow_del)) { |
1390 | 0 | return offload->class->netdev_flow_del(offload, netdev, del); |
1391 | 0 | } |
1392 | | |
1393 | 0 | return EOPNOTSUPP; |
1394 | 0 | } |
1395 | | |
1396 | | bool |
1397 | | dpif_offload_datapath_flow_stats(const char *dpif_name, odp_port_t in_port, |
1398 | | const ovs_u128 *ufid, |
1399 | | struct dpif_flow_stats *stats, |
1400 | | struct dpif_flow_attrs *attrs) |
1401 | 0 | { |
1402 | 0 | struct dpif_offload_provider_collection *collection; |
1403 | 0 | struct dpif_offload *offload; |
1404 | 0 | struct netdev *netdev; |
1405 | |
|
1406 | 0 | if (!dpif_offload_enabled()) { |
1407 | 0 | return false; |
1408 | 0 | } |
1409 | | |
1410 | 0 | ovs_mutex_lock(&dpif_offload_mutex); |
1411 | | /* XXX: Implement a faster solution than the current dpif_name lookup. */ |
1412 | 0 | collection = shash_find_data(&dpif_offload_providers, dpif_name); |
1413 | 0 | ovs_mutex_unlock(&dpif_offload_mutex); |
1414 | |
|
1415 | 0 | if (OVS_UNLIKELY(!collection)) { |
1416 | 0 | return false; |
1417 | 0 | } |
1418 | | |
1419 | 0 | netdev = dpif_offload_get_netdev_by_port_id__(collection, &offload, |
1420 | 0 | in_port); |
1421 | |
|
1422 | 0 | if (OVS_LIKELY(netdev && offload->class->netdev_flow_stats)) { |
1423 | 0 | return offload->class->netdev_flow_stats(offload, netdev, ufid, stats, |
1424 | 0 | attrs); |
1425 | 0 | } |
1426 | 0 | return false; |
1427 | 0 | } |
1428 | | |
/* Gives the offload provider bound to 'netdev' a chance to post-process a
 * received 'packet', returning a flow reference through 'flow_reference'.
 *
 * Returns 0 on success, EOPNOTSUPP when the API is unsupported for this
 * port, or another errno value from the provider callback.
 *
 * 'hw_info.post_process_api_supported' caches a negative answer: once the
 * port (or its provider) is known not to support the API, the relaxed
 * atomic load below short-circuits all subsequent per-packet calls. */
int
dpif_offload_netdev_hw_post_process(struct netdev *netdev, unsigned pmd_id,
                                    struct dp_packet *packet,
                                    void **flow_reference)
{
    const struct dpif_offload *offload;
    bool post_process_api_supported;
    int rc;

    /* Fast path: bail out early if support was previously ruled out. */
    atomic_read_relaxed(&netdev->hw_info.post_process_api_supported,
                        &post_process_api_supported);
    if (!post_process_api_supported) {
        return EOPNOTSUPP;
    }

    offload = ovsrcu_get(const struct dpif_offload *, &netdev->dpif_offload);

    if (!offload || !offload->class->netdev_hw_post_process) {
        if (offload) {
            /* Offload is configured and API unsupported by the port;
             * avoid subsequent calls. */
            atomic_store_relaxed(&netdev->hw_info.post_process_api_supported,
                                 false);
        }
        return EOPNOTSUPP;
    }

    rc = offload->class->netdev_hw_post_process(offload, netdev, pmd_id,
                                                packet, flow_reference);
    if (rc == EOPNOTSUPP) {
        /* API unsupported by the port; avoid subsequent calls. */
        atomic_store_relaxed(&netdev->hw_info.post_process_api_supported,
                             false);
    }
    return rc;
}
1465 | | |
1466 | | void |
1467 | | dpif_offload_datapath_register_flow_unreference_cb( |
1468 | | struct dpif *dpif, dpif_offload_flow_unreference_cb *cb) |
1469 | 0 | { |
1470 | 0 | struct dpif_offload_provider_collection *collection; |
1471 | 0 | const struct dpif_offload *offload; |
1472 | | |
1473 | | /* In this case, we assert to make sure this initialization is done after |
1474 | | * the offload providers have been assigned to the dpif. */ |
1475 | 0 | collection = dpif_get_offload_provider_collection(dpif); |
1476 | 0 | ovs_assert(collection); |
1477 | |
|
1478 | 0 | LIST_FOR_EACH (offload, dpif_list_node, &collection->list) { |
1479 | 0 | if (offload->class->register_flow_unreference_cb) { |
1480 | 0 | offload->class->register_flow_unreference_cb(offload, cb); |
1481 | 0 | } |
1482 | 0 | } |
1483 | 0 | } |
1484 | | |
1485 | | |
1486 | | struct dpif_offload_port_mgr * |
1487 | | dpif_offload_port_mgr_init(void) |
1488 | 0 | { |
1489 | 0 | struct dpif_offload_port_mgr *mgr = xmalloc(sizeof *mgr); |
1490 | |
|
1491 | 0 | ovs_mutex_init(&mgr->cmap_mod_lock); |
1492 | |
|
1493 | 0 | cmap_init(&mgr->odp_port_to_port); |
1494 | 0 | cmap_init(&mgr->netdev_to_port); |
1495 | 0 | cmap_init(&mgr->ifindex_to_port); |
1496 | |
|
1497 | 0 | return mgr; |
1498 | 0 | } |
1499 | | |
1500 | | void |
1501 | | dpif_offload_port_mgr_destroy(struct dpif_offload *offload) |
1502 | 0 | { |
1503 | 0 | struct dpif_offload_port_mgr *ports; |
1504 | |
|
1505 | 0 | if (!offload || !offload->ports) { |
1506 | 0 | return; |
1507 | 0 | } |
1508 | | |
1509 | 0 | ports = offload->ports; |
1510 | 0 | offload->ports = NULL; |
1511 | |
|
1512 | 0 | ovs_assert(cmap_count(&ports->odp_port_to_port) == 0); |
1513 | 0 | ovs_assert(cmap_count(&ports->netdev_to_port) == 0); |
1514 | 0 | ovs_assert(cmap_count(&ports->ifindex_to_port) == 0); |
1515 | |
|
1516 | 0 | cmap_destroy(&ports->odp_port_to_port); |
1517 | 0 | cmap_destroy(&ports->netdev_to_port); |
1518 | 0 | cmap_destroy(&ports->ifindex_to_port); |
1519 | 0 | free(ports); |
1520 | 0 | } |
1521 | | |
1522 | | struct dpif_offload_port * |
1523 | | dpif_offload_port_mgr_find_by_ifindex(const struct dpif_offload *offload, |
1524 | | int ifindex) |
1525 | 0 | { |
1526 | 0 | struct dpif_offload_port_mgr *ports = offload->ports; |
1527 | 0 | struct dpif_offload_port *port; |
1528 | |
|
1529 | 0 | ovs_assert(ports); |
1530 | |
|
1531 | 0 | if (ifindex < 0) { |
1532 | 0 | return NULL; |
1533 | 0 | } |
1534 | | |
1535 | 0 | CMAP_FOR_EACH_WITH_HASH (port, ifindex_node, hash_int(ifindex, 0), |
1536 | 0 | &ports->ifindex_to_port) { |
1537 | 0 | if (port->ifindex == ifindex) { |
1538 | 0 | return port; |
1539 | 0 | } |
1540 | 0 | } |
1541 | 0 | return NULL; |
1542 | 0 | } |
1543 | | |
1544 | | struct dpif_offload_port * |
1545 | | dpif_offload_port_mgr_find_by_netdev(const struct dpif_offload *offload, |
1546 | | struct netdev *netdev) |
1547 | 0 | { |
1548 | 0 | struct dpif_offload_port_mgr *ports = offload->ports; |
1549 | 0 | struct dpif_offload_port *port; |
1550 | |
|
1551 | 0 | ovs_assert(ports); |
1552 | |
|
1553 | 0 | if (!netdev) { |
1554 | 0 | return NULL; |
1555 | 0 | } |
1556 | | |
1557 | 0 | CMAP_FOR_EACH_WITH_HASH (port, netdev_node, hash_pointer(netdev, 0), |
1558 | 0 | &ports->netdev_to_port) { |
1559 | 0 | if (port->netdev == netdev) { |
1560 | 0 | return port; |
1561 | 0 | } |
1562 | 0 | } |
1563 | 0 | return NULL; |
1564 | 0 | } |
1565 | | |
1566 | | struct dpif_offload_port * |
1567 | | dpif_offload_port_mgr_find_by_odp_port(const struct dpif_offload *offload, |
1568 | | odp_port_t port_no) |
1569 | 0 | { |
1570 | 0 | struct dpif_offload_port_mgr *ports = offload->ports; |
1571 | 0 | struct dpif_offload_port *port; |
1572 | |
|
1573 | 0 | ovs_assert(ports); |
1574 | |
|
1575 | 0 | CMAP_FOR_EACH_WITH_HASH (port, odp_port_node, |
1576 | 0 | hash_int(odp_to_u32(port_no), 0), |
1577 | 0 | &ports->odp_port_to_port) { |
1578 | 0 | if (port->port_no == port_no) { |
1579 | 0 | return port; |
1580 | 0 | } |
1581 | 0 | } |
1582 | 0 | return NULL; |
1583 | 0 | } |
1584 | | |
/* Removes and returns the port registered under 'port_no', or NULL if no
 * such port exists.  All map removals happen while holding
 * 'cmap_mod_lock'; concurrent RCU readers may still hold references to
 * the removed entry, hence the postpone note below. */
struct dpif_offload_port *
dpif_offload_port_mgr_remove(struct dpif_offload *offload, odp_port_t port_no)
{
    /* Note that it is the caller's responsibility to release the netdev
     * after port removal. This should probably be done through the
     * ovsrcu_postpone() API. */
    struct dpif_offload_port_mgr *ports = offload->ports;
    struct dpif_offload_port *port;

    ovs_assert(ports);

    ovs_mutex_lock(&ports->cmap_mod_lock);

    port = dpif_offload_port_mgr_find_by_odp_port(offload, port_no);

    if (port) {
        cmap_remove(&ports->odp_port_to_port, &port->odp_port_node,
                    hash_int(odp_to_u32(port_no), 0));
        cmap_remove(&ports->netdev_to_port, &port->netdev_node,
                    hash_pointer(port->netdev, 0));

        /* Ports without an ifindex (stored as -1) were never inserted
         * into the ifindex map; see dpif_offload_port_mgr_add(). */
        if (port->ifindex >= 0) {
            cmap_remove(&ports->ifindex_to_port, &port->ifindex_node,
                        hash_int(port->ifindex, 0));
        }
    }

    ovs_mutex_unlock(&ports->cmap_mod_lock);
    return port;
}
1615 | | |
/* Registers 'port' (caller-owned storage) for 'netdev' under datapath port
 * number 'port_no'.  When 'need_ifindex' is true the netdev's ifindex is
 * resolved and the port is additionally indexed by it.
 *
 * Returns true on success.  Returns false if a port with the same odp port
 * number, ifindex, or netdev is already registered; in that case no
 * reference to 'netdev' is taken and 'port' is left unused (zeroed). */
bool
dpif_offload_port_mgr_add(struct dpif_offload *offload,
                          struct dpif_offload_port *port,
                          struct netdev *netdev, odp_port_t port_no,
                          bool need_ifindex)
{
    struct dpif_offload_port_mgr *ports;

    /* Note that this function takes a reference to the passed-in netdev.
     * However, on port removal it is the caller's responsibility to
     * release this reference. */
    ovs_assert(offload->ports && netdev);

    ports = offload->ports;
    memset(port, 0, sizeof *port);
    port->port_no = port_no;
    /* -1 marks "no ifindex"; netdev_get_ifindex() may also return a
     * negative value on error, which is treated the same way. */
    port->ifindex = need_ifindex ? netdev_get_ifindex(netdev) : -1;

    ovs_mutex_lock(&ports->cmap_mod_lock);

    /* Reject duplicates on any of the three keys before touching the maps,
     * so a failed add leaves the manager unchanged. */
    if (dpif_offload_port_mgr_find_by_odp_port(offload, port_no)
        || dpif_offload_port_mgr_find_by_ifindex(offload, port->ifindex)
        || dpif_offload_port_mgr_find_by_netdev(offload, netdev)) {

        ovs_mutex_unlock(&ports->cmap_mod_lock);
        return false;
    }

    port->netdev = netdev_ref(netdev);

    cmap_insert(&ports->odp_port_to_port, &port->odp_port_node,
                hash_int(odp_to_u32(port_no), 0));

    cmap_insert(&ports->netdev_to_port, &port->netdev_node,
                hash_pointer(netdev, 0));

    if (port->ifindex >= 0) {
        cmap_insert(&ports->ifindex_to_port, &port->ifindex_node,
                    hash_int(port->ifindex, 0));
    }

    ovs_mutex_unlock(&ports->cmap_mod_lock);
    return true;
}
1660 | | |
1661 | | size_t |
1662 | | dpif_offload_port_mgr_port_count(const struct dpif_offload *offload) |
1663 | 0 | { |
1664 | 0 | ovs_assert(offload && offload->ports); |
1665 | |
|
1666 | 0 | return cmap_count(&offload->ports->odp_port_to_port); |
1667 | 0 | } |