/src/openvswitch/lib/dpif-offload-dummy.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2025 Red Hat, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | #include <errno.h> |
19 | | |
20 | | #include "dpif.h" |
21 | | #include "dpif-offload.h" |
22 | | #include "dpif-offload-provider.h" |
23 | | #include "dummy.h" |
24 | | #include "id-fpool.h" |
25 | | #include "netdev-provider.h" |
26 | | #include "odp-util.h" |
27 | | #include "util.h" |
28 | | #include "uuid.h" |
29 | | |
30 | | #include "openvswitch/json.h" |
31 | | #include "openvswitch/match.h" |
32 | | #include "openvswitch/vlog.h" |
33 | | |
34 | | VLOG_DEFINE_THIS_MODULE(dpif_offload_dummy); |
35 | | |
/* Per-PMD bookkeeping for a single offloaded flow.  One entry exists in
 * 'dummy_offloaded_flow->pmd_id_map' for each PMD thread that installed
 * the flow. */
struct pmd_id_data {
    struct hmap_node node;   /* In dummy_offloaded_flow's 'pmd_id_map'. */
    void *flow_reference;    /* Opaque per-PMD datapath flow reference. */
    unsigned pmd_id;         /* PMD thread id owning this reference. */
};
41 | | |
/* A single flow "offloaded" to the dummy provider on a given port. */
struct dummy_offloaded_flow {
    struct hmap_node node;   /* In owning port's 'offloaded_flows'. */
    struct match match;      /* Match used to classify received packets. */
    ovs_u128 ufid;           /* Unique flow identifier. */
    uint32_t mark;           /* Flow mark set on matching rx packets. */

    /* The pmd_id_map below is also protected by the port_mutex. */
    struct hmap pmd_id_map;  /* Map of 'struct pmd_id_data', one per PMD. */
};
51 | | |
/* Per-dpif instance state for the dummy offload provider. */
struct dpif_offload_dummy {
    struct dpif_offload offload;         /* Embedded base "class". */
    struct dpif_offload_port_mgr *port_mgr;  /* Tracks offloaded ports. */
    struct id_fpool *flow_mark_pool;     /* Pool of rx flow-mark values;
                                          * lazily created on first use. */
    dpif_offload_flow_unreference_cb *unreference_cb;  /* Called when a
                                          * flow reference is dropped. */

    /* Configuration specific variables. */
    struct ovsthread_once once_enable; /* Track first-time enablement. */
};
61 | | |
/* A single port tracked by the dummy offload provider. */
struct dpif_offload_dummy_port {
    struct dpif_offload_port_mgr_port pm_port;  /* Embedded port-mgr node. */

    struct ovs_mutex port_mutex; /* Protect all below members. */
    struct hmap offloaded_flows OVS_GUARDED;  /* 'dummy_offloaded_flow's. */
};
68 | | |
69 | | static void dpif_offload_dummy_flow_unreference(struct dpif_offload_dummy *, |
70 | | unsigned pmd_id, |
71 | | void *flow_reference); |
72 | | |
/* Lazily creates the shared flow-mark pool and allocates a mark from it.
 * Returns the new mark, or INVALID_FLOW_MARK when the pool is exhausted. */
static uint32_t
dpif_offload_dummy_allocate_flow_mark(struct dpif_offload_dummy *offload_dummy)
{
    static struct ovsthread_once init_once = OVSTHREAD_ONCE_INITIALIZER;
    uint32_t flow_mark;

    if (ovsthread_once_start(&init_once)) {
        /* Haven't initiated yet, do it here. */
        /* NOTE(review): 'init_once' is function-static while the pool
         * pointer is per instance; if more than one dummy offload instance
         * ever calls this, only the first gets a pool — confirm a single
         * instance allocates marks at a time. */
        offload_dummy->flow_mark_pool = id_fpool_create(1, 1, UINT32_MAX - 1);
        ovsthread_once_done(&init_once);
    }

    if (id_fpool_new_id(offload_dummy->flow_mark_pool, 0, &flow_mark)) {
        return flow_mark;
    }

    return INVALID_FLOW_MARK;
}
91 | | |
92 | | static void |
93 | | dpif_offload_dummy_free_flow_mark(struct dpif_offload_dummy *offload_dummy, |
94 | | uint32_t flow_mark) |
95 | 0 | { |
96 | 0 | if (flow_mark != INVALID_FLOW_MARK) { |
97 | 0 | id_fpool_free_id(offload_dummy->flow_mark_pool, 0, flow_mark); |
98 | 0 | } |
99 | 0 | } |
100 | | |
101 | | static struct dpif_offload_dummy_port * |
102 | | dpif_offload_dummy_cast_port(struct dpif_offload_port_mgr_port *port) |
103 | 0 | { |
104 | 0 | return CONTAINER_OF(port, struct dpif_offload_dummy_port, pm_port); |
105 | 0 | } |
106 | | |
107 | | static struct dpif_offload_dummy * |
108 | | dpif_offload_dummy_cast(const struct dpif_offload *offload) |
109 | 0 | { |
110 | 0 | return CONTAINER_OF(offload, struct dpif_offload_dummy, offload); |
111 | 0 | } |
112 | | |
113 | | static uint32_t |
114 | | dpif_offload_dummy_flow_hash(const ovs_u128 *ufid) |
115 | 0 | { |
116 | 0 | return ufid->u32[0]; |
117 | 0 | } |
118 | | |
119 | | static struct pmd_id_data * |
120 | | dpif_offload_dummy_find_flow_pmd_data( |
121 | | struct dpif_offload_dummy_port *port OVS_UNUSED, |
122 | | struct dummy_offloaded_flow *off_flow, unsigned pmd_id) |
123 | | OVS_REQUIRES(port->port_mutex) |
124 | 0 | { |
125 | 0 | size_t hash = hash_int(pmd_id, 0); |
126 | 0 | struct pmd_id_data *data; |
127 | |
|
128 | 0 | HMAP_FOR_EACH_WITH_HASH (data, node, hash, &off_flow->pmd_id_map) { |
129 | 0 | if (data->pmd_id == pmd_id) { |
130 | 0 | return data; |
131 | 0 | } |
132 | 0 | } |
133 | 0 | return NULL; |
134 | 0 | } |
135 | | |
136 | | static void |
137 | | dpif_offload_dummy_add_flow_pmd_data( |
138 | | struct dpif_offload_dummy_port *port OVS_UNUSED, |
139 | | struct dummy_offloaded_flow *off_flow, unsigned pmd_id, |
140 | | void *flow_reference) |
141 | | OVS_REQUIRES(port->port_mutex) |
142 | 0 | { |
143 | 0 | struct pmd_id_data *pmd_data = xmalloc(sizeof *pmd_data); |
144 | |
|
145 | 0 | pmd_data->pmd_id = pmd_id; |
146 | 0 | pmd_data->flow_reference = flow_reference; |
147 | 0 | hmap_insert(&off_flow->pmd_id_map, &pmd_data->node, |
148 | 0 | hash_int(pmd_id, 0)); |
149 | 0 | } |
150 | | |
151 | | static void |
152 | | dpif_offload_dummy_update_flow_pmd_data( |
153 | | struct dpif_offload_dummy_port *port, |
154 | | struct dummy_offloaded_flow *off_flow, unsigned pmd_id, |
155 | | void *flow_reference, void **previous_flow_reference) |
156 | | OVS_REQUIRES(port->port_mutex) |
157 | 0 | { |
158 | 0 | struct pmd_id_data *data = dpif_offload_dummy_find_flow_pmd_data(port, |
159 | 0 | off_flow, |
160 | 0 | pmd_id); |
161 | |
|
162 | 0 | if (data) { |
163 | 0 | *previous_flow_reference = data->flow_reference; |
164 | 0 | data->flow_reference = flow_reference; |
165 | 0 | } else { |
166 | 0 | dpif_offload_dummy_add_flow_pmd_data(port, off_flow, pmd_id, |
167 | 0 | flow_reference); |
168 | 0 | *previous_flow_reference = NULL; |
169 | 0 | } |
170 | 0 | } |
171 | | |
172 | | static bool |
173 | | dpif_offload_dummy_del_flow_pmd_data( |
174 | | struct dpif_offload_dummy_port *port OVS_UNUSED, |
175 | | struct dummy_offloaded_flow *off_flow, unsigned pmd_id, |
176 | | void *flow_reference) |
177 | | OVS_REQUIRES(port->port_mutex) |
178 | 0 | { |
179 | 0 | size_t hash = hash_int(pmd_id, 0); |
180 | 0 | struct pmd_id_data *data; |
181 | |
|
182 | 0 | HMAP_FOR_EACH_WITH_HASH (data, node, hash, &off_flow->pmd_id_map) { |
183 | 0 | if (data->pmd_id == pmd_id && data->flow_reference == flow_reference) { |
184 | 0 | hmap_remove(&off_flow->pmd_id_map, &data->node); |
185 | 0 | free(data); |
186 | 0 | return true; |
187 | 0 | } |
188 | 0 | } |
189 | | |
190 | 0 | return false; |
191 | 0 | } |
192 | | |
/* Removes and frees all per-PMD entries of 'off_flow', invoking the
 * registered unreference callback for every dropped flow reference so
 * the datapath can release its side of the state. */
static void
dpif_offload_dummy_cleanup_flow_pmd_data(
    struct dpif_offload_dummy *offload,
    struct dpif_offload_dummy_port *port OVS_UNUSED,
    struct dummy_offloaded_flow *off_flow)
    OVS_REQUIRES(port->port_mutex)
{
    struct pmd_id_data *data;

    /* _SAFE variant: entries are removed while iterating. */
    HMAP_FOR_EACH_SAFE (data, node, &off_flow->pmd_id_map) {
        hmap_remove(&off_flow->pmd_id_map, &data->node);

        /* Notify the owner that this reference is no longer offloaded. */
        dpif_offload_dummy_flow_unreference(offload, data->pmd_id,
                                            data->flow_reference);
        free(data);
    }
}
210 | | |
/* Creates a new offloaded-flow entry for 'ufid' carrying 'mark',
 * registers the initial per-PMD reference for 'pmd_id', and inserts the
 * entry into 'port''s flow table.  Returns the new entry. */
static struct dummy_offloaded_flow *
dpif_offload_dummy_add_flow(struct dpif_offload_dummy_port *port,
                            const ovs_u128 *ufid, unsigned pmd_id,
                            void *flow_reference, uint32_t mark)
    OVS_REQUIRES(port->port_mutex)
{
    struct dummy_offloaded_flow *off_flow = xzalloc(sizeof *off_flow);

    off_flow->mark = mark;
    memcpy(&off_flow->ufid, ufid, sizeof off_flow->ufid);
    hmap_init(&off_flow->pmd_id_map);
    dpif_offload_dummy_add_flow_pmd_data(port, off_flow, pmd_id,
                                         flow_reference);

    hmap_insert(&port->offloaded_flows, &off_flow->node,
                dpif_offload_dummy_flow_hash(ufid));

    return off_flow;
}
230 | | |
231 | | static void |
232 | | dpif_offload_dummy_free_flow(struct dpif_offload_dummy_port *port, |
233 | | struct dummy_offloaded_flow *off_flow, |
234 | | bool remove_from_port) |
235 | | OVS_REQUIRES(port->port_mutex) |
236 | 0 | { |
237 | 0 | if (remove_from_port) { |
238 | 0 | hmap_remove(&port->offloaded_flows, &off_flow->node); |
239 | 0 | } |
240 | 0 | ovs_assert(!hmap_count(&off_flow->pmd_id_map)); |
241 | |
|
242 | 0 | hmap_destroy(&off_flow->pmd_id_map); |
243 | 0 | free(off_flow); |
244 | 0 | } |
245 | | |
246 | | static struct dummy_offloaded_flow * |
247 | | dpif_offload_dummy_find_offloaded_flow(struct dpif_offload_dummy_port *port, |
248 | | const ovs_u128 *ufid) |
249 | | OVS_REQUIRES(port->port_mutex) |
250 | 0 | { |
251 | 0 | uint32_t hash = dpif_offload_dummy_flow_hash(ufid); |
252 | 0 | struct dummy_offloaded_flow *data; |
253 | |
|
254 | 0 | HMAP_FOR_EACH_WITH_HASH (data, node, hash, &port->offloaded_flows) { |
255 | 0 | if (ovs_u128_equals(*ufid, data->ufid)) { |
256 | 0 | return data; |
257 | 0 | } |
258 | 0 | } |
259 | | |
260 | 0 | return NULL; |
261 | 0 | } |
262 | | |
263 | | static struct dummy_offloaded_flow * |
264 | | dpif_offload_dummy_find_offloaded_flow_and_update( |
265 | | struct dpif_offload_dummy_port *port, const ovs_u128 *ufid, |
266 | | unsigned pmd_id, void *new_flow_reference, void **previous_flow_reference) |
267 | | OVS_REQUIRES(port->port_mutex) |
268 | 0 | { |
269 | 0 | struct dummy_offloaded_flow *off_flow; |
270 | |
|
271 | 0 | off_flow = dpif_offload_dummy_find_offloaded_flow(port, ufid); |
272 | 0 | if (!off_flow) { |
273 | 0 | return NULL; |
274 | 0 | } |
275 | | |
276 | 0 | dpif_offload_dummy_update_flow_pmd_data(port, off_flow, pmd_id, |
277 | 0 | new_flow_reference, |
278 | 0 | previous_flow_reference); |
279 | |
|
280 | 0 | return off_flow; |
281 | 0 | } |
282 | | |
/* Marks 'port' as offload-capable and attaches this provider to its
 * netdev. */
static void
dpif_offload_dummy_enable_offload(struct dpif_offload *dpif_offload,
                                  struct dpif_offload_port_mgr_port *port)
{
    /* Advertise the hw post-process API so the datapath will call our
     * netdev_hw_post_process() for packets received on this netdev. */
    atomic_store_relaxed(&port->netdev->hw_info.post_process_api_supported,
                         true);
    dpif_offload_set_netdev_offload(port->netdev, dpif_offload);
}
291 | | |
/* Detaches this offload provider from 'port''s netdev; the reverse of
 * dpif_offload_dummy_enable_offload(). */
static void
dpif_offload_dummy_cleanup_offload(struct dpif_offload_port_mgr_port *port)
{
    dpif_offload_set_netdev_offload(port->netdev, NULL);
}
297 | | |
/* Synchronously tears down 'port': drops every offloaded flow (notifying
 * flow-reference owners), destroys the flow table and mutex, optionally
 * closes the netdev, and frees the port structure itself. */
static void
dpif_offload_dummy_free_port__(struct dpif_offload_dummy *offload,
                               struct dpif_offload_dummy_port *port,
                               bool close_netdev)
{
    struct dummy_offloaded_flow *off_flow;

    ovs_mutex_lock(&port->port_mutex);
    HMAP_FOR_EACH_POP (off_flow, node, &port->offloaded_flows) {
        /* Release all per-PMD references before freeing the flow; _POP
         * already unlinked it, so don't remove it from the port again. */
        dpif_offload_dummy_cleanup_flow_pmd_data(offload, port, off_flow);
        dpif_offload_dummy_free_flow(port, off_flow, false);
    }
    hmap_destroy(&port->offloaded_flows);
    ovs_mutex_unlock(&port->port_mutex);
    ovs_mutex_destroy(&port->port_mutex);
    if (close_netdev) {
        netdev_close(port->pm_port.netdev);
    }
    free(port);
}
318 | | |
/* Context carried across the RCU grace period for deferred port
 * destruction. */
struct free_port_rcu {
    struct dpif_offload_dummy *offload;
    struct dpif_offload_dummy_port *port;
};

/* RCU callback: frees the port (and closes its netdev) once no reader
 * can still be referencing it. */
static void
dpif_offload_dummy_free_port_rcu(struct free_port_rcu *fpc)
{
    dpif_offload_dummy_free_port__(fpc->offload, fpc->port, true);
    free(fpc);
}
330 | | |
/* Schedules RCU-deferred destruction of 'port' so that concurrent
 * lockless readers (e.g. port-manager lookups) can finish first. */
static void
dpif_offload_dummy_free_port(struct dpif_offload_dummy *offload,
                             struct dpif_offload_dummy_port *port)
{
    struct free_port_rcu *fpc = xmalloc(sizeof *fpc);

    fpc->offload = offload;
    fpc->port = port;
    ovsrcu_postpone(dpif_offload_dummy_free_port_rcu, fpc);
}
341 | | |
/* Offload provider 'port_add' implementation: starts tracking 'netdev'
 * as datapath port 'port_no'.  Returns 0 on success, or EEXIST when the
 * port manager already tracks this port. */
static int
dpif_offload_dummy_port_add(struct dpif_offload *dpif_offload,
                            struct netdev *netdev, odp_port_t port_no)
{
    struct dpif_offload_dummy_port *port = xmalloc(sizeof *port);
    struct dpif_offload_dummy *offload_dummy;

    ovs_mutex_init(&port->port_mutex);
    /* The lock is taken only to satisfy the OVS_GUARDED annotation on
     * 'offloaded_flows'; nobody else can see the port yet. */
    ovs_mutex_lock(&port->port_mutex);
    hmap_init(&port->offloaded_flows);
    ovs_mutex_unlock(&port->port_mutex);

    offload_dummy = dpif_offload_dummy_cast(dpif_offload);
    if (dpif_offload_port_mgr_add(offload_dummy->port_mgr, &port->pm_port,
                                  netdev, port_no, false)) {

        if (dpif_offload_enabled()) {
            dpif_offload_dummy_enable_offload(dpif_offload, &port->pm_port);
        }
        return 0;
    }

    /* Port manager refused the port (already present): undo local state
     * without closing the netdev, which we never referenced. */
    dpif_offload_dummy_free_port__(offload_dummy, port, false);
    return EEXIST;
}
367 | | |
/* Offload provider 'port_del' implementation: stops tracking datapath
 * port 'port_no'.  The port itself is freed via RCU.  Always returns 0,
 * even when the port was unknown. */
static int
dpif_offload_dummy_port_del(struct dpif_offload *dpif_offload,
                            odp_port_t port_no)
{
    struct dpif_offload_dummy *offload_dummy;
    struct dpif_offload_port_mgr_port *port;

    offload_dummy = dpif_offload_dummy_cast(dpif_offload);

    port = dpif_offload_port_mgr_remove(offload_dummy->port_mgr, port_no);
    if (port) {
        struct dpif_offload_dummy_port *dummy_port;

        dummy_port = dpif_offload_dummy_cast_port(port);
        if (dpif_offload_enabled()) {
            /* Detach from the netdev before the deferred free. */
            dpif_offload_dummy_cleanup_offload(port);
        }
        dpif_offload_dummy_free_port(offload_dummy, dummy_port);
    }
    return 0;
}
389 | | |
390 | | static int |
391 | | dpif_offload_dummy_port_dump_start(const struct dpif_offload *offload_, |
392 | | void **statep) |
393 | 0 | { |
394 | 0 | struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_); |
395 | |
|
396 | 0 | return dpif_offload_port_mgr_port_dump_start(offload->port_mgr, statep); |
397 | 0 | } |
398 | | |
399 | | static int |
400 | | dpif_offload_dummy_port_dump_next(const struct dpif_offload *offload_, |
401 | | void *state, |
402 | | struct dpif_offload_port *port) |
403 | 0 | { |
404 | 0 | struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_); |
405 | |
|
406 | 0 | return dpif_offload_port_mgr_port_dump_next(offload->port_mgr, state, |
407 | 0 | port); |
408 | 0 | } |
409 | | |
410 | | static int |
411 | | dpif_offload_dummy_port_dump_done(const struct dpif_offload *offload_, |
412 | | void *state) |
413 | 0 | { |
414 | 0 | struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_); |
415 | |
|
416 | 0 | return dpif_offload_port_mgr_port_dump_done(offload->port_mgr, state); |
417 | 0 | } |
418 | | |
419 | | static struct netdev * |
420 | | dpif_offload_dummy_get_netdev(struct dpif_offload *dpif_offload, |
421 | | odp_port_t port_no) |
422 | 0 | { |
423 | 0 | struct dpif_offload_dummy *offload_dummy; |
424 | 0 | struct dpif_offload_port_mgr_port *port; |
425 | |
|
426 | 0 | offload_dummy = dpif_offload_dummy_cast(dpif_offload); |
427 | |
|
428 | 0 | port = dpif_offload_port_mgr_find_by_odp_port(offload_dummy->port_mgr, |
429 | 0 | port_no); |
430 | 0 | if (!port) { |
431 | 0 | return NULL; |
432 | 0 | } |
433 | | |
434 | 0 | return port->netdev; |
435 | 0 | } |
436 | | |
/* Offload class 'open' implementation: allocates and initializes a dummy
 * offload instance bound to 'dpif'.  Always succeeds; the new instance
 * is returned in '*dpif_offload'. */
static int
dpif_offload_dummy_open(const struct dpif_offload_class *offload_class,
                        struct dpif *dpif, struct dpif_offload **dpif_offload)
{
    struct dpif_offload_dummy *offload_dummy;

    offload_dummy = xmalloc(sizeof *offload_dummy);

    dpif_offload_init(&offload_dummy->offload, offload_class, dpif);
    offload_dummy->port_mgr = dpif_offload_port_mgr_init();
    offload_dummy->once_enable =
        (struct ovsthread_once) OVSTHREAD_ONCE_INITIALIZER;
    /* The mark pool is created lazily on first allocation. */
    offload_dummy->flow_mark_pool = NULL;
    offload_dummy->unreference_cb = NULL;

    *dpif_offload = &offload_dummy->offload;
    return 0;
}
455 | | |
/* Offload class 'close' implementation: removes any remaining ports and
 * releases all instance state. */
static void
dpif_offload_dummy_close(struct dpif_offload *dpif_offload)
{
    struct dpif_offload_dummy *offload_dummy;

    offload_dummy = dpif_offload_dummy_cast(dpif_offload);

    /* The ofproto layer may not call dpif_port_del() for all ports,
     * especially internal ones, so we need to clean up any remaining ports. */
    struct dpif_offload_port_mgr_port *port;

    DPIF_OFFLOAD_PORT_MGR_PORT_FOR_EACH (port, offload_dummy->port_mgr) {
        dpif_offload_dummy_port_del(dpif_offload, port->port_no);
    }

    dpif_offload_port_mgr_uninit(offload_dummy->port_mgr);
    /* The pool only exists if a flow mark was ever allocated. */
    if (offload_dummy->flow_mark_pool) {
        id_fpool_destroy(offload_dummy->flow_mark_pool);
    }
    ovsthread_once_destroy(&offload_dummy->once_enable);
    free(offload_dummy);
}
478 | | |
/* Applies datapath configuration from 'other_cfg'.  The first time
 * "hw-offload" is seen enabled, attaches the provider to all currently
 * known ports.  This is one-shot: offloading is never detached again via
 * configuration. */
static void
dpif_offload_dummy_set_config(struct dpif_offload *dpif_offload,
                              const struct smap *other_cfg)
{
    struct dpif_offload_dummy *offload_dummy;

    offload_dummy = dpif_offload_dummy_cast(dpif_offload);

    if (smap_get_bool(other_cfg, "hw-offload", false)) {
        if (ovsthread_once_start(&offload_dummy->once_enable)) {
            struct dpif_offload_port_mgr_port *port;

            DPIF_OFFLOAD_PORT_MGR_PORT_FOR_EACH (port,
                                                 offload_dummy->port_mgr) {
                dpif_offload_dummy_enable_offload(dpif_offload, port);
            }

            ovsthread_once_done(&offload_dummy->once_enable);
        }
    }
}
500 | | |
501 | | static void |
502 | | dpif_offload_dummy_get_debug(const struct dpif_offload *offload, struct ds *ds, |
503 | | struct json *json) |
504 | 0 | { |
505 | 0 | struct dpif_offload_dummy *offload_dummy; |
506 | |
|
507 | 0 | offload_dummy = dpif_offload_dummy_cast(offload); |
508 | |
|
509 | 0 | if (json) { |
510 | 0 | struct json *json_ports = json_object_create(); |
511 | 0 | struct dpif_offload_port_mgr_port *port; |
512 | |
|
513 | 0 | DPIF_OFFLOAD_PORT_MGR_PORT_FOR_EACH (port, offload_dummy->port_mgr) { |
514 | 0 | struct json *json_port = json_object_create(); |
515 | |
|
516 | 0 | json_object_put(json_port, "port_no", |
517 | 0 | json_integer_create(odp_to_u32(port->port_no))); |
518 | |
|
519 | 0 | json_object_put(json_ports, netdev_get_name(port->netdev), |
520 | 0 | json_port); |
521 | 0 | } |
522 | |
|
523 | 0 | if (!json_object_is_empty(json_ports)) { |
524 | 0 | json_object_put(json, "ports", json_ports); |
525 | 0 | } else { |
526 | 0 | json_destroy(json_ports); |
527 | 0 | } |
528 | 0 | } else if (ds) { |
529 | 0 | struct dpif_offload_port_mgr_port *port; |
530 | |
|
531 | 0 | DPIF_OFFLOAD_PORT_MGR_PORT_FOR_EACH (port, offload_dummy->port_mgr) { |
532 | 0 | ds_put_format(ds, " - %s: port_no: %u\n", |
533 | 0 | netdev_get_name(port->netdev), port->port_no); |
534 | 0 | } |
535 | 0 | } |
536 | 0 | } |
537 | | |
538 | | static int |
539 | | dpif_offload_dummy_get_global_stats(const struct dpif_offload *offload_, |
540 | | struct netdev_custom_stats *stats) |
541 | 0 | { |
542 | 0 | struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_); |
543 | | |
544 | | /* Add a single counter telling how many ports we are servicing. */ |
545 | 0 | stats->label = xstrdup(dpif_offload_name(offload_)); |
546 | 0 | stats->size = 1; |
547 | 0 | stats->counters = xmalloc(sizeof(struct netdev_custom_counter) * 1); |
548 | 0 | stats->counters[0].value = dpif_offload_port_mgr_port_count( |
549 | 0 | offload->port_mgr); |
550 | 0 | ovs_strzcpy(stats->counters[0].name, "Offloaded port count", |
551 | 0 | sizeof stats->counters[0].name); |
552 | |
|
553 | 0 | return 0; |
554 | 0 | } |
555 | | |
556 | | static bool |
557 | | dpif_offload_dummy_can_offload(struct dpif_offload *dpif_offload OVS_UNUSED, |
558 | | struct netdev *netdev) |
559 | 0 | { |
560 | 0 | return is_dummy_netdev_class(netdev->netdev_class); |
561 | 0 | } |
562 | | |
/* Debug-logs the outcome of flow operation 'op' ("add", "modify",
 * "delete") on the flow identified by 'ufid'; 'error' == 0 means
 * success. */
static void
dpif_offload_dummy_log_operation(const char *op, int error,
                                 const ovs_u128 *ufid)
{
    VLOG_DBG("%s to %s netdev flow "UUID_FMT,
             error == 0 ? "succeed" : "failed", op,
             UUID_ARGS((struct uuid *) ufid));
}
571 | | |
572 | | static struct dpif_offload_dummy_port * |
573 | | dpif_offload_dummy_get_port_by_netdev(const struct dpif_offload *offload_, |
574 | | struct netdev *netdev) |
575 | 0 | { |
576 | 0 | struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_); |
577 | 0 | struct dpif_offload_port_mgr_port *port; |
578 | |
|
579 | 0 | port = dpif_offload_port_mgr_find_by_netdev(offload->port_mgr, netdev); |
580 | 0 | if (!port) { |
581 | 0 | return NULL; |
582 | 0 | } |
583 | 0 | return dpif_offload_dummy_cast_port(port); |
584 | 0 | } |
585 | | |
/* Hardware post-process hook: maps the flow mark carried by 'packet'
 * (set by dpif_offload_dummy_netdev_simulate_offload()) back to the
 * datapath flow reference registered for 'pmd_id'.
 *
 * '*flow_reference_' is set to NULL when the netdev is not ours, the
 * packet has no mark, or no matching per-PMD reference exists.  Always
 * returns 0. */
static int
dpif_offload_dummy_netdev_hw_post_process(
    const struct dpif_offload *offload_, struct netdev *netdev,
    unsigned pmd_id, struct dp_packet *packet, void **flow_reference_)
{
    struct dummy_offloaded_flow *off_flow;
    struct dpif_offload_dummy_port *port;
    void *flow_reference = NULL;
    uint32_t flow_mark;

    port = dpif_offload_dummy_get_port_by_netdev(offload_, netdev);
    if (!port || !dp_packet_has_flow_mark(packet, &flow_mark)) {
        *flow_reference_ = NULL;
        return 0;
    }

    ovs_mutex_lock(&port->port_mutex);
    /* Linear scan by mark; fine for the small flow counts seen in tests. */
    HMAP_FOR_EACH (off_flow, node, &port->offloaded_flows) {
        struct pmd_id_data *pmd_data;

        if (flow_mark == off_flow->mark) {
            pmd_data = dpif_offload_dummy_find_flow_pmd_data(port, off_flow,
                                                             pmd_id);
            if (pmd_data) {
                flow_reference = pmd_data->flow_reference;
            }
            break;
        }
    }
    ovs_mutex_unlock(&port->port_mutex);

    *flow_reference_ = flow_reference;
    return 0;
}
620 | | |
/* Offload provider 'flow_put' implementation: installs or updates the
 * offloaded flow identified by 'put->ufid' on 'netdev'.
 *
 * On success, '*previous_flow_reference' holds the reference that was
 * previously registered for 'put->pmd_id' (NULL when this PMD had none),
 * so the caller can release it.  Returns 0 on success, ENODEV when the
 * netdev is not managed by this provider, or ENOSPC when no flow mark
 * could be allocated. */
static int
dpif_offload_dummy_netdev_flow_put(const struct dpif_offload *offload_,
                                   struct netdev *netdev,
                                   struct dpif_offload_flow_put *put,
                                   void **previous_flow_reference)
{
    struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_);
    struct dummy_offloaded_flow *off_flow;
    struct dpif_offload_dummy_port *port;
    bool modify = true;
    int error = 0;

    port = dpif_offload_dummy_get_port_by_netdev(offload_, netdev);
    if (!port) {
        error = ENODEV;
        goto exit;
    }

    ovs_mutex_lock(&port->port_mutex);

    off_flow = dpif_offload_dummy_find_offloaded_flow_and_update(
        port, put->ufid, put->pmd_id, put->flow_reference,
        previous_flow_reference);

    if (!off_flow) {
        /* Create new offloaded flow. */
        uint32_t mark = dpif_offload_dummy_allocate_flow_mark(offload);

        if (mark == INVALID_FLOW_MARK) {
            error = ENOSPC;
            goto exit_unlock;
        }

        off_flow = dpif_offload_dummy_add_flow(port, put->ufid, put->pmd_id,
                                               put->flow_reference, mark);
        modify = false;
        *previous_flow_reference = NULL;
    }
    /* Refresh the match on both create and modify. */
    memcpy(&off_flow->match, put->match, sizeof *put->match);

    /* As we have per-netdev 'offloaded_flows', we don't need to match
     * the 'in_port' for received packets. This will also allow offloading
     * for packets passed to 'receive' command without specifying the
     * 'in_port'. */
    off_flow->match.wc.masks.in_port.odp_port = 0;

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds ds = DS_EMPTY_INITIALIZER;

        ds_put_format(&ds, "%s: flow put[%s]: ", netdev_get_name(netdev),
                      modify ? "modify" : "create");
        odp_format_ufid(put->ufid, &ds);
        ds_put_cstr(&ds, " flow match: ");
        match_format(put->match, NULL, &ds, OFP_DEFAULT_PRIORITY);
        ds_put_format(&ds, ", mark: %"PRIu32, off_flow->mark);

        VLOG_DBG("%s", ds_cstr(&ds));
        ds_destroy(&ds);
    }

exit_unlock:
    ovs_mutex_unlock(&port->port_mutex);

exit:
    /* This provider keeps no statistics; report zeros if asked. */
    if (put->stats) {
        memset(put->stats, 0, sizeof *put->stats);
    }

    dpif_offload_dummy_log_operation(modify ? "modify" : "add", error,
                                     put->ufid);
    return error;
}
693 | | |
/* Offload provider 'flow_del' implementation: drops the per-PMD
 * reference ('del->pmd_id' + 'del->flow_reference') on the flow
 * identified by 'del->ufid'.  When the last reference goes, the flow
 * entry and its mark are released too.  Returns 0 on success, ENOENT on
 * any failure (unknown netdev, flow, or reference). */
static int
dpif_offload_dummy_netdev_flow_del(const struct dpif_offload *offload_,
                                   struct netdev *netdev,
                                   struct dpif_offload_flow_del *del)
{
    struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_);
    struct dummy_offloaded_flow *off_flow;
    struct dpif_offload_dummy_port *port;
    uint32_t mark = INVALID_FLOW_MARK;
    const char *error = NULL;   /* Human-readable failure reason, or NULL. */

    port = dpif_offload_dummy_get_port_by_netdev(offload_, netdev);
    if (!port) {
        error = "No such (net)device.";
        goto exit;
    }

    ovs_mutex_lock(&port->port_mutex);

    off_flow = dpif_offload_dummy_find_offloaded_flow(port, del->ufid);
    if (!off_flow) {
        error = "No such flow.";
        goto exit_unlock;
    }

    if (!dpif_offload_dummy_del_flow_pmd_data(port, off_flow, del->pmd_id,
                                              del->flow_reference)) {
        error = "No such flow with pmd_id and reference.";
        goto exit_unlock;
    }

    mark = off_flow->mark;
    /* Last PMD reference gone: free both the mark and the flow entry. */
    if (!hmap_count(&off_flow->pmd_id_map)) {
        dpif_offload_dummy_free_flow_mark(offload, mark);
        dpif_offload_dummy_free_flow(port, off_flow, true);
    }

exit_unlock:
    ovs_mutex_unlock(&port->port_mutex);

exit:
    /* Failures log at WARN level; successes only when debugging. */
    if (error || VLOG_IS_DBG_ENABLED()) {
        struct ds ds = DS_EMPTY_INITIALIZER;

        ds_put_format(&ds, "%s: ", netdev_get_name(netdev));
        if (error) {
            ds_put_cstr(&ds, "failed to ");
        }
        ds_put_cstr(&ds, "flow del: ");
        odp_format_ufid(del->ufid, &ds);
        if (error) {
            ds_put_format(&ds, " error: %s", error);
        } else {
            ds_put_format(&ds, " mark: %"PRIu32, mark);
        }
        VLOG(error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
    }

    /* This provider keeps no statistics; report zeros if asked. */
    if (del->stats) {
        memset(del->stats, 0, sizeof *del->stats);
    }

    dpif_offload_dummy_log_operation("delete", error ? -1 : 0, del->ufid);
    return error ? ENOENT : 0;
}
760 | | |
761 | | static bool |
762 | | dpif_offload_dummy_netdev_flow_stats(const struct dpif_offload *offload_, |
763 | | struct netdev *netdev, |
764 | | const ovs_u128 *ufid, |
765 | | struct dpif_flow_stats *stats, |
766 | | struct dpif_flow_attrs *attrs) |
767 | 0 | { |
768 | 0 | struct dummy_offloaded_flow *off_flow = NULL; |
769 | 0 | struct dpif_offload_dummy_port *port; |
770 | |
|
771 | 0 | port = dpif_offload_dummy_get_port_by_netdev(offload_, netdev); |
772 | 0 | if (!port) { |
773 | 0 | return false; |
774 | 0 | } |
775 | | |
776 | 0 | ovs_mutex_lock(&port->port_mutex); |
777 | 0 | off_flow = dpif_offload_dummy_find_offloaded_flow(port, ufid); |
778 | 0 | ovs_mutex_unlock(&port->port_mutex); |
779 | |
|
780 | 0 | memset(stats, 0, sizeof *stats); |
781 | 0 | attrs->offloaded = off_flow ? true : false; |
782 | 0 | attrs->dp_layer = "ovs"; /* 'ovs', since this is a partial offload. */ |
783 | 0 | attrs->dp_extra_info = NULL; |
784 | |
|
785 | 0 | return off_flow ? true : false; |
786 | 0 | } |
787 | | |
788 | | static void |
789 | | dpif_offload_dummy_register_flow_unreference_cb( |
790 | | const struct dpif_offload *offload_, dpif_offload_flow_unreference_cb *cb) |
791 | 0 | { |
792 | 0 | struct dpif_offload_dummy *offload = dpif_offload_dummy_cast(offload_); |
793 | |
|
794 | 0 | offload->unreference_cb = cb; |
795 | 0 | } |
796 | | |
797 | | static void |
798 | | dpif_offload_dummy_flow_unreference(struct dpif_offload_dummy *offload, |
799 | | unsigned pmd_id, void *flow_reference) |
800 | 0 | { |
801 | 0 | if (offload->unreference_cb) { |
802 | 0 | offload->unreference_cb(pmd_id, flow_reference); |
803 | 0 | } |
804 | 0 | } |
805 | | |
/* Simulates what offload hardware would do on packet reception: if
 * 'packet' matches one of the flows offloaded on 'netdev', sets the
 * corresponding flow mark on the packet.  'flow' may be NULL, in which
 * case it is extracted from the packet here.  A no-op when the netdev is
 * not attached to a dummy offload provider. */
void
dpif_offload_dummy_netdev_simulate_offload(struct netdev *netdev,
                                           struct dp_packet *packet,
                                           struct flow *flow)
{
    const struct dpif_offload *offload = ovsrcu_get(
        const struct dpif_offload *, &netdev->dpif_offload);
    struct dpif_offload_dummy_port *port;
    struct dummy_offloaded_flow *data;
    struct flow packet_flow;

    /* Only act when this netdev is bound to a "dummy"-type provider. */
    if (!offload || strcmp(dpif_offload_type(offload), "dummy")) {
        return;
    }

    port = dpif_offload_dummy_get_port_by_netdev(offload, netdev);
    if (!port) {
        return;
    }

    if (!flow) {
        flow = &packet_flow;
        flow_extract(packet, flow);
    }

    ovs_mutex_lock(&port->port_mutex);
    /* First matching offloaded flow wins. */
    HMAP_FOR_EACH (data, node, &port->offloaded_flows) {
        if (flow_equal_except(flow, &data->match.flow, &data->match.wc)) {

            dp_packet_set_flow_mark(packet, data->mark);

            if (VLOG_IS_DBG_ENABLED()) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ds_put_format(&ds, "%s: packet: ",
                              netdev_get_name(netdev));
                /* 'flow' does not contain proper port number here.
                 * Let's just clear it as it's wildcarded anyway. */
                flow->in_port.ofp_port = 0;
                flow_format(&ds, flow, NULL);

                ds_put_cstr(&ds, " matches with flow: ");
                odp_format_ufid(&data->ufid, &ds);
                ds_put_cstr(&ds, " ");
                match_format(&data->match, NULL, &ds, OFP_DEFAULT_PRIORITY);
                ds_put_format(&ds, " with mark: %"PRIu32, data->mark);

                VLOG_DBG("%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
            break;
        }
    }
    ovs_mutex_unlock(&port->port_mutex);
}
861 | | |
/* Template for a dummy offload class definition.  Instantiated twice
 * below so the test infrastructure can register two distinct provider
 * types ("dummy" and "dummy_x") that share this implementation. */
#define DEFINE_DPIF_DUMMY_CLASS(NAME, TYPE_STR) \
struct dpif_offload_class NAME = { \
    .type = TYPE_STR, \
    .impl_type = DPIF_OFFLOAD_IMPL_FLOWS_DPIF_SYNCED, \
    .supported_dpif_types = (const char *const[]) {"dummy", NULL}, \
    .open = dpif_offload_dummy_open, \
    .close = dpif_offload_dummy_close, \
    .set_config = dpif_offload_dummy_set_config, \
    .get_debug = dpif_offload_dummy_get_debug, \
    .get_global_stats = dpif_offload_dummy_get_global_stats, \
    .can_offload = dpif_offload_dummy_can_offload, \
    .port_add = dpif_offload_dummy_port_add, \
    .port_del = dpif_offload_dummy_port_del, \
    .port_dump_start = dpif_offload_dummy_port_dump_start, \
    .port_dump_next = dpif_offload_dummy_port_dump_next, \
    .port_dump_done = dpif_offload_dummy_port_dump_done, \
    .get_netdev = dpif_offload_dummy_get_netdev, \
    .netdev_hw_post_process = \
        dpif_offload_dummy_netdev_hw_post_process, \
    .netdev_flow_put = dpif_offload_dummy_netdev_flow_put, \
    .netdev_flow_del = dpif_offload_dummy_netdev_flow_del, \
    .netdev_flow_stats = dpif_offload_dummy_netdev_flow_stats, \
    .register_flow_unreference_cb = \
        dpif_offload_dummy_register_flow_unreference_cb, \
}

DEFINE_DPIF_DUMMY_CLASS(dpif_offload_dummy_class, "dummy");
DEFINE_DPIF_DUMMY_CLASS(dpif_offload_dummy_x_class, "dummy_x");