Coverage Report

Created: 2026-03-02 06:37

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/openvswitch/lib/dpif-offload-dummy.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2025 Red Hat, Inc.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include <config.h>
18
#include <errno.h>
19
20
#include "dpif.h"
21
#include "dpif-offload.h"
22
#include "dpif-offload-provider.h"
23
#include "dummy.h"
24
#include "id-fpool.h"
25
#include "netdev-provider.h"
26
#include "odp-util.h"
27
#include "util.h"
28
#include "uuid.h"
29
30
#include "openvswitch/json.h"
31
#include "openvswitch/match.h"
32
#include "openvswitch/vlog.h"
33
34
VLOG_DEFINE_THIS_MODULE(dpif_offload_dummy);
35
36
/* One per-PMD reference to an offloaded flow.  Stored in the owning
 * dummy_offloaded_flow's 'pmd_id_map', hashed on 'pmd_id'. */
struct pmd_id_data {
    struct hmap_node node;   /* Node in dummy_offloaded_flow's 'pmd_id_map'. */
    void *flow_reference;    /* Opaque reference supplied by the caller on
                              * flow put; released via the registered
                              * unreference callback. */
    unsigned pmd_id;         /* Id of the PMD thread owning this reference. */
};
41
42
/* A flow offloaded to this dummy provider.  Stored in the owning port's
 * 'offloaded_flows' map, hashed on the first 32 bits of 'ufid'. */
struct dummy_offloaded_flow {
    struct hmap_node node;   /* Node in dummy_offload_port's
                              * 'offloaded_flows'. */
    struct match match;      /* Match the flow was installed with; the
                              * 'in_port' mask is cleared on put. */
    ovs_u128 ufid;           /* Unique flow identifier. */
    uint32_t mark;           /* Flow mark allocated from the id pool. */

    /* The pmd_id_map below is also protected by the port_mutex. */
    struct hmap pmd_id_map;  /* Map of 'struct pmd_id_data', one entry per
                              * PMD thread referencing this flow. */
};
51
52
/* Per-dpif state of the dummy offload provider. */
struct dummy_offload {
    struct dpif_offload offload;       /* Base dpif_offload object; recovered
                                        * via dummy_offload_cast(). */
    struct id_fpool *flow_mark_pool;   /* Pool of flow marks; created lazily
                                        * on first allocation. */
    dpif_offload_flow_unreference_cb *unreference_cb;
                                       /* Callback used to release per-PMD
                                        * flow references; may be NULL. */

    /* Configuration specific variables. */
    struct ovsthread_once once_enable; /* Track first-time enablement. */
};
60
61
/* Per-port state of the dummy offload provider. */
struct dummy_offload_port {
    struct dpif_offload_port pm_port;  /* Base port-manager port object;
                                        * recovered via
                                        * dummy_offload_port_cast(). */

    struct ovs_mutex port_mutex; /* Protect all below members. */
    struct hmap offloaded_flows OVS_GUARDED; /* Map of
                                        * 'struct dummy_offloaded_flow'. */
};
67
68
static void dummy_flow_unreference(struct dummy_offload *, unsigned pmd_id,
69
                                   void *flow_reference);
70
71
/* Allocates a flow mark for a newly offloaded flow.  Returns the mark, or
 * INVALID_FLOW_MARK when the pool is exhausted. */
static uint32_t
dummy_allocate_flow_mark(struct dummy_offload *offload)
{
    static struct ovsthread_once init_once = OVSTHREAD_ONCE_INITIALIZER;
    uint32_t flow_mark;

    if (ovsthread_once_start(&init_once)) {
        /* Haven't initiated yet, do it here. */
        offload->flow_mark_pool = id_fpool_create(1, 1, UINT32_MAX - 1);
        ovsthread_once_done(&init_once);
    }

    /* NOTE(review): 'init_once' is a process-wide static, but the pool is
     * stored in this particular 'offload' instance; if more than one dummy
     * offload instance allocated marks, later instances would see a NULL
     * 'flow_mark_pool' here — confirm only one instance does. */
    if (id_fpool_new_id(offload->flow_mark_pool, 0, &flow_mark)) {
        return flow_mark;
    }

    return INVALID_FLOW_MARK;
}
89
90
static void
91
dummy_free_flow_mark(struct dummy_offload *offload, uint32_t flow_mark)
92
0
{
93
0
    if (flow_mark != INVALID_FLOW_MARK) {
94
0
        id_fpool_free_id(offload->flow_mark_pool, 0, flow_mark);
95
0
    }
96
0
}
97
98
/* Converts a generic port-manager port into its enclosing dummy port. */
static struct dummy_offload_port *
dummy_offload_port_cast(struct dpif_offload_port *port)
{
    return CONTAINER_OF(port, struct dummy_offload_port, pm_port);
}
103
104
/* Converts a generic dpif_offload object into its enclosing dummy offload
 * instance. */
static struct dummy_offload *
dummy_offload_cast(const struct dpif_offload *offload)
{
    return CONTAINER_OF(offload, struct dummy_offload, offload);
}
109
110
/* Hash used for 'offloaded_flows'; the low 32 bits of the ufid are used
 * directly as the hash value. */
static uint32_t
dummy_flow_hash(const ovs_u128 *ufid)
{
    return ufid->u32[0];
}
115
116
static struct pmd_id_data *
117
dummy_find_flow_pmd_data(struct dummy_offload_port *port OVS_UNUSED,
118
                         struct dummy_offloaded_flow *off_flow,
119
                         unsigned pmd_id)
120
    OVS_REQUIRES(port->port_mutex)
121
0
{
122
0
    size_t hash = hash_int(pmd_id, 0);
123
0
    struct pmd_id_data *data;
124
125
0
    HMAP_FOR_EACH_WITH_HASH (data, node, hash, &off_flow->pmd_id_map) {
126
0
        if (data->pmd_id == pmd_id) {
127
0
            return data;
128
0
        }
129
0
    }
130
0
    return NULL;
131
0
}
132
133
static void
134
dummy_add_flow_pmd_data(struct dummy_offload_port *port OVS_UNUSED,
135
                        struct dummy_offloaded_flow *off_flow, unsigned pmd_id,
136
                        void *flow_reference)
137
    OVS_REQUIRES(port->port_mutex)
138
0
{
139
0
    struct pmd_id_data *pmd_data = xmalloc(sizeof *pmd_data);
140
141
0
    pmd_data->pmd_id = pmd_id;
142
0
    pmd_data->flow_reference = flow_reference;
143
0
    hmap_insert(&off_flow->pmd_id_map, &pmd_data->node,
144
0
                hash_int(pmd_id, 0));
145
0
}
146
147
static void
148
dummy_update_flow_pmd_data(struct dummy_offload_port *port,
149
                           struct dummy_offloaded_flow *off_flow,
150
                           unsigned pmd_id, void *flow_reference,
151
                           void **previous_flow_reference)
152
    OVS_REQUIRES(port->port_mutex)
153
0
{
154
0
    struct pmd_id_data *data = dummy_find_flow_pmd_data(port, off_flow,
155
0
                                                        pmd_id);
156
157
0
    if (data) {
158
0
        *previous_flow_reference = data->flow_reference;
159
0
        data->flow_reference = flow_reference;
160
0
    } else {
161
0
        dummy_add_flow_pmd_data(port, off_flow, pmd_id, flow_reference);
162
0
        *previous_flow_reference = NULL;
163
0
    }
164
0
}
165
166
static bool
167
dummy_del_flow_pmd_data(struct dummy_offload_port *port OVS_UNUSED,
168
                        struct dummy_offloaded_flow *off_flow, unsigned pmd_id,
169
                        void *flow_reference)
170
    OVS_REQUIRES(port->port_mutex)
171
0
{
172
0
    size_t hash = hash_int(pmd_id, 0);
173
0
    struct pmd_id_data *data;
174
175
0
    HMAP_FOR_EACH_WITH_HASH (data, node, hash, &off_flow->pmd_id_map) {
176
0
        if (data->pmd_id == pmd_id && data->flow_reference == flow_reference) {
177
0
            hmap_remove(&off_flow->pmd_id_map, &data->node);
178
0
            free(data);
179
0
            return true;
180
0
        }
181
0
    }
182
183
0
    return false;
184
0
}
185
186
/* Drops every per-PMD reference held by 'off_flow': each entry is removed
 * from the map, its reference released through the unreference callback,
 * and the entry freed. */
static void
dummy_cleanup_flow_pmd_data(struct dummy_offload *offload,
                            struct dummy_offload_port *port OVS_UNUSED,
                            struct dummy_offloaded_flow *off_flow)
    OVS_REQUIRES(port->port_mutex)
{
    struct pmd_id_data *data;

    /* SAFE variant: entries are removed while iterating. */
    HMAP_FOR_EACH_SAFE (data, node, &off_flow->pmd_id_map) {
        hmap_remove(&off_flow->pmd_id_map, &data->node);

        dummy_flow_unreference(offload, data->pmd_id, data->flow_reference);
        free(data);
    }
}
201
202
static struct dummy_offloaded_flow *
203
dummy_add_flow(struct dummy_offload_port *port, const ovs_u128 *ufid,
204
               unsigned pmd_id, void *flow_reference, uint32_t mark)
205
    OVS_REQUIRES(port->port_mutex)
206
0
{
207
0
    struct dummy_offloaded_flow *off_flow = xzalloc(sizeof *off_flow);
208
209
0
    off_flow->mark = mark;
210
0
    memcpy(&off_flow->ufid, ufid, sizeof off_flow->ufid);
211
0
    hmap_init(&off_flow->pmd_id_map);
212
0
    dummy_add_flow_pmd_data(port, off_flow, pmd_id, flow_reference);
213
214
0
    hmap_insert(&port->offloaded_flows, &off_flow->node,
215
0
                dummy_flow_hash(ufid));
216
217
0
    return off_flow;
218
0
}
219
220
static void
221
dummy_free_flow(struct dummy_offload_port *port,
222
                struct dummy_offloaded_flow *off_flow, bool remove_from_port)
223
    OVS_REQUIRES(port->port_mutex)
224
0
{
225
0
    if (remove_from_port) {
226
0
        hmap_remove(&port->offloaded_flows, &off_flow->node);
227
0
    }
228
0
    ovs_assert(!hmap_count(&off_flow->pmd_id_map));
229
230
0
    hmap_destroy(&off_flow->pmd_id_map);
231
0
    free(off_flow);
232
0
}
233
234
static struct dummy_offloaded_flow *
235
dummy_find_offloaded_flow(struct dummy_offload_port *port,
236
                          const ovs_u128 *ufid)
237
    OVS_REQUIRES(port->port_mutex)
238
0
{
239
0
    uint32_t hash = dummy_flow_hash(ufid);
240
0
    struct dummy_offloaded_flow *data;
241
242
0
    HMAP_FOR_EACH_WITH_HASH (data, node, hash, &port->offloaded_flows) {
243
0
        if (ovs_u128_equals(*ufid, data->ufid)) {
244
0
            return data;
245
0
        }
246
0
    }
247
248
0
    return NULL;
249
0
}
250
251
static struct dummy_offloaded_flow *
252
dummy_find_offloaded_flow_and_update(struct dummy_offload_port *port,
253
                                     const ovs_u128 *ufid, unsigned pmd_id,
254
                                     void *new_flow_reference,
255
                                     void **previous_flow_reference)
256
    OVS_REQUIRES(port->port_mutex)
257
0
{
258
0
    struct dummy_offloaded_flow *off_flow;
259
260
0
    off_flow = dummy_find_offloaded_flow(port, ufid);
261
0
    if (!off_flow) {
262
0
        return NULL;
263
0
    }
264
265
0
    dummy_update_flow_pmd_data(port, off_flow, pmd_id, new_flow_reference,
266
0
                               previous_flow_reference);
267
268
0
    return off_flow;
269
0
}
270
271
/* Enables offload handling for 'port': advertises post-processing support
 * on its netdev and attaches 'dpif_offload' as the netdev's offload
 * provider. */
static void
dummy_offload_enable(struct dpif_offload *dpif_offload,
                     struct dpif_offload_port *port)
{
    atomic_store_relaxed(&port->netdev->hw_info.post_process_api_supported,
                         true);
    dpif_offload_set_netdev_offload(port->netdev, dpif_offload);
}
279
280
/* Detaches the offload provider from the port's netdev.
 * NOTE(review): the 'post_process_api_supported' flag set in
 * dummy_offload_enable() is not cleared here — confirm that is intended. */
static void
dummy_offload_cleanup(struct dpif_offload_port *port)
{
    dpif_offload_set_netdev_offload(port->netdev, NULL);
}
285
286
/* Releases everything owned by 'port': every offloaded flow (including its
 * per-PMD references), the flow map, and the mutex; closes the netdev
 * reference when 'close_netdev' is true; finally frees 'port' itself. */
static void
dummy_free_port__(struct dummy_offload *offload,
                  struct dummy_offload_port *port, bool close_netdev)
{
    struct dummy_offloaded_flow *off_flow;

    ovs_mutex_lock(&port->port_mutex);
    HMAP_FOR_EACH_POP (off_flow, node, &port->offloaded_flows) {
        dummy_cleanup_flow_pmd_data(offload, port, off_flow);
        /* POP already unlinked the flow, so don't remove it again. */
        dummy_free_flow(port, off_flow, false);
    }
    hmap_destroy(&port->offloaded_flows);
    ovs_mutex_unlock(&port->port_mutex);
    ovs_mutex_destroy(&port->port_mutex);
    if (close_netdev) {
        netdev_close(port->pm_port.netdev);
    }
    free(port);
}
305
306
/* Context passed to the RCU-deferred port destructor. */
struct free_port_rcu {
    struct dummy_offload *offload;   /* Owning offload instance. */
    struct dummy_offload_port *port; /* Port to be freed after a grace
                                      * period. */
};
310
311
/* RCU callback: frees the port (closing its netdev reference) once all RCU
 * readers have quiesced, then frees the callback context itself. */
static void
dummy_free_port_rcu(struct free_port_rcu *fpc)
{
    dummy_free_port__(fpc->offload, fpc->port, true);
    free(fpc);
}
317
318
static void
319
dummy_free_port(struct dummy_offload *offload, struct dummy_offload_port *port)
320
0
{
321
0
    struct free_port_rcu *fpc = xmalloc(sizeof *fpc);
322
323
0
    fpc->offload = offload;
324
0
    fpc->port = port;
325
0
    ovsrcu_postpone(dummy_free_port_rcu, fpc);
326
0
}
327
328
static int
329
dummy_offload_port_add(struct dpif_offload *dpif_offload,
330
                       struct netdev *netdev, odp_port_t port_no)
331
0
{
332
0
    struct dummy_offload *offload = dummy_offload_cast(dpif_offload);
333
0
    struct dummy_offload_port *port = xmalloc(sizeof *port);
334
335
0
    ovs_mutex_init(&port->port_mutex);
336
0
    ovs_mutex_lock(&port->port_mutex);
337
0
    hmap_init(&port->offloaded_flows);
338
0
    ovs_mutex_unlock(&port->port_mutex);
339
340
0
    if (dpif_offload_port_mgr_add(dpif_offload, &port->pm_port, netdev,
341
0
                                  port_no, false)) {
342
343
0
        if (dpif_offload_enabled()) {
344
0
            dummy_offload_enable(dpif_offload, &port->pm_port);
345
0
        }
346
0
        return 0;
347
0
    }
348
349
0
    dummy_free_port__(offload, port, false);
350
0
    return EEXIST;
351
0
}
352
353
static int
354
dummy_offload_port_del(struct dpif_offload *dpif_offload, odp_port_t port_no)
355
0
{
356
0
    struct dummy_offload *offload = dummy_offload_cast(dpif_offload);
357
0
    struct dpif_offload_port *port;
358
359
0
    port = dpif_offload_port_mgr_remove(dpif_offload, port_no);
360
0
    if (port) {
361
0
        struct dummy_offload_port *dummy_port;
362
363
0
        dummy_port = dummy_offload_port_cast(port);
364
0
        if (dpif_offload_enabled()) {
365
0
            dummy_offload_cleanup(port);
366
0
        }
367
0
        dummy_free_port(offload, dummy_port);
368
0
    }
369
0
    return 0;
370
0
}
371
372
static struct netdev *
373
dummy_offload_get_netdev(const struct dpif_offload *dpif_offload,
374
                         odp_port_t port_no)
375
0
{
376
0
    struct dpif_offload_port *port;
377
378
0
    port = dpif_offload_port_mgr_find_by_odp_port(dpif_offload, port_no);
379
0
    if (!port) {
380
0
        return NULL;
381
0
    }
382
383
0
    return port->netdev;
384
0
}
385
386
static int
387
dummy_offload_open(const struct dpif_offload_class *offload_class,
388
                   struct dpif *dpif, struct dpif_offload **dpif_offload)
389
0
{
390
0
    struct dummy_offload *offload;
391
392
0
    offload = xmalloc(sizeof *offload);
393
394
0
    dpif_offload_init(&offload->offload, offload_class, dpif);
395
0
    offload->once_enable = (struct ovsthread_once) OVSTHREAD_ONCE_INITIALIZER;
396
0
    offload->flow_mark_pool = NULL;
397
0
    offload->unreference_cb = NULL;
398
399
0
    *dpif_offload = &offload->offload;
400
0
    return 0;
401
0
}
402
403
/* Destroys the offload instance: deletes any ports still registered,
 * releases the flow-mark pool, and frees the instance itself. */
static void
dummy_offload_close(struct dpif_offload *dpif_offload)
{
    struct dummy_offload *offload = dummy_offload_cast(dpif_offload);
    struct dpif_offload_port *port;

    /* The ofproto layer may not call dpif_port_del() for all ports,
     * especially internal ones, so we need to clean up any remaining ports. */
    /* NOTE(review): ports are deleted while iterating; assumes the
     * DPIF_OFFLOAD_PORT_FOR_EACH macro tolerates removal of the current
     * entry — confirm against its definition. */
    DPIF_OFFLOAD_PORT_FOR_EACH (port, dpif_offload) {
        dummy_offload_port_del(dpif_offload, port->port_no);
    }

    if (offload->flow_mark_pool) {
        id_fpool_destroy(offload->flow_mark_pool);
    }
    ovsthread_once_destroy(&offload->once_enable);
    dpif_offload_destroy(dpif_offload);
    free(offload);
}
422
423
static void
424
dummy_offload_set_config(struct dpif_offload *dpif_offload,
425
                         const struct smap *other_cfg)
426
0
{
427
0
    struct dummy_offload *offload = dummy_offload_cast(dpif_offload);
428
429
0
    if (smap_get_bool(other_cfg, "hw-offload", false)) {
430
0
        if (ovsthread_once_start(&offload->once_enable)) {
431
0
            struct dpif_offload_port *port;
432
433
0
            DPIF_OFFLOAD_PORT_FOR_EACH (port, dpif_offload) {
434
0
                dummy_offload_enable(dpif_offload, port);
435
0
            }
436
437
0
            ovsthread_once_done(&offload->once_enable);
438
0
        }
439
0
    }
440
0
}
441
442
static void
443
dummy_offload_get_debug(const struct dpif_offload *offload, struct ds *ds,
444
                        struct json *json)
445
0
{
446
0
    if (json) {
447
0
        struct json *json_ports = json_object_create();
448
0
        struct dpif_offload_port *port;
449
450
0
        DPIF_OFFLOAD_PORT_FOR_EACH (port, offload) {
451
0
            struct json *json_port = json_object_create();
452
453
0
            json_object_put(json_port, "port_no",
454
0
                            json_integer_create(odp_to_u32(port->port_no)));
455
456
0
            json_object_put(json_ports, netdev_get_name(port->netdev),
457
0
                            json_port);
458
0
        }
459
460
0
        if (!json_object_is_empty(json_ports)) {
461
0
            json_object_put(json, "ports", json_ports);
462
0
        } else {
463
0
            json_destroy(json_ports);
464
0
        }
465
0
    } else if (ds) {
466
0
        struct dpif_offload_port *port;
467
468
0
        DPIF_OFFLOAD_PORT_FOR_EACH (port, offload) {
469
0
            ds_put_format(ds, "  - %s: port_no: %u\n",
470
0
                          netdev_get_name(port->netdev), port->port_no);
471
0
        }
472
0
    }
473
0
}
474
475
static int
476
dummy_offload_get_global_stats(const struct dpif_offload *offload,
477
                               struct netdev_custom_stats *stats)
478
0
{
479
    /* Add a single counter telling how many ports we are servicing. */
480
0
    stats->label = xstrdup(dpif_offload_name(offload));
481
0
    stats->size = 1;
482
0
    stats->counters = xmalloc(sizeof(struct netdev_custom_counter) * 1);
483
0
    stats->counters[0].value = dpif_offload_port_mgr_port_count(offload);
484
0
    ovs_strzcpy(stats->counters[0].name, "Offloaded port count",
485
0
                sizeof stats->counters[0].name);
486
487
0
    return 0;
488
0
}
489
490
/* Returns true if 'netdev' belongs to the dummy netdev class, the only
 * kind of device this provider can offload to. */
static bool
dummy_can_offload(struct dpif_offload *dpif_offload OVS_UNUSED,
                  struct netdev *netdev)
{
    return is_dummy_netdev_class(netdev->netdev_class);
}
496
497
/* Logs the outcome of flow operation 'op' ("add", "modify", or "delete")
 * on 'ufid' at debug level; 'error' of 0 means success.
 * NOTE(review): the message reads "succeed to <op>" — awkward wording, but
 * left untouched in case tests grep for the exact string. */
static void
dummy_offload_log_operation(const char *op, int error, const ovs_u128 *ufid)
{
    VLOG_DBG("%s to %s netdev flow "UUID_FMT,
             error == 0 ? "succeed" : "failed", op,
             UUID_ARGS((struct uuid *) ufid));
}
504
505
static struct dummy_offload_port *
506
dummy_offload_get_port_by_netdev(const struct dpif_offload *offload,
507
                                 struct netdev *netdev)
508
0
{
509
0
    struct dpif_offload_port *port;
510
511
0
    port = dpif_offload_port_mgr_find_by_netdev(offload, netdev);
512
0
    if (!port) {
513
0
        return NULL;
514
0
    }
515
0
    return dummy_offload_port_cast(port);
516
0
}
517
518
static int
519
dummy_offload_hw_post_process(const struct dpif_offload *offload_,
520
                              struct netdev *netdev, unsigned pmd_id,
521
                              struct dp_packet *packet, void **flow_reference_)
522
0
{
523
0
    struct dummy_offloaded_flow *off_flow;
524
0
    struct dummy_offload_port *port;
525
0
    void *flow_reference = NULL;
526
0
    uint32_t flow_mark;
527
528
0
    port = dummy_offload_get_port_by_netdev(offload_, netdev);
529
0
    if (!port || !dp_packet_has_flow_mark(packet, &flow_mark)) {
530
0
        *flow_reference_ = NULL;
531
0
        return 0;
532
0
    }
533
534
0
    ovs_mutex_lock(&port->port_mutex);
535
0
    HMAP_FOR_EACH (off_flow, node, &port->offloaded_flows) {
536
0
        struct pmd_id_data *pmd_data;
537
538
0
        if (flow_mark == off_flow->mark) {
539
0
            pmd_data = dummy_find_flow_pmd_data(port, off_flow, pmd_id);
540
0
            if (pmd_data) {
541
0
                flow_reference = pmd_data->flow_reference;
542
0
            }
543
0
            break;
544
0
        }
545
0
    }
546
0
    ovs_mutex_unlock(&port->port_mutex);
547
548
0
     *flow_reference_ = flow_reference;
549
0
    return 0;
550
0
}
551
552
/* Creates or modifies the offloaded flow identified by 'put->ufid' on
 * 'netdev', recording 'put->flow_reference' for 'put->pmd_id'.  Any
 * replaced reference for that PMD is returned in
 * '*previous_flow_reference' (NULL when the flow or entry is new).
 * Returns 0 on success, ENODEV when 'netdev' is not an offload port, or
 * ENOSPC when no flow mark is available. */
static int
dummy_flow_put(const struct dpif_offload *offload_, struct netdev *netdev,
               struct dpif_offload_flow_put *put,
               void **previous_flow_reference)
{
    struct dummy_offload *offload = dummy_offload_cast(offload_);
    struct dummy_offloaded_flow *off_flow;
    struct dummy_offload_port *port;
    bool modify = true;
    int error = 0;

    port = dummy_offload_get_port_by_netdev(offload_, netdev);
    if (!port) {
        error = ENODEV;
        goto exit;
    }

    ovs_mutex_lock(&port->port_mutex);

    off_flow = dummy_find_offloaded_flow_and_update(
        port, put->ufid, put->pmd_id, put->flow_reference,
        previous_flow_reference);

    if (!off_flow) {
        /* Create new offloaded flow. */
        uint32_t mark = dummy_allocate_flow_mark(offload);

        if (mark == INVALID_FLOW_MARK) {
            error = ENOSPC;
            goto exit_unlock;
        }

        off_flow = dummy_add_flow(port, put->ufid, put->pmd_id,
                                  put->flow_reference, mark);
        modify = false;
        *previous_flow_reference = NULL;
    }
    memcpy(&off_flow->match, put->match, sizeof *put->match);

    /* As we have per-netdev 'offloaded_flows', we don't need to match
     * the 'in_port' for received packets.  This will also allow offloading
     * for packets passed to 'receive' command without specifying the
     * 'in_port'. */
    off_flow->match.wc.masks.in_port.odp_port = 0;

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds ds = DS_EMPTY_INITIALIZER;

        ds_put_format(&ds, "%s: flow put[%s]: ", netdev_get_name(netdev),
                      modify ? "modify" : "create");
        odp_format_ufid(put->ufid, &ds);
        ds_put_cstr(&ds, " flow match: ");
        match_format(put->match, NULL, &ds, OFP_DEFAULT_PRIORITY);
        ds_put_format(&ds, ", mark: %"PRIu32, off_flow->mark);

        VLOG_DBG("%s", ds_cstr(&ds));
        ds_destroy(&ds);
    }

exit_unlock:
    ovs_mutex_unlock(&port->port_mutex);

exit:
    if (put->stats) {
        memset(put->stats, 0, sizeof *put->stats);
    }

    /* NOTE(review): on the ENODEV/ENOSPC paths 'modify' is still true, so
     * the failure is logged as "modify" even when this was a create attempt
     * — confirm that is acceptable for a debug-only log. */
    dummy_offload_log_operation(modify ? "modify" : "add", error, put->ufid);
    return error;
}
622
623
/* Deletes the per-PMD reference ('del->pmd_id' + 'del->flow_reference')
 * for the flow identified by 'del->ufid' on 'netdev'.  When the last
 * reference is gone the flow itself and its flow mark are released.
 * Returns 0 on success, ENOENT when the device, flow, or reference does
 * not exist. */
static int
dummy_flow_del(const struct dpif_offload *offload_, struct netdev *netdev,
               struct dpif_offload_flow_del *del)
{
    struct dummy_offload *offload = dummy_offload_cast(offload_);
    struct dummy_offloaded_flow *off_flow;
    uint32_t mark = INVALID_FLOW_MARK;
    struct dummy_offload_port *port;
    const char *error = NULL;  /* Human-readable failure reason, or NULL. */

    port = dummy_offload_get_port_by_netdev(offload_, netdev);
    if (!port) {
        error = "No such (net)device.";
        goto exit;
    }

    ovs_mutex_lock(&port->port_mutex);

    off_flow = dummy_find_offloaded_flow(port, del->ufid);
    if (!off_flow) {
        error = "No such flow.";
        goto exit_unlock;
    }

    if (!dummy_del_flow_pmd_data(port, off_flow, del->pmd_id,
                                 del->flow_reference)) {
        error = "No such flow with pmd_id and reference.";
        goto exit_unlock;
    }

    /* If that was the last per-PMD reference, tear the flow down. */
    mark = off_flow->mark;
    if (!hmap_count(&off_flow->pmd_id_map)) {
        dummy_free_flow_mark(offload, mark);
        dummy_free_flow(port, off_flow, true);
    }

exit_unlock:
    ovs_mutex_unlock(&port->port_mutex);

exit:
    /* Log failures at warning level; successes only when debugging. */
    if (error || VLOG_IS_DBG_ENABLED()) {
        struct ds ds = DS_EMPTY_INITIALIZER;

        ds_put_format(&ds, "%s: ", netdev_get_name(netdev));
        if (error) {
            ds_put_cstr(&ds, "failed to ");
        }
        ds_put_cstr(&ds, "flow del: ");
        odp_format_ufid(del->ufid, &ds);
        if (error) {
            ds_put_format(&ds, " error: %s", error);
        } else {
            ds_put_format(&ds, " mark: %"PRIu32, mark);
        }
        VLOG(error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
    }

    if (del->stats) {
        memset(del->stats, 0, sizeof *del->stats);
    }

    dummy_offload_log_operation("delete", error ? -1 : 0, del->ufid);
    return error ? ENOENT : 0;
}
688
689
static bool
690
dummy_flow_stats(const struct dpif_offload *offload_, struct netdev *netdev,
691
                 const ovs_u128 *ufid, struct dpif_flow_stats *stats,
692
                 struct dpif_flow_attrs *attrs)
693
0
{
694
0
    struct dummy_offloaded_flow *off_flow = NULL;
695
0
    struct dummy_offload_port *port;
696
697
0
    port = dummy_offload_get_port_by_netdev(offload_, netdev);
698
0
    if (!port) {
699
0
        return false;
700
0
    }
701
702
0
    ovs_mutex_lock(&port->port_mutex);
703
0
    off_flow = dummy_find_offloaded_flow(port, ufid);
704
0
    ovs_mutex_unlock(&port->port_mutex);
705
706
0
    memset(stats, 0, sizeof *stats);
707
0
    attrs->offloaded = off_flow ? true : false;
708
0
    attrs->dp_layer = "ovs"; /* 'ovs', since this is a partial offload. */
709
0
    attrs->dp_extra_info = NULL;
710
711
0
    return off_flow ? true : false;
712
0
}
713
714
/* Registers 'cb' as the callback used to release per-PMD flow references.
 * NOTE(review): the assignment is a plain (non-atomic) store — confirm
 * registration happens before any concurrent flow operations. */
static void
dummy_register_flow_unreference_cb(const struct dpif_offload *offload_,
                                   dpif_offload_flow_unreference_cb *cb)
{
    struct dummy_offload *offload = dummy_offload_cast(offload_);

    offload->unreference_cb = cb;
}
722
723
/* Releases 'flow_reference' for 'pmd_id' through the registered callback.
 * A no-op when no callback has been registered. */
static void
dummy_flow_unreference(struct dummy_offload *offload, unsigned pmd_id,
                       void *flow_reference)
{
    if (offload->unreference_cb) {
        offload->unreference_cb(pmd_id, flow_reference);
    }
}
731
732
/* Simulates receive-side hardware offload for dummy netdevs: if 'packet'
 * matches one of the flows offloaded on 'netdev', the flow's mark is set
 * on the packet.  'flow' may be NULL, in which case it is extracted from
 * the packet.  A no-op when 'netdev' has no "dummy" offload provider
 * attached or is not a registered offload port. */
void
dummy_netdev_simulate_offload(struct netdev *netdev, struct dp_packet *packet,
                              struct flow *flow)
{
    const struct dpif_offload *offload = ovsrcu_get(
        const struct dpif_offload *, &netdev->dpif_offload);
    struct dummy_offloaded_flow *data;
    struct dummy_offload_port *port;
    struct flow packet_flow;

    if (!offload || strcmp(dpif_offload_type(offload), "dummy")) {
        return;
    }

    port = dummy_offload_get_port_by_netdev(offload, netdev);
    if (!port) {
        return;
    }

    if (!flow) {
        flow = &packet_flow;
        flow_extract(packet, flow);
    }

    ovs_mutex_lock(&port->port_mutex);
    HMAP_FOR_EACH (data, node, &port->offloaded_flows) {
        if (flow_equal_except(flow, &data->match.flow, &data->match.wc)) {

            dp_packet_set_flow_mark(packet, data->mark);

            if (VLOG_IS_DBG_ENABLED()) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ds_put_format(&ds, "%s: packet: ",
                              netdev_get_name(netdev));
                /* 'flow' does not contain proper port number here.
                 * Let's just clear it as it's wildcarded anyway. */
                /* NOTE(review): when the caller passed in 'flow', this
                 * clears the caller's in_port under debug logging only —
                 * confirm callers do not rely on it afterwards. */
                flow->in_port.ofp_port = 0;
                flow_format(&ds, flow, NULL);

                ds_put_cstr(&ds, " matches with flow: ");
                odp_format_ufid(&data->ufid, &ds);
                ds_put_cstr(&ds, " ");
                match_format(&data->match, NULL, &ds, OFP_DEFAULT_PRIORITY);
                ds_put_format(&ds, " with mark: %"PRIu32, data->mark);

                VLOG_DBG("%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
            break;
        }
    }
    ovs_mutex_unlock(&port->port_mutex);
}
786
787
/* Defines a dpif-offload class named NAME with offload type TYPE_STR,
 * wiring up all the dummy provider callbacks above.  Instantiated twice
 * below ("dummy" and "dummy_x") so two providers are available. */
#define DEFINE_DPIF_DUMMY_CLASS(NAME, TYPE_STR)                             \
    struct dpif_offload_class NAME = {                                      \
        .type = TYPE_STR,                                                   \
        .impl_type = DPIF_OFFLOAD_IMPL_FLOWS_DPIF_SYNCED,                   \
        .supported_dpif_types = (const char *const[]) {"dummy", NULL},      \
        .open = dummy_offload_open,                                         \
        .close = dummy_offload_close,                                       \
        .set_config = dummy_offload_set_config,                             \
        .get_debug = dummy_offload_get_debug,                               \
        .get_global_stats = dummy_offload_get_global_stats,                 \
        .can_offload = dummy_can_offload,                                   \
        .port_add = dummy_offload_port_add,                                 \
        .port_del = dummy_offload_port_del,                                 \
        .get_netdev = dummy_offload_get_netdev,                             \
        .netdev_hw_post_process = dummy_offload_hw_post_process,            \
        .netdev_flow_put = dummy_flow_put,                                  \
        .netdev_flow_del = dummy_flow_del,                                  \
        .netdev_flow_stats = dummy_flow_stats,                              \
        .register_flow_unreference_cb = dummy_register_flow_unreference_cb, \
}
807
808
DEFINE_DPIF_DUMMY_CLASS(dpif_offload_dummy_class, "dummy");
809
DEFINE_DPIF_DUMMY_CLASS(dpif_offload_dummy_x_class, "dummy_x");