Coverage Report

Created: 2025-10-09 06:33

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/openvswitch/lib/dpif.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include <config.h>
18
#include "dpif-provider.h"
19
20
#include <ctype.h>
21
#include <errno.h>
22
#include <inttypes.h>
23
#include <stdlib.h>
24
#include <string.h>
25
26
#include "coverage.h"
27
#include "dp-packet.h"
28
#include "dpctl.h"
29
#include "dpif-netdev.h"
30
#include "flow.h"
31
#include "netdev-offload.h"
32
#include "netdev-provider.h"
33
#include "netdev.h"
34
#include "netlink.h"
35
#include "odp-execute.h"
36
#include "odp-util.h"
37
#include "packets.h"
38
#include "route-table.h"
39
#include "seq.h"
40
#include "sset.h"
41
#include "timeval.h"
42
#include "tnl-neigh-cache.h"
43
#include "tnl-ports.h"
44
#include "util.h"
45
#include "uuid.h"
46
#include "valgrind.h"
47
#include "openvswitch/dynamic-string.h"
48
#include "openvswitch/ofp-errors.h"
49
#include "openvswitch/ofp-print.h"
50
#include "openvswitch/ofpbuf.h"
51
#include "openvswitch/poll-loop.h"
52
#include "openvswitch/shash.h"
53
#include "openvswitch/usdt-probes.h"
54
#include "openvswitch/vlog.h"
55
56
VLOG_DEFINE_THIS_MODULE(dpif);
57
58
COVERAGE_DEFINE(dpif_destroy);
59
COVERAGE_DEFINE(dpif_execute);
60
COVERAGE_DEFINE(dpif_execute_error);
61
COVERAGE_DEFINE(dpif_execute_with_help);
62
COVERAGE_DEFINE(dpif_flow_del);
63
COVERAGE_DEFINE(dpif_flow_del_error);
64
COVERAGE_DEFINE(dpif_flow_flush);
65
COVERAGE_DEFINE(dpif_flow_get);
66
COVERAGE_DEFINE(dpif_flow_get_error);
67
COVERAGE_DEFINE(dpif_flow_put);
68
COVERAGE_DEFINE(dpif_flow_put_error);
69
COVERAGE_DEFINE(dpif_meter_del);
70
COVERAGE_DEFINE(dpif_meter_get);
71
COVERAGE_DEFINE(dpif_meter_set);
72
COVERAGE_DEFINE(dpif_port_add);
73
COVERAGE_DEFINE(dpif_port_del);
74
COVERAGE_DEFINE(dpif_purge);
75
76
static const struct dpif_class *base_dpif_classes[] = {
77
#if defined(__linux__) || defined(_WIN32)
78
    &dpif_netlink_class,
79
#endif
80
    &dpif_netdev_class,
81
};
82
83
struct registered_dpif_class {
84
    const struct dpif_class *dpif_class;
85
    int refcount;
86
};
87
static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
88
static struct sset dpif_disallowed = SSET_INITIALIZER(&dpif_disallowed);
89
90
/* Protects 'dpif_classes', including the refcount, and 'dpif_disallowed'. */
91
static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
92
93
/* Rate limit for individual messages going to or from the datapath, output at
94
 * DBG level.  This is very high because, if these are enabled, it is because
95
 * we really need to see them. */
96
static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
97
98
/* Not really much point in logging many dpif errors. */
99
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
100
101
static void log_operation(const struct dpif *, const char *operation,
102
                          int error);
103
static bool should_log_flow_message(const struct vlog_module *module,
104
                                    int error);
105
106
/* Incremented whenever tnl route, arp, etc changes. */
107
struct seq *tnl_conf_seq;
108
109
static bool
110
dpif_is_tap_port(const char *type)
111
0
{
112
0
    return !strcmp(type, "tap");
113
0
}
114
115
static void
116
dp_initialize(void)
117
0
{
118
0
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
119
120
0
    if (ovsthread_once_start(&once)) {
121
0
        int i;
122
123
0
        tnl_conf_seq = seq_create();
124
0
        dpctl_unixctl_register();
125
0
        tnl_port_map_init();
126
0
        tnl_neigh_cache_init();
127
0
        route_table_init();
128
129
0
        for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
130
0
            dp_register_provider(base_dpif_classes[i]);
131
0
        }
132
133
0
        ovsthread_once_done(&once);
134
0
    }
135
0
}
136
137
static int
138
dp_register_provider__(const struct dpif_class *new_class)
139
0
{
140
0
    struct registered_dpif_class *registered_class;
141
0
    int error;
142
143
0
    if (sset_contains(&dpif_disallowed, new_class->type)) {
144
0
        VLOG_DBG("attempted to register disallowed provider: %s",
145
0
                 new_class->type);
146
0
        return EINVAL;
147
0
    }
148
149
0
    if (shash_find(&dpif_classes, new_class->type)) {
150
0
        VLOG_WARN("attempted to register duplicate datapath provider: %s",
151
0
                  new_class->type);
152
0
        return EEXIST;
153
0
    }
154
155
0
    error = new_class->init ? new_class->init() : 0;
156
0
    if (error) {
157
0
        VLOG_WARN("failed to initialize %s datapath class: %s",
158
0
                  new_class->type, ovs_strerror(error));
159
0
        return error;
160
0
    }
161
162
0
    registered_class = xmalloc(sizeof *registered_class);
163
0
    registered_class->dpif_class = new_class;
164
0
    registered_class->refcount = 0;
165
166
0
    shash_add(&dpif_classes, new_class->type, registered_class);
167
168
0
    return 0;
169
0
}
170
171
/* Registers a new datapath provider.  After successful registration, new
172
 * datapaths of that type can be opened using dpif_open(). */
173
int
174
dp_register_provider(const struct dpif_class *new_class)
175
0
{
176
0
    int error;
177
178
0
    ovs_mutex_lock(&dpif_mutex);
179
0
    error = dp_register_provider__(new_class);
180
0
    ovs_mutex_unlock(&dpif_mutex);
181
182
0
    return error;
183
0
}
184
185
/* Unregisters a datapath provider.  'type' must have been previously
186
 * registered and not currently be in use by any dpifs.  After unregistration
187
 * new datapaths of that type cannot be opened using dpif_open(). */
188
static int
189
dp_unregister_provider__(const char *type)
190
0
{
191
0
    struct shash_node *node;
192
0
    struct registered_dpif_class *registered_class;
193
194
0
    node = shash_find(&dpif_classes, type);
195
0
    if (!node) {
196
0
        return EAFNOSUPPORT;
197
0
    }
198
199
0
    registered_class = node->data;
200
0
    if (registered_class->refcount) {
201
0
        VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
202
0
        return EBUSY;
203
0
    }
204
205
0
    shash_delete(&dpif_classes, node);
206
0
    free(registered_class);
207
208
0
    return 0;
209
0
}
210
211
/* Unregisters a datapath provider.  'type' must have been previously
212
 * registered and not currently be in use by any dpifs.  After unregistration
213
 * new datapaths of that type cannot be opened using dpif_open(). */
214
int
215
dp_unregister_provider(const char *type)
216
0
{
217
0
    int error;
218
219
0
    dp_initialize();
220
221
0
    ovs_mutex_lock(&dpif_mutex);
222
0
    error = dp_unregister_provider__(type);
223
0
    ovs_mutex_unlock(&dpif_mutex);
224
225
0
    return error;
226
0
}
227
228
/* Disallows a provider.  Causes future calls of dp_register_provider() with
229
 * a dpif_class which implements 'type' to fail. */
230
void
231
dp_disallow_provider(const char *type)
232
0
{
233
0
    ovs_mutex_lock(&dpif_mutex);
234
0
    sset_add(&dpif_disallowed, type);
235
0
    ovs_mutex_unlock(&dpif_mutex);
236
0
}
237
238
/* Adds the types of all currently registered datapath providers to 'types'.
239
 * The caller must first initialize the sset. */
240
void
241
dp_enumerate_types(struct sset *types)
242
0
{
243
0
    struct shash_node *node;
244
245
0
    dp_initialize();
246
247
0
    ovs_mutex_lock(&dpif_mutex);
248
0
    SHASH_FOR_EACH(node, &dpif_classes) {
249
0
        const struct registered_dpif_class *registered_class = node->data;
250
0
        sset_add(types, registered_class->dpif_class->type);
251
0
    }
252
0
    ovs_mutex_unlock(&dpif_mutex);
253
0
}
254
255
static void
256
dp_class_unref(struct registered_dpif_class *rc)
257
0
{
258
0
    ovs_mutex_lock(&dpif_mutex);
259
0
    ovs_assert(rc->refcount);
260
0
    rc->refcount--;
261
0
    ovs_mutex_unlock(&dpif_mutex);
262
0
}
263
264
static struct registered_dpif_class *
265
dp_class_lookup(const char *type)
266
0
{
267
0
    struct registered_dpif_class *rc;
268
269
0
    ovs_mutex_lock(&dpif_mutex);
270
0
    rc = shash_find_data(&dpif_classes, type);
271
0
    if (rc) {
272
0
        rc->refcount++;
273
0
    }
274
0
    ovs_mutex_unlock(&dpif_mutex);
275
276
0
    return rc;
277
0
}
278
279
/* Clears 'names' and enumerates the names of all known created datapaths with
280
 * the given 'type'.  The caller must first initialize the sset.  Returns 0 if
281
 * successful, otherwise a positive errno value.
282
 *
283
 * Some kinds of datapaths might not be practically enumerable.  This is not
284
 * considered an error. */
285
int
286
dp_enumerate_names(const char *type, struct sset *names)
287
0
{
288
0
    struct registered_dpif_class *registered_class;
289
0
    const struct dpif_class *dpif_class;
290
0
    int error;
291
292
0
    dp_initialize();
293
0
    sset_clear(names);
294
295
0
    registered_class = dp_class_lookup(type);
296
0
    if (!registered_class) {
297
0
        VLOG_WARN("could not enumerate unknown type: %s", type);
298
0
        return EAFNOSUPPORT;
299
0
    }
300
301
0
    dpif_class = registered_class->dpif_class;
302
0
    error = (dpif_class->enumerate
303
0
             ? dpif_class->enumerate(names, dpif_class)
304
0
             : 0);
305
0
    if (error) {
306
0
        VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
307
0
                   ovs_strerror(error));
308
0
    }
309
0
    dp_class_unref(registered_class);
310
311
0
    return error;
312
0
}
313
314
/* Parses 'datapath_name_', which is of the form [type@]name into its
315
 * component pieces.  'name' and 'type' must be freed by the caller.
316
 *
317
 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
318
void
319
dp_parse_name(const char *datapath_name_, char **name, char **type)
320
0
{
321
0
    char *datapath_name = xstrdup(datapath_name_);
322
0
    char *separator;
323
324
0
    separator = strchr(datapath_name, '@');
325
0
    if (separator) {
326
0
        *separator = '\0';
327
0
        *type = datapath_name;
328
0
        *name = xstrdup(dpif_normalize_type(separator + 1));
329
0
    } else {
330
0
        *name = datapath_name;
331
0
        *type = xstrdup(dpif_normalize_type(NULL));
332
0
    }
333
0
}
334
335
static int
336
do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
337
0
{
338
0
    struct dpif *dpif = NULL;
339
0
    int error;
340
0
    struct registered_dpif_class *registered_class;
341
342
0
    dp_initialize();
343
344
0
    type = dpif_normalize_type(type);
345
0
    registered_class = dp_class_lookup(type);
346
0
    if (!registered_class) {
347
0
        VLOG_WARN("could not create datapath %s of unknown type %s", name,
348
0
                  type);
349
0
        error = EAFNOSUPPORT;
350
0
        goto exit;
351
0
    }
352
353
0
    error = registered_class->dpif_class->open(registered_class->dpif_class,
354
0
                                               name, create, &dpif);
355
0
    if (!error) {
356
0
        const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
357
0
        struct dpif_port_dump port_dump;
358
0
        struct dpif_port dpif_port;
359
360
0
        ovs_assert(dpif->dpif_class == registered_class->dpif_class);
361
362
0
        DPIF_PORT_FOR_EACH(&dpif_port, &port_dump, dpif) {
363
0
            struct netdev *netdev;
364
0
            int err;
365
366
0
            if (dpif_is_tap_port(dpif_port.type)) {
367
0
                continue;
368
0
            }
369
370
0
            err = netdev_open(dpif_port.name, dpif_port.type, &netdev);
371
372
0
            if (!err) {
373
0
                netdev_set_dpif_type(netdev, dpif_type_str);
374
0
                netdev_ports_insert(netdev, &dpif_port);
375
0
                netdev_close(netdev);
376
0
            } else {
377
0
                VLOG_WARN("could not open netdev %s type %s: %s",
378
0
                          dpif_port.name, dpif_port.type, ovs_strerror(err));
379
0
            }
380
0
        }
381
0
    } else {
382
0
        dp_class_unref(registered_class);
383
0
    }
384
385
0
exit:
386
0
    *dpifp = error ? NULL : dpif;
387
0
    return error;
388
0
}
389
390
/* Tries to open an existing datapath named 'name' and type 'type'.  Will fail
391
 * if no datapath with 'name' and 'type' exists.  'type' may be either NULL or
392
 * the empty string to specify the default system type.  Returns 0 if
393
 * successful, otherwise a positive errno value.  On success stores a pointer
394
 * to the datapath in '*dpifp', otherwise a null pointer. */
395
int
396
dpif_open(const char *name, const char *type, struct dpif **dpifp)
397
0
{
398
0
    return do_open(name, type, false, dpifp);
399
0
}
400
401
/* Tries to create and open a new datapath with the given 'name' and 'type'.
402
 * 'type' may be either NULL or the empty string to specify the default system
403
 * type.  Will fail if a datapath with 'name' and 'type' already exists.
404
 * Returns 0 if successful, otherwise a positive errno value.  On success
405
 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
406
int
407
dpif_create(const char *name, const char *type, struct dpif **dpifp)
408
0
{
409
0
    return do_open(name, type, true, dpifp);
410
0
}
411
412
/* Tries to open a datapath with the given 'name' and 'type', creating it if it
413
 * does not exist.  'type' may be either NULL or the empty string to specify
414
 * the default system type.  Returns 0 if successful, otherwise a positive
415
 * errno value. On success stores a pointer to the datapath in '*dpifp',
416
 * otherwise a null pointer. */
417
int
418
dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
419
0
{
420
0
    int error;
421
422
0
    error = dpif_create(name, type, dpifp);
423
0
    if (error == EEXIST || error == EBUSY) {
424
0
        error = dpif_open(name, type, dpifp);
425
0
        if (error) {
426
0
            VLOG_WARN("datapath %s already exists but cannot be opened: %s",
427
0
                      name, ovs_strerror(error));
428
0
        }
429
0
    } else if (error) {
430
0
        VLOG_WARN("failed to create datapath %s: %s",
431
0
                  name, ovs_strerror(error));
432
0
    }
433
0
    return error;
434
0
}
435
436
static void
437
0
dpif_remove_netdev_ports(struct dpif *dpif) {
438
0
    const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
439
0
    struct dpif_port_dump port_dump;
440
0
    struct dpif_port dpif_port;
441
442
0
    DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
443
0
        if (!dpif_is_tap_port(dpif_port.type)) {
444
0
            netdev_ports_remove(dpif_port.port_no, dpif_type_str);
445
0
        }
446
0
    }
447
0
}
448
449
/* Closes and frees the connection to 'dpif'.  Does not destroy the datapath
450
 * itself; call dpif_delete() first, instead, if that is desirable. */
451
void
452
dpif_close(struct dpif *dpif)
453
0
{
454
0
    if (dpif) {
455
0
        struct registered_dpif_class *rc;
456
457
0
        rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
458
459
0
        if (rc->refcount == 1) {
460
0
            dpif_remove_netdev_ports(dpif);
461
0
        }
462
0
        dpif_uninit(dpif, true);
463
0
        dp_class_unref(rc);
464
0
    }
465
0
}
466
467
/* Performs periodic work needed by 'dpif'. */
468
bool
469
dpif_run(struct dpif *dpif)
470
0
{
471
0
    if (dpif->dpif_class->run) {
472
0
        return dpif->dpif_class->run(dpif);
473
0
    }
474
0
    return false;
475
0
}
476
477
/* Arranges for poll_block() to wake up when dpif_run() needs to be called for
478
 * 'dpif'. */
479
void
480
dpif_wait(struct dpif *dpif)
481
0
{
482
0
    if (dpif->dpif_class->wait) {
483
0
        dpif->dpif_class->wait(dpif);
484
0
    }
485
0
}
486
487
/* Returns the name of datapath 'dpif' prefixed with the type
488
 * (for use in log messages). */
489
const char *
490
dpif_name(const struct dpif *dpif)
491
0
{
492
0
    return dpif->full_name;
493
0
}
494
495
/* Returns the name of datapath 'dpif' without the type
496
 * (for use in device names). */
497
const char *
498
dpif_base_name(const struct dpif *dpif)
499
0
{
500
0
    return dpif->base_name;
501
0
}
502
503
/* Returns the type of datapath 'dpif'. */
504
const char *
505
dpif_type(const struct dpif *dpif)
506
0
{
507
0
    return dpif->dpif_class->type;
508
0
}
509
510
/* Checks if datapath 'dpif' requires cleanup. */
511
bool
512
dpif_cleanup_required(const struct dpif *dpif)
513
0
{
514
0
    return dpif->dpif_class->cleanup_required;
515
0
}
516
517
/* Returns the fully spelled out name for the given datapath 'type'.
518
 *
519
 * Normalized type string can be compared with strcmp().  Unnormalized type
520
 * string might be the same even if they have different spellings. */
521
const char *
522
dpif_normalize_type(const char *type)
523
0
{
524
0
    return type && type[0] ? type : "system";
525
0
}
526
527
/* Destroys the datapath that 'dpif' is connected to, first removing all of its
528
 * ports.  After calling this function, it does not make sense to pass 'dpif'
529
 * to any functions other than dpif_name() or dpif_close(). */
530
int
531
dpif_delete(struct dpif *dpif)
532
0
{
533
0
    int error;
534
535
0
    COVERAGE_INC(dpif_destroy);
536
537
0
    error = dpif->dpif_class->destroy(dpif);
538
0
    log_operation(dpif, "delete", error);
539
0
    return error;
540
0
}
541
542
/* Retrieves statistics for 'dpif' into 'stats'.  Returns 0 if successful,
543
 * otherwise a positive errno value. */
544
int
545
dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
546
0
{
547
0
    int error = dpif->dpif_class->get_stats(dpif, stats);
548
0
    if (error) {
549
0
        memset(stats, 0, sizeof *stats);
550
0
    }
551
0
    log_operation(dpif, "get_stats", error);
552
0
    return error;
553
0
}
554
555
int
556
dpif_set_features(struct dpif *dpif, uint32_t new_features)
557
0
{
558
0
    int error = dpif->dpif_class->set_features(dpif, new_features);
559
560
0
    log_operation(dpif, "set_features", error);
561
0
    return error;
562
0
}
563
564
const char *
565
dpif_port_open_type(const char *datapath_type, const char *port_type)
566
0
{
567
0
    struct registered_dpif_class *rc;
568
569
0
    datapath_type = dpif_normalize_type(datapath_type);
570
571
0
    ovs_mutex_lock(&dpif_mutex);
572
0
    rc = shash_find_data(&dpif_classes, datapath_type);
573
0
    if (rc && rc->dpif_class->port_open_type) {
574
0
        port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
575
0
    }
576
0
    ovs_mutex_unlock(&dpif_mutex);
577
578
0
    return port_type;
579
0
}
580
581
/* Attempts to add 'netdev' as a port on 'dpif'.  If 'port_nop' is
582
 * non-null and its value is not ODPP_NONE, then attempts to use the
583
 * value as the port number.
584
 *
585
 * If successful, returns 0 and sets '*port_nop' to the new port's port
586
 * number (if 'port_nop' is non-null).  On failure, returns a positive
587
 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
588
 * non-null). */
589
int
590
dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
591
0
{
592
0
    const char *netdev_name = netdev_get_name(netdev);
593
0
    odp_port_t port_no = ODPP_NONE;
594
0
    int error;
595
596
0
    COVERAGE_INC(dpif_port_add);
597
598
0
    if (port_nop) {
599
0
        port_no = *port_nop;
600
0
    }
601
602
0
    error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
603
0
    if (!error) {
604
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
605
0
                    dpif_name(dpif), netdev_name, port_no);
606
607
0
        if (!dpif_is_tap_port(netdev_get_type(netdev))) {
608
609
0
            const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
610
0
            struct dpif_port dpif_port;
611
612
0
            netdev_set_dpif_type(netdev, dpif_type_str);
613
614
0
            dpif_port.type = CONST_CAST(char *, netdev_get_type(netdev));
615
0
            dpif_port.name = CONST_CAST(char *, netdev_name);
616
0
            dpif_port.port_no = port_no;
617
0
            netdev_ports_insert(netdev, &dpif_port);
618
0
        }
619
0
    } else {
620
0
        VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
621
0
                     dpif_name(dpif), netdev_name, ovs_strerror(error));
622
0
        port_no = ODPP_NONE;
623
0
    }
624
0
    if (port_nop) {
625
0
        *port_nop = port_no;
626
0
    }
627
0
    return error;
628
0
}
629
630
/* Attempts to remove 'dpif''s port number 'port_no'.  Returns 0 if successful,
631
 * otherwise a positive errno value. */
632
int
633
dpif_port_del(struct dpif *dpif, odp_port_t port_no, bool local_delete)
634
0
{
635
0
    int error = 0;
636
637
0
    COVERAGE_INC(dpif_port_del);
638
639
0
    if (!local_delete) {
640
0
        error = dpif->dpif_class->port_del(dpif, port_no);
641
0
        if (!error) {
642
0
            VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
643
0
                        dpif_name(dpif), port_no);
644
0
        } else {
645
0
            log_operation(dpif, "port_del", error);
646
0
        }
647
0
    }
648
649
0
    netdev_ports_remove(port_no, dpif_normalize_type(dpif_type(dpif)));
650
0
    return error;
651
0
}
652
653
/* Makes a deep copy of 'src' into 'dst'. */
654
void
655
dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
656
0
{
657
0
    dst->name = xstrdup(src->name);
658
0
    dst->type = xstrdup(src->type);
659
0
    dst->port_no = src->port_no;
660
0
}
661
662
/* Frees memory allocated to members of 'dpif_port'.
663
 *
664
 * Do not call this function on a dpif_port obtained from
665
 * dpif_port_dump_next(): that function retains ownership of the data in the
666
 * dpif_port. */
667
void
668
dpif_port_destroy(struct dpif_port *dpif_port)
669
0
{
670
0
    free(dpif_port->name);
671
0
    free(dpif_port->type);
672
0
}
673
674
/* Checks if port named 'devname' exists in 'dpif'.  If so, returns
675
 * true; otherwise, returns false. */
676
bool
677
dpif_port_exists(const struct dpif *dpif, const char *devname)
678
0
{
679
0
    int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
680
0
    if (error != 0 && error != ENODEV) {
681
0
        VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
682
0
                     dpif_name(dpif), devname, ovs_strerror(error));
683
0
    }
684
685
0
    return !error;
686
0
}
687
688
/* Refreshes configuration of 'dpif's port. */
689
int
690
dpif_port_set_config(struct dpif *dpif, odp_port_t port_no,
691
                     const struct smap *cfg)
692
0
{
693
0
    int error = 0;
694
695
0
    if (dpif->dpif_class->port_set_config) {
696
0
        error = dpif->dpif_class->port_set_config(dpif, port_no, cfg);
697
0
        if (error) {
698
0
            log_operation(dpif, "port_set_config", error);
699
0
        }
700
0
    }
701
702
0
    return error;
703
0
}
704
705
/* Looks up port number 'port_no' in 'dpif'.  On success, returns 0 and
706
 * initializes '*port' appropriately; on failure, returns a positive errno
707
 * value.
708
 *
709
 * Returns ENODEV if the port doesn't exist.  Will not log a warning in this
710
 * case unless 'warn_if_not_found' is true.
711
 *
712
 * The caller owns the data in 'port' and must free it with
713
 * dpif_port_destroy() when it is no longer needed. */
714
int
715
dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
716
                          struct dpif_port *port, bool warn_if_not_found)
717
0
{
718
0
    int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
719
0
    if (!error) {
720
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
721
0
                    dpif_name(dpif), port_no, port->name);
722
0
    } else {
723
0
        memset(port, 0, sizeof *port);
724
0
        if (error == ENODEV && !warn_if_not_found) {
725
0
            VLOG_DBG_RL(&dpmsg_rl, "%s: failed to query port %"PRIu32": %s",
726
0
                        dpif_name(dpif), port_no, ovs_strerror(error));
727
0
        } else {
728
0
            VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
729
0
                         dpif_name(dpif), port_no, ovs_strerror(error));
730
0
        }
731
0
    }
732
0
    return error;
733
0
}
734
735
/* Looks up port named 'devname' in 'dpif'.  On success, returns 0 and
736
 * initializes '*port' appropriately; on failure, returns a positive errno
737
 * value.
738
 *
739
 * Returns ENODEV if the port doesn't exist.
740
 *
741
 * The caller owns the data in 'port' and must free it with
742
 * dpif_port_destroy() when it is no longer needed. */
743
int
744
dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
745
                        struct dpif_port *port)
746
0
{
747
0
    int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
748
0
    if (!error) {
749
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
750
0
                    dpif_name(dpif), devname, port->port_no);
751
0
    } else {
752
0
        memset(port, 0, sizeof *port);
753
754
        /* For ENODEV we use DBG level because the caller is probably
755
         * interested in whether 'dpif' actually has a port 'devname', so that
756
         * it's not an issue worth logging if it doesn't.  Other errors are
757
         * uncommon and more likely to indicate a real problem. */
758
0
        VLOG_RL(&error_rl, error == ENODEV ? VLL_DBG : VLL_WARN,
759
0
                "%s: failed to query port %s: %s",
760
0
                dpif_name(dpif), devname, ovs_strerror(error));
761
0
    }
762
0
    return error;
763
0
}
764
765
/* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
766
 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
767
 * flows whose packets arrived on port 'port_no'.
768
 *
769
 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
770
 * allocated to any port, that the client may use for special purposes.
771
 *
772
 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
773
 * the 'dpif''s listen mask.  It is allowed to change when DPIF_UC_ACTION is
774
 * disabled and then re-enabled, so a client that does that must be prepared to
775
 * update all of the flows that it installed that contain
776
 * OVS_ACTION_ATTR_USERSPACE actions. */
777
uint32_t
778
dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no)
779
0
{
780
0
    return (dpif->dpif_class->port_get_pid
781
0
            ? (dpif->dpif_class->port_get_pid)(dpif, port_no)
782
0
            : 0);
783
0
}
784
785
/* Looks up port number 'port_no' in 'dpif'.  On success, returns 0 and copies
786
 * the port's name into the 'name_size' bytes in 'name', ensuring that the
787
 * result is null-terminated.  On failure, returns a positive errno value and
788
 * makes 'name' the empty string. */
789
int
790
dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
791
                   char *name, size_t name_size)
792
0
{
793
0
    struct dpif_port port;
794
0
    int error;
795
796
0
    ovs_assert(name_size > 0);
797
798
0
    error = dpif_port_query_by_number(dpif, port_no, &port, true);
799
0
    if (!error) {
800
0
        ovs_strlcpy(name, port.name, name_size);
801
0
        dpif_port_destroy(&port);
802
0
    } else {
803
0
        *name = '\0';
804
0
    }
805
0
    return error;
806
0
}
807
808
/* Initializes 'dump' to begin dumping the ports in a dpif.
809
 *
810
 * This function provides no status indication.  An error status for the entire
811
 * dump operation is provided when it is completed by calling
812
 * dpif_port_dump_done().
813
 */
814
void
815
dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
816
0
{
817
0
    dump->dpif = dpif;
818
0
    dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
819
0
    log_operation(dpif, "port_dump_start", dump->error);
820
0
}
821
822
/* Attempts to retrieve another port from 'dump', which must have been
823
 * initialized with dpif_port_dump_start().  On success, stores a new dpif_port
824
 * into 'port' and returns true.  On failure, returns false.
825
 *
826
 * Failure might indicate an actual error or merely that the last port has been
827
 * dumped.  An error status for the entire dump operation is provided when it
828
 * is completed by calling dpif_port_dump_done().
829
 *
830
 * The dpif owns the data stored in 'port'.  It will remain valid until at
831
 * least the next time 'dump' is passed to dpif_port_dump_next() or
832
 * dpif_port_dump_done(). */
833
bool
834
dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
835
0
{
836
0
    const struct dpif *dpif = dump->dpif;
837
838
0
    if (dump->error) {
839
0
        return false;
840
0
    }
841
842
0
    dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
843
0
    if (dump->error == EOF) {
844
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
845
0
    } else {
846
0
        log_operation(dpif, "port_dump_next", dump->error);
847
0
    }
848
849
0
    if (dump->error) {
850
0
        dpif->dpif_class->port_dump_done(dpif, dump->state);
851
0
        return false;
852
0
    }
853
0
    return true;
854
0
}
855
856
/* Completes port table dump operation 'dump', which must have been initialized
857
 * with dpif_port_dump_start().  Returns 0 if the dump operation was
858
 * error-free, otherwise a positive errno value describing the problem. */
859
int
860
dpif_port_dump_done(struct dpif_port_dump *dump)
861
0
{
862
0
    const struct dpif *dpif = dump->dpif;
863
0
    if (!dump->error) {
864
0
        dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
865
0
        log_operation(dpif, "port_dump_done", dump->error);
866
0
    }
867
0
    return dump->error == EOF ? 0 : dump->error;
868
0
}
869
870
/* Polls for changes in the set of ports in 'dpif'.  If the set of ports in
871
 * 'dpif' has changed, this function does one of the following:
872
 *
873
 * - Stores the name of the device that was added to or deleted from 'dpif' in
874
 *   '*devnamep' and returns 0.  The caller is responsible for freeing
875
 *   '*devnamep' (with free()) when it no longer needs it.
876
 *
877
 * - Returns ENOBUFS and sets '*devnamep' to NULL.
878
 *
879
 * This function may also return 'false positives', where it returns 0 and
880
 * '*devnamep' names a device that was not actually added or deleted or it
881
 * returns ENOBUFS without any change.
882
 *
883
 * Returns EAGAIN if the set of ports in 'dpif' has not changed.  May also
884
 * return other positive errno values to indicate that something has gone
885
 * wrong. */
886
int
887
dpif_port_poll(const struct dpif *dpif, char **devnamep)
888
0
{
889
0
    int error = dpif->dpif_class->port_poll(dpif, devnamep);
890
0
    if (error) {
891
0
        *devnamep = NULL;
892
0
    }
893
0
    return error;
894
0
}
895
896
/* Arranges for the poll loop to wake up when port_poll(dpif) will return a
897
 * value other than EAGAIN. */
898
void
899
dpif_port_poll_wait(const struct dpif *dpif)
900
0
{
901
0
    dpif->dpif_class->port_poll_wait(dpif);
902
0
}
903
904
/* Extracts the flow stats for a packet.  The 'flow' and 'packet'
905
 * arguments must have been initialized through a call to flow_extract().
906
 * 'used' is stored into stats->used. */
907
void
908
dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
909
                        long long int used, struct dpif_flow_stats *stats)
910
0
{
911
0
    stats->tcp_flags = ntohs(flow->tcp_flags);
912
0
    stats->n_bytes = dp_packet_size(packet);
913
0
    stats->n_packets = 1;
914
0
    stats->used = used;
915
0
}
916
917
/* Appends a human-readable representation of 'stats' to 's'. */
918
void
919
dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
920
0
{
921
0
    ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
922
0
                  stats->n_packets, stats->n_bytes);
923
0
    if (stats->used) {
924
0
        ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
925
0
    } else {
926
0
        ds_put_format(s, "never");
927
0
    }
928
0
    if (stats->tcp_flags) {
929
0
        ds_put_cstr(s, ", flags:");
930
0
        packet_format_tcp_flags(s, stats->tcp_flags);
931
0
    }
932
0
}
933
934
/* Deletes all flows from 'dpif'.  Returns 0 if successful, otherwise a
935
 * positive errno value.  */
936
int
937
dpif_flow_flush(struct dpif *dpif)
938
0
{
939
0
    int error;
940
941
0
    COVERAGE_INC(dpif_flow_flush);
942
943
0
    error = dpif->dpif_class->flow_flush(dpif);
944
0
    log_operation(dpif, "flow_flush", error);
945
0
    return error;
946
0
}
947
948
/* Attempts to install 'key' into the datapath, fetches it, then deletes it.
949
 * Returns true if the datapath supported installing 'flow', false otherwise.
950
 */
951
bool
952
dpif_probe_feature(struct dpif *dpif, const char *name,
953
                   const struct ofpbuf *key, const struct ofpbuf *actions,
954
                   const ovs_u128 *ufid)
955
0
{
956
0
    struct dpif_flow flow;
957
0
    struct ofpbuf reply;
958
0
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
959
0
    bool enable_feature = false;
960
0
    int error;
961
0
    const struct nlattr *nl_actions = actions ? actions->data : NULL;
962
0
    const size_t nl_actions_size = actions ? actions->size : 0;
963
964
    /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
965
     * restarted) at just the right time such that feature probes from the
966
     * previous run are still present in the datapath. */
967
0
    error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
968
0
                          key->data, key->size, NULL, 0,
969
0
                          nl_actions, nl_actions_size,
970
0
                          ufid, NON_PMD_CORE_ID, NULL);
971
0
    if (error) {
972
0
        if (error != EINVAL && error != EOVERFLOW) {
973
0
            VLOG_WARN("%s: %s flow probe failed (%s)",
974
0
                      dpif_name(dpif), name, ovs_strerror(error));
975
0
        }
976
0
        return false;
977
0
    }
978
979
0
    ofpbuf_use_stack(&reply, &stub, sizeof stub);
980
0
    error = dpif_flow_get(dpif, key->data, key->size, ufid,
981
0
                          NON_PMD_CORE_ID, &reply, &flow);
982
0
    if (!error
983
0
        && (!ufid || (flow.ufid_present
984
0
                      && ovs_u128_equals(*ufid, flow.ufid)))) {
985
0
        enable_feature = true;
986
0
    }
987
988
0
    error = dpif_flow_del(dpif, key->data, key->size, ufid,
989
0
                          NON_PMD_CORE_ID, NULL);
990
0
    if (error) {
991
0
        VLOG_WARN("%s: failed to delete %s feature probe flow",
992
0
                  dpif_name(dpif), name);
993
0
    }
994
995
0
    return enable_feature;
996
0
}
997
998
/* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
999
int
1000
dpif_flow_get(struct dpif *dpif,
1001
              const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
1002
              const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
1003
0
{
1004
0
    struct dpif_op *opp;
1005
0
    struct dpif_op op;
1006
1007
0
    op.type = DPIF_OP_FLOW_GET;
1008
0
    op.flow_get.key = key;
1009
0
    op.flow_get.key_len = key_len;
1010
0
    op.flow_get.ufid = ufid;
1011
0
    op.flow_get.pmd_id = pmd_id;
1012
0
    op.flow_get.buffer = buf;
1013
1014
0
    memset(flow, 0, sizeof *flow);
1015
0
    op.flow_get.flow = flow;
1016
0
    op.flow_get.flow->key = key;
1017
0
    op.flow_get.flow->key_len = key_len;
1018
1019
0
    opp = &op;
1020
0
    dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1021
1022
0
    return op.error;
1023
0
}
1024
1025
/* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
1026
int
1027
dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
1028
              const struct nlattr *key, size_t key_len,
1029
              const struct nlattr *mask, size_t mask_len,
1030
              const struct nlattr *actions, size_t actions_len,
1031
              const ovs_u128 *ufid, const unsigned pmd_id,
1032
              struct dpif_flow_stats *stats)
1033
0
{
1034
0
    struct dpif_op *opp;
1035
0
    struct dpif_op op;
1036
1037
0
    op.type = DPIF_OP_FLOW_PUT;
1038
0
    op.flow_put.flags = flags;
1039
0
    op.flow_put.key = key;
1040
0
    op.flow_put.key_len = key_len;
1041
0
    op.flow_put.mask = mask;
1042
0
    op.flow_put.mask_len = mask_len;
1043
0
    op.flow_put.actions = actions;
1044
0
    op.flow_put.actions_len = actions_len;
1045
0
    op.flow_put.ufid = ufid;
1046
0
    op.flow_put.pmd_id = pmd_id;
1047
0
    op.flow_put.stats = stats;
1048
1049
0
    opp = &op;
1050
0
    dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1051
1052
0
    return op.error;
1053
0
}
1054
1055
/* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
1056
int
1057
dpif_flow_del(struct dpif *dpif,
1058
              const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
1059
              const unsigned pmd_id, struct dpif_flow_stats *stats)
1060
0
{
1061
0
    struct dpif_op *opp;
1062
0
    struct dpif_op op;
1063
1064
0
    op.type = DPIF_OP_FLOW_DEL;
1065
0
    op.flow_del.key = key;
1066
0
    op.flow_del.key_len = key_len;
1067
0
    op.flow_del.ufid = ufid;
1068
0
    op.flow_del.pmd_id = pmd_id;
1069
0
    op.flow_del.stats = stats;
1070
0
    op.flow_del.terse = false;
1071
1072
0
    opp = &op;
1073
0
    dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1074
1075
0
    return op.error;
1076
0
}
1077
1078
/* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1079
 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1080
 * be returned in the dump. Otherwise, all fields will be returned.
1081
 *
1082
 * This function always successfully returns a dpif_flow_dump.  Error
1083
 * reporting is deferred to dpif_flow_dump_destroy(). */
1084
struct dpif_flow_dump *
1085
dpif_flow_dump_create(const struct dpif *dpif, bool terse,
1086
                      struct dpif_flow_dump_types *types)
1087
0
{
1088
0
    return dpif->dpif_class->flow_dump_create(dpif, terse, types);
1089
0
}
1090
1091
/* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1092
 * All dpif_flow_dump_thread structures previously created for 'dump' must
1093
 * previously have been destroyed.
1094
 *
1095
 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1096
 * value describing the problem. */
1097
int
1098
dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1099
0
{
1100
0
    const struct dpif *dpif = dump->dpif;
1101
0
    int error = dpif->dpif_class->flow_dump_destroy(dump);
1102
0
    log_operation(dpif, "flow_dump_destroy", error);
1103
0
    return error == EOF ? 0 : error;
1104
0
}
1105
1106
/* Returns new thread-local state for use with dpif_flow_dump_next(). */
1107
struct dpif_flow_dump_thread *
1108
dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1109
0
{
1110
0
    return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1111
0
}
1112
1113
/* Releases 'thread'. */
1114
void
1115
dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1116
0
{
1117
0
    thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1118
0
}
1119
1120
/* Attempts to retrieve up to 'max_flows' more flows from 'thread'.  Returns 0
1121
 * if and only if no flows remained to be retrieved, otherwise a positive
1122
 * number reflecting the number of elements in 'flows[]' that were updated.
1123
 * The number of flows returned might be less than 'max_flows' because
1124
 * fewer than 'max_flows' remained, because this particular datapath does not
1125
 * benefit from batching, or because an error occurred partway through
1126
 * retrieval.  Thus, the caller should continue calling until a 0 return value,
1127
 * even if intermediate return values are less than 'max_flows'.
1128
 *
1129
 * No error status is immediately provided.  An error status for the entire
1130
 * dump operation is provided when it is completed by calling
1131
 * dpif_flow_dump_destroy().
1132
 *
1133
 * All of the data stored into 'flows' is owned by the datapath, not by the
1134
 * caller, and the caller must not modify or free it.  The datapath guarantees
1135
 * that it remains accessible and unchanged until the first of:
1136
 *  - The next call to dpif_flow_dump_next() for 'thread', or
1137
 *  - The next rcu quiescent period. */
1138
int
1139
dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1140
                    struct dpif_flow *flows, int max_flows)
1141
0
{
1142
0
    struct dpif *dpif = thread->dpif;
1143
0
    int n;
1144
1145
0
    ovs_assert(max_flows > 0);
1146
0
    n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1147
0
    if (n > 0) {
1148
0
        struct dpif_flow *f;
1149
1150
0
        for (f = flows; f < &flows[n]
1151
0
             && should_log_flow_message(&this_module, 0); f++) {
1152
0
            log_flow_message(dpif, 0, &this_module, "flow_dump",
1153
0
                             f->key, f->key_len, f->mask, f->mask_len,
1154
0
                             &f->ufid, &f->stats, f->actions, f->actions_len);
1155
0
        }
1156
0
    } else {
1157
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1158
0
    }
1159
0
    return n;
1160
0
}
1161
1162
struct dpif_execute_helper_aux {
1163
    struct dpif *dpif;
1164
    const struct flow *flow;
1165
    int error;
1166
    struct ofpbuf meter_actions;
1167
};
1168
1169
/* This is called for actions that need the context of the datapath to be
1170
 * meaningful. */
1171
static void
1172
dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
1173
                       const struct nlattr *action, bool should_steal)
1174
0
{
1175
0
    struct dpif_execute_helper_aux *aux = aux_;
1176
0
    int type = nl_attr_type(action);
1177
0
    struct dp_packet *packet = packets_->packets[0];
1178
1179
0
    ovs_assert(dp_packet_batch_size(packets_) == 1);
1180
1181
0
    switch ((enum ovs_action_attr)type) {
1182
0
    case OVS_ACTION_ATTR_METER:
1183
        /* XXX: This code collects meter actions since the last action
1184
         * execution via the datapath to be executed right before the
1185
         * current action that needs to be executed by the datapath.
1186
         * This is only an approximation, but better than nothing.
1187
         * Fundamentally, we should have a mechanism by which the
1188
         * datapath could return the result of the meter action so that
1189
         * we could execute them at the right order. */
1190
0
        ofpbuf_put(&aux->meter_actions, action, NLA_ALIGN(action->nla_len));
1191
0
        break;
1192
1193
0
    case OVS_ACTION_ATTR_CT:
1194
0
    case OVS_ACTION_ATTR_OUTPUT:
1195
0
    case OVS_ACTION_ATTR_LB_OUTPUT:
1196
0
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
1197
0
    case OVS_ACTION_ATTR_TUNNEL_POP:
1198
0
    case OVS_ACTION_ATTR_USERSPACE:
1199
0
    case OVS_ACTION_ATTR_PSAMPLE:
1200
0
    case OVS_ACTION_ATTR_SAMPLE:
1201
0
    case OVS_ACTION_ATTR_RECIRC: {
1202
0
        struct dpif_execute execute;
1203
0
        struct pkt_metadata *md = &packet->md;
1204
1205
0
        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_actions.size) {
1206
0
            struct ofpbuf *execute_actions = &aux->meter_actions;
1207
1208
            /* The Linux kernel datapath throws away the tunnel information
1209
             * that we supply as metadata.  We have to use a "set" action to
1210
             * supply it. */
1211
0
            if (flow_tnl_dst_is_set(&md->tunnel)) {
1212
0
                odp_put_tunnel_action(&md->tunnel, execute_actions, NULL);
1213
0
            }
1214
0
            ofpbuf_put(execute_actions, action, NLA_ALIGN(action->nla_len));
1215
1216
0
            execute.actions = execute_actions->data;
1217
0
            execute.actions_len = execute_actions->size;
1218
0
        } else {
1219
0
            execute.actions = action;
1220
0
            execute.actions_len = NLA_ALIGN(action->nla_len);
1221
0
        }
1222
1223
0
        struct dp_packet *clone = NULL;
1224
0
        uint32_t cutlen = dp_packet_get_cutlen(packet);
1225
0
        if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
1226
0
                        || type == OVS_ACTION_ATTR_LB_OUTPUT
1227
0
                        || type == OVS_ACTION_ATTR_TUNNEL_PUSH
1228
0
                        || type == OVS_ACTION_ATTR_TUNNEL_POP
1229
0
                        || type == OVS_ACTION_ATTR_USERSPACE)) {
1230
0
            dp_packet_reset_cutlen(packet);
1231
0
            if (!should_steal) {
1232
0
                packet = clone = dp_packet_clone(packet);
1233
0
            }
1234
0
            dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
1235
0
        }
1236
1237
0
        execute.packet = packet;
1238
0
        execute.flow = aux->flow;
1239
0
        execute.needs_help = false;
1240
0
        execute.probe = false;
1241
0
        execute.mtu = 0;
1242
0
        execute.hash = 0;
1243
0
        aux->error = dpif_execute(aux->dpif, &execute);
1244
0
        log_execute_message(aux->dpif, &this_module, &execute,
1245
0
                            true, aux->error);
1246
1247
0
        dp_packet_delete(clone);
1248
1249
        /* Clear the 'aux->meter_actions' ofpbuf as it could have been
1250
         * used for sending the additional meter and/or tunnel actions. */
1251
0
        ofpbuf_clear(&aux->meter_actions);
1252
0
        break;
1253
0
    }
1254
1255
0
    case OVS_ACTION_ATTR_HASH:
1256
0
    case OVS_ACTION_ATTR_PUSH_VLAN:
1257
0
    case OVS_ACTION_ATTR_POP_VLAN:
1258
0
    case OVS_ACTION_ATTR_PUSH_MPLS:
1259
0
    case OVS_ACTION_ATTR_POP_MPLS:
1260
0
    case OVS_ACTION_ATTR_SET:
1261
0
    case OVS_ACTION_ATTR_SET_MASKED:
1262
0
    case OVS_ACTION_ATTR_TRUNC:
1263
0
    case OVS_ACTION_ATTR_PUSH_ETH:
1264
0
    case OVS_ACTION_ATTR_POP_ETH:
1265
0
    case OVS_ACTION_ATTR_CLONE:
1266
0
    case OVS_ACTION_ATTR_PUSH_NSH:
1267
0
    case OVS_ACTION_ATTR_POP_NSH:
1268
0
    case OVS_ACTION_ATTR_CT_CLEAR:
1269
0
    case OVS_ACTION_ATTR_UNSPEC:
1270
0
    case OVS_ACTION_ATTR_CHECK_PKT_LEN:
1271
0
    case OVS_ACTION_ATTR_DROP:
1272
0
    case OVS_ACTION_ATTR_ADD_MPLS:
1273
0
    case OVS_ACTION_ATTR_DEC_TTL:
1274
0
    case __OVS_ACTION_ATTR_MAX:
1275
0
        OVS_NOT_REACHED();
1276
0
    }
1277
0
    dp_packet_delete_batch(packets_, should_steal);
1278
0
}
1279
1280
/* Executes 'execute' by performing most of the actions in userspace and
1281
 * passing the fully constructed packets to 'dpif' for output and userspace
1282
 * actions.
1283
 *
1284
 * This helps with actions that a given 'dpif' doesn't implement directly. */
1285
static int
1286
dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1287
0
{
1288
0
    struct dpif_execute_helper_aux aux = {
1289
0
        .dpif = dpif,
1290
0
        .flow = execute->flow,
1291
0
        .error = 0,
1292
0
    };
1293
0
    struct dp_packet_batch pb;
1294
1295
0
    COVERAGE_INC(dpif_execute_with_help);
1296
1297
0
    ofpbuf_init(&aux.meter_actions, 0);
1298
0
    dp_packet_batch_init_packet(&pb, execute->packet);
1299
0
    odp_execute_actions(&aux, &pb, false, execute->actions,
1300
0
                        execute->actions_len, dpif_execute_helper_cb);
1301
0
    ofpbuf_uninit(&aux.meter_actions);
1302
0
    return aux.error;
1303
0
}
1304
1305
/* Returns true if the datapath needs help executing 'execute'. */
1306
static bool
1307
dpif_execute_needs_help(const struct dpif_execute *execute)
1308
0
{
1309
0
    return execute->needs_help || nl_attr_oversized(execute->actions_len);
1310
0
}
1311
1312
/* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1313
int
1314
dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1315
0
{
1316
0
    if (execute->actions_len) {
1317
0
        struct dpif_op *opp;
1318
0
        struct dpif_op op;
1319
1320
0
        op.type = DPIF_OP_EXECUTE;
1321
0
        op.execute = *execute;
1322
1323
0
        opp = &op;
1324
0
        dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1325
1326
0
        return op.error;
1327
0
    } else {
1328
0
        return 0;
1329
0
    }
1330
0
}
1331
1332
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
1333
 * which they are specified.  Places each operation's results in the "output"
1334
 * members documented in comments, and 0 in the 'error' member on success or a
1335
 * positive errno on failure.
1336
 */
1337
void
1338
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
1339
             enum dpif_offload_type offload_type)
1340
0
{
1341
0
    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
1342
0
        size_t i;
1343
0
        for (i = 0; i < n_ops; i++) {
1344
0
            struct dpif_op *op = ops[i];
1345
0
            op->error = EINVAL;
1346
0
        }
1347
0
        return;
1348
0
    }
1349
1350
0
    while (n_ops > 0) {
1351
0
        size_t chunk;
1352
1353
        /* Count 'chunk', the number of ops that can be executed without
1354
         * needing any help.  Ops that need help should be rare, so we
1355
         * expect this to ordinarily be 'n_ops', that is, all the ops. */
1356
0
        for (chunk = 0; chunk < n_ops; chunk++) {
1357
0
            struct dpif_op *op = ops[chunk];
1358
1359
0
            if (op->type == DPIF_OP_EXECUTE
1360
0
                && dpif_execute_needs_help(&op->execute)) {
1361
0
                break;
1362
0
            }
1363
0
        }
1364
1365
0
        if (chunk) {
1366
            /* Execute a chunk full of ops that the dpif provider can
1367
             * handle itself, without help. */
1368
0
            size_t i;
1369
1370
0
            dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
1371
1372
0
            for (i = 0; i < chunk; i++) {
1373
0
                struct dpif_op *op = ops[i];
1374
0
                int error = op->error;
1375
1376
0
                switch (op->type) {
1377
0
                case DPIF_OP_FLOW_PUT: {
1378
0
                    struct dpif_flow_put *put = &op->flow_put;
1379
1380
0
                    COVERAGE_INC(dpif_flow_put);
1381
0
                    log_flow_put_message(dpif, &this_module, put, error);
1382
0
                    if (error) {
1383
0
                        COVERAGE_INC(dpif_flow_put_error);
1384
0
                        if (put->stats) {
1385
0
                            memset(put->stats, 0, sizeof *put->stats);
1386
0
                        }
1387
0
                    }
1388
0
                    break;
1389
0
                }
1390
1391
0
                case DPIF_OP_FLOW_GET: {
1392
0
                    struct dpif_flow_get *get = &op->flow_get;
1393
1394
0
                    COVERAGE_INC(dpif_flow_get);
1395
0
                    if (error) {
1396
0
                        COVERAGE_INC(dpif_flow_get_error);
1397
0
                        memset(get->flow, 0, sizeof *get->flow);
1398
0
                    }
1399
0
                    log_flow_get_message(dpif, &this_module, get, error);
1400
0
                    break;
1401
0
                }
1402
1403
0
                case DPIF_OP_FLOW_DEL: {
1404
0
                    struct dpif_flow_del *del = &op->flow_del;
1405
1406
0
                    COVERAGE_INC(dpif_flow_del);
1407
0
                    log_flow_del_message(dpif, &this_module, del, error);
1408
0
                    if (error) {
1409
0
                        COVERAGE_INC(dpif_flow_del_error);
1410
0
                        if (del->stats) {
1411
0
                            memset(del->stats, 0, sizeof *del->stats);
1412
0
                        }
1413
0
                    }
1414
0
                    break;
1415
0
                }
1416
1417
0
                case DPIF_OP_EXECUTE:
1418
0
                    COVERAGE_INC(dpif_execute);
1419
0
                    log_execute_message(dpif, &this_module, &op->execute,
1420
0
                                        false, error);
1421
0
                    if (error) {
1422
0
                        COVERAGE_INC(dpif_execute_error);
1423
0
                    }
1424
0
                    break;
1425
0
                }
1426
0
            }
1427
1428
0
            ops += chunk;
1429
0
            n_ops -= chunk;
1430
0
        } else {
1431
            /* Help the dpif provider to execute one op. */
1432
0
            struct dpif_op *op = ops[0];
1433
1434
0
            COVERAGE_INC(dpif_execute);
1435
0
            op->error = dpif_execute_with_help(dpif, &op->execute);
1436
0
            ops++;
1437
0
            n_ops--;
1438
0
        }
1439
0
    }
1440
0
}
1441
1442
int dpif_offload_stats_get(struct dpif *dpif,
1443
                           struct netdev_custom_stats *stats)
1444
0
{
1445
0
    return (dpif->dpif_class->offload_stats_get
1446
0
            ? dpif->dpif_class->offload_stats_get(dpif, stats)
1447
0
            : EOPNOTSUPP);
1448
0
}
1449
1450
/* Returns a string that represents 'type', for use in log messages. */
1451
const char *
1452
dpif_upcall_type_to_string(enum dpif_upcall_type type)
1453
0
{
1454
0
    switch (type) {
1455
0
    case DPIF_UC_MISS: return "miss";
1456
0
    case DPIF_UC_ACTION: return "action";
1457
0
    case DPIF_N_UC_TYPES: default: return "<unknown>";
1458
0
    }
1459
0
}
1460
1461
/* Enables or disables receiving packets with dpif_recv() on 'dpif'.  Returns 0
1462
 * if successful, otherwise a positive errno value.
1463
 *
1464
 * Turning packet receive off and then back on may change the Netlink PID
1465
 * assignments returned by dpif_port_get_pid().  If the client does this, it
1466
 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1467
 * using the new PID assignment. */
1468
int
1469
dpif_recv_set(struct dpif *dpif, bool enable)
1470
0
{
1471
0
    int error = 0;
1472
1473
0
    if (dpif->dpif_class->recv_set) {
1474
0
        error = dpif->dpif_class->recv_set(dpif, enable);
1475
0
        log_operation(dpif, "recv_set", error);
1476
0
    }
1477
0
    return error;
1478
0
}
1479
1480
/* Refreshes the poll loops and Netlink sockets associated to each port,
1481
 * when the number of upcall handlers (upcall receiving thread) is changed
1482
 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1483
 * recv_set().
1484
 *
1485
 * Since multiple upcall handlers can read upcalls simultaneously from
1486
 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1487
 * handler.  So, handlers_set() is responsible for the following tasks:
1488
 *
1489
 *    When receiving upcall is enabled, extends or creates the
1490
 *    configuration to support:
1491
 *
1492
 *        - 'n_handlers' Netlink sockets for each port.
1493
 *
1494
 *        - 'n_handlers' poll loops, one for each upcall handler.
1495
 *
1496
 *        - registering the Netlink sockets for the same upcall handler to
1497
 *          the corresponding poll loop.
1498
 *
1499
 * Returns 0 if successful, otherwise a positive errno value. */
1500
int
1501
dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1502
0
{
1503
0
    int error = 0;
1504
1505
0
    if (dpif->dpif_class->handlers_set) {
1506
0
        error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1507
0
        log_operation(dpif, "handlers_set", error);
1508
0
    }
1509
0
    return error;
1510
0
}
1511
1512
/* Checks if a certain number of handlers are required.
1513
 *
1514
 * If a certain number of handlers are required, returns 'true' and sets
1515
 * 'n_handlers' to that number of handler threads.
1516
 *
1517
 * If not, returns 'false'
1518
 */
1519
bool
1520
dpif_number_handlers_required(struct dpif *dpif, uint32_t *n_handlers)
1521
0
{
1522
0
    if (dpif->dpif_class->number_handlers_required) {
1523
0
        return dpif->dpif_class->number_handlers_required(dpif, n_handlers);
1524
0
    }
1525
0
    return false;
1526
0
}
1527
1528
void
1529
dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
1530
0
{
1531
0
    if (dpif->dpif_class->register_dp_purge_cb) {
1532
0
        dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux);
1533
0
    }
1534
0
}
1535
1536
void
1537
dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1538
0
{
1539
0
    if (dpif->dpif_class->register_upcall_cb) {
1540
0
        dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1541
0
    }
1542
0
}
1543
1544
void
1545
dpif_enable_upcall(struct dpif *dpif)
1546
0
{
1547
0
    if (dpif->dpif_class->enable_upcall) {
1548
0
        dpif->dpif_class->enable_upcall(dpif);
1549
0
    }
1550
0
}
1551
1552
void
1553
dpif_disable_upcall(struct dpif *dpif)
1554
0
{
1555
0
    if (dpif->dpif_class->disable_upcall) {
1556
0
        dpif->dpif_class->disable_upcall(dpif);
1557
0
    }
1558
0
}
1559
1560
void
1561
dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1562
0
{
1563
0
    if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1564
0
        struct ds flow;
1565
0
        char *packet;
1566
1567
0
        packet = ofp_dp_packet_to_string(&upcall->packet);
1568
1569
0
        ds_init(&flow);
1570
0
        odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1571
1572
0
        VLOG_DBG("%s: %s upcall:\n%s\n%s",
1573
0
                 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1574
0
                 ds_cstr(&flow), packet);
1575
1576
0
        ds_destroy(&flow);
1577
0
        free(packet);
1578
0
    }
1579
0
}
1580
1581
/* Pass custom configuration to the datapath implementation.  Some of the
1582
 * changes can be postponed until dpif_run() is called. */
1583
int
1584
dpif_set_config(struct dpif *dpif, const struct smap *cfg)
1585
0
{
1586
0
    int error = 0;
1587
1588
0
    if (dpif->dpif_class->set_config) {
1589
0
        error = dpif->dpif_class->set_config(dpif, cfg);
1590
0
        if (error) {
1591
0
            log_operation(dpif, "set_config", error);
1592
0
        }
1593
0
    }
1594
1595
0
    return error;
1596
0
}
1597
1598
/* Polls for an upcall from 'dpif' for an upcall handler.  Since there can
1599
 * be multiple poll loops, 'handler_id' is needed as index to identify the
1600
 * corresponding poll loop.  If successful, stores the upcall into '*upcall',
1601
 * using 'buf' for storage.  Should only be called if 'recv_set' has been used
1602
 * to enable receiving packets from 'dpif'.
1603
 *
1604
 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1605
 * 'buf', so their memory cannot be freed separately from 'buf'.
1606
 *
1607
 * The caller owns the data of 'upcall->packet' and may modify it.  If
1608
 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1609
 * will be reallocated.  This requires the data of 'upcall->packet' to be
1610
 * released with ofpbuf_uninit() before 'upcall' is destroyed.  However,
1611
 * when an error is returned, the 'upcall->packet' may be uninitialized
1612
 * and should not be released.
1613
 *
1614
 * Returns 0 if successful, otherwise a positive errno value.  Returns EAGAIN
1615
 * if no upcall is immediately available. */
1616
int
1617
dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1618
          struct ofpbuf *buf)
1619
0
{
1620
0
    int error = EAGAIN;
1621
1622
0
    if (dpif->dpif_class->recv) {
1623
0
        error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1624
0
        if (!error) {
1625
0
            OVS_USDT_PROBE(dpif_recv, recv_upcall, dpif->full_name,
1626
0
                           upcall->type,
1627
0
                           dp_packet_data(&upcall->packet),
1628
0
                           dp_packet_size(&upcall->packet),
1629
0
                           upcall->key, upcall->key_len);
1630
1631
0
            dpif_print_packet(dpif, upcall);
1632
0
        } else if (error != EAGAIN) {
1633
0
            log_operation(dpif, "recv", error);
1634
0
        }
1635
0
    }
1636
0
    return error;
1637
0
}
1638
1639
/* Discards all messages that would otherwise be received by dpif_recv() on
1640
 * 'dpif'. */
1641
void
1642
dpif_recv_purge(struct dpif *dpif)
1643
0
{
1644
0
    COVERAGE_INC(dpif_purge);
1645
0
    if (dpif->dpif_class->recv_purge) {
1646
0
        dpif->dpif_class->recv_purge(dpif);
1647
0
    }
1648
0
}
1649
1650
/* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1651
 * 'dpif' has a message queued to be received with the recv member
1652
 * function.  Since there can be multiple poll loops, 'handler_id' is
1653
 * needed as index to identify the corresponding poll loop. */
1654
void
1655
dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1656
0
{
1657
0
    if (dpif->dpif_class->recv_wait) {
1658
0
        dpif->dpif_class->recv_wait(dpif, handler_id);
1659
0
    }
1660
0
}
1661
1662
/*
1663
 * Return the datapath version. Caller is responsible for freeing
1664
 * the string.
1665
 */
1666
char *
1667
dpif_get_dp_version(const struct dpif *dpif)
1668
0
{
1669
0
    char *version = NULL;
1670
1671
0
    if (dpif->dpif_class->get_datapath_version) {
1672
0
        version = dpif->dpif_class->get_datapath_version();
1673
0
    }
1674
1675
0
    return version;
1676
0
}
1677
1678
/* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1679
 * and '*engine_id', respectively. */
1680
void
1681
dpif_get_netflow_ids(const struct dpif *dpif,
1682
                     uint8_t *engine_type, uint8_t *engine_id)
1683
0
{
1684
0
    *engine_type = dpif->netflow_engine_type;
1685
0
    *engine_id = dpif->netflow_engine_id;
1686
0
}
1687
1688
/* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1689
 * value used for setting packet priority.
1690
 * On success, returns 0 and stores the priority into '*priority'.
1691
 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1692
int
1693
dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1694
                       uint32_t *priority)
1695
0
{
1696
0
    int error = (dpif->dpif_class->queue_to_priority
1697
0
                 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1698
0
                                                       priority)
1699
0
                 : EOPNOTSUPP);
1700
0
    if (error) {
1701
0
        *priority = 0;
1702
0
    }
1703
0
    log_operation(dpif, "queue_to_priority", error);
1704
0
    return error;
1705
0
}
1706

1707
void
1708
dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1709
          const char *name,
1710
          uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1711
0
{
1712
0
    dpif->dpif_class = dpif_class;
1713
0
    dpif->base_name = xstrdup(name);
1714
0
    dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1715
0
    dpif->netflow_engine_type = netflow_engine_type;
1716
0
    dpif->netflow_engine_id = netflow_engine_id;
1717
0
}
1718
1719
/* Undoes the results of initialization.
1720
 *
1721
 * Normally this function only needs to be called from dpif_close().
1722
 * However, it may be called by providers due to an error on opening
1723
 * that occurs after initialization.  It this case dpif_close() would
1724
 * never be called. */
1725
void
1726
dpif_uninit(struct dpif *dpif, bool close)
1727
0
{
1728
0
    char *base_name = dpif->base_name;
1729
0
    char *full_name = dpif->full_name;
1730
1731
0
    if (close) {
1732
0
        dpif->dpif_class->close(dpif);
1733
0
    }
1734
1735
0
    free(base_name);
1736
0
    free(full_name);
1737
0
}
1738

1739
/* Logs the outcome of 'operation' on 'dpif'.  Success is logged at debug
 * level (rate-limited by 'dpmsg_rl'); failure at warning level
 * (rate-limited by 'error_rl').  'error' may be either a positive errno
 * value or an OFPERR_* value: the latter is detected with
 * ofperr_is_valid() and printed by its symbolic name, anything else is
 * formatted with ovs_strerror(). */
static void
log_operation(const struct dpif *dpif, const char *operation, int error)
{
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
    } else if (ofperr_is_valid(error)) {
        VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
                     dpif_name(dpif), operation, ofperr_get_name(error));
    } else {
        VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
                     dpif_name(dpif), operation, ovs_strerror(error));
    }
}
1752
1753
static enum vlog_level
1754
flow_message_log_level(int error)
1755
0
{
1756
    /* If flows arrive in a batch, userspace may push down multiple
1757
     * unique flow definitions that overlap when wildcards are applied.
1758
     * Kernels that support flow wildcarding will reject these flows as
1759
     * duplicates (EEXIST), so lower the log level to debug for these
1760
     * types of messages. */
1761
0
    return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1762
0
}
1763
1764
static bool
1765
should_log_flow_message(const struct vlog_module *module, int error)
1766
0
{
1767
0
    return !vlog_should_drop(module, flow_message_log_level(error),
1768
0
                             error ? &error_rl : &dpmsg_rl);
1769
0
}
1770
1771
/* Formats and logs a single flow operation message to 'module' at the
 * level chosen by flow_message_log_level('error').
 *
 * The message has the shape:
 *   "<dpif>: [failed to ]<operation> [(<errno string>) ][<ufid> ]<flow>
 *    [, <stats>][, actions:<actions>]"
 * where 'mask'/'ufid'/'stats'/'actions' portions are included only when
 * the corresponding argument is nonnull. */
void
log_flow_message(const struct dpif *dpif, int error,
                 const struct vlog_module *module,
                 const char *operation,
                 const struct nlattr *key, size_t key_len,
                 const struct nlattr *mask, size_t mask_len,
                 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
                 const struct nlattr *actions, size_t actions_len)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    ds_put_format(&ds, "%s: ", dpif_name(dpif));
    if (error) {
        ds_put_cstr(&ds, "failed to ");
    }
    ds_put_format(&ds, "%s ", operation);
    if (error) {
        ds_put_format(&ds, "(%s) ", ovs_strerror(error));
    }
    if (ufid) {
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
    }
    /* Key and mask are netlink-attribute flow representations. */
    odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true, true);
    if (stats) {
        ds_put_cstr(&ds, ", ");
        dpif_flow_stats_format(stats, &ds);
    }
    if (actions) {
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len, NULL);
    }
    vlog(module, flow_message_log_level(error), "%s", ds_cstr(&ds));
    ds_destroy(&ds);
}
1805
1806
void
1807
log_flow_put_message(const struct dpif *dpif,
1808
                     const struct vlog_module *module,
1809
                     const struct dpif_flow_put *put,
1810
                     int error)
1811
0
{
1812
0
    if (should_log_flow_message(module, error)
1813
0
        && !(put->flags & DPIF_FP_PROBE)) {
1814
0
        struct ds s;
1815
1816
0
        ds_init(&s);
1817
0
        ds_put_cstr(&s, "put");
1818
0
        if (put->flags & DPIF_FP_CREATE) {
1819
0
            ds_put_cstr(&s, "[create]");
1820
0
        }
1821
0
        if (put->flags & DPIF_FP_MODIFY) {
1822
0
            ds_put_cstr(&s, "[modify]");
1823
0
        }
1824
0
        if (put->flags & DPIF_FP_ZERO_STATS) {
1825
0
            ds_put_cstr(&s, "[zero]");
1826
0
        }
1827
0
        log_flow_message(dpif, error, module, ds_cstr(&s),
1828
0
                         put->key, put->key_len, put->mask, put->mask_len,
1829
0
                         put->ufid, put->stats, put->actions,
1830
0
                         put->actions_len);
1831
0
        ds_destroy(&s);
1832
0
    }
1833
0
}
1834
1835
void
1836
log_flow_del_message(const struct dpif *dpif,
1837
                     const struct vlog_module *module,
1838
                     const struct dpif_flow_del *del,
1839
                     int error)
1840
0
{
1841
0
    if (should_log_flow_message(module, error)) {
1842
0
        log_flow_message(dpif, error, module, "flow_del",
1843
0
                         del->key, del->key_len,
1844
0
                         NULL, 0, del->ufid, !error ? del->stats : NULL,
1845
0
                         NULL, 0);
1846
0
    }
1847
0
}
1848
1849
/* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
 * (0 for success).  'subexecute' should be true if the execution is a result
 * of breaking down a larger execution that needed help, false otherwise.
 *
 * XXX In theory, the log message could be deceptive because this function is
 * called after the dpif_provider's '->execute' function, which is allowed to
 * modify execute->packet and execute->md.  In practice, though:
 *
 *     - dpif-netlink doesn't modify execute->packet or execute->md.
 *
 *     - dpif-netdev does modify them but it is less likely to have problems
 *       because it is built into ovs-vswitchd and cannot have version skew,
 *       etc.
 *
 * It would still be better to avoid the potential problem.  I don't know of a
 * good way to do that, though, that isn't expensive. */
void
log_execute_message(const struct dpif *dpif,
                    const struct vlog_module *module,
                    const struct dpif_execute *execute,
                    bool subexecute, int error)
{
    /* Rate-limit first (warning limiter on error, debug limiter otherwise)
     * and never log probe executions. */
    if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
        && !execute->probe) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        char *packet;
        uint64_t stub[1024 / 8];
        struct ofpbuf md = OFPBUF_STUB_INITIALIZER(stub);

        packet = ofp_packet_to_string(dp_packet_data(execute->packet),
                                      dp_packet_size(execute->packet),
                                      execute->packet->packet_type);
        /* Serialize the packet's metadata into 'md' for formatting below. */
        odp_key_from_dp_packet(&md, execute->packet);
        ds_put_format(&ds, "%s: %sexecute ",
                      dpif_name(dpif),
                      (subexecute ? "sub-"
                       : dpif_execute_needs_help(execute) ? "super-"
                       : ""));
        format_odp_actions(&ds, execute->actions, execute->actions_len, NULL);
        if (error) {
            ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
        }
        ds_put_format(&ds, " on packet %s", packet);
        ds_put_format(&ds, " with metadata ");
        odp_flow_format(md.data, md.size, NULL, 0, NULL, &ds, true, false);
        ds_put_format(&ds, " mtu %d", execute->mtu);
        vlog(module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
        free(packet);
        ofpbuf_uninit(&md);
    }
}
1902
1903
void
1904
log_flow_get_message(const struct dpif *dpif,
1905
                     const struct vlog_module *module,
1906
                     const struct dpif_flow_get *get,
1907
                     int error)
1908
0
{
1909
0
    if (should_log_flow_message(module, error)) {
1910
0
        log_flow_message(dpif, error, module, "flow_get",
1911
0
                         get->key, get->key_len,
1912
0
                         get->flow->mask, get->flow->mask_len,
1913
0
                         get->ufid, &get->flow->stats,
1914
0
                         get->flow->actions, get->flow->actions_len);
1915
0
    }
1916
0
}
1917
1918
/* Returns true if 'dpif' supports the tnl_push and tnl_pop actions;
 * currently only the userspace (netdev) datapath does. */
bool
dpif_supports_tnl_push_pop(const struct dpif *dpif)
{
    return dpif_is_netdev(dpif);
}
1923
1924
/* Returns true if 'dpif' may support the explicit drop action. */
bool
dpif_may_support_explicit_drop_action(const struct dpif *dpif)
{
    /* The userspace datapath always supports it. */
    if (dpif_is_netdev(dpif)) {
        return true;
    }
    /* TC does not support offloading this action. */
    return !netdev_is_flow_api_enabled();
}
1930
1931
/* Returns true if 'dpif' supports the load-balance output action used by
 * the balance-tcp bond optimization. */
bool
dpif_supports_lb_output_action(const struct dpif *dpif)
{
    /*
     * Balance-tcp optimization is currently supported in netdev
     * datapath only.
     */
    return dpif_is_netdev(dpif);
}
1940
1941
/* Returns true if 'dpif' may support the psample action. */
bool
dpif_may_support_psample(const struct dpif *dpif)
{
    /* Userspace datapath does not support this action. */
    return !dpif_is_netdev(dpif);
}
1947
1948
/* Meters */
1949
void
1950
dpif_meter_get_features(const struct dpif *dpif,
1951
                        struct ofputil_meter_features *features)
1952
0
{
1953
0
    memset(features, 0, sizeof *features);
1954
0
    if (dpif->dpif_class->meter_get_features) {
1955
0
        dpif->dpif_class->meter_get_features(dpif, features);
1956
0
    }
1957
0
}
1958
1959
/* Adds or modifies the meter in 'dpif' with the given 'meter_id' and
1960
 * the configuration in 'config'.
1961
 *
1962
 * The meter id specified through 'config->meter_id' is ignored. */
1963
int
1964
dpif_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
1965
               struct ofputil_meter_config *config)
1966
0
{
1967
0
    COVERAGE_INC(dpif_meter_set);
1968
1969
0
    if (!(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
1970
0
        return EBADF; /* Rate unit type not set. */
1971
0
    }
1972
1973
0
    if ((config->flags & OFPMF13_KBPS) && (config->flags & OFPMF13_PKTPS)) {
1974
0
        return EBADF; /* Both rate units may not be set. */
1975
0
    }
1976
1977
0
    if (config->n_bands == 0) {
1978
0
        return EINVAL;
1979
0
    }
1980
1981
0
    for (size_t i = 0; i < config->n_bands; i++) {
1982
0
        if (config->bands[i].rate == 0) {
1983
0
            return EDOM; /* Rate must be non-zero */
1984
0
        }
1985
0
    }
1986
1987
0
    int error = dpif->dpif_class->meter_set(dpif, meter_id, config);
1988
0
    if (!error) {
1989
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" set",
1990
0
                    dpif_name(dpif), meter_id.uint32);
1991
0
    } else {
1992
0
        VLOG_WARN_RL(&error_rl, "%s: failed to set DPIF meter %"PRIu32": %s",
1993
0
                     dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1994
0
    }
1995
0
    return error;
1996
0
}
1997
1998
int
1999
dpif_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
2000
               struct ofputil_meter_stats *stats, uint16_t n_bands)
2001
0
{
2002
0
    int error;
2003
2004
0
    COVERAGE_INC(dpif_meter_get);
2005
2006
0
    error = dpif->dpif_class->meter_get(dpif, meter_id, stats, n_bands);
2007
0
    if (!error) {
2008
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" get stats",
2009
0
                    dpif_name(dpif), meter_id.uint32);
2010
0
    } else {
2011
0
        VLOG_WARN_RL(&error_rl,
2012
0
                     "%s: failed to get DPIF meter %"PRIu32" stats: %s",
2013
0
                     dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
2014
0
        stats->packet_in_count = ~0;
2015
0
        stats->byte_in_count = ~0;
2016
0
        stats->n_bands = 0;
2017
0
    }
2018
0
    return error;
2019
0
}
2020
2021
int
2022
dpif_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
2023
               struct ofputil_meter_stats *stats, uint16_t n_bands)
2024
0
{
2025
0
    int error;
2026
2027
0
    COVERAGE_INC(dpif_meter_del);
2028
2029
0
    error = dpif->dpif_class->meter_del(dpif, meter_id, stats, n_bands);
2030
0
    if (!error) {
2031
0
        VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" deleted",
2032
0
                    dpif_name(dpif), meter_id.uint32);
2033
0
    } else {
2034
0
        VLOG_WARN_RL(&error_rl,
2035
0
                     "%s: failed to delete DPIF meter %"PRIu32": %s",
2036
0
                     dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
2037
0
        if (stats) {
2038
0
            stats->packet_in_count = ~0;
2039
0
            stats->byte_in_count = ~0;
2040
0
            stats->n_bands = 0;
2041
0
        }
2042
0
    }
2043
0
    return error;
2044
0
}
2045
2046
int
2047
dpif_bond_add(struct dpif *dpif, uint32_t bond_id, odp_port_t *member_map)
2048
0
{
2049
0
    return dpif->dpif_class->bond_add
2050
0
           ? dpif->dpif_class->bond_add(dpif, bond_id, member_map)
2051
0
           : EOPNOTSUPP;
2052
0
}
2053
2054
int
2055
dpif_bond_del(struct dpif *dpif, uint32_t bond_id)
2056
0
{
2057
0
    return dpif->dpif_class->bond_del
2058
0
           ? dpif->dpif_class->bond_del(dpif, bond_id)
2059
0
           : EOPNOTSUPP;
2060
0
}
2061
2062
int
2063
dpif_bond_stats_get(struct dpif *dpif, uint32_t bond_id,
2064
                    uint64_t *n_bytes)
2065
0
{
2066
0
    memset(n_bytes, 0, BOND_BUCKETS * sizeof *n_bytes);
2067
2068
0
    return dpif->dpif_class->bond_stats_get
2069
0
           ? dpif->dpif_class->bond_stats_get(dpif, bond_id, n_bytes)
2070
0
           : EOPNOTSUPP;
2071
0
}
2072
2073
/* Sums the number of hardware-offloaded flows on each of 'dpif''s ports
 * into '*n_flows'.  Returns EOPNOTSUPP if no port's device supported the
 * query, otherwise 0. */
int
dpif_get_n_offloaded_flows(struct dpif *dpif, uint64_t *n_flows)
{
    const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
    struct dpif_port_dump port_dump;
    struct dpif_port dpif_port;
    int ret, n_devs = 0;
    uint64_t nflows;

    *n_flows = 0;
    DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
        ret = netdev_ports_get_n_flows(dpif_type_str, dpif_port.port_no,
                                       &nflows);
        if (!ret) {
            *n_flows += nflows;
        } else if (ret == EOPNOTSUPP) {
            /* Query unsupported on this device: skip it without counting
             * it toward 'n_devs'. */
            continue;
        }
        /* Reached on success and on errors other than EOPNOTSUPP, so a
         * transient per-device failure still counts the device as capable
         * (its flows are simply not added) — presumably intentional;
         * NOTE(review): confirm this is the desired error semantics. */
        n_devs++;
    }
    return n_devs ? 0 : EOPNOTSUPP;
}
2095
2096
int
2097
dpif_cache_get_supported_levels(struct dpif *dpif, uint32_t *levels)
2098
0
{
2099
0
    return dpif->dpif_class->cache_get_supported_levels
2100
0
           ? dpif->dpif_class->cache_get_supported_levels(dpif, levels)
2101
0
           : EOPNOTSUPP;
2102
0
}
2103
2104
int
2105
dpif_cache_get_name(struct dpif *dpif, uint32_t level, const char **name)
2106
0
{
2107
0
    return dpif->dpif_class->cache_get_name
2108
0
           ? dpif->dpif_class->cache_get_name(dpif, level, name)
2109
0
           : EOPNOTSUPP;
2110
0
}
2111
2112
int
2113
dpif_cache_get_size(struct dpif *dpif, uint32_t level, uint32_t *size)
2114
0
{
2115
0
    return dpif->dpif_class->cache_get_size
2116
0
           ? dpif->dpif_class->cache_get_size(dpif, level, size)
2117
0
           : EOPNOTSUPP;
2118
0
}
2119
2120
int
2121
dpif_cache_set_size(struct dpif *dpif, uint32_t level, uint32_t size)
2122
0
{
2123
0
    return dpif->dpif_class->cache_set_size
2124
0
           ? dpif->dpif_class->cache_set_size(dpif, level, size)
2125
0
           : EOPNOTSUPP;
2126
0
}
2127
2128
/* Returns the provider's 'synced_dp_layers' flag.  NOTE(review): the
 * precise meaning of this flag is defined by the dpif provider interface
 * (dpif-provider.h) — confirm semantics there before relying on it. */
bool
dpif_synced_dp_layers(struct dpif *dpif)
{
    return dpif->dpif_class->synced_dp_layers;
}