Coverage Report

Created: 2023-03-26 07:42

/src/openvswitch/lib/netdev.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include <config.h>
18
#include "netdev.h"
19
20
#include <errno.h>
21
#include <inttypes.h>
22
#include <sys/types.h>
23
#include <netinet/in.h>
24
#include <stdlib.h>
25
#include <string.h>
26
#include <unistd.h>
27
28
#ifndef _WIN32
29
#include <ifaddrs.h>
30
#include <net/if.h>
31
#include <sys/ioctl.h>
32
#endif
33
34
#include "cmap.h"
35
#include "coverage.h"
36
#include "dpif.h"
37
#include "dp-packet.h"
38
#include "openvswitch/dynamic-string.h"
39
#include "fatal-signal.h"
40
#include "hash.h"
41
#include "openvswitch/list.h"
42
#include "netdev-offload-provider.h"
43
#include "netdev-provider.h"
44
#include "netdev-vport.h"
45
#include "odp-netlink.h"
46
#include "openflow/openflow.h"
47
#include "packets.h"
48
#include "openvswitch/ofp-print.h"
49
#include "openvswitch/poll-loop.h"
50
#include "seq.h"
51
#include "openvswitch/shash.h"
52
#include "smap.h"
53
#include "socket-util.h"
54
#include "sset.h"
55
#include "svec.h"
56
#include "openvswitch/vlog.h"
57
#include "flow.h"
58
#include "util.h"
59
#ifdef __linux__
60
#include "tc.h"
61
#endif
62
63
VLOG_DEFINE_THIS_MODULE(netdev);
64
65
COVERAGE_DEFINE(netdev_received);
66
COVERAGE_DEFINE(netdev_sent);
67
COVERAGE_DEFINE(netdev_add_router);
68
COVERAGE_DEFINE(netdev_get_stats);
69
COVERAGE_DEFINE(netdev_send_prepare_drops);
70
COVERAGE_DEFINE(netdev_push_header_drops);
71
72
struct netdev_saved_flags {
73
    struct netdev *netdev;
74
    struct ovs_list node;           /* In struct netdev's saved_flags_list. */
75
    enum netdev_flags saved_flags;
76
    enum netdev_flags saved_values;
77
};
78
79
/* Protects 'netdev_shash' and the mutable members of struct netdev. */
80
static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;
81
82
/* All created network devices. */
83
static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
84
    = SHASH_INITIALIZER(&netdev_shash);
85
86
/* Mutual exclusion of */
87
static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex)
88
    = OVS_MUTEX_INITIALIZER;
89
90
/* Contains 'struct netdev_registered_class'es. */
91
static struct cmap netdev_classes = CMAP_INITIALIZER;
92
93
struct netdev_registered_class {
94
    struct cmap_node cmap_node; /* In 'netdev_classes', by class->type. */
95
    const struct netdev_class *class;
96
97
    /* Number of references: one for the class itself and one for every
98
     * instance of the class. */
99
    struct ovs_refcount refcnt;
100
};
101
102
/* This is set pretty low because we probably won't learn anything from the
103
 * additional log messages. */
104
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
105
106
static void restore_all_flags(void *aux OVS_UNUSED);
107
void update_device_args(struct netdev *, const struct shash *args);
108
#ifdef HAVE_AF_XDP
109
void signal_remove_xdp(struct netdev *netdev);
110
#endif
111
112
int
113
netdev_n_txq(const struct netdev *netdev)
114
0
{
115
0
    return netdev->n_txq;
116
0
}
117
118
int
119
netdev_n_rxq(const struct netdev *netdev)
120
0
{
121
0
    return netdev->n_rxq;
122
0
}
123
124
bool
125
netdev_is_pmd(const struct netdev *netdev)
126
0
{
127
0
    return netdev->netdev_class->is_pmd;
128
0
}
129
130
bool
131
netdev_has_tunnel_push_pop(const struct netdev *netdev)
132
0
{
133
0
    return netdev->netdev_class->push_header
134
0
           && netdev->netdev_class->pop_header;
135
0
}
136
137
static void
138
netdev_initialize(void)
139
    OVS_EXCLUDED(netdev_mutex)
140
0
{
141
0
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
142
143
0
    if (ovsthread_once_start(&once)) {
144
0
        fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
145
146
0
        netdev_vport_patch_register();
147
148
0
#ifdef __linux__
149
0
        netdev_register_provider(&netdev_linux_class);
150
0
        netdev_register_provider(&netdev_internal_class);
151
0
        netdev_register_provider(&netdev_tap_class);
152
0
        netdev_vport_tunnel_register();
153
154
0
        netdev_register_flow_api_provider(&netdev_offload_tc);
155
#ifdef HAVE_AF_XDP
156
        netdev_register_provider(&netdev_afxdp_class);
157
        netdev_register_provider(&netdev_afxdp_nonpmd_class);
158
#endif
159
0
#endif
160
#if defined(__FreeBSD__) || defined(__NetBSD__)
161
        netdev_register_provider(&netdev_tap_class);
162
        netdev_register_provider(&netdev_bsd_class);
163
#endif
164
#ifdef _WIN32
165
        netdev_register_provider(&netdev_windows_class);
166
        netdev_register_provider(&netdev_internal_class);
167
        netdev_vport_tunnel_register();
168
#endif
169
0
        ovsthread_once_done(&once);
170
0
    }
171
0
}
172
173
/* Performs periodic work needed by all the various kinds of netdevs.
174
 *
175
 * If your program opens any netdevs, it must call this function within its
176
 * main poll loop. */
177
void
178
netdev_run(void)
179
    OVS_EXCLUDED(netdev_mutex)
180
0
{
181
0
    netdev_initialize();
182
183
0
    struct netdev_registered_class *rc;
184
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
185
0
        if (rc->class->run) {
186
0
            rc->class->run(rc->class);
187
0
        }
188
0
    }
189
0
}
190
191
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
192
 *
193
 * If your program opens any netdevs, it must call this function within its
194
 * main poll loop. */
195
void
196
netdev_wait(void)
197
    OVS_EXCLUDED(netdev_mutex)
198
0
{
199
0
    netdev_initialize();
200
201
0
    struct netdev_registered_class *rc;
202
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
203
0
        if (rc->class->wait) {
204
0
            rc->class->wait(rc->class);
205
0
        }
206
0
    }
207
0
}
208
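/* Usage sketch (not from netdev.c): the comments above require any program
 * that opens netdevs to call netdev_run() and netdev_wait() from its main
 * poll loop.  A minimal loop, with the example_* name and the packet
 * processing step left as placeholders, could look like this: */
#include "netdev.h"
#include "openvswitch/poll-loop.h"

static void
example_main_loop(void)
{
    for (;;) {
        netdev_run();           /* Periodic work for every provider. */
        /* ... receive and process packets, e.g. via netdev_rxq_recv() ... */
        netdev_wait();          /* Register wakeup conditions. */
        poll_block();           /* Sleep until something needs attention. */
    }
}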
209
static struct netdev_registered_class *
210
netdev_lookup_class(const char *type)
211
0
{
212
0
    struct netdev_registered_class *rc;
213
0
    CMAP_FOR_EACH_WITH_HASH (rc, cmap_node, hash_string(type, 0),
214
0
                             &netdev_classes) {
215
0
        if (!strcmp(type, rc->class->type)) {
216
0
            return rc;
217
0
        }
218
0
    }
219
0
    return NULL;
220
0
}
221
222
/* Initializes and registers a new netdev provider.  After successful
223
 * registration, new netdevs of that type can be opened using netdev_open(). */
224
int
225
netdev_register_provider(const struct netdev_class *new_class)
226
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
227
0
{
228
0
    int error;
229
230
0
    ovs_mutex_lock(&netdev_class_mutex);
231
0
    if (netdev_lookup_class(new_class->type)) {
232
0
        VLOG_WARN("attempted to register duplicate netdev provider: %s",
233
0
                   new_class->type);
234
0
        error = EEXIST;
235
0
    } else {
236
0
        error = new_class->init ? new_class->init() : 0;
237
0
        if (!error) {
238
0
            struct netdev_registered_class *rc;
239
240
0
            rc = xmalloc(sizeof *rc);
241
0
            cmap_insert(&netdev_classes, &rc->cmap_node,
242
0
                        hash_string(new_class->type, 0));
243
0
            rc->class = new_class;
244
0
            ovs_refcount_init(&rc->refcnt);
245
0
        } else {
246
0
            VLOG_ERR("failed to initialize %s network device class: %s",
247
0
                     new_class->type, ovs_strerror(error));
248
0
        }
249
0
    }
250
0
    ovs_mutex_unlock(&netdev_class_mutex);
251
252
0
    return error;
253
0
}
254
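/* Usage sketch (not from netdev.c): registering a provider.  The class below
 * is hypothetical and fills in only the members the registration path above
 * touches ('type' and 'init'); a real provider must also supply the alloc,
 * construct, destruct, dealloc and I/O hooks declared in netdev-provider.h. */
#include <errno.h>
#include "netdev-provider.h"

static int
example_init(void)
{
    return 0;                   /* Nothing to set up for this sketch. */
}

static const struct netdev_class example_class = {
    .type = "example",          /* Hypothetical type name. */
    .init = example_init,
    .is_pmd = false,
};

static void
example_register(void)
{
    int error = netdev_register_provider(&example_class);
    if (error && error != EEXIST) {
        /* Registration failed; ovs_strerror(error) gives the reason. */
    }
}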
255
/* Unregisters a netdev provider.  'type' must have been previously registered
256
 * and not currently be in use by any netdevs.  After unregistration new
257
 * netdevs of that type cannot be opened using netdev_open().  (However, the
258
 * provider may still be accessible from other threads until the next RCU grace
259
 * period, so the caller must not free or re-register the same netdev_class
260
 * until that has passed.) */
261
int
262
netdev_unregister_provider(const char *type)
263
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
264
0
{
265
0
    struct netdev_registered_class *rc;
266
0
    int error;
267
268
0
    netdev_initialize();
269
270
0
    ovs_mutex_lock(&netdev_class_mutex);
271
0
    rc = netdev_lookup_class(type);
272
0
    if (!rc) {
273
0
        VLOG_WARN("attempted to unregister a netdev provider that is not "
274
0
                  "registered: %s", type);
275
0
        error = EAFNOSUPPORT;
276
0
    } else if (ovs_refcount_unref(&rc->refcnt) != 1) {
277
0
        ovs_refcount_ref(&rc->refcnt);
278
0
        VLOG_WARN("attempted to unregister in use netdev provider: %s",
279
0
                  type);
280
0
        error = EBUSY;
281
0
    } else  {
282
0
        cmap_remove(&netdev_classes, &rc->cmap_node,
283
0
                    hash_string(rc->class->type, 0));
284
0
        ovsrcu_postpone(free, rc);
285
0
        error = 0;
286
0
    }
287
0
    ovs_mutex_unlock(&netdev_class_mutex);
288
289
0
    return error;
290
0
}
291
292
/* Clears 'types' and enumerates the types of all currently registered netdev
293
 * providers into it.  The caller must first initialize the sset. */
294
void
295
netdev_enumerate_types(struct sset *types)
296
    OVS_EXCLUDED(netdev_mutex)
297
0
{
298
0
    netdev_initialize();
299
0
    sset_clear(types);
300
301
0
    struct netdev_registered_class *rc;
302
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
303
0
        sset_add(types, rc->class->type);
304
0
    }
305
0
}
306
307
static const char *
308
netdev_vport_type_from_name(const char *name)
309
0
{
310
0
    struct netdev_registered_class *rc;
311
0
    const char *type;
312
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
313
0
        const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
314
0
        if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
315
0
            type = rc->class->type;
316
0
            return type;
317
0
        }
318
0
    }
319
0
    return NULL;
320
0
}
321
322
/* Check that the network device name is not the same as any of the registered
323
 * vport providers' dpif_port name (dpif_port is NULL if the vport provider
324
 * does not define it) or the datapath internal port name (e.g. ovs-system).
325
 *
326
 * Returns true if there is a name conflict, false otherwise. */
327
bool
328
netdev_is_reserved_name(const char *name)
329
    OVS_EXCLUDED(netdev_mutex)
330
0
{
331
0
    netdev_initialize();
332
333
0
    struct netdev_registered_class *rc;
334
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
335
0
        const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
336
0
        if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
337
0
            return true;
338
0
        }
339
0
    }
340
341
0
    if (!strncmp(name, "ovs-", 4)) {
342
0
        struct sset types;
343
0
        const char *type;
344
345
0
        sset_init(&types);
346
0
        dp_enumerate_types(&types);
347
0
        SSET_FOR_EACH (type, &types) {
348
0
            if (!strcmp(name+4, type)) {
349
0
                sset_destroy(&types);
350
0
                return true;
351
0
            }
352
0
        }
353
0
        sset_destroy(&types);
354
0
    }
355
356
0
    return false;
357
0
}
358
359
/* Opens the network device named 'name' (e.g. "eth0") of the specified 'type'
360
 * (e.g. "system") and returns zero if successful, otherwise a positive errno
361
 * value.  On success, sets '*netdevp' to the new network device, otherwise to
362
 * null.
363
 *
364
 * Some network devices may need to be configured (with netdev_set_config())
365
 * before they can be used.
366
 *
367
 * Before opening rxqs or sending packets, '*netdevp' may need to be
368
 * reconfigured (with netdev_is_reconf_required() and netdev_reconfigure()).
369
 * */
370
int
371
netdev_open(const char *name, const char *type, struct netdev **netdevp)
372
    OVS_EXCLUDED(netdev_mutex)
373
0
{
374
0
    struct netdev *netdev;
375
0
    int error = 0;
376
377
0
    if (!name[0]) {
378
        /* Reject empty names.  This saves the providers having to do this.  At
379
         * least one screwed this up: the netdev-linux "tap" implementation
380
         * passed the name directly to the Linux TUNSETIFF call, which treats
381
         * an empty string as a request to generate a unique name. */
382
0
        return EINVAL;
383
0
    }
384
385
0
    netdev_initialize();
386
387
0
    ovs_mutex_lock(&netdev_mutex);
388
0
    netdev = shash_find_data(&netdev_shash, name);
389
390
0
    if (netdev && type && type[0]) {
391
0
        if (strcmp(type, netdev->netdev_class->type)) {
392
393
0
            if (netdev->auto_classified) {
394
                /* If this device was first created without a classification
395
                 * type, for example due to routing or tunneling code, and they
396
                 * keep a reference, a "classified" call to open will fail.
397
                 * In this case we remove the classless device, and re-add it
398
                 * below. We remove the netdev from the shash, and change the
399
                 * sequence, so owners of the old classless device can
400
                 * release/cleanup. */
401
0
                if (netdev->node) {
402
0
                    shash_delete(&netdev_shash, netdev->node);
403
0
                    netdev->node = NULL;
404
0
                    netdev_change_seq_changed(netdev);
405
0
                }
406
407
0
                netdev = NULL;
408
0
            } else {
409
0
                error = EEXIST;
410
0
            }
411
0
        } else if (netdev->auto_classified) {
412
            /* If netdev reopened with type "system", clear auto_classified. */
413
0
            netdev->auto_classified = false;
414
0
        }
415
0
    }
416
417
0
    if (!netdev) {
418
0
        struct netdev_registered_class *rc;
419
420
0
        rc = netdev_lookup_class(type && type[0] ? type : "system");
421
0
        if (rc && ovs_refcount_try_ref_rcu(&rc->refcnt)) {
422
0
            netdev = rc->class->alloc();
423
0
            if (netdev) {
424
0
                memset(netdev, 0, sizeof *netdev);
425
0
                netdev->netdev_class = rc->class;
426
0
                netdev->auto_classified = type && type[0] ? false : true;
427
0
                netdev->name = xstrdup(name);
428
0
                netdev->change_seq = 1;
429
0
                netdev->reconfigure_seq = seq_create();
430
0
                netdev->last_reconfigure_seq =
431
0
                    seq_read(netdev->reconfigure_seq);
432
0
                ovsrcu_set(&netdev->flow_api, NULL);
433
0
                netdev->hw_info.oor = false;
434
0
                atomic_init(&netdev->hw_info.miss_api_supported, false);
435
0
                netdev->node = shash_add(&netdev_shash, name, netdev);
436
437
                /* By default enable one tx and rx queue per netdev. */
438
0
                netdev->n_txq = netdev->netdev_class->send ? 1 : 0;
439
0
                netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;
440
441
0
                ovs_list_init(&netdev->saved_flags_list);
442
443
0
                error = rc->class->construct(netdev);
444
0
                if (!error) {
445
0
                    netdev_change_seq_changed(netdev);
446
0
                } else {
447
0
                    ovs_refcount_unref(&rc->refcnt);
448
0
                    seq_destroy(netdev->reconfigure_seq);
449
0
                    free(netdev->name);
450
0
                    ovs_assert(ovs_list_is_empty(&netdev->saved_flags_list));
451
0
                    shash_delete(&netdev_shash, netdev->node);
452
0
                    rc->class->dealloc(netdev);
453
0
                }
454
0
            } else {
455
0
                error = ENOMEM;
456
0
            }
457
0
        } else {
458
0
            VLOG_WARN("could not create netdev %s of unknown type %s",
459
0
                      name, type);
460
0
            error = EAFNOSUPPORT;
461
0
        }
462
0
    }
463
464
0
    if (!error) {
465
0
        netdev->ref_cnt++;
466
0
        *netdevp = netdev;
467
0
    } else {
468
0
        *netdevp = NULL;
469
0
    }
470
0
    ovs_mutex_unlock(&netdev_mutex);
471
472
0
    return error;
473
0
}
474
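/* Usage sketch (not from netdev.c): opening and closing a device as described
 * in the comment above.  "eth0" and "system" are only illustrative values. */
#include "netdev.h"
#include "util.h"

static void
example_open(void)
{
    struct netdev *netdev;
    int error = netdev_open("eth0", "system", &netdev);

    if (error) {
        /* 'netdev' is NULL here and 'error' is a positive errno value;
         * ovs_strerror(error) yields a printable message. */
        return;
    }
    /* ... possibly netdev_set_config() / netdev_reconfigure() ... */
    netdev_close(netdev);       /* Drops the reference taken by netdev_open(). */
}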
475
/* Returns a reference to 'netdev_' for the caller to own. Returns null if
476
 * 'netdev_' is null. */
477
struct netdev *
478
netdev_ref(const struct netdev *netdev_)
479
    OVS_EXCLUDED(netdev_mutex)
480
0
{
481
0
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
482
483
0
    if (netdev) {
484
0
        ovs_mutex_lock(&netdev_mutex);
485
0
        ovs_assert(netdev->ref_cnt > 0);
486
0
        netdev->ref_cnt++;
487
0
        ovs_mutex_unlock(&netdev_mutex);
488
0
    }
489
0
    return netdev;
490
0
}
491
492
/* Reconfigures the device 'netdev' with 'args'.  'args' may be empty
493
 * or NULL if none are needed. */
494
int
495
netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
496
    OVS_EXCLUDED(netdev_mutex)
497
0
{
498
0
    if (netdev->netdev_class->set_config) {
499
0
        const struct smap no_args = SMAP_INITIALIZER(&no_args);
500
0
        char *verbose_error = NULL;
501
0
        int error;
502
503
0
        error = netdev->netdev_class->set_config(netdev,
504
0
                                                 args ? args : &no_args,
505
0
                                                 &verbose_error);
506
0
        if (error) {
507
0
            VLOG_WARN_BUF(verbose_error ? NULL : errp,
508
0
                          "%s: could not set configuration (%s)",
509
0
                          netdev_get_name(netdev), ovs_strerror(error));
510
0
            if (verbose_error) {
511
0
                if (errp) {
512
0
                    *errp = verbose_error;
513
0
                } else {
514
0
                    free(verbose_error);
515
0
                }
516
0
            }
517
0
        }
518
0
        return error;
519
0
    } else if (args && !smap_is_empty(args)) {
520
0
        VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not configurable",
521
0
                      netdev_get_name(netdev));
522
0
    }
523
0
    return 0;
524
0
}
525
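/* Usage sketch (not from netdev.c): passing configuration to
 * netdev_set_config() through an smap.  The "remote_ip" key is only an
 * illustration of a tunnel-style option, not something this file defines. */
#include <stdlib.h>
#include "netdev.h"
#include "smap.h"

static int
example_set_config(struct netdev *netdev)
{
    struct smap args = SMAP_INITIALIZER(&args);
    char *errp = NULL;
    int error;

    smap_add(&args, "remote_ip", "192.168.0.1");
    error = netdev_set_config(netdev, &args, &errp);
    if (error) {
        /* 'errp', if set, holds a detailed message the caller must free(). */
        free(errp);
    }
    smap_destroy(&args);
    return error;
}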
526
/* Returns the current configuration for 'netdev' in 'args'.  The caller must
527
 * have already initialized 'args' with smap_init().  Returns 0 on success, in
528
 * which case 'args' will be filled with 'netdev''s configuration.  On failure
529
 * returns a positive errno value, in which case 'args' will be empty.
530
 *
531
 * The caller owns 'args' and its contents and must eventually free them with
532
 * smap_destroy(). */
533
int
534
netdev_get_config(const struct netdev *netdev, struct smap *args)
535
    OVS_EXCLUDED(netdev_mutex)
536
0
{
537
0
    int error;
538
539
0
    smap_clear(args);
540
0
    if (netdev->netdev_class->get_config) {
541
0
        error = netdev->netdev_class->get_config(netdev, args);
542
0
        if (error) {
543
0
            smap_clear(args);
544
0
        }
545
0
    } else {
546
0
        error = 0;
547
0
    }
548
549
0
    return error;
550
0
}
551
552
const struct netdev_tunnel_config *
553
netdev_get_tunnel_config(const struct netdev *netdev)
554
    OVS_EXCLUDED(netdev_mutex)
555
0
{
556
0
    if (netdev->netdev_class->get_tunnel_config) {
557
0
        return netdev->netdev_class->get_tunnel_config(netdev);
558
0
    } else {
559
0
        return NULL;
560
0
    }
561
0
}
562
563
/* Returns the id of the numa node the 'netdev' is on.  If the function
564
 * is not implemented, returns NETDEV_NUMA_UNSPEC. */
565
int
566
netdev_get_numa_id(const struct netdev *netdev)
567
0
{
568
0
    if (netdev->netdev_class->get_numa_id) {
569
0
        return netdev->netdev_class->get_numa_id(netdev);
570
0
    } else {
571
0
        return NETDEV_NUMA_UNSPEC;
572
0
    }
573
0
}
574
575
static void
576
netdev_unref(struct netdev *dev)
577
    OVS_RELEASES(netdev_mutex)
578
0
{
579
0
    ovs_assert(dev->ref_cnt);
580
0
    if (!--dev->ref_cnt) {
581
0
        const struct netdev_class *class = dev->netdev_class;
582
0
        struct netdev_registered_class *rc;
583
584
0
        netdev_uninit_flow_api(dev);
585
586
0
        dev->netdev_class->destruct(dev);
587
588
0
        if (dev->node) {
589
0
            shash_delete(&netdev_shash, dev->node);
590
0
        }
591
0
        free(dev->name);
592
0
        seq_destroy(dev->reconfigure_seq);
593
0
        dev->netdev_class->dealloc(dev);
594
0
        ovs_mutex_unlock(&netdev_mutex);
595
596
0
        rc = netdev_lookup_class(class->type);
597
0
        ovs_refcount_unref(&rc->refcnt);
598
0
    } else {
599
0
        ovs_mutex_unlock(&netdev_mutex);
600
0
    }
601
0
}
602
603
/* Closes and destroys 'netdev'. */
604
void
605
netdev_close(struct netdev *netdev)
606
    OVS_EXCLUDED(netdev_mutex)
607
0
{
608
0
    if (netdev) {
609
0
        ovs_mutex_lock(&netdev_mutex);
610
0
        netdev_unref(netdev);
611
0
    }
612
0
}
613
614
/* Removes 'netdev' from the global shash and unrefs 'netdev'.
615
 *
616
 * This allows handler and revalidator threads to still retain references
617
 * to this netdev while the main thread changes interface configuration.
618
 *
619
 * This function should only be called by the main thread when closing
620
 * netdevs during user configuration changes. Otherwise, netdev_close should be
621
 * used to close netdevs. */
622
void
623
netdev_remove(struct netdev *netdev)
624
0
{
625
0
    if (netdev) {
626
0
        ovs_mutex_lock(&netdev_mutex);
627
0
        if (netdev->node) {
628
0
            shash_delete(&netdev_shash, netdev->node);
629
0
            netdev->node = NULL;
630
0
            netdev_change_seq_changed(netdev);
631
0
        }
632
0
        netdev_unref(netdev);
633
0
    }
634
0
}
635
636
/* Parses 'netdev_name_', which is of the form [type@]name into its component
637
 * pieces.  'name' and 'type' must be freed by the caller. */
638
void
639
netdev_parse_name(const char *netdev_name_, char **name, char **type)
640
0
{
641
0
    char *netdev_name = xstrdup(netdev_name_);
642
0
    char *separator;
643
644
0
    separator = strchr(netdev_name, '@');
645
0
    if (separator) {
646
0
        *separator = '\0';
647
0
        *type = netdev_name;
648
0
        *name = xstrdup(separator + 1);
649
0
    } else {
650
0
        *name = netdev_name;
651
0
        *type = xstrdup("system");
652
0
    }
653
0
}
654
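/* Usage sketch (not from netdev.c): netdev_parse_name() splits "[type@]name"
 * and always hands back two malloc'd strings that the caller must free. */
#include <stdlib.h>
#include "netdev.h"

static void
example_parse(void)
{
    char *name, *type;

    netdev_parse_name("gre@gre0", &name, &type);
    /* Here name is "gre0" and type is "gre". */
    free(name);
    free(type);

    netdev_parse_name("eth0", &name, &type);
    /* No '@', so name is "eth0" and type defaults to "system". */
    free(name);
    free(type);
}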
655
/* Attempts to open a netdev_rxq handle for obtaining packets received on
656
 * 'netdev'.  On success, returns 0 and stores a nonnull 'netdev_rxq *' into
657
 * '*rxp'.  On failure, returns a positive errno value and stores NULL into
658
 * '*rxp'.
659
 *
660
 * Some kinds of network devices might not support receiving packets.  This
661
 * function returns EOPNOTSUPP in that case. */
662
int
663
netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id)
664
    OVS_EXCLUDED(netdev_mutex)
665
0
{
666
0
    int error;
667
668
0
    if (netdev->netdev_class->rxq_alloc && id < netdev->n_rxq) {
669
0
        struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc();
670
0
        if (rx) {
671
0
            rx->netdev = netdev;
672
0
            rx->queue_id = id;
673
0
            error = netdev->netdev_class->rxq_construct(rx);
674
0
            if (!error) {
675
0
                netdev_ref(netdev);
676
0
                *rxp = rx;
677
0
                return 0;
678
0
            }
679
0
            netdev->netdev_class->rxq_dealloc(rx);
680
0
        } else {
681
0
            error = ENOMEM;
682
0
        }
683
0
    } else {
684
0
        error = EOPNOTSUPP;
685
0
    }
686
687
0
    *rxp = NULL;
688
0
    return error;
689
0
}
690
691
/* Closes 'rx'. */
692
void
693
netdev_rxq_close(struct netdev_rxq *rx)
694
    OVS_EXCLUDED(netdev_mutex)
695
0
{
696
0
    if (rx) {
697
0
        struct netdev *netdev = rx->netdev;
698
0
        netdev->netdev_class->rxq_destruct(rx);
699
0
        netdev->netdev_class->rxq_dealloc(rx);
700
0
        netdev_close(netdev);
701
0
    }
702
0
}
703
704
bool netdev_rxq_enabled(struct netdev_rxq *rx)
705
0
{
706
0
    bool enabled = true;
707
708
0
    if (rx->netdev->netdev_class->rxq_enabled) {
709
0
        enabled = rx->netdev->netdev_class->rxq_enabled(rx);
710
0
    }
711
0
    return enabled;
712
0
}
713
714
/* Attempts to receive a batch of packets from 'rx'.  'batch' should point to
715
 * the beginning of an array of NETDEV_MAX_BURST pointers to dp_packet.  If
716
 * successful, this function stores pointers to up to NETDEV_MAX_BURST
717
 * dp_packets into the array, transferring ownership of the packets to the
718
 * caller, stores the number of received packets in 'batch->count', and returns
719
 * 0.
720
 *
721
 * The implementation does not necessarily initialize any non-data members of
722
 * 'batch'.  That is, the caller must initialize layer pointers and metadata
723
 * itself, if desired, e.g. with pkt_metadata_init() and miniflow_extract().
724
 *
725
 * Returns EAGAIN immediately if no packet is ready to be received or another
726
 * positive errno value if an error was encountered. */
727
int
728
netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *batch,
729
                int *qfill)
730
0
{
731
0
    int retval;
732
733
0
    retval = rx->netdev->netdev_class->rxq_recv(rx, batch, qfill);
734
0
    if (!retval) {
735
0
        COVERAGE_INC(netdev_received);
736
0
    } else {
737
0
        batch->count = 0;
738
0
    }
739
0
    return retval;
740
0
}
741
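/* Usage sketch (not from netdev.c): a simple receive loop over queue 0 that
 * drops every packet it gets.  EAGAIN only means "nothing ready yet"; other
 * errors are real failures.  The example_* name is a placeholder. */
#include <errno.h>
#include "netdev.h"
#include "dp-packet.h"
#include "openvswitch/poll-loop.h"

static int
example_recv(struct netdev *netdev)
{
    struct netdev_rxq *rxq;
    int error = netdev_rxq_open(netdev, &rxq, 0);

    if (error) {
        return error;           /* e.g. EOPNOTSUPP if the device cannot rx. */
    }

    for (;;) {
        struct dp_packet_batch batch;

        dp_packet_batch_init(&batch);
        error = netdev_rxq_recv(rxq, &batch, NULL);
        if (error == EAGAIN) {
            netdev_rxq_wait(rxq);
            poll_block();
            continue;
        } else if (error) {
            break;
        }
        /* Ownership of the received packets was transferred to us. */
        dp_packet_delete_batch(&batch, true);
    }

    netdev_rxq_close(rxq);
    return error;
}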
742
/* Arranges for poll_block() to wake up when a packet is ready to be received
743
 * on 'rx'. */
744
void
745
netdev_rxq_wait(struct netdev_rxq *rx)
746
0
{
747
0
    rx->netdev->netdev_class->rxq_wait(rx);
748
0
}
749
750
/* Discards any packets ready to be received on 'rx'. */
751
int
752
netdev_rxq_drain(struct netdev_rxq *rx)
753
0
{
754
0
    return (rx->netdev->netdev_class->rxq_drain
755
0
            ? rx->netdev->netdev_class->rxq_drain(rx)
756
0
            : 0);
757
0
}
758
759
/* Configures the number of tx queues of 'netdev'. Returns 0 if successful,
760
 * otherwise a positive errno value.
761
 *
762
 * 'n_txq' specifies the exact number of transmission queues to create.
763
 *
764
 * The change might not be effective immediately.  The caller must check if a
765
 * reconfiguration is required with netdev_is_reconf_required() and eventually
766
 * call netdev_reconfigure() before using the new queues.
767
 *
768
 * On error, the tx queue configuration is unchanged */
769
int
770
netdev_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
771
0
{
772
0
    int error;
773
774
0
    error = (netdev->netdev_class->set_tx_multiq
775
0
             ? netdev->netdev_class->set_tx_multiq(netdev, MAX(n_txq, 1))
776
0
             : EOPNOTSUPP);
777
778
0
    if (error && error != EOPNOTSUPP) {
779
0
        VLOG_DBG_RL(&rl, "failed to set tx queue for network device %s:"
780
0
                    "%s", netdev_get_name(netdev), ovs_strerror(error));
781
0
    }
782
783
0
    return error;
784
0
}
785
786
enum netdev_pt_mode
787
netdev_get_pt_mode(const struct netdev *netdev)
788
0
{
789
0
    return (netdev->netdev_class->get_pt_mode
790
0
            ? netdev->netdev_class->get_pt_mode(netdev)
791
0
            : NETDEV_PT_LEGACY_L2);
792
0
}
793
794
/* Check if a 'packet' is compatible with 'netdev_flags'.
795
 * If a packet is incompatible, returns 'false' with the 'errormsg'
796
 * pointing to a reason. */
797
static bool
798
netdev_send_prepare_packet(const uint64_t netdev_flags,
799
                           struct dp_packet *packet, char **errormsg)
800
0
{
801
0
    uint64_t l4_mask;
802
803
0
    if (dp_packet_hwol_is_tso(packet)
804
0
        && !(netdev_flags & NETDEV_TX_OFFLOAD_TCP_TSO)) {
805
            /* Fall back to GSO in software. */
806
0
            VLOG_ERR_BUF(errormsg, "No TSO support");
807
0
            return false;
808
0
    }
809
810
0
    l4_mask = dp_packet_hwol_l4_mask(packet);
811
0
    if (l4_mask) {
812
0
        if (dp_packet_hwol_l4_is_tcp(packet)) {
813
0
            if (!(netdev_flags & NETDEV_TX_OFFLOAD_TCP_CKSUM)) {
814
                /* Fall back to TCP csum in software. */
815
0
                VLOG_ERR_BUF(errormsg, "No TCP checksum support");
816
0
                return false;
817
0
            }
818
0
        } else if (dp_packet_hwol_l4_is_udp(packet)) {
819
0
            if (!(netdev_flags & NETDEV_TX_OFFLOAD_UDP_CKSUM)) {
820
                /* Fall back to UDP csum in software. */
821
0
                VLOG_ERR_BUF(errormsg, "No UDP checksum support");
822
0
                return false;
823
0
            }
824
0
        } else if (dp_packet_hwol_l4_is_sctp(packet)) {
825
0
            if (!(netdev_flags & NETDEV_TX_OFFLOAD_SCTP_CKSUM)) {
826
                /* Fall back to SCTP csum in software. */
827
0
                VLOG_ERR_BUF(errormsg, "No SCTP checksum support");
828
0
                return false;
829
0
            }
830
0
        } else {
831
0
            VLOG_ERR_BUF(errormsg, "No L4 checksum support: mask: %"PRIu64,
832
0
                         l4_mask);
833
0
            return false;
834
0
        }
835
0
    }
836
837
0
    return true;
838
0
}
839
840
/* Check if each packet in 'batch' is compatible with 'netdev' features,
841
 * otherwise either fall back to software implementation or drop it. */
842
static void
843
netdev_send_prepare_batch(const struct netdev *netdev,
844
                          struct dp_packet_batch *batch)
845
0
{
846
0
    struct dp_packet *packet;
847
0
    size_t i, size = dp_packet_batch_size(batch);
848
849
0
    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
850
0
        char *errormsg = NULL;
851
852
0
        if (netdev_send_prepare_packet(netdev->ol_flags, packet, &errormsg)) {
853
0
            dp_packet_batch_refill(batch, packet, i);
854
0
        } else {
855
0
            dp_packet_delete(packet);
856
0
            COVERAGE_INC(netdev_send_prepare_drops);
857
0
            VLOG_WARN_RL(&rl, "%s: Packet dropped: %s",
858
0
                         netdev_get_name(netdev), errormsg);
859
0
            free(errormsg);
860
0
        }
861
0
    }
862
0
}
863
864
/* Sends 'batch' on 'netdev'.  Returns 0 if successful (for every packet),
865
 * otherwise a positive errno value.  Returns EAGAIN without blocking if
866
 * at least one of the packets cannot be queued immediately.  Returns EMSGSIZE
867
 * if a partial packet was transmitted or if a packet is too big or too small
868
 * to transmit on the device.
869
 *
870
 * The caller must make sure that 'netdev' supports sending by making sure that
871
 * 'netdev_n_txq(netdev)' returns >= 1.
872
 *
873
 * If the function returns a non-zero value, some of the packets might have
874
 * been sent anyway.
875
 *
876
 * The caller transfers ownership of all the packets to the network device,
877
 * regardless of success.
878
 *
879
 * If 'concurrent_txq' is true, the caller may perform concurrent calls
880
 * to netdev_send() with the same 'qid'. The netdev provider is responsible
881
 * for making sure that these concurrent calls do not create a race condition
882
 * by using locking or other synchronization if required.
883
 *
884
 * The network device is expected to maintain one or more packet
885
 * transmission queues, so that the caller does not ordinarily have to
886
 * do additional queuing of packets.  'qid' specifies the queue to use
887
 * and can be ignored if the implementation does not support multiple
888
 * queues. */
889
int
890
netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
891
            bool concurrent_txq)
892
0
{
893
0
    int error;
894
895
0
    netdev_send_prepare_batch(netdev, batch);
896
0
    if (OVS_UNLIKELY(dp_packet_batch_is_empty(batch))) {
897
0
        return 0;
898
0
    }
899
900
0
    error = netdev->netdev_class->send(netdev, qid, batch, concurrent_txq);
901
0
    if (!error) {
902
0
        COVERAGE_INC(netdev_sent);
903
0
    }
904
0
    return error;
905
0
}
906
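/* Usage sketch (not from netdev.c): transmitting a batch on queue 0.  As the
 * comment above notes, ownership of the packets passes to the device
 * regardless of the return value, so nothing needs to be freed here. */
#include <errno.h>
#include "netdev.h"
#include "dp-packet.h"

static void
example_send(struct netdev *netdev, struct dp_packet_batch *batch)
{
    if (netdev_n_txq(netdev) < 1) {
        return;                 /* Device cannot transmit. */
    }

    int error = netdev_send(netdev, 0, batch, false);
    if (error == EAGAIN) {
        /* Queue full: ask to be woken when there is room again. */
        netdev_send_wait(netdev, 0);
    }
}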
907
/* Pop tunnel header, build tunnel metadata and resize 'batch->packets'
908
 * for further processing.
909
 *
910
 * The caller must make sure that 'netdev' supports this operation by checking
911
 * that netdev_has_tunnel_push_pop() returns true. */
912
void
913
netdev_pop_header(struct netdev *netdev, struct dp_packet_batch *batch)
914
0
{
915
0
    struct dp_packet *packet;
916
0
    size_t i, size = dp_packet_batch_size(batch);
917
918
0
    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
919
0
        packet = netdev->netdev_class->pop_header(packet);
920
0
        if (packet) {
921
            /* Reset the offload flags if present, to avoid wrong
922
             * interpretation in the further packet processing when
923
 * recirculated. */
924
0
            dp_packet_reset_offload(packet);
925
0
            pkt_metadata_init_conn(&packet->md);
926
0
            dp_packet_batch_refill(batch, packet, i);
927
0
        }
928
0
    }
929
0
}
930
931
void
932
netdev_init_tnl_build_header_params(struct netdev_tnl_build_header_params *params,
933
                                    const struct flow *tnl_flow,
934
                                    const struct in6_addr *src,
935
                                    struct eth_addr dmac,
936
                                    struct eth_addr smac)
937
0
{
938
0
    params->flow = tnl_flow;
939
0
    params->dmac = dmac;
940
0
    params->smac = smac;
941
0
    params->s_ip = src;
942
0
    params->is_ipv6 = !IN6_IS_ADDR_V4MAPPED(src);
943
0
}
944
945
int netdev_build_header(const struct netdev *netdev,
946
                        struct ovs_action_push_tnl *data,
947
                        const struct netdev_tnl_build_header_params *params)
948
0
{
949
0
    if (netdev->netdev_class->build_header) {
950
0
        return netdev->netdev_class->build_header(netdev, data, params);
951
0
    }
952
0
    return EOPNOTSUPP;
953
0
}
954
955
/* Push tunnel header (reading from tunnel metadata) and resize
956
 * 'batch->packets' for further processing.
957
 *
958
 * The caller must make sure that 'netdev' supports this operation by checking
959
 * that netdev_has_tunnel_push_pop() returns true. */
960
int
961
netdev_push_header(const struct netdev *netdev,
962
                   struct dp_packet_batch *batch,
963
                   const struct ovs_action_push_tnl *data)
964
0
{
965
0
    struct dp_packet *packet;
966
0
    size_t i, size = dp_packet_batch_size(batch);
967
968
0
    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
969
0
        if (OVS_UNLIKELY(dp_packet_hwol_is_tso(packet)
970
0
                         || dp_packet_hwol_l4_mask(packet))) {
971
0
            COVERAGE_INC(netdev_push_header_drops);
972
0
            dp_packet_delete(packet);
973
0
            VLOG_WARN_RL(&rl, "%s: Tunneling packets with HW offload flags is "
974
0
                         "not supported: packet dropped",
975
0
                         netdev_get_name(netdev));
976
0
        } else {
977
0
            netdev->netdev_class->push_header(netdev, packet, data);
978
0
            pkt_metadata_init(&packet->md, data->out_port);
979
0
            dp_packet_batch_refill(batch, packet, i);
980
0
        }
981
0
    }
982
983
0
    return 0;
984
0
}
985
986
/* Registers with the poll loop to wake up from the next call to poll_block()
987
 * when the packet transmission queue has sufficient room to transmit a packet
988
 * with netdev_send().
989
 *
990
 * The network device is expected to maintain one or more packet
991
 * transmission queues, so that the caller does not ordinarily have to
992
 * do additional queuing of packets.  'qid' specifies the queue to use
993
 * and can be ignored if the implementation does not support multiple
994
 * queues. */
995
void
996
netdev_send_wait(struct netdev *netdev, int qid)
997
0
{
998
0
    if (netdev->netdev_class->send_wait) {
999
0
        netdev->netdev_class->send_wait(netdev, qid);
1000
0
    }
1001
0
}
1002
1003
/* Attempts to set 'netdev''s MAC address to 'mac'.  Returns 0 if successful,
1004
 * otherwise a positive errno value. */
1005
int
1006
netdev_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1007
0
{
1008
0
    return netdev->netdev_class->set_etheraddr(netdev, mac);
1009
0
}
1010
1011
/* Retrieves 'netdev''s MAC address.  If successful, returns 0 and copies the
1012
 * the MAC address into 'mac'.  On failure, returns a positive errno value and
1013
 * clears 'mac' to all-zeros. */
1014
int
1015
netdev_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1016
0
{
1017
0
    int error;
1018
1019
0
    error = netdev->netdev_class->get_etheraddr(netdev, mac);
1020
0
    if (error) {
1021
0
        memset(mac, 0, sizeof *mac);
1022
0
    }
1023
0
    return error;
1024
0
}
1025
1026
/* Returns the name of the network device that 'netdev' represents,
1027
 * e.g. "eth0".  The caller must not modify or free the returned string. */
1028
const char *
1029
netdev_get_name(const struct netdev *netdev)
1030
0
{
1031
0
    return netdev->name;
1032
0
}
1033
1034
/* Retrieves the MTU of 'netdev'.  The MTU is the maximum size of transmitted
1035
 * (and received) packets, in bytes, not including the hardware header; thus,
1036
 * this is typically 1500 bytes for Ethernet devices.
1037
 *
1038
 * If successful, returns 0 and stores the MTU size in '*mtup'.  Returns
1039
 * EOPNOTSUPP if 'netdev' does not have an MTU (as e.g. some tunnels do not).
1040
 * On other failure, returns a positive errno value.  On failure, sets '*mtup'
1041
 * to 0. */
1042
int
1043
netdev_get_mtu(const struct netdev *netdev, int *mtup)
1044
0
{
1045
0
    const struct netdev_class *class = netdev->netdev_class;
1046
0
    int error;
1047
1048
0
    error = class->get_mtu ? class->get_mtu(netdev, mtup) : EOPNOTSUPP;
1049
0
    if (error) {
1050
0
        *mtup = 0;
1051
0
        if (error != EOPNOTSUPP) {
1052
0
            VLOG_DBG_RL(&rl, "failed to retrieve MTU for network device %s: "
1053
0
                         "%s", netdev_get_name(netdev), ovs_strerror(error));
1054
0
        }
1055
0
    }
1056
0
    return error;
1057
0
}
1058
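/* Usage sketch (not from netdev.c): reading the MTU and treating EOPNOTSUPP
 * (no MTU concept, e.g. some tunnels) as "use a default".  The 1500-byte
 * fallback is only an assumption for this sketch. */
#include <errno.h>
#include "netdev.h"

static int
example_effective_mtu(const struct netdev *netdev)
{
    int mtu;
    int error = netdev_get_mtu(netdev, &mtu);

    if (error == EOPNOTSUPP) {
        return 1500;            /* Assumed default for devices without an MTU. */
    } else if (error) {
        return -error;          /* Propagate other failures as negative errno. */
    }
    return mtu;
}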
1059
/* Sets the MTU of 'netdev'.  The MTU is the maximum size of transmitted
1060
 * (and received) packets, in bytes.
1061
 *
1062
 * If successful, returns 0.  Returns EOPNOTSUPP if 'netdev' does not have an
1063
 * MTU (as e.g. some tunnels do not).  On other failure, returns a positive
1064
 * errno value. */
1065
int
1066
netdev_set_mtu(struct netdev *netdev, int mtu)
1067
0
{
1068
0
    const struct netdev_class *class = netdev->netdev_class;
1069
0
    int error;
1070
1071
0
    error = class->set_mtu ? class->set_mtu(netdev, mtu) : EOPNOTSUPP;
1072
0
    if (error && error != EOPNOTSUPP) {
1073
0
        VLOG_WARN_RL(&rl, "failed to set MTU for network device %s: %s",
1074
0
                     netdev_get_name(netdev), ovs_strerror(error));
1075
0
    }
1076
1077
0
    return error;
1078
0
}
1079
1080
/* If 'user_config' is true, the user wants to control 'netdev''s MTU and we
1081
 * should not override it.  If 'user_config' is false, we may adjust
1082
 * 'netdev''s MTU (e.g., if 'netdev' is internal). */
1083
void
1084
netdev_mtu_user_config(struct netdev *netdev, bool user_config)
1085
0
{
1086
0
    if (netdev->mtu_user_config != user_config) {
1087
0
        netdev_change_seq_changed(netdev);
1088
0
        netdev->mtu_user_config = user_config;
1089
0
    }
1090
0
}
1091
1092
/* Returns 'true' if the user explicitly specified an MTU value for 'netdev'.
1093
 * Otherwise, returns 'false', in which case we are allowed to adjust the
1094
 * device MTU. */
1095
bool
1096
netdev_mtu_is_user_config(struct netdev *netdev)
1097
0
{
1098
0
    return netdev->mtu_user_config;
1099
0
}
1100
1101
/* Returns the ifindex of 'netdev', if successful, as a positive number.  On
1102
 * failure, returns a negative errno value.
1103
 *
1104
 * The desired semantics of the ifindex value are a combination of those
1105
 * specified by POSIX for if_nametoindex() and by SNMP for ifIndex.  An ifindex
1106
 * value should be unique within a host and remain stable at least until
1107
 * reboot.  SNMP says an ifindex "ranges between 1 and the value of ifNumber"
1108
 * but many systems do not follow this rule anyhow.
1109
 *
1110
 * Some network devices may not implement support for this function.  In such
1111
 * cases this function will always return -EOPNOTSUPP.
1112
 */
1113
int
1114
netdev_get_ifindex(const struct netdev *netdev)
1115
0
{
1116
0
    int (*get_ifindex)(const struct netdev *);
1117
1118
0
    get_ifindex = netdev->netdev_class->get_ifindex;
1119
1120
0
    return get_ifindex ? get_ifindex(netdev) : -EOPNOTSUPP;
1121
0
}
1122
1123
/* Stores the features supported by 'netdev' into each of '*current',
1124
 * '*advertised', '*supported', and '*peer' that are non-null.  Each value is a
1125
 * bitmap of "enum ofp_port_features" bits, in host byte order.  Returns 0 if
1126
 * successful, otherwise a positive errno value.  On failure, all of the
1127
 * passed-in values are set to 0.
1128
 *
1129
 * Some network devices may not implement support for this function.  In such
1130
 * cases this function will always return EOPNOTSUPP. */
1131
int
1132
netdev_get_features(const struct netdev *netdev,
1133
                    enum netdev_features *current,
1134
                    enum netdev_features *advertised,
1135
                    enum netdev_features *supported,
1136
                    enum netdev_features *peer)
1137
0
{
1138
0
    int (*get_features)(const struct netdev *netdev,
1139
0
                        enum netdev_features *current,
1140
0
                        enum netdev_features *advertised,
1141
0
                        enum netdev_features *supported,
1142
0
                        enum netdev_features *peer);
1143
0
    enum netdev_features dummy[4];
1144
0
    int error;
1145
1146
0
    if (!current) {
1147
0
        current = &dummy[0];
1148
0
    }
1149
0
    if (!advertised) {
1150
0
        advertised = &dummy[1];
1151
0
    }
1152
0
    if (!supported) {
1153
0
        supported = &dummy[2];
1154
0
    }
1155
0
    if (!peer) {
1156
0
        peer = &dummy[3];
1157
0
    }
1158
1159
0
    get_features = netdev->netdev_class->get_features;
1160
0
    error = get_features
1161
0
                    ? get_features(netdev, current, advertised, supported,
1162
0
                                   peer)
1163
0
                    : EOPNOTSUPP;
1164
0
    if (error) {
1165
0
        *current = *advertised = *supported = *peer = 0;
1166
0
    }
1167
0
    return error;
1168
0
}
1169
1170
/* Returns the maximum speed of a network connection that has the NETDEV_F_*
1171
 * bits in 'features', in bits per second.  If no bits that indicate a speed
1172
 * are set in 'features', returns 'default_bps'. */
1173
uint64_t
1174
netdev_features_to_bps(enum netdev_features features,
1175
                       uint64_t default_bps)
1176
60.4k
{
1177
60.4k
    enum {
1178
60.4k
        F_1000000MB = NETDEV_F_1TB_FD,
1179
60.4k
        F_100000MB = NETDEV_F_100GB_FD,
1180
60.4k
        F_40000MB = NETDEV_F_40GB_FD,
1181
60.4k
        F_10000MB = NETDEV_F_10GB_FD,
1182
60.4k
        F_1000MB = NETDEV_F_1GB_HD | NETDEV_F_1GB_FD,
1183
60.4k
        F_100MB = NETDEV_F_100MB_HD | NETDEV_F_100MB_FD,
1184
60.4k
        F_10MB = NETDEV_F_10MB_HD | NETDEV_F_10MB_FD
1185
60.4k
    };
1186
1187
60.4k
    return (  features & F_1000000MB ? UINT64_C(1000000000000)
1188
60.4k
            : features & F_100000MB  ? UINT64_C(100000000000)
1189
60.4k
            : features & F_40000MB   ? UINT64_C(40000000000)
1190
60.4k
            : features & F_10000MB   ? UINT64_C(10000000000)
1191
60.4k
            : features & F_1000MB    ? UINT64_C(1000000000)
1192
36.5k
            : features & F_100MB     ? UINT64_C(100000000)
1193
27.5k
            : features & F_10MB      ? UINT64_C(10000000)
1194
19.3k
                                     : default_bps);
1195
60.4k
}
1196
1197
/* Returns true if any of the NETDEV_F_* bits that indicate a full-duplex link
1198
 * are set in 'features', otherwise false. */
1199
bool
1200
netdev_features_is_full_duplex(enum netdev_features features)
1201
0
{
1202
0
    return (features & (NETDEV_F_10MB_FD | NETDEV_F_100MB_FD | NETDEV_F_1GB_FD
1203
0
                        | NETDEV_F_10GB_FD | NETDEV_F_40GB_FD
1204
0
                        | NETDEV_F_100GB_FD | NETDEV_F_1TB_FD)) != 0;
1205
0
}
1206
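/* Usage sketch (not from netdev.c): combining netdev_get_features() with the
 * two helpers above.  Passing NULL for feature sets the caller does not need
 * is allowed, as the dummy[] handling in netdev_get_features() shows. */
#include <inttypes.h>
#include <stdbool.h>
#include "netdev.h"

static void
example_link_speed(const struct netdev *netdev)
{
    enum netdev_features current;

    if (!netdev_get_features(netdev, &current, NULL, NULL, NULL)) {
        uint64_t bps = netdev_features_to_bps(current, 0);
        bool full_duplex = netdev_features_is_full_duplex(current);
        /* e.g. a NETDEV_F_1GB_FD link reports bps == 1000000000 and
         * full_duplex == true. */
        (void) bps;
        (void) full_duplex;
    }
}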
1207
/* Set the features advertised by 'netdev' to 'advertise'.  Returns 0 if
1208
 * successful, otherwise a positive errno value. */
1209
int
1210
netdev_set_advertisements(struct netdev *netdev,
1211
                          enum netdev_features advertise)
1212
0
{
1213
0
    return (netdev->netdev_class->set_advertisements
1214
0
            ? netdev->netdev_class->set_advertisements(
1215
0
                    netdev, advertise)
1216
0
            : EOPNOTSUPP);
1217
0
}
1218
1219
static const char *
1220
netdev_feature_to_name(uint32_t bit)
1221
714k
{
1222
714k
    enum netdev_features f = bit;
1223
1224
714k
    switch (f) {
1225
62.2k
    case NETDEV_F_10MB_HD:    return "10MB-HD";
1226
56.0k
    case NETDEV_F_10MB_FD:    return "10MB-FD";
1227
63.5k
    case NETDEV_F_100MB_HD:   return "100MB-HD";
1228
65.7k
    case NETDEV_F_100MB_FD:   return "100MB-FD";
1229
53.9k
    case NETDEV_F_1GB_HD:     return "1GB-HD";
1230
58.7k
    case NETDEV_F_1GB_FD:     return "1GB-FD";
1231
52.6k
    case NETDEV_F_10GB_FD:    return "10GB-FD";
1232
4.30k
    case NETDEV_F_40GB_FD:    return "40GB-FD";
1233
9.39k
    case NETDEV_F_100GB_FD:   return "100GB-FD";
1234
5.85k
    case NETDEV_F_1TB_FD:     return "1TB-FD";
1235
8.58k
    case NETDEV_F_OTHER:      return "OTHER";
1236
51.5k
    case NETDEV_F_COPPER:     return "COPPER";
1237
56.9k
    case NETDEV_F_FIBER:      return "FIBER";
1238
53.4k
    case NETDEV_F_AUTONEG:    return "AUTO_NEG";
1239
55.6k
    case NETDEV_F_PAUSE:      return "AUTO_PAUSE";
1240
55.9k
    case NETDEV_F_PAUSE_ASYM: return "AUTO_PAUSE_ASYM";
1241
714k
    }
1242
1243
0
    return NULL;
1244
714k
}
1245
1246
void
1247
netdev_features_format(struct ds *s, enum netdev_features features)
1248
123k
{
1249
123k
    ofp_print_bit_names(s, features, netdev_feature_to_name, ' ');
1250
123k
    ds_put_char(s, '\n');
1251
123k
}
1252
1253
/* Assigns 'addr' as 'netdev''s IPv4 address and 'mask' as its netmask.  If
1254
 * 'addr' is INADDR_ANY, 'netdev''s IPv4 address is cleared.  Returns 0 if
1255
 * successful, otherwise a positive errno value. */
1256
int
1257
netdev_set_in4(struct netdev *netdev, struct in_addr addr, struct in_addr mask)
1258
0
{
1259
0
    return (netdev->netdev_class->set_in4
1260
0
            ? netdev->netdev_class->set_in4(netdev, addr, mask)
1261
0
            : EOPNOTSUPP);
1262
0
}
1263
1264
static int
1265
netdev_get_addresses_by_name(const char *device_name,
1266
                             struct in6_addr **addrsp, int *n_addrsp)
1267
0
{
1268
0
    struct netdev *netdev;
1269
0
    int error = netdev_open(device_name, NULL, &netdev);
1270
0
    if (error) {
1271
0
        *addrsp = NULL;
1272
0
        *n_addrsp = 0;
1273
0
        return error;
1274
0
    }
1275
1276
0
    struct in6_addr *masks;
1277
0
    error = netdev_get_addr_list(netdev, addrsp, &masks, n_addrsp);
1278
0
    netdev_close(netdev);
1279
0
    free(masks);
1280
0
    return error;
1281
0
}
1282
1283
/* Obtains an IPv4 address from 'device_name' and save the address in '*in4'.
1284
 * Returns 0 if successful, otherwise a positive errno value. */
1285
int
1286
netdev_get_in4_by_name(const char *device_name, struct in_addr *in4)
1287
0
{
1288
0
    struct in6_addr *addrs;
1289
0
    int n;
1290
0
    int error = netdev_get_addresses_by_name(device_name, &addrs, &n);
1291
1292
0
    in4->s_addr = 0;
1293
0
    if (!error) {
1294
0
        error = ENOENT;
1295
0
        for (int i = 0; i < n; i++) {
1296
0
            if (IN6_IS_ADDR_V4MAPPED(&addrs[i])) {
1297
0
                in4->s_addr = in6_addr_get_mapped_ipv4(&addrs[i]);
1298
0
                error = 0;
1299
0
                break;
1300
0
            }
1301
0
        }
1302
0
    }
1303
0
    free(addrs);
1304
1305
0
    return error;
1306
0
}
1307
1308
/* Obtains an IPv4 or IPv6 address from 'device_name' and save the address in
1309
 * '*in6', representing IPv4 addresses as v6-mapped.  Returns 0 if successful,
1310
 * otherwise a positive errno value. */
1311
int
1312
netdev_get_ip_by_name(const char *device_name, struct in6_addr *in6)
1313
0
{
1314
0
    struct in6_addr *addrs;
1315
0
    int n;
1316
0
    int error = netdev_get_addresses_by_name(device_name, &addrs, &n);
1317
1318
0
    *in6 = in6addr_any;
1319
0
    if (!error) {
1320
0
        error = ENOENT;
1321
0
        for (int i = 0; i < n; i++) {
1322
0
            if (!in6_is_lla(&addrs[i])) {
1323
0
                *in6 = addrs[i];
1324
0
                error = 0;
1325
0
                break;
1326
0
            }
1327
0
        }
1328
0
    }
1329
0
    free(addrs);
1330
1331
0
    return error;
1332
0
}
1333
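/* Usage sketch (not from netdev.c): looking up a device's IPv4 address by
 * name and printing it with the IP_FMT/IP_ARGS helpers from packets.h.
 * "br0" is just an illustrative device name. */
#include <stdio.h>
#include <netinet/in.h>
#include "netdev.h"
#include "packets.h"

static void
example_print_in4(void)
{
    struct in_addr in4;

    if (!netdev_get_in4_by_name("br0", &in4)) {
        printf("br0: "IP_FMT"\n", IP_ARGS(in4.s_addr));
    }
}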
1334
/* Adds 'router' as a default IP gateway for the TCP/IP stack that corresponds
1335
 * to 'netdev'. */
1336
int
1337
netdev_add_router(struct netdev *netdev, struct in_addr router)
1338
0
{
1339
0
    COVERAGE_INC(netdev_add_router);
1340
0
    return (netdev->netdev_class->add_router
1341
0
            ? netdev->netdev_class->add_router(netdev, router)
1342
0
            : EOPNOTSUPP);
1343
0
}
1344
1345
/* Looks up the next hop for 'host' for the TCP/IP stack that corresponds to
1346
 * 'netdev'.  If a route cannot be determined, sets '*next_hop' to 0,
1347
 * '*netdev_name' to null, and returns a positive errno value.  Otherwise, if a
1348
 * next hop is found, stores the next hop gateway's address (0 if 'host' is on
1349
 * a directly connected network) in '*next_hop' and a copy of the name of the
1350
 * device to reach 'host' in '*netdev_name', and returns 0.  The caller is
1351
 * responsible for freeing '*netdev_name' (by calling free()). */
1352
int
1353
netdev_get_next_hop(const struct netdev *netdev,
1354
                    const struct in_addr *host, struct in_addr *next_hop,
1355
                    char **netdev_name)
1356
0
{
1357
0
    int error = (netdev->netdev_class->get_next_hop
1358
0
                 ? netdev->netdev_class->get_next_hop(
1359
0
                        host, next_hop, netdev_name)
1360
0
                 : EOPNOTSUPP);
1361
0
    if (error) {
1362
0
        next_hop->s_addr = 0;
1363
0
        *netdev_name = NULL;
1364
0
    }
1365
0
    return error;
1366
0
}
1367
1368
/* Populates 'smap' with status information.
1369
 *
1370
 * Populates 'smap' with 'netdev' specific status information.  This
1371
 * information may be used to populate the status column of the Interface table
1372
 * as defined in ovs-vswitchd.conf.db(5). */
1373
int
1374
netdev_get_status(const struct netdev *netdev, struct smap *smap)
1375
0
{
1376
0
    return (netdev->netdev_class->get_status
1377
0
            ? netdev->netdev_class->get_status(netdev, smap)
1378
0
            : EOPNOTSUPP);
1379
0
}
1380
1381
/* Retrieves all the IP addresses assigned to 'netdev' and returns 0.
1382
 * The API allocates arrays of addresses and masks and stores them in
1383
 * '*addr' and '*mask'.
1384
 * Otherwise, returns a positive errno value and sets '*addr', '*mask'
1385
 * and '*n_addr' to NULL.
1386
 *
1387
 * The following error values have well-defined meanings:
1388
 *
1389
 *   - EADDRNOTAVAIL: 'netdev' has no assigned IPv6 address.
1390
 *
1391
 *   - EOPNOTSUPP: No IPv6 network stack attached to 'netdev'.
1392
 *
1393
 * 'addr' may be null, in which case the address itself is not reported. */
1394
int
1395
netdev_get_addr_list(const struct netdev *netdev, struct in6_addr **addr,
1396
                     struct in6_addr **mask, int *n_addr)
1397
0
{
1398
0
    int error;
1399
1400
0
    error = (netdev->netdev_class->get_addr_list
1401
0
             ? netdev->netdev_class->get_addr_list(netdev, addr, mask, n_addr): EOPNOTSUPP);
1402
0
    if (error && addr) {
1403
0
        *addr = NULL;
1404
0
        *mask = NULL;
1405
0
        *n_addr = 0;
1406
0
    }
1407
1408
0
    return error;
1409
0
}
1410
1411
/* On 'netdev', turns off the flags in 'off' and then turns on the flags in
1412
 * 'on'.  Returns 0 if successful, otherwise a positive errno value. */
1413
static int
1414
do_update_flags(struct netdev *netdev, enum netdev_flags off,
1415
                enum netdev_flags on, enum netdev_flags *old_flagsp,
1416
                struct netdev_saved_flags **sfp)
1417
    OVS_EXCLUDED(netdev_mutex)
1418
0
{
1419
0
    struct netdev_saved_flags *sf = NULL;
1420
0
    enum netdev_flags old_flags;
1421
0
    int error;
1422
1423
0
    error = netdev->netdev_class->update_flags(netdev, off & ~on, on,
1424
0
                                               &old_flags);
1425
0
    if (error) {
1426
0
        VLOG_WARN_RL(&rl, "failed to %s flags for network device %s: %s",
1427
0
                     off || on ? "set" : "get", netdev_get_name(netdev),
1428
0
                     ovs_strerror(error));
1429
0
        old_flags = 0;
1430
0
    } else if ((off || on) && sfp) {
1431
0
        enum netdev_flags new_flags = (old_flags & ~off) | on;
1432
0
        enum netdev_flags changed_flags = old_flags ^ new_flags;
1433
0
        if (changed_flags) {
1434
0
            ovs_mutex_lock(&netdev_mutex);
1435
0
            *sfp = sf = xmalloc(sizeof *sf);
1436
0
            sf->netdev = netdev;
1437
0
            ovs_list_push_front(&netdev->saved_flags_list, &sf->node);
1438
0
            sf->saved_flags = changed_flags;
1439
0
            sf->saved_values = changed_flags & new_flags;
1440
1441
0
            netdev->ref_cnt++;
1442
0
            ovs_mutex_unlock(&netdev_mutex);
1443
0
        }
1444
0
    }
1445
1446
0
    if (old_flagsp) {
1447
0
        *old_flagsp = old_flags;
1448
0
    }
1449
0
    if (sfp) {
1450
0
        *sfp = sf;
1451
0
    }
1452
1453
0
    return error;
1454
0
}
1455
1456
/* Obtains the current flags for 'netdev' and stores them into '*flagsp'.
1457
 * Returns 0 if successful, otherwise a positive errno value.  On failure,
1458
 * stores 0 into '*flagsp'. */
1459
int
1460
netdev_get_flags(const struct netdev *netdev_, enum netdev_flags *flagsp)
1461
0
{
1462
0
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
1463
0
    return do_update_flags(netdev, 0, 0, flagsp, NULL);
1464
0
}
1465
1466
/* Sets the flags for 'netdev' to 'flags'.
1467
 * Returns 0 if successful, otherwise a positive errno value. */
1468
int
1469
netdev_set_flags(struct netdev *netdev, enum netdev_flags flags,
1470
                 struct netdev_saved_flags **sfp)
1471
0
{
1472
0
    return do_update_flags(netdev, -1, flags, NULL, sfp);
1473
0
}
1474
1475
/* Turns on the specified 'flags' on 'netdev':
1476
 *
1477
 *    - On success, returns 0.  If 'sfp' is nonnull, sets '*sfp' to a newly
1478
 *      allocated 'struct netdev_saved_flags *' that may be passed to
1479
 *      netdev_restore_flags() to restore the original values of 'flags' on
1480
 *      'netdev' (this will happen automatically at program termination if
1481
 *      netdev_restore_flags() is never called) , or to NULL if no flags were
1482
 *      actually changed.
1483
 *
1484
 *    - On failure, returns a positive errno value.  If 'sfp' is nonnull, sets
1485
 *      '*sfp' to NULL. */
1486
int
1487
netdev_turn_flags_on(struct netdev *netdev, enum netdev_flags flags,
1488
                     struct netdev_saved_flags **sfp)
1489
0
{
1490
0
    return do_update_flags(netdev, 0, flags, NULL, sfp);
1491
0
}
1492
1493
/* Turns off the specified 'flags' on 'netdev'.  See netdev_turn_flags_on() for
1494
 * details of the interface. */
1495
int
1496
netdev_turn_flags_off(struct netdev *netdev, enum netdev_flags flags,
1497
                      struct netdev_saved_flags **sfp)
1498
0
{
1499
0
    return do_update_flags(netdev, flags, 0, NULL, sfp);
1500
0
}
1501
1502
/* Restores the flags that were saved in 'sf', and destroys 'sf'.
1503
 * Does nothing if 'sf' is NULL. */
1504
void
1505
netdev_restore_flags(struct netdev_saved_flags *sf)
1506
    OVS_EXCLUDED(netdev_mutex)
1507
0
{
1508
0
    if (sf) {
1509
0
        struct netdev *netdev = sf->netdev;
1510
0
        enum netdev_flags old_flags;
1511
1512
0
        netdev->netdev_class->update_flags(netdev,
1513
0
                                           sf->saved_flags & sf->saved_values,
1514
0
                                           sf->saved_flags & ~sf->saved_values,
1515
0
                                           &old_flags);
1516
1517
0
        ovs_mutex_lock(&netdev_mutex);
1518
0
        ovs_list_remove(&sf->node);
1519
0
        free(sf);
1520
0
        netdev_unref(netdev);
1521
0
    }
1522
0
}
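/* Example (illustrative sketch, not part of netdev.c): bringing a device up
 * and into promiscuous mode, then restoring the original flags.  The helper
 * name is hypothetical; it only combines the documented calls above. */
static int
example_flags_round_trip(struct netdev *netdev)
{
    struct netdev_saved_flags *sf;
    int error;

    error = netdev_turn_flags_on(netdev, NETDEV_UP | NETDEV_PROMISC, &sf);
    if (error) {
        return error;
    }

    /* ... use the device ... */

    /* Puts the changed bits back the way they were and frees 'sf'.
     * (If omitted, the flags are restored at program termination.) */
    netdev_restore_flags(sf);
    return 0;
}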
1523
1524
/* Looks up the ARP table entry for 'ip' on 'netdev'.  If one exists and can be
1525
 * successfully retrieved, it stores the corresponding MAC address in 'mac' and
1526
 * returns 0.  Otherwise, it returns a positive errno value; in particular,
1527
 * ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
1528
int
1529
netdev_arp_lookup(const struct netdev *netdev,
1530
                  ovs_be32 ip, struct eth_addr *mac)
1531
0
{
1532
0
    int error = (netdev->netdev_class->arp_lookup
1533
0
                 ? netdev->netdev_class->arp_lookup(netdev, ip, mac)
1534
0
                 : EOPNOTSUPP);
1535
0
    if (error) {
1536
0
        *mac = eth_addr_zero;
1537
0
    }
1538
0
    return error;
1539
0
}
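/* Example (illustrative sketch): resolving a next-hop MAC address, treating
 * ENXIO ("no ARP entry") differently from other errors.  The helper name is
 * hypothetical. */
static bool
example_resolve_next_hop(const struct netdev *netdev, ovs_be32 ip,
                         struct eth_addr *mac)
{
    int error = netdev_arp_lookup(netdev, ip, mac);

    if (error == ENXIO) {
        /* No ARP entry yet; the caller might trigger ARP resolution. */
        return false;
    }
    return !error;
}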
1540
1541
/* Returns true if carrier is active (link light is on) on 'netdev'. */
1542
bool
1543
netdev_get_carrier(const struct netdev *netdev)
1544
0
{
1545
0
    int error;
1546
0
    enum netdev_flags flags;
1547
0
    bool carrier;
1548
1549
0
    netdev_get_flags(netdev, &flags);
1550
0
    if (!(flags & NETDEV_UP)) {
1551
0
        return false;
1552
0
    }
1553
1554
0
    if (!netdev->netdev_class->get_carrier) {
1555
0
        return true;
1556
0
    }
1557
1558
0
    error = netdev->netdev_class->get_carrier(netdev, &carrier);
1559
0
    if (error) {
1560
0
        VLOG_DBG("%s: failed to get network device carrier status, assuming "
1561
0
                 "down: %s", netdev_get_name(netdev), ovs_strerror(error));
1562
0
        carrier = false;
1563
0
    }
1564
1565
0
    return carrier;
1566
0
}
1567
1568
/* Returns the number of times 'netdev''s carrier has changed. */
1569
long long int
1570
netdev_get_carrier_resets(const struct netdev *netdev)
1571
0
{
1572
0
    return (netdev->netdev_class->get_carrier_resets
1573
0
            ? netdev->netdev_class->get_carrier_resets(netdev)
1574
0
            : 0);
1575
0
}
1576
1577
/* Attempts to force netdev_get_carrier() to poll 'netdev''s MII registers for
1578
 * link status instead of checking 'netdev''s carrier.  'netdev''s MII
1579
 * registers will be polled once every 'interval' milliseconds.  If 'netdev'
1580
 * does not support MII, another method may be used as a fallback.  If
1581
 * 'interval' is less than or equal to zero, reverts netdev_get_carrier() to
1582
 * its normal behavior.
1583
 *
1584
 * Returns 0 if successful, otherwise a positive errno value. */
1585
int
1586
netdev_set_miimon_interval(struct netdev *netdev, long long int interval)
1587
0
{
1588
0
    return (netdev->netdev_class->set_miimon_interval
1589
0
            ? netdev->netdev_class->set_miimon_interval(netdev, interval)
1590
0
            : EOPNOTSUPP);
1591
0
}
1592
1593
/* Retrieves current device stats for 'netdev'. */
1594
int
1595
netdev_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1596
0
{
1597
0
    int error;
1598
1599
    /* Statistics are initialized to all-1-bits before being passed to the
1600
     * device implementation, so values it does not set read as unavailable. */
1601
0
    memset(stats, 0xFF, sizeof *stats);
1602
1603
0
    COVERAGE_INC(netdev_get_stats);
1604
0
    error = (netdev->netdev_class->get_stats
1605
0
             ? netdev->netdev_class->get_stats(netdev, stats)
1606
0
             : EOPNOTSUPP);
1607
0
    if (error) {
1608
        /* On error, report all statistics as unavailable (all-1-bits). */
1609
0
        memset(stats, 0xff, sizeof *stats);
1610
0
    }
1611
0
    return error;
1612
0
}
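/* Example (illustrative sketch): because unsupported counters are reported as
 * all-1-bits, a caller should check for UINT64_MAX before using a value. */
static void
example_log_rx_packets(const struct netdev *netdev)
{
    struct netdev_stats stats;

    if (!netdev_get_stats(netdev, &stats)
        && stats.rx_packets != UINT64_MAX) {
        VLOG_INFO("%s: %"PRIu64" packets received",
                  netdev_get_name(netdev), stats.rx_packets);
    }
}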
1613
1614
/* Retrieves current device custom stats for 'netdev'. */
1615
int
1616
netdev_get_custom_stats(const struct netdev *netdev,
1617
                        struct netdev_custom_stats *custom_stats)
1618
0
{
1619
0
    int error;
1620
0
    memset(custom_stats, 0, sizeof *custom_stats);
1621
0
    error = (netdev->netdev_class->get_custom_stats
1622
0
             ? netdev->netdev_class->get_custom_stats(netdev, custom_stats)
1623
0
             : EOPNOTSUPP);
1624
1625
0
    return error;
1626
0
}
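/* Example (illustrative sketch): iterating driver-specific counters.  It
 * assumes the 'size'/'counters' layout of struct netdev_custom_stats with
 * per-counter 'name' and 'value' fields. */
static void
example_log_custom_stats(const struct netdev *netdev)
{
    struct netdev_custom_stats stats;

    if (!netdev_get_custom_stats(netdev, &stats)) {
        for (uint16_t i = 0; i < stats.size; i++) {
            VLOG_INFO("%s: %s = %"PRIu64, netdev_get_name(netdev),
                      stats.counters[i].name, stats.counters[i].value);
        }
    }
    netdev_free_custom_stats_counters(&stats);
}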
1627
1628
/* Attempts to set input rate limiting (policing) policy, such that:
1629
 * - up to 'kbits_rate' kbps of traffic is accepted, with a maximum
1630
 *   cumulative burst size of 'kbits_burst' kb; and
1631
 * - up to 'kpkts_rate' kpps of traffic is accepted, with a maximum
1632
 *   cumulative burst size of 'kpkts_burst' kilo packets.
1633
 */
1634
int
1635
netdev_set_policing(struct netdev *netdev, uint32_t kbits_rate,
1636
                    uint32_t kbits_burst, uint32_t kpkts_rate,
1637
                    uint32_t kpkts_burst)
1638
0
{
1639
0
    return (netdev->netdev_class->set_policing
1640
0
            ? netdev->netdev_class->set_policing(netdev,
1641
0
                    kbits_rate, kbits_burst, kpkts_rate, kpkts_burst)
1642
0
            : EOPNOTSUPP);
1643
0
}
1644
1645
/* Adds to 'types' all of the forms of QoS supported by 'netdev', or leaves it
1646
 * empty if 'netdev' does not support QoS.  Any names added to 'types' should
1647
 * be documented as valid for the "type" column in the "QoS" table in
1648
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1649
 *
1650
 * Every network device supports disabling QoS with a type of "", but this type
1651
 * will not be added to 'types'.
1652
 *
1653
 * The caller must initialize 'types' (e.g. with sset_init()) before calling
1654
 * this function.  The caller is responsible for destroying 'types' (e.g. with
1655
 * sset_destroy()) when it is no longer needed.
1656
 *
1657
 * Returns 0 if successful, otherwise a positive errno value. */
1658
int
1659
netdev_get_qos_types(const struct netdev *netdev, struct sset *types)
1660
0
{
1661
0
    const struct netdev_class *class = netdev->netdev_class;
1662
0
    return (class->get_qos_types
1663
0
            ? class->get_qos_types(netdev, types)
1664
0
            : 0);
1665
0
}
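/* Example (illustrative sketch): listing the QoS types a device supports.
 * SSET_FOR_EACH and SSET_INITIALIZER come from lib/sset.h. */
static void
example_log_qos_types(const struct netdev *netdev)
{
    struct sset types = SSET_INITIALIZER(&types);
    const char *type;

    if (!netdev_get_qos_types(netdev, &types)) {
        SSET_FOR_EACH (type, &types) {
            VLOG_INFO("%s supports QoS type %s",
                      netdev_get_name(netdev), type);
        }
    }
    sset_destroy(&types);
}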
1666
1667
/* Queries 'netdev' for its capabilities regarding the specified 'type' of QoS,
1668
 * which should be "" or one of the types returned by netdev_get_qos_types()
1669
 * for 'netdev'.  Returns 0 if successful, otherwise a positive errno value.
1670
 * On success, initializes 'caps' with the QoS capabilities; on failure, clears
1671
 * 'caps' to all zeros. */
1672
int
1673
netdev_get_qos_capabilities(const struct netdev *netdev, const char *type,
1674
                            struct netdev_qos_capabilities *caps)
1675
0
{
1676
0
    const struct netdev_class *class = netdev->netdev_class;
1677
1678
0
    if (*type) {
1679
0
        int retval = (class->get_qos_capabilities
1680
0
                      ? class->get_qos_capabilities(netdev, type, caps)
1681
0
                      : EOPNOTSUPP);
1682
0
        if (retval) {
1683
0
            memset(caps, 0, sizeof *caps);
1684
0
        }
1685
0
        return retval;
1686
0
    } else {
1687
        /* Every netdev supports turning off QoS. */
1688
0
        memset(caps, 0, sizeof *caps);
1689
0
        return 0;
1690
0
    }
1691
0
}
1692
1693
/* Obtains the number of queues supported by 'netdev' for the specified 'type'
1694
 * of QoS.  Returns 0 if successful, otherwise a positive errno value.  Stores
1695
 * the number of queues (zero on failure) in '*n_queuesp'.
1696
 *
1697
 * This is just a simple wrapper around netdev_get_qos_capabilities(). */
1698
int
1699
netdev_get_n_queues(const struct netdev *netdev,
1700
                    const char *type, unsigned int *n_queuesp)
1701
0
{
1702
0
    struct netdev_qos_capabilities caps;
1703
0
    int retval;
1704
1705
0
    retval = netdev_get_qos_capabilities(netdev, type, &caps);
1706
0
    *n_queuesp = caps.n_queues;
1707
0
    return retval;
1708
0
}
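/* Example (illustrative sketch): checking how many queues a particular QoS
 * type offers before trying to configure it. */
static bool
example_qos_type_has_queues(const struct netdev *netdev, const char *type)
{
    unsigned int n_queues;

    return !netdev_get_n_queues(netdev, type, &n_queues) && n_queues > 0;
}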
1709
1710
/* Queries 'netdev' about its currently configured form of QoS.  If successful,
1711
 * stores the name of the current form of QoS into '*typep', stores any details
1712
 * of configuration as string key-value pairs in 'details', and returns 0.  On
1713
 * failure, sets '*typep' to NULL and returns a positive errno value.
1714
 *
1715
 * A '*typep' of "" indicates that QoS is currently disabled on 'netdev'.
1716
 *
1717
 * The caller must initialize 'details' as an empty smap (e.g. with
1718
 * smap_init()) before calling this function.  The caller must free 'details'
1719
 * when it is no longer needed (e.g. with smap_destroy()).
1720
 *
1721
 * The caller must not modify or free '*typep'.
1722
 *
1723
 * '*typep' will be one of the types returned by netdev_get_qos_types() for
1724
 * 'netdev'.  The contents of 'details' should be documented as valid for
1725
 * '*typep' in the "other_config" column in the "QoS" table in
1726
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)). */
1727
int
1728
netdev_get_qos(const struct netdev *netdev,
1729
               const char **typep, struct smap *details)
1730
0
{
1731
0
    const struct netdev_class *class = netdev->netdev_class;
1732
0
    int retval;
1733
1734
0
    if (class->get_qos) {
1735
0
        retval = class->get_qos(netdev, typep, details);
1736
0
        if (retval) {
1737
0
            *typep = NULL;
1738
0
            smap_clear(details);
1739
0
        }
1740
0
        return retval;
1741
0
    } else {
1742
        /* 'netdev' doesn't support QoS, so report that QoS is disabled. */
1743
0
        *typep = "";
1744
0
        return 0;
1745
0
    }
1746
0
}
1747
1748
/* Attempts to reconfigure QoS on 'netdev', changing the form of QoS to 'type'
1749
 * with details of configuration from 'details'.  Returns 0 if successful,
1750
 * otherwise a positive errno value.  On error, the previous QoS configuration
1751
 * is retained.
1752
 *
1753
 * When this function changes the type of QoS (not just 'details'), this also
1754
 * resets all queue configuration for 'netdev' to their defaults (which depend
1755
 * on the specific type of QoS).  Otherwise, the queue configuration for
1756
 * 'netdev' is unchanged.
1757
 *
1758
 * 'type' should be "" (to disable QoS) or one of the types returned by
1759
 * netdev_get_qos_types() for 'netdev'.  The contents of 'details' should be
1760
 * documented as valid for the given 'type' in the "other_config" column in the
1761
 * "QoS" table in vswitchd/vswitch.xml (which is built as
1762
 * ovs-vswitchd.conf.db(8)).
1763
 *
1764
 * NULL may be specified for 'details' if there are no configuration
1765
 * details. */
1766
int
1767
netdev_set_qos(struct netdev *netdev,
1768
               const char *type, const struct smap *details)
1769
0
{
1770
0
    const struct netdev_class *class = netdev->netdev_class;
1771
1772
0
    if (!type) {
1773
0
        type = "";
1774
0
    }
1775
1776
0
    if (class->set_qos) {
1777
0
        if (!details) {
1778
0
            static const struct smap empty = SMAP_INITIALIZER(&empty);
1779
0
            details = &empty;
1780
0
        }
1781
0
        return class->set_qos(netdev, type, details);
1782
0
    } else {
1783
0
        return *type ? EOPNOTSUPP : 0;
1784
0
    }
1785
0
}
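/* Example (illustrative sketch): enabling QoS with a rate cap.  "linux-htb"
 * and its "max-rate" key are examples of a type and other_config key
 * documented in vswitchd/vswitch.xml; other netdev classes accept different
 * types and keys. */
static int
example_enable_htb(struct netdev *netdev)
{
    struct smap details = SMAP_INITIALIZER(&details);
    int error;

    smap_add(&details, "max-rate", "100000000");  /* 100 Mbps. */
    error = netdev_set_qos(netdev, "linux-htb", &details);
    smap_destroy(&details);
    return error;
}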
1786
1787
/* Queries 'netdev' for information about the queue numbered 'queue_id'.  If
1788
 * successful, adds that information as string key-value pairs to 'details'.
1789
 * Returns 0 if successful, otherwise a positive errno value.
1790
 *
1791
 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1792
 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1793
 *
1794
 * The returned contents of 'details' should be documented as valid for the
1795
 * given 'type' in the "other_config" column in the "Queue" table in
1796
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1797
 *
1798
 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1799
 * this function.  The caller must free 'details' when it is no longer needed
1800
 * (e.g. with smap_destroy()). */
1801
int
1802
netdev_get_queue(const struct netdev *netdev,
1803
                 unsigned int queue_id, struct smap *details)
1804
0
{
1805
0
    const struct netdev_class *class = netdev->netdev_class;
1806
0
    int retval;
1807
1808
0
    retval = (class->get_queue
1809
0
              ? class->get_queue(netdev, queue_id, details)
1810
0
              : EOPNOTSUPP);
1811
0
    if (retval) {
1812
0
        smap_clear(details);
1813
0
    }
1814
0
    return retval;
1815
0
}
1816
1817
/* Configures the queue numbered 'queue_id' on 'netdev' with the key-value
1818
 * string pairs in 'details'.  The contents of 'details' should be documented
1819
 * as valid for the given 'type' in the "other_config" column in the "Queue"
1820
 * table in vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1821
 * Returns 0 if successful, otherwise a positive errno value.  On failure, the
1822
 * given queue's configuration should be unmodified.
1823
 *
1824
 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1825
 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1826
 *
1827
 * This function does not modify 'details', and the caller retains ownership of
1828
 * it. */
1829
int
1830
netdev_set_queue(struct netdev *netdev,
1831
                 unsigned int queue_id, const struct smap *details)
1832
0
{
1833
0
    const struct netdev_class *class = netdev->netdev_class;
1834
0
    return (class->set_queue
1835
0
            ? class->set_queue(netdev, queue_id, details)
1836
0
            : EOPNOTSUPP);
1837
0
}
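/* Example (illustrative sketch): configuring one queue under an HTB-style QoS
 * type.  The "min-rate"/"max-rate" keys are documented per type in the
 * "Queue" table of vswitchd/vswitch.xml. */
static int
example_configure_queue(struct netdev *netdev, unsigned int queue_id)
{
    struct smap details = SMAP_INITIALIZER(&details);
    int error;

    smap_add(&details, "min-rate", "1000000");    /* 1 Mbps guaranteed. */
    smap_add(&details, "max-rate", "10000000");   /* 10 Mbps ceiling. */
    error = netdev_set_queue(netdev, queue_id, &details);
    smap_destroy(&details);
    return error;
}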
1838
1839
/* Attempts to delete the queue numbered 'queue_id' from 'netdev'.  Some kinds
1840
 * of QoS may have a fixed set of queues, in which case attempts to delete them
1841
 * will fail with EOPNOTSUPP.
1842
 *
1843
 * Returns 0 if successful, otherwise a positive errno value.  On failure, the
1844
 * given queue will be unmodified.
1845
 *
1846
 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1847
 * the current form of QoS (e.g. as returned by
1848
 * netdev_get_n_queues(netdev)). */
1849
int
1850
netdev_delete_queue(struct netdev *netdev, unsigned int queue_id)
1851
0
{
1852
0
    const struct netdev_class *class = netdev->netdev_class;
1853
0
    return (class->delete_queue
1854
0
            ? class->delete_queue(netdev, queue_id)
1855
0
            : EOPNOTSUPP);
1856
0
}
1857
1858
/* Obtains statistics about 'queue_id' on 'netdev'.  On success, returns 0 and
1859
 * fills 'stats' with the queue's statistics; individual members of 'stats' may
1860
 * be set to all-1-bits if the statistic is unavailable.  On failure, returns a
1861
 * positive errno value and fills 'stats' with values indicating unsupported
1862
 * statistics. */
1863
int
1864
netdev_get_queue_stats(const struct netdev *netdev, unsigned int queue_id,
1865
                       struct netdev_queue_stats *stats)
1866
0
{
1867
0
    const struct netdev_class *class = netdev->netdev_class;
1868
0
    int retval;
1869
1870
0
    retval = (class->get_queue_stats
1871
0
              ? class->get_queue_stats(netdev, queue_id, stats)
1872
0
              : EOPNOTSUPP);
1873
0
    if (retval) {
1874
0
        stats->tx_bytes = UINT64_MAX;
1875
0
        stats->tx_packets = UINT64_MAX;
1876
0
        stats->tx_errors = UINT64_MAX;
1877
0
        stats->created = LLONG_MIN;
1878
0
    }
1879
0
    return retval;
1880
0
}
1881
1882
/* Initializes 'dump' to begin dumping the queues in a netdev.
1883
 *
1884
 * This function provides no status indication.  An error status for the entire
1885
 * dump operation is provided when it is completed by calling
1886
 * netdev_queue_dump_done().
1887
 */
1888
void
1889
netdev_queue_dump_start(struct netdev_queue_dump *dump,
1890
                        const struct netdev *netdev)
1891
0
{
1892
0
    dump->netdev = netdev_ref(netdev);
1893
0
    if (netdev->netdev_class->queue_dump_start) {
1894
0
        dump->error = netdev->netdev_class->queue_dump_start(netdev,
1895
0
                                                             &dump->state);
1896
0
    } else {
1897
0
        dump->error = EOPNOTSUPP;
1898
0
    }
1899
0
}
1900
1901
/* Attempts to retrieve another queue from 'dump', which must have been
1902
 * initialized with netdev_queue_dump_start().  On success, stores a new queue
1903
 * ID into '*queue_id', fills 'details' with configuration details for the
1904
 * queue, and returns true.  On failure, returns false.
1905
 *
1906
 * Queues are not necessarily dumped in increasing order of queue ID (or any
1907
 * other predictable order).
1908
 *
1909
 * Failure might indicate an actual error or merely that the last queue has
1910
 * been dumped.  An error status for the entire dump operation is provided when
1911
 * it is completed by calling netdev_queue_dump_done().
1912
 *
1913
 * The returned contents of 'details' should be documented as valid for the
1914
 * given 'type' in the "other_config" column in the "Queue" table in
1915
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1916
 *
1917
 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1918
 * this function.  This function will clear and replace its contents.  The
1919
 * caller must free 'details' when it is no longer needed (e.g. with
1920
 * smap_destroy()). */
1921
bool
1922
netdev_queue_dump_next(struct netdev_queue_dump *dump,
1923
                       unsigned int *queue_id, struct smap *details)
1924
0
{
1925
0
    smap_clear(details);
1926
1927
0
    const struct netdev *netdev = dump->netdev;
1928
0
    if (dump->error) {
1929
0
        return false;
1930
0
    }
1931
1932
0
    dump->error = netdev->netdev_class->queue_dump_next(netdev, dump->state,
1933
0
                                                        queue_id, details);
1934
1935
0
    if (dump->error) {
1936
0
        netdev->netdev_class->queue_dump_done(netdev, dump->state);
1937
0
        return false;
1938
0
    }
1939
0
    return true;
1940
0
}
1941
1942
/* Completes queue table dump operation 'dump', which must have been
1943
 * initialized with netdev_queue_dump_start().  Returns 0 if the dump operation
1944
 * was error-free, otherwise a positive errno value describing the problem. */
1945
int
1946
netdev_queue_dump_done(struct netdev_queue_dump *dump)
1947
0
{
1948
0
    const struct netdev *netdev = dump->netdev;
1949
0
    if (!dump->error && netdev->netdev_class->queue_dump_done) {
1950
0
        dump->error = netdev->netdev_class->queue_dump_done(netdev,
1951
0
                                                            dump->state);
1952
0
    }
1953
0
    netdev_close(dump->netdev);
1954
0
    return dump->error == EOF ? 0 : dump->error;
1955
0
}
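/* Example (illustrative sketch): the complete dump sequence using the three
 * functions above.  Any error (or normal termination) surfaces from
 * netdev_queue_dump_done(). */
static int
example_dump_queues(const struct netdev *netdev)
{
    struct netdev_queue_dump dump;
    struct smap details = SMAP_INITIALIZER(&details);
    unsigned int queue_id;

    netdev_queue_dump_start(&dump, netdev);
    while (netdev_queue_dump_next(&dump, &queue_id, &details)) {
        VLOG_INFO("%s: queue %u has %"PRIuSIZE" config keys",
                  netdev_get_name(netdev), queue_id, smap_count(&details));
    }
    smap_destroy(&details);
    return netdev_queue_dump_done(&dump);
}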
1956
1957
/* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
1958
 * its statistics, and the 'aux' specified by the caller.  The order of
1959
 * iteration is unspecified, but (when successful) each queue is visited
1960
 * exactly once.
1961
 *
1962
 * Calling this function may be more efficient than calling
1963
 * netdev_get_queue_stats() for every queue.
1964
 *
1965
 * 'cb' must not modify or free the statistics passed in.
1966
 *
1967
 * Returns 0 if successful, otherwise a positive errno value.  On error, some
1968
 * configured queues may not have been included in the iteration. */
1969
int
1970
netdev_dump_queue_stats(const struct netdev *netdev,
1971
                        netdev_dump_queue_stats_cb *cb, void *aux)
1972
0
{
1973
0
    const struct netdev_class *class = netdev->netdev_class;
1974
0
    return (class->dump_queue_stats
1975
0
            ? class->dump_queue_stats(netdev, cb, aux)
1976
0
            : EOPNOTSUPP);
1977
0
}
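/* Example (illustrative sketch): a callback matching the
 * netdev_dump_queue_stats_cb typedef.  It totals tx_bytes across queues,
 * skipping the all-1-bits "unavailable" marker. */
static void
example_sum_queue_bytes(unsigned int queue_id OVS_UNUSED,
                        struct netdev_queue_stats *stats, void *aux)
{
    uint64_t *total = aux;

    if (stats->tx_bytes != UINT64_MAX) {
        *total += stats->tx_bytes;
    }
}

/* Usage: uint64_t total = 0;
 *        netdev_dump_queue_stats(netdev, example_sum_queue_bytes, &total); */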
1978
1979

1980
/* Returns the class type of 'netdev'.
1981
 *
1982
 * The caller must not free the returned value. */
1983
const char *
1984
netdev_get_type(const struct netdev *netdev)
1985
0
{
1986
0
    return netdev->netdev_class->type;
1987
0
}
1988
1989
/* Returns the class associated with 'netdev'. */
1990
const struct netdev_class *
1991
netdev_get_class(const struct netdev *netdev)
1992
0
{
1993
0
    return netdev->netdev_class;
1994
0
}
1995
1996
/* Set the type of 'dpif' this 'netdev' belongs to. */
1997
void
1998
netdev_set_dpif_type(struct netdev *netdev, const char *type)
1999
0
{
2000
0
    netdev->dpif_type = type;
2001
0
}
2002
2003
/* Returns the type of 'dpif' this 'netdev' belongs to.
2004
 *
2005
 * The caller must not free the returned value. */
2006
const char *
2007
netdev_get_dpif_type(const struct netdev *netdev)
2008
0
{
2009
0
    return netdev->dpif_type;
2010
0
}
2011
2012
/* Returns the netdev with 'name' or NULL if there is none.
2013
 *
2014
 * The caller must free the returned netdev with netdev_close(). */
2015
struct netdev *
2016
netdev_from_name(const char *name)
2017
    OVS_EXCLUDED(netdev_mutex)
2018
0
{
2019
0
    struct netdev *netdev;
2020
2021
0
    ovs_mutex_lock(&netdev_mutex);
2022
0
    netdev = shash_find_data(&netdev_shash, name);
2023
0
    if (netdev) {
2024
0
        netdev->ref_cnt++;
2025
0
    }
2026
0
    ovs_mutex_unlock(&netdev_mutex);
2027
2028
0
    return netdev;
2029
0
}
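/* Example (illustrative sketch): looking up a device by name and releasing
 * the reference.  netdev_close() tolerates a NULL argument, as the call in
 * netdev_get_type_from_name() below also relies on. */
static bool
example_netdev_exists(const char *name)
{
    struct netdev *netdev = netdev_from_name(name);
    bool exists = netdev != NULL;

    netdev_close(netdev);
    return exists;
}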
2030
2031
/* Fills 'device_list' with devices that match 'netdev_class'.
2032
 *
2033
 * The caller is responsible for initializing and destroying 'device_list' and
2034
 * must close each device on the list. */
2035
void
2036
netdev_get_devices(const struct netdev_class *netdev_class,
2037
                   struct shash *device_list)
2038
    OVS_EXCLUDED(netdev_mutex)
2039
0
{
2040
0
    struct shash_node *node;
2041
2042
0
    ovs_mutex_lock(&netdev_mutex);
2043
0
    SHASH_FOR_EACH (node, &netdev_shash) {
2044
0
        struct netdev *dev = node->data;
2045
2046
0
        if (dev->netdev_class == netdev_class) {
2047
0
            dev->ref_cnt++;
2048
0
            shash_add(device_list, node->name, node->data);
2049
0
        }
2050
0
    }
2051
0
    ovs_mutex_unlock(&netdev_mutex);
2052
0
}
2053
2054
/* Extracts pointers to all 'netdev-vports' into an array 'vports'
2055
 * and returns it.  Stores the size of the array into '*size'.
2056
 *
2057
 * The caller is responsible for freeing 'vports' and must close
2058
 * each 'netdev-vport' in the list. */
2059
struct netdev **
2060
netdev_get_vports(size_t *size)
2061
    OVS_EXCLUDED(netdev_mutex)
2062
0
{
2063
0
    struct netdev **vports;
2064
0
    struct shash_node *node;
2065
0
    size_t n = 0;
2066
2067
0
    if (!size) {
2068
0
        return NULL;
2069
0
    }
2070
2071
    /* Explicitly allocate a chunk of memory big enough for every netdev,
     * even though only the vports are kept. */
2072
0
    ovs_mutex_lock(&netdev_mutex);
2073
0
    vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
2074
0
    SHASH_FOR_EACH (node, &netdev_shash) {
2075
0
        struct netdev *dev = node->data;
2076
2077
0
        if (netdev_vport_is_vport_class(dev->netdev_class)) {
2078
0
            dev->ref_cnt++;
2079
0
            vports[n] = dev;
2080
0
            n++;
2081
0
        }
2082
0
    }
2083
0
    ovs_mutex_unlock(&netdev_mutex);
2084
0
    *size = n;
2085
2086
0
    return vports;
2087
0
}
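/* Example (illustrative sketch): the ownership rules from the comment above,
 * in code: close every referenced vport and free the array itself. */
static void
example_walk_vports(void)
{
    size_t n_vports;
    struct netdev **vports = netdev_get_vports(&n_vports);

    for (size_t i = 0; i < n_vports; i++) {
        VLOG_INFO("vport: %s", netdev_get_name(vports[i]));
        netdev_close(vports[i]);
    }
    free(vports);
}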
2088
2089
const char *
2090
netdev_get_type_from_name(const char *name)
2091
0
{
2092
0
    struct netdev *dev;
2093
0
    const char *type;
2094
0
    type = netdev_vport_type_from_name(name);
2095
0
    if (type == NULL) {
2096
0
        dev = netdev_from_name(name);
2097
0
        type = dev ? netdev_get_type(dev) : NULL;
2098
0
        netdev_close(dev);
2099
0
    }
2100
0
    return type;
2101
0
}
2102

2103
struct netdev *
2104
netdev_rxq_get_netdev(const struct netdev_rxq *rx)
2105
0
{
2106
0
    ovs_assert(rx->netdev->ref_cnt > 0);
2107
0
    return rx->netdev;
2108
0
}
2109
2110
const char *
2111
netdev_rxq_get_name(const struct netdev_rxq *rx)
2112
0
{
2113
0
    return netdev_get_name(netdev_rxq_get_netdev(rx));
2114
0
}
2115
2116
int
2117
netdev_rxq_get_queue_id(const struct netdev_rxq *rx)
2118
0
{
2119
0
    return rx->queue_id;
2120
0
}
2121
2122
static void
2123
restore_all_flags(void *aux OVS_UNUSED)
2124
0
{
2125
0
    struct shash_node *node;
2126
2127
0
    SHASH_FOR_EACH (node, &netdev_shash) {
2128
0
        struct netdev *netdev = node->data;
2129
0
        const struct netdev_saved_flags *sf;
2130
0
        enum netdev_flags saved_values;
2131
0
        enum netdev_flags saved_flags;
2132
2133
0
        saved_values = saved_flags = 0;
2134
0
        LIST_FOR_EACH (sf, node, &netdev->saved_flags_list) {
2135
0
            saved_flags |= sf->saved_flags;
2136
0
            saved_values &= ~sf->saved_flags;
2137
0
            saved_values |= sf->saved_flags & sf->saved_values;
2138
0
        }
2139
0
        if (saved_flags) {
2140
0
            enum netdev_flags old_flags;
2141
2142
0
            netdev->netdev_class->update_flags(netdev,
2143
0
                                               saved_flags & saved_values,
2144
0
                                               saved_flags & ~saved_values,
2145
0
                                               &old_flags);
2146
0
        }
2147
#ifdef HAVE_AF_XDP
2148
        if (netdev->netdev_class == &netdev_afxdp_class) {
2149
            signal_remove_xdp(netdev);
2150
        }
2151
#endif
2152
0
    }
2153
0
}
2154
2155
uint64_t
2156
netdev_get_change_seq(const struct netdev *netdev)
2157
0
{
2158
0
    uint64_t change_seq;
2159
2160
0
    atomic_read_explicit(&CONST_CAST(struct netdev *, netdev)->change_seq,
2161
0
                        &change_seq, memory_order_acquire);
2162
2163
0
    return change_seq;
2164
0
}
2165
2166
#ifndef _WIN32
2167
/* This implementation is shared by Linux and BSD. */
2168
2169
static struct ifaddrs *if_addr_list;
2170
static struct ovs_mutex if_addr_list_lock = OVS_MUTEX_INITIALIZER;
2171
2172
void
2173
netdev_get_addrs_list_flush(void)
2174
0
{
2175
0
    ovs_mutex_lock(&if_addr_list_lock);
2176
0
    if (if_addr_list) {
2177
0
        freeifaddrs(if_addr_list);
2178
0
        if_addr_list = NULL;
2179
0
    }
2180
0
    ovs_mutex_unlock(&if_addr_list_lock);
2181
0
}
2182
2183
int
2184
netdev_get_addrs(const char dev[], struct in6_addr **paddr,
2185
                 struct in6_addr **pmask, int *n_in)
2186
0
{
2187
0
    struct in6_addr *addr_array, *mask_array;
2188
0
    const struct ifaddrs *ifa;
2189
0
    int cnt = 0, i = 0;
2190
0
    int retries = 3;
2191
2192
0
    ovs_mutex_lock(&if_addr_list_lock);
2193
0
    if (!if_addr_list) {
2194
0
        int err;
2195
2196
0
retry:
2197
0
        err = getifaddrs(&if_addr_list);
2198
0
        if (err) {
2199
0
            ovs_mutex_unlock(&if_addr_list_lock);
2200
0
            return -err;
2201
0
        }
2202
0
        retries--;
2203
0
    }
2204
2205
0
    for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
2206
0
        if (!ifa->ifa_name) {
2207
0
            if (retries) {
2208
                /* Older versions of glibc have a race condition in address
2209
                 * addition that may cause one of the returned ifa_name values
2210
                 * to be NULL.  In that case we know we have an inconsistent
2211
                 * dump, so retry, but beware of an endless loop.  From glibc
2212
                 * 2.28 onward this workaround is not needed and should
2213
                 * eventually be removed. */
2214
0
                freeifaddrs(if_addr_list);
2215
0
                goto retry;
2216
0
            } else {
2217
0
                VLOG_WARN("Proceeding with an inconsistent dump of "
2218
0
                          "interfaces from the kernel. Some may be missing");
2219
0
            }
2220
0
        }
2221
0
        if (ifa->ifa_addr && ifa->ifa_name && ifa->ifa_netmask) {
2222
0
            int family;
2223
2224
0
            family = ifa->ifa_addr->sa_family;
2225
0
            if (family == AF_INET || family == AF_INET6) {
2226
0
                if (!strncmp(ifa->ifa_name, dev, IFNAMSIZ)) {
2227
0
                    cnt++;
2228
0
                }
2229
0
            }
2230
0
        }
2231
0
    }
2232
2233
0
    if (!cnt) {
2234
0
        ovs_mutex_unlock(&if_addr_list_lock);
2235
0
        return EADDRNOTAVAIL;
2236
0
    }
2237
0
    addr_array = xzalloc(sizeof *addr_array * cnt);
2238
0
    mask_array = xzalloc(sizeof *mask_array * cnt);
2239
0
    for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
2240
0
        if (ifa->ifa_name
2241
0
            && ifa->ifa_addr
2242
0
            && ifa->ifa_netmask
2243
0
            && !strncmp(ifa->ifa_name, dev, IFNAMSIZ)
2244
0
            && sa_is_ip(ifa->ifa_addr)) {
2245
0
            addr_array[i] = sa_get_address(ifa->ifa_addr);
2246
0
            mask_array[i] = sa_get_address(ifa->ifa_netmask);
2247
0
            i++;
2248
0
        }
2249
0
    }
2250
0
    ovs_mutex_unlock(&if_addr_list_lock);
2251
0
    if (paddr) {
2252
0
        *n_in = cnt;
2253
0
        *paddr = addr_array;
2254
0
        *pmask = mask_array;
2255
0
    } else {
2256
0
        free(addr_array);
2257
0
        free(mask_array);
2258
0
    }
2259
0
    return 0;
2260
0
}
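/* Example (illustrative sketch): counting a device's IPv4/IPv6 addresses.
 * The caller owns both returned arrays and must free them when done. */
static int
example_count_addresses(const char *dev_name, int *n_addrs)
{
    struct in6_addr *addrs, *masks;
    int error = netdev_get_addrs(dev_name, &addrs, &masks, n_addrs);

    if (!error) {
        free(addrs);
        free(masks);
    }
    return error;
}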
2261
#endif
2262
2263
void
2264
netdev_wait_reconf_required(struct netdev *netdev)
2265
0
{
2266
0
    seq_wait(netdev->reconfigure_seq, netdev->last_reconfigure_seq);
2267
0
}
2268
2269
bool
2270
netdev_is_reconf_required(struct netdev *netdev)
2271
0
{
2272
0
    return seq_read(netdev->reconfigure_seq) != netdev->last_reconfigure_seq;
2273
0
}
2274
2275
/* Gives 'netdev' a chance to reconfigure some of its parameters.
2276
 *
2277
 * If a module uses netdev_send() and netdev_rxq_recv(), it must call this
2278
 * function when netdev_is_reconf_required() returns true.
2279
 *
2280
 * Returns 0 if successful, otherwise a positive errno value.  If the
2281
 * reconfiguration fails, the netdev will not be able to send or receive
2282
 * packets.
2283
 *
2284
 * While this function runs, no calls to netdev_rxq_recv() or netdev_send()
2285
 * may be issued. */
2286
int
2287
netdev_reconfigure(struct netdev *netdev)
2288
0
{
2289
0
    const struct netdev_class *class = netdev->netdev_class;
2290
2291
0
    netdev->last_reconfigure_seq = seq_read(netdev->reconfigure_seq);
2292
2293
0
    return (class->reconfigure
2294
0
            ? class->reconfigure(netdev)
2295
0
            : EOPNOTSUPP);
2296
0
}
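/* Example (illustrative sketch): the polling pattern the comment above
 * describes, as it might appear in a datapath main loop.  Sending and
 * receiving must be quiesced by the caller before the reconfigure call. */
static void
example_maybe_reconfigure(struct netdev *netdev)
{
    if (netdev_is_reconf_required(netdev)) {
        /* Caller has stopped netdev_send()/netdev_rxq_recv() here. */
        netdev_reconfigure(netdev);
    } else {
        /* Wake up the poll loop when reconfiguration becomes necessary. */
        netdev_wait_reconf_required(netdev);
    }
}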
2297
2298
void
2299
netdev_free_custom_stats_counters(struct netdev_custom_stats *custom_stats)
2300
40.7k
{
2301
40.7k
    if (custom_stats) {
2302
40.7k
        if (custom_stats->counters) {
2303
6.21k
            free(custom_stats->counters);
2304
6.21k
            custom_stats->counters = NULL;
2305
6.21k
            custom_stats->size = 0;
2306
6.21k
        }
2307
40.7k
    }
2308
40.7k
}