Coverage Report

Created: 2026-01-17 06:55

Legend: "L" marks the next uncovered line, "R" the next uncovered region, and "B" the next uncovered branch.
/src/openvswitch/lib/netdev.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include <config.h>
18
#include "netdev.h"
19
20
#include <errno.h>
21
#include <inttypes.h>
22
#include <sys/types.h>
23
#include <netinet/in.h>
24
#include <stdlib.h>
25
#include <string.h>
26
#include <unistd.h>
27
28
#ifndef _WIN32
29
#include <ifaddrs.h>
30
#include <net/if.h>
31
#include <sys/ioctl.h>
32
#endif
33
34
#include "cmap.h"
35
#include "coverage.h"
36
#include "dpif.h"
37
#include "dp-packet.h"
38
#include "dp-packet-gso.h"
39
#include "openvswitch/dynamic-string.h"
40
#include "fatal-signal.h"
41
#include "hash.h"
42
#include "openvswitch/list.h"
43
#include "netdev-provider.h"
44
#include "netdev-vport.h"
45
#include "odp-netlink.h"
46
#include "openvswitch/json.h"
47
#include "openflow/openflow.h"
48
#include "packets.h"
49
#include "openvswitch/ofp-print.h"
50
#include "openvswitch/poll-loop.h"
51
#include "seq.h"
52
#include "openvswitch/shash.h"
53
#include "smap.h"
54
#include "socket-util.h"
55
#include "sset.h"
56
#include "svec.h"
57
#include "openvswitch/vlog.h"
58
#include "flow.h"
59
#include "userspace-tso.h"
60
#include "util.h"
61
#ifdef __linux__
62
#include "tc.h"
63
#endif
64
65
VLOG_DEFINE_THIS_MODULE(netdev);
66
67
COVERAGE_DEFINE(netdev_received);
68
COVERAGE_DEFINE(netdev_sent);
69
COVERAGE_DEFINE(netdev_add_router);
70
COVERAGE_DEFINE(netdev_get_stats);
71
COVERAGE_DEFINE(netdev_push_header_drops);
72
COVERAGE_DEFINE(netdev_soft_seg_good);
73
COVERAGE_DEFINE(netdev_partial_seg_good);
74
75
struct netdev_saved_flags {
76
    struct netdev *netdev;
77
    struct ovs_list node;           /* In struct netdev's saved_flags_list. */
78
    enum netdev_flags saved_flags;
79
    enum netdev_flags saved_values;
80
};
81
82
/* Protects 'netdev_shash' and the mutable members of struct netdev. */
83
static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;
84
85
/* All created network devices. */
86
static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
87
    = SHASH_INITIALIZER(&netdev_shash);
88
89
/* Mutual exclusion of */
90
static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex)
91
    = OVS_MUTEX_INITIALIZER;
92
93
/* Contains 'struct netdev_registered_class'es. */
94
static struct cmap netdev_classes = CMAP_INITIALIZER;
95
96
struct netdev_registered_class {
97
    struct cmap_node cmap_node; /* In 'netdev_classes', by class->type. */
98
    const struct netdev_class *class;
99
100
    /* Number of references: one for the class itself and one for every
101
     * instance of the class. */
102
    struct ovs_refcount refcnt;
103
};
104
105
/* This is set pretty low because we probably won't learn anything from the
106
 * additional log messages. */
107
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
108
109
static void restore_all_flags(void *aux OVS_UNUSED);
110
void update_device_args(struct netdev *, const struct shash *args);
111
#ifdef HAVE_AF_XDP
112
void signal_remove_xdp(struct netdev *netdev);
113
#endif
114
115
int
116
netdev_n_txq(const struct netdev *netdev)
117
0
{
118
0
    return netdev->n_txq;
119
0
}
120
121
int
122
netdev_n_rxq(const struct netdev *netdev)
123
0
{
124
0
    return netdev->n_rxq;
125
0
}
126
127
bool
128
netdev_is_pmd(const struct netdev *netdev)
129
0
{
130
0
    return netdev->netdev_class->is_pmd;
131
0
}
132
133
bool
134
netdev_has_tunnel_push_pop(const struct netdev *netdev)
135
0
{
136
0
    return netdev->netdev_class->push_header
137
0
           && netdev->netdev_class->pop_header;
138
0
}
139
140
static void
141
netdev_initialize(void)
142
    OVS_EXCLUDED(netdev_mutex)
143
0
{
144
0
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
145
146
0
    if (ovsthread_once_start(&once)) {
147
0
        fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
148
149
0
        netdev_vport_patch_register();
150
151
0
#ifdef __linux__
152
0
        netdev_register_provider(&netdev_linux_class);
153
0
        netdev_register_provider(&netdev_internal_class);
154
0
        netdev_register_provider(&netdev_tap_class);
155
0
        netdev_vport_tunnel_register();
156
#ifdef HAVE_AF_XDP
157
        netdev_register_provider(&netdev_afxdp_class);
158
        netdev_register_provider(&netdev_afxdp_nonpmd_class);
159
#endif
160
0
#endif
161
#if defined(__FreeBSD__) || defined(__NetBSD__)
162
        netdev_register_provider(&netdev_tap_class);
163
        netdev_register_provider(&netdev_bsd_class);
164
#endif
165
#ifdef _WIN32
166
        netdev_register_provider(&netdev_windows_class);
167
        netdev_register_provider(&netdev_internal_class);
168
        netdev_vport_tunnel_register();
169
#endif
170
0
        ovsthread_once_done(&once);
171
0
    }
172
0
}
173
174
/* Performs periodic work needed by all the various kinds of netdevs.
175
 *
176
 * If your program opens any netdevs, it must call this function within its
177
 * main poll loop. */
178
void
179
netdev_run(void)
180
    OVS_EXCLUDED(netdev_mutex)
181
0
{
182
0
    netdev_initialize();
183
184
0
    struct netdev_registered_class *rc;
185
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
186
0
        if (rc->class->run) {
187
0
            rc->class->run(rc->class);
188
0
        }
189
0
    }
190
0
}
191
192
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
193
 *
194
 * If your program opens any netdevs, it must call this function within its
195
 * main poll loop. */
196
void
197
netdev_wait(void)
198
    OVS_EXCLUDED(netdev_mutex)
199
0
{
200
0
    netdev_initialize();
201
202
0
    struct netdev_registered_class *rc;
203
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
204
0
        if (rc->class->wait) {
205
0
            rc->class->wait(rc->class);
206
0
        }
207
0
    }
208
0
}
209
210
static struct netdev_registered_class *
211
netdev_lookup_class(const char *type)
212
0
{
213
0
    struct netdev_registered_class *rc;
214
0
    CMAP_FOR_EACH_WITH_HASH (rc, cmap_node, hash_string(type, 0),
215
0
                             &netdev_classes) {
216
0
        if (!strcmp(type, rc->class->type)) {
217
0
            return rc;
218
0
        }
219
0
    }
220
0
    return NULL;
221
0
}
222
223
/* Initializes and registers a new netdev provider.  After successful
224
 * registration, new netdevs of that type can be opened using netdev_open(). */
225
int
226
netdev_register_provider(const struct netdev_class *new_class)
227
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
228
0
{
229
0
    int error;
230
231
0
    ovs_mutex_lock(&netdev_class_mutex);
232
0
    if (netdev_lookup_class(new_class->type)) {
233
0
        VLOG_WARN("attempted to register duplicate netdev provider: %s",
234
0
                   new_class->type);
235
0
        error = EEXIST;
236
0
    } else {
237
0
        error = new_class->init ? new_class->init() : 0;
238
0
        if (!error) {
239
0
            struct netdev_registered_class *rc;
240
241
0
            rc = xmalloc(sizeof *rc);
242
0
            cmap_insert(&netdev_classes, &rc->cmap_node,
243
0
                        hash_string(new_class->type, 0));
244
0
            rc->class = new_class;
245
0
            ovs_refcount_init(&rc->refcnt);
246
0
        } else {
247
0
            VLOG_ERR("failed to initialize %s network device class: %s",
248
0
                     new_class->type, ovs_strerror(error));
249
0
        }
250
0
    }
251
0
    ovs_mutex_unlock(&netdev_class_mutex);
252
253
0
    return error;
254
0
}
255
256
/* Unregisters a netdev provider.  'type' must have been previously registered
257
 * and not currently be in use by any netdevs.  After unregistration new
258
 * netdevs of that type cannot be opened using netdev_open().  (However, the
259
 * provider may still be accessible from other threads until the next RCU grace
260
 * period, so the caller must not free or re-register the same netdev_class
261
 * until that has passed.) */
262
int
263
netdev_unregister_provider(const char *type)
264
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
265
0
{
266
0
    struct netdev_registered_class *rc;
267
0
    int error;
268
269
0
    netdev_initialize();
270
271
0
    ovs_mutex_lock(&netdev_class_mutex);
272
0
    rc = netdev_lookup_class(type);
273
0
    if (!rc) {
274
0
        VLOG_WARN("attempted to unregister a netdev provider that is not "
275
0
                  "registered: %s", type);
276
0
        error = EAFNOSUPPORT;
277
0
    } else if (ovs_refcount_unref(&rc->refcnt) != 1) {
278
0
        ovs_refcount_ref(&rc->refcnt);
279
0
        VLOG_WARN("attempted to unregister in use netdev provider: %s",
280
0
                  type);
281
0
        error = EBUSY;
282
0
    } else  {
283
0
        cmap_remove(&netdev_classes, &rc->cmap_node,
284
0
                    hash_string(rc->class->type, 0));
285
0
        ovsrcu_postpone(free, rc);
286
0
        error = 0;
287
0
    }
288
0
    ovs_mutex_unlock(&netdev_class_mutex);
289
290
0
    return error;
291
0
}
292
293
/* Clears 'types' and enumerates the types of all currently registered netdev
294
 * providers into it.  The caller must first initialize the sset. */
295
void
296
netdev_enumerate_types(struct sset *types)
297
    OVS_EXCLUDED(netdev_mutex)
298
0
{
299
0
    netdev_initialize();
300
0
    sset_clear(types);
301
302
0
    struct netdev_registered_class *rc;
303
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
304
0
        sset_add(types, rc->class->type);
305
0
    }
306
0
}
307
308
static const char *
309
netdev_vport_type_from_name(const char *name)
310
0
{
311
0
    struct netdev_registered_class *rc;
312
0
    const char *type;
313
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
314
0
        const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
315
0
        if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
316
0
            type = rc->class->type;
317
0
            return type;
318
0
        }
319
0
    }
320
0
    return NULL;
321
0
}
322
323
/* Check that the network device name is not the same as any of the registered
324
 * vport providers' dpif_port name (dpif_port is NULL if the vport provider
325
 * does not define it) or the datapath internal port name (e.g. ovs-system).
326
 *
327
 * Returns true if there is a name conflict, false otherwise. */
328
bool
329
netdev_is_reserved_name(const char *name)
330
    OVS_EXCLUDED(netdev_mutex)
331
0
{
332
0
    netdev_initialize();
333
334
0
    struct netdev_registered_class *rc;
335
0
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
336
0
        const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
337
0
        if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
338
0
            return true;
339
0
        }
340
0
    }
341
342
0
    if (!strncmp(name, "ovs-", 4)) {
343
0
        struct sset types;
344
0
        const char *type;
345
346
0
        sset_init(&types);
347
0
        dp_enumerate_types(&types);
348
0
        SSET_FOR_EACH (type, &types) {
349
0
            if (!strcmp(name+4, type)) {
350
0
                sset_destroy(&types);
351
0
                return true;
352
0
            }
353
0
        }
354
0
        sset_destroy(&types);
355
0
    }
356
357
0
    return false;
358
0
}
359
360
/* Opens the network device named 'name' (e.g. "eth0") of the specified 'type'
361
 * (e.g. "system") and returns zero if successful, otherwise a positive errno
362
 * value.  On success, sets '*netdevp' to the new network device, otherwise to
363
 * null.
364
 *
365
 * Some network devices may need to be configured (with netdev_set_config())
366
 * before they can be used.
367
 *
368
 * Before opening rxqs or sending packets, '*netdevp' may need to be
369
 * reconfigured (with netdev_is_reconf_required() and netdev_reconfigure()).
370
 * */
371
int
372
netdev_open(const char *name, const char *type, struct netdev **netdevp)
373
    OVS_EXCLUDED(netdev_mutex)
374
0
{
375
0
    struct netdev *netdev;
376
0
    int error = 0;
377
378
0
    if (!name[0]) {
379
        /* Reject empty names.  This saves the providers having to do this.  At
380
         * least one screwed this up: the netdev-linux "tap" implementation
381
         * passed the name directly to the Linux TUNSETIFF call, which treats
382
         * an empty string as a request to generate a unique name. */
383
0
        return EINVAL;
384
0
    }
385
386
0
    netdev_initialize();
387
388
0
    ovs_mutex_lock(&netdev_mutex);
389
0
    netdev = shash_find_data(&netdev_shash, name);
390
391
0
    if (netdev && type && type[0]) {
392
0
        if (strcmp(type, netdev->netdev_class->type)) {
393
394
0
            if (netdev->auto_classified) {
395
                /* If this device was first created without a classification
396
                 * type, for example due to routing or tunneling code, and they
397
                 * keep a reference, a "classified" call to open will fail.
398
                 * In this case we remove the classless device, and re-add it
399
                 * below. We remove the netdev from the shash, and change the
400
                 * sequence, so owners of the old classless device can
401
                 * release/cleanup. */
402
0
                if (netdev->node) {
403
0
                    shash_delete(&netdev_shash, netdev->node);
404
0
                    netdev->node = NULL;
405
0
                    netdev_change_seq_changed(netdev);
406
0
                }
407
408
0
                netdev = NULL;
409
0
            } else {
410
0
                error = EEXIST;
411
0
            }
412
0
        } else if (netdev->auto_classified) {
413
            /* If netdev reopened with type "system", clear auto_classified. */
414
0
            netdev->auto_classified = false;
415
0
        }
416
0
    }
417
418
0
    if (!netdev) {
419
0
        struct netdev_registered_class *rc;
420
421
0
        rc = netdev_lookup_class(type && type[0] ? type : "system");
422
0
        if (rc && ovs_refcount_try_ref_rcu(&rc->refcnt)) {
423
0
            netdev = rc->class->alloc();
424
0
            if (netdev) {
425
0
                memset(netdev, 0, sizeof *netdev);
426
0
                netdev->netdev_class = rc->class;
427
0
                netdev->auto_classified = type && type[0] ? false : true;
428
0
                netdev->name = xstrdup(name);
429
0
                netdev->change_seq = 1;
430
0
                netdev->reconfigure_seq = seq_create();
431
0
                netdev->last_reconfigure_seq =
432
0
                    seq_read(netdev->reconfigure_seq);
433
0
                netdev->hw_info.oor = false;
434
0
                atomic_init(&netdev->hw_info.post_process_api_supported,
435
0
                            false);
436
0
                netdev->node = shash_add(&netdev_shash, name, netdev);
437
438
                /* By default enable one tx and rx queue per netdev. */
439
0
                netdev->n_txq = netdev->netdev_class->send ? 1 : 0;
440
0
                netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;
441
442
0
                ovs_list_init(&netdev->saved_flags_list);
443
444
0
                error = rc->class->construct(netdev);
445
0
                if (!error) {
446
0
                    netdev_change_seq_changed(netdev);
447
0
                } else {
448
0
                    ovs_refcount_unref(&rc->refcnt);
449
0
                    seq_destroy(netdev->reconfigure_seq);
450
0
                    free(netdev->name);
451
0
                    ovs_assert(ovs_list_is_empty(&netdev->saved_flags_list));
452
0
                    shash_delete(&netdev_shash, netdev->node);
453
0
                    rc->class->dealloc(netdev);
454
0
                }
455
0
            } else {
456
0
                error = ENOMEM;
457
0
            }
458
0
        } else {
459
0
            VLOG_WARN("could not create netdev %s of unknown type %s",
460
0
                      name, type);
461
0
            error = EAFNOSUPPORT;
462
0
        }
463
0
    }
464
465
0
    if (!error) {
466
0
        netdev->ref_cnt++;
467
0
        *netdevp = netdev;
468
0
    } else {
469
0
        *netdevp = NULL;
470
0
    }
471
0
    ovs_mutex_unlock(&netdev_mutex);
472
473
0
    return error;
474
0
}
475
476
/* Returns a reference to 'netdev_' for the caller to own. Returns null if
477
 * 'netdev_' is null. */
478
struct netdev *
479
netdev_ref(const struct netdev *netdev_)
480
    OVS_EXCLUDED(netdev_mutex)
481
0
{
482
0
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
483
484
0
    if (netdev) {
485
0
        ovs_mutex_lock(&netdev_mutex);
486
0
        ovs_assert(netdev->ref_cnt > 0);
487
0
        netdev->ref_cnt++;
488
0
        ovs_mutex_unlock(&netdev_mutex);
489
0
    }
490
0
    return netdev;
491
0
}
492
493
/* Reconfigures the device 'netdev' with 'args'.  'args' may be empty
494
 * or NULL if none are needed. */
495
int
496
netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
497
    OVS_EXCLUDED(netdev_mutex)
498
0
{
499
0
    if (netdev->netdev_class->set_config) {
500
0
        const struct smap no_args = SMAP_INITIALIZER(&no_args);
501
0
        char *verbose_error = NULL;
502
0
        int error;
503
504
0
        error = netdev->netdev_class->set_config(netdev,
505
0
                                                 args ? args : &no_args,
506
0
                                                 &verbose_error);
507
0
        if (error) {
508
0
            VLOG_WARN_BUF(verbose_error ? NULL : errp,
509
0
                          "%s: could not set configuration (%s)",
510
0
                          netdev_get_name(netdev), ovs_strerror(error));
511
0
            if (verbose_error) {
512
0
                if (errp) {
513
0
                    *errp = verbose_error;
514
0
                } else {
515
0
                    free(verbose_error);
516
0
                }
517
0
            }
518
0
        }
519
0
        return error;
520
0
    } else if (args && !smap_is_empty(args)) {
521
0
        VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not configurable",
522
0
                      netdev_get_name(netdev));
523
0
    }
524
0
    return 0;
525
0
}
526
527
/* Returns the current configuration for 'netdev' in 'args'.  The caller must
528
 * have already initialized 'args' with smap_init().  Returns 0 on success, in
529
 * which case 'args' will be filled with 'netdev''s configuration.  On failure
530
 * returns a positive errno value, in which case 'args' will be empty.
531
 *
532
 * The caller owns 'args' and its contents and must eventually free them with
533
 * smap_destroy(). */
534
int
535
netdev_get_config(const struct netdev *netdev, struct smap *args)
536
    OVS_EXCLUDED(netdev_mutex)
537
0
{
538
0
    int error;
539
540
0
    smap_clear(args);
541
0
    if (netdev->netdev_class->get_config) {
542
0
        error = netdev->netdev_class->get_config(netdev, args);
543
0
        if (error) {
544
0
            smap_clear(args);
545
0
        }
546
0
    } else {
547
0
        error = 0;
548
0
    }
549
550
0
    return error;
551
0
}
552
553
const struct netdev_tunnel_config *
554
netdev_get_tunnel_config(const struct netdev *netdev)
555
    OVS_EXCLUDED(netdev_mutex)
556
0
{
557
0
    if (netdev->netdev_class->get_tunnel_config) {
558
0
        return netdev->netdev_class->get_tunnel_config(netdev);
559
0
    } else {
560
0
        return NULL;
561
0
    }
562
0
}
563
564
/* Returns the id of the numa node the 'netdev' is on.  If the function
565
 * is not implemented, returns NETDEV_NUMA_UNSPEC. */
566
int
567
netdev_get_numa_id(const struct netdev *netdev)
568
0
{
569
0
    if (netdev->netdev_class->get_numa_id) {
570
0
        return netdev->netdev_class->get_numa_id(netdev);
571
0
    } else {
572
0
        return NETDEV_NUMA_UNSPEC;
573
0
    }
574
0
}
575
576
static void
577
netdev_unref(struct netdev *dev)
578
    OVS_RELEASES(netdev_mutex)
579
0
{
580
0
    ovs_assert(dev->ref_cnt);
581
0
    if (!--dev->ref_cnt) {
582
0
        const struct netdev_class *class = dev->netdev_class;
583
0
        struct netdev_registered_class *rc;
584
585
0
        dev->netdev_class->destruct(dev);
586
587
0
        if (dev->node) {
588
0
            shash_delete(&netdev_shash, dev->node);
589
0
        }
590
0
        free(dev->name);
591
0
        seq_destroy(dev->reconfigure_seq);
592
0
        dev->netdev_class->dealloc(dev);
593
0
        ovs_mutex_unlock(&netdev_mutex);
594
595
0
        rc = netdev_lookup_class(class->type);
596
0
        ovs_refcount_unref(&rc->refcnt);
597
0
    } else {
598
0
        ovs_mutex_unlock(&netdev_mutex);
599
0
    }
600
0
}
601
602
/* Closes and destroys 'netdev'. */
603
void
604
netdev_close(struct netdev *netdev)
605
    OVS_EXCLUDED(netdev_mutex)
606
0
{
607
0
    if (netdev) {
608
0
        ovs_mutex_lock(&netdev_mutex);
609
0
        netdev_unref(netdev);
610
0
    }
611
0
}
612
613
/* Removes 'netdev' from the global shash and unrefs 'netdev'.
614
 *
615
 * This allows handler and revalidator threads to still retain references
616
 * to this netdev while the main thread changes interface configuration.
617
 *
618
 * This function should only be called by the main thread when closing
619
 * netdevs during user configuration changes. Otherwise, netdev_close should be
620
 * used to close netdevs. */
621
void
622
netdev_remove(struct netdev *netdev)
623
0
{
624
0
    if (netdev) {
625
0
        ovs_mutex_lock(&netdev_mutex);
626
0
        if (netdev->node) {
627
0
            shash_delete(&netdev_shash, netdev->node);
628
0
            netdev->node = NULL;
629
0
            netdev_change_seq_changed(netdev);
630
0
        }
631
0
        netdev_unref(netdev);
632
0
    }
633
0
}
634
635
/* Parses 'netdev_name_', which is of the form [type@]name into its component
636
 * pieces.  'name' and 'type' must be freed by the caller. */
637
void
638
netdev_parse_name(const char *netdev_name_, char **name, char **type)
639
0
{
640
0
    char *netdev_name = xstrdup(netdev_name_);
641
0
    char *separator;
642
643
0
    separator = strchr(netdev_name, '@');
644
0
    if (separator) {
645
0
        *separator = '\0';
646
0
        *type = netdev_name;
647
0
        *name = xstrdup(separator + 1);
648
0
    } else {
649
0
        *name = netdev_name;
650
0
        *type = xstrdup("system");
651
0
    }
652
0
}
653
654
/* Attempts to open a netdev_rxq handle for obtaining packets received on
655
 * 'netdev'.  On success, returns 0 and stores a nonnull 'netdev_rxq *' into
656
 * '*rxp'.  On failure, returns a positive errno value and stores NULL into
657
 * '*rxp'.
658
 *
659
 * Some kinds of network devices might not support receiving packets.  This
660
 * function returns EOPNOTSUPP in that case.*/
661
int
662
netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id)
663
    OVS_EXCLUDED(netdev_mutex)
664
0
{
665
0
    int error;
666
667
0
    if (netdev->netdev_class->rxq_alloc && id < netdev->n_rxq) {
668
0
        struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc();
669
0
        if (rx) {
670
0
            rx->netdev = netdev;
671
0
            rx->queue_id = id;
672
0
            error = netdev->netdev_class->rxq_construct(rx);
673
0
            if (!error) {
674
0
                netdev_ref(netdev);
675
0
                *rxp = rx;
676
0
                return 0;
677
0
            }
678
0
            netdev->netdev_class->rxq_dealloc(rx);
679
0
        } else {
680
0
            error = ENOMEM;
681
0
        }
682
0
    } else {
683
0
        error = EOPNOTSUPP;
684
0
    }
685
686
0
    *rxp = NULL;
687
0
    return error;
688
0
}
689
690
/* Closes 'rx'. */
691
void
692
netdev_rxq_close(struct netdev_rxq *rx)
693
    OVS_EXCLUDED(netdev_mutex)
694
0
{
695
0
    if (rx) {
696
0
        struct netdev *netdev = rx->netdev;
697
0
        netdev->netdev_class->rxq_destruct(rx);
698
0
        netdev->netdev_class->rxq_dealloc(rx);
699
0
        netdev_close(netdev);
700
0
    }
701
0
}
702
703
bool netdev_rxq_enabled(struct netdev_rxq *rx)
704
0
{
705
0
    bool enabled = true;
706
707
0
    if (rx->netdev->netdev_class->rxq_enabled) {
708
0
        enabled = rx->netdev->netdev_class->rxq_enabled(rx);
709
0
    }
710
0
    return enabled;
711
0
}
712
713
/* Attempts to receive a batch of packets from 'rx'.  'batch' should point to
714
 * the beginning of an array of NETDEV_MAX_BURST pointers to dp_packet.  If
715
 * successful, this function stores pointers to up to NETDEV_MAX_BURST
716
 * dp_packets into the array, transferring ownership of the packets to the
717
 * caller, stores the number of received packets in 'batch->count', and returns
718
 * 0.
719
 *
720
 * The implementation does not necessarily initialize any non-data members of
721
 * 'batch'.  That is, the caller must initialize layer pointers and metadata
722
 * itself, if desired, e.g. with pkt_metadata_init() and miniflow_extract().
723
 *
724
 * Returns EAGAIN immediately if no packet is ready to be received or another
725
 * positive errno value if an error was encountered. */
726
int
727
netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *batch,
728
                int *qfill)
729
0
{
730
0
    int retval;
731
732
0
    retval = rx->netdev->netdev_class->rxq_recv(rx, batch, qfill);
733
0
    if (!retval) {
734
0
        COVERAGE_INC(netdev_received);
735
0
    } else {
736
0
        batch->count = 0;
737
0
    }
738
0
    return retval;
739
0
}
740
741
/* Arranges for poll_block() to wake up when a packet is ready to be received
742
 * on 'rx'. */
743
void
744
netdev_rxq_wait(struct netdev_rxq *rx)
745
0
{
746
0
    rx->netdev->netdev_class->rxq_wait(rx);
747
0
}
748
749
/* Discards any packets ready to be received on 'rx'. */
750
int
751
netdev_rxq_drain(struct netdev_rxq *rx)
752
0
{
753
0
    return (rx->netdev->netdev_class->rxq_drain
754
0
            ? rx->netdev->netdev_class->rxq_drain(rx)
755
0
            : 0);
756
0
}
757
758
/* Configures the number of tx queues of 'netdev'. Returns 0 if successful,
759
 * otherwise a positive errno value.
760
 *
761
 * 'n_txq' specifies the exact number of transmission queues to create.
762
 *
763
 * The change might not effective immediately.  The caller must check if a
764
 * reconfiguration is required with netdev_is_reconf_required() and eventually
765
 * call netdev_reconfigure() before using the new queues.
766
 *
767
 * On error, the tx queue configuration is unchanged */
768
int
769
netdev_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
770
0
{
771
0
    int error;
772
773
0
    error = (netdev->netdev_class->set_tx_multiq
774
0
             ? netdev->netdev_class->set_tx_multiq(netdev, MAX(n_txq, 1))
775
0
             : EOPNOTSUPP);
776
777
0
    if (error && error != EOPNOTSUPP) {
778
0
        VLOG_DBG_RL(&rl, "failed to set tx queue for network device %s:"
779
0
                    "%s", netdev_get_name(netdev), ovs_strerror(error));
780
0
    }
781
782
0
    return error;
783
0
}
784
785
enum netdev_pt_mode
786
netdev_get_pt_mode(const struct netdev *netdev)
787
0
{
788
0
    return (netdev->netdev_class->get_pt_mode
789
0
            ? netdev->netdev_class->get_pt_mode(netdev)
790
0
            : NETDEV_PT_LEGACY_L2);
791
0
}
792
793
/* Attempts to segment GSO flagged packets and send them as multiple bundles.
 * This function is only used if at least one packet in the current batch is
 * flagged for TSO and the netdev does not support this.
 *
 * If 'partial_seg' is true, packets are only split as far as needed for the
 * device's partial-offload limits (dp_packet_gso_partial*); otherwise they
 * are fully software-segmented (dp_packet_gso*).
 *
 * The return value is 0 if all batches sent successfully, and an error code
 * from netdev_class->send() if at least one batch failed to send. */
static int
netdev_send_tso(struct netdev *netdev, int qid,
                struct dp_packet_batch *batch, bool concurrent_txq,
                bool partial_seg)
{
    struct dp_packet_batch *batches;
    struct dp_packet *packet;
    int retval = 0;
    int n_packets;
    int n_batches;
    int error;

    /* Calculate the total number of packets in the batch after
     * the (partial?) segmentation. */
    n_packets = 0;
    DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
        if (dp_packet_get_tso_segsz(packet)) {
            if (partial_seg) {
                n_packets += dp_packet_gso_partial_nr_segs(packet);
            } else {
                n_packets += dp_packet_gso_nr_segs(packet);
            }
        } else {
            /* Non-TSO packets pass through unchanged. */
            n_packets++;
        }
    }

    /* Empty batch: nothing to segment or send. */
    if (!n_packets) {
        return 0;
    }

    /* Allocate enough batches to store all the packets in order. */
    n_batches = DIV_ROUND_UP(n_packets, NETDEV_MAX_BURST);
    batches = xmalloc(n_batches * sizeof *batches);

    struct dp_packet_batch *curr_batch = batches;
    struct dp_packet_batch *last_batch = &batches[n_batches - 1];
    for (curr_batch = batches; curr_batch <= last_batch; curr_batch++) {
        dp_packet_batch_init(curr_batch);
    }

    /* Do the packet segmentation if TSO is flagged. */
    size_t size = dp_packet_batch_size(batch);
    size_t k;
    curr_batch = batches;
    DP_PACKET_BATCH_REFILL_FOR_EACH (k, size, packet, batch) {
        if (dp_packet_get_tso_segsz(packet)) {
            /* 'curr_batch' is passed by reference: the GSO helpers fill it
             * with the produced segments and presumably advance it across
             * batch boundaries as needed. */
            if (partial_seg) {
                dp_packet_gso_partial(packet, &curr_batch);
                COVERAGE_INC(netdev_partial_seg_good);
            } else {
                dp_packet_gso(packet, &curr_batch);
                COVERAGE_INC(netdev_soft_seg_good);
            }
        } else {
            if (dp_packet_batch_is_full(curr_batch)) {
                curr_batch++;
            }
            dp_packet_batch_add(curr_batch, packet);
        }
    }

    /* Send every batch.  On failure keep going so that ownership of all
     * packets is still handed to the provider; remember the last error. */
    for (curr_batch = batches; curr_batch <= last_batch; curr_batch++) {
        DP_PACKET_BATCH_FOR_EACH (i, packet, curr_batch) {
            /* Resolve any offloads the device cannot do in hardware. */
            dp_packet_ol_send_prepare(packet, netdev->ol_flags);
        }

        error = netdev->netdev_class->send(netdev, qid, curr_batch,
                                           concurrent_txq);
        if (!error) {
            COVERAGE_INC(netdev_sent);
        } else {
            retval = error;
        }
    }
    free(batches);
    return retval;
}
877
878
/* Sends 'batch' on 'netdev'.  Returns 0 if successful (for every packet),
 * otherwise a positive errno value.  Returns EAGAIN without blocking if
 * at least one the packets cannot be queued immediately.  Returns EMSGSIZE
 * if a partial packet was transmitted or if a packet is too big or too small
 * to transmit on the device.
 *
 * The caller must make sure that 'netdev' supports sending by making sure that
 * 'netdev_n_txq(netdev)' returns >= 1.
 *
 * If the function returns a non-zero value, some of the packets might have
 * been sent anyway.
 *
 * The caller transfers ownership of all the packets to the network device,
 * regardless of success.
 *
 * If 'concurrent_txq' is true, the caller may perform concurrent calls
 * to netdev_send() with the same 'qid'. The netdev provider is responsible
 * for making sure that these concurrent calls do not create a race condition
 * by using locking or other synchronization if required.
 *
 * The network device is expected to maintain one or more packet
 * transmission queues, so that the caller does not ordinarily have to
 * do additional queuing of packets.  'qid' specifies the queue to use
 * and can be ignored if the implementation does not support multiple
 * queues. */
int
netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
            bool concurrent_txq)
{
    const uint64_t netdev_flags = netdev->ol_flags;
    struct dp_packet *packet;
    int error;

    if (userspace_tso_enabled()) {
        /* Three fallback cases, checked in order of decreasing device
         * capability; the first TSO-flagged packet that the device cannot
         * handle reroutes the whole batch through netdev_send_tso(). */
        if (!(netdev_flags & NETDEV_TX_OFFLOAD_TCP_TSO)) {
            /* Device has no TSO at all: fully segment in software. */
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                if (dp_packet_get_tso_segsz(packet)) {
                    return netdev_send_tso(netdev, qid, batch, concurrent_txq,
                                           false);
                }
            }
        } else if (!(netdev_flags & (NETDEV_TX_VXLAN_TNL_TSO |
                                     NETDEV_TX_GRE_TNL_TSO |
                                     NETDEV_TX_GENEVE_TNL_TSO))) {
            /* Device has plain TSO but no tunnel TSO: software-segment
             * only the encapsulated TSO packets. */
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                if (dp_packet_get_tso_segsz(packet)
                    && dp_packet_tunnel(packet)) {
                    return netdev_send_tso(netdev, qid, batch, concurrent_txq,
                                           false);
                }
            }
        } else if (!(netdev_flags & NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
            /* Device has tunnel TSO but cannot compute the outer UDP
             * checksum: do a partial software segmentation for packets
             * that would need more than one segment. */
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                if (dp_packet_get_tso_segsz(packet)
                    && dp_packet_gso_partial_nr_segs(packet) != 1) {
                    return netdev_send_tso(netdev, qid, batch, concurrent_txq,
                                           true);
                }
            }
        }
    }

    /* Resolve in software any offloads the device does not support. */
    DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
        dp_packet_ol_send_prepare(packet, netdev_flags);
    }

    error = netdev->netdev_class->send(netdev, qid, batch, concurrent_txq);
    if (!error) {
        COVERAGE_INC(netdev_sent);
    }
    return error;
}
950
951
/* Pop tunnel header, build tunnel metadata and resize 'batch->packets'
952
 * for further processing.
953
 *
954
 * The caller must make sure that 'netdev' support this operation by checking
955
 * that netdev_has_tunnel_push_pop() returns true. */
956
void
957
netdev_pop_header(struct netdev *netdev, struct dp_packet_batch *batch)
958
0
{
959
0
    struct dp_packet *packet;
960
0
    size_t i, size = dp_packet_batch_size(batch);
961
962
0
    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
963
0
        packet = netdev->netdev_class->pop_header(packet);
964
0
        if (packet) {
965
            /* Reset the offload flags if present, to avoid wrong
966
             * interpretation in the further packet processing when
967
             * recirculated.*/
968
0
            dp_packet_reset_offload(packet);
969
0
            pkt_metadata_init_conn(&packet->md);
970
0
            dp_packet_batch_refill(batch, packet, i);
971
0
        }
972
0
    }
973
0
}
974
975
void
976
netdev_init_tnl_build_header_params(struct netdev_tnl_build_header_params *params,
977
                                    const struct flow *tnl_flow,
978
                                    const struct in6_addr *src,
979
                                    struct eth_addr dmac,
980
                                    struct eth_addr smac)
981
0
{
982
0
    params->flow = tnl_flow;
983
0
    params->dmac = dmac;
984
0
    params->smac = smac;
985
0
    params->s_ip = src;
986
0
    params->is_ipv6 = !IN6_IS_ADDR_V4MAPPED(src);
987
0
}
988
989
int netdev_build_header(const struct netdev *netdev,
990
                        struct ovs_action_push_tnl *data,
991
                        const struct netdev_tnl_build_header_params *params)
992
0
{
993
0
    if (netdev->netdev_class->build_header) {
994
0
        return netdev->netdev_class->build_header(netdev, data, params);
995
0
    }
996
0
    return EOPNOTSUPP;
997
0
}
998
999
/* Push tunnel header (reading from tunnel metadata) and resize
 * 'batch->packets' for further processing.
 *
 * Packets that cannot legally be encapsulated (TSO into an unsupported
 * tunnel type, or TSO with nested encapsulation) are dropped and counted
 * in the netdev_push_header_drops coverage counter.
 *
 * The caller must make sure that 'netdev' support this operation by checking
 * that netdev_has_tunnel_push_pop() returns true. */
int
netdev_push_header(const struct netdev *netdev,
                   struct dp_packet_batch *batch,
                   const struct ovs_action_push_tnl *data)
{
    struct dp_packet *packet;
    size_t i, size = dp_packet_batch_size(batch);

    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
        /* TSO is only handled for GENEVE, VXLAN, GRE and IP6GRE tunnels;
         * a TSO-flagged packet headed into any other tunnel type must be
         * dropped (it is not refilled into the batch). */
        if (OVS_UNLIKELY(data->tnl_type != OVS_VPORT_TYPE_GENEVE &&
                         data->tnl_type != OVS_VPORT_TYPE_VXLAN &&
                         data->tnl_type != OVS_VPORT_TYPE_GRE &&
                         data->tnl_type != OVS_VPORT_TYPE_IP6GRE &&
                         dp_packet_get_tso_segsz(packet))) {
            COVERAGE_INC(netdev_push_header_drops);
            dp_packet_delete(packet);
            VLOG_WARN_RL(&rl, "%s: Tunneling packets with TSO is not "
                              "supported for %s tunnels: packet dropped",
                         netdev_get_name(netdev), netdev_get_type(netdev));
        } else {
            if (data->tnl_type != OVS_VPORT_TYPE_GENEVE &&
                data->tnl_type != OVS_VPORT_TYPE_VXLAN &&
                data->tnl_type != OVS_VPORT_TYPE_GRE &&
                data->tnl_type != OVS_VPORT_TYPE_IP6GRE) {
                /* Other tunnel types: resolve all pending offloads in
                 * software before adding the outer header. */
                dp_packet_ol_send_prepare(packet, 0);
            } else if (dp_packet_tunnel(packet)) {
                /* Packet is already encapsulated once; a second level of
                 * encapsulation combined with TSO is unsupported. */
                if (dp_packet_get_tso_segsz(packet)) {
                    COVERAGE_INC(netdev_push_header_drops);
                    dp_packet_delete(packet);
                    VLOG_WARN_RL(&rl, "%s: Tunneling packets with TSO is not "
                                      "supported with multiple levels of "
                                      "VXLAN, GENEVE, or GRE encapsulation.",
                                 netdev_get_name(netdev));
                    continue;
                }
                dp_packet_ol_send_prepare(packet, 0);
            }
            netdev->netdev_class->push_header(netdev, packet, data);

            /* Fresh metadata for the now-encapsulated packet. */
            pkt_metadata_init(&packet->md, data->out_port);
            dp_packet_batch_refill(batch, packet, i);
        }
    }

    return 0;
}
1050
1051
/* Registers with the poll loop to wake up from the next call to poll_block()
1052
 * when the packet transmission queue has sufficient room to transmit a packet
1053
 * with netdev_send().
1054
 *
1055
 * The network device is expected to maintain one or more packet
1056
 * transmission queues, so that the caller does not ordinarily have to
1057
 * do additional queuing of packets.  'qid' specifies the queue to use
1058
 * and can be ignored if the implementation does not support multiple
1059
 * queues. */
1060
void
1061
netdev_send_wait(struct netdev *netdev, int qid)
1062
0
{
1063
0
    if (netdev->netdev_class->send_wait) {
1064
0
        netdev->netdev_class->send_wait(netdev, qid);
1065
0
    }
1066
0
}
1067
1068
/* Attempts to set 'netdev''s MAC address to 'mac'.  Returns 0 if successful,
1069
 * otherwise a positive errno value. */
1070
int
1071
netdev_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1072
0
{
1073
0
    return netdev->netdev_class->set_etheraddr(netdev, mac);
1074
0
}
1075
1076
/* Retrieves 'netdev''s MAC address.  If successful, returns 0 and copies the
1077
 * the MAC address into 'mac'.  On failure, returns a positive errno value and
1078
 * clears 'mac' to all-zeros. */
1079
int
1080
netdev_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1081
0
{
1082
0
    int error;
1083
1084
0
    error = netdev->netdev_class->get_etheraddr(netdev, mac);
1085
0
    if (error) {
1086
0
        memset(mac, 0, sizeof *mac);
1087
0
    }
1088
0
    return error;
1089
0
}
1090
1091
/* Returns the name of the network device that 'netdev' represents,
1092
 * e.g. "eth0".  The caller must not modify or free the returned string. */
1093
const char *
1094
netdev_get_name(const struct netdev *netdev)
1095
0
{
1096
0
    return netdev->name;
1097
0
}
1098
1099
/* Retrieves the MTU of 'netdev'.  The MTU is the maximum size of transmitted
1100
 * (and received) packets, in bytes, not including the hardware header; thus,
1101
 * this is typically 1500 bytes for Ethernet devices.
1102
 *
1103
 * If successful, returns 0 and stores the MTU size in '*mtup'.  Returns
1104
 * EOPNOTSUPP if 'netdev' does not have an MTU (as e.g. some tunnels do not).
1105
 * On other failure, returns a positive errno value.  On failure, sets '*mtup'
1106
 * to 0. */
1107
int
1108
netdev_get_mtu(const struct netdev *netdev, int *mtup)
1109
0
{
1110
0
    const struct netdev_class *class = netdev->netdev_class;
1111
0
    int error;
1112
1113
0
    error = class->get_mtu ? class->get_mtu(netdev, mtup) : EOPNOTSUPP;
1114
0
    if (error) {
1115
0
        *mtup = 0;
1116
0
        if (error != EOPNOTSUPP) {
1117
0
            VLOG_DBG_RL(&rl, "failed to retrieve MTU for network device %s: "
1118
0
                         "%s", netdev_get_name(netdev), ovs_strerror(error));
1119
0
        }
1120
0
    }
1121
0
    return error;
1122
0
}
1123
1124
/* Sets the MTU of 'netdev'.  The MTU is the maximum size of transmitted
1125
 * (and received) packets, in bytes.
1126
 *
1127
 * If successful, returns 0.  Returns EOPNOTSUPP if 'netdev' does not have an
1128
 * MTU (as e.g. some tunnels do not).  On other failure, returns a positive
1129
 * errno value. */
1130
int
1131
netdev_set_mtu(struct netdev *netdev, int mtu)
1132
0
{
1133
0
    const struct netdev_class *class = netdev->netdev_class;
1134
0
    int error;
1135
1136
0
    error = class->set_mtu ? class->set_mtu(netdev, mtu) : EOPNOTSUPP;
1137
0
    if (error && error != EOPNOTSUPP) {
1138
0
        VLOG_WARN_RL(&rl, "failed to set MTU for network device %s: %s",
1139
0
                     netdev_get_name(netdev), ovs_strerror(error));
1140
0
    }
1141
1142
0
    return error;
1143
0
}
1144
1145
/* If 'user_config' is true, the user wants to control 'netdev''s MTU and we
1146
 * should not override it.  If 'user_config' is false, we may adjust
1147
 * 'netdev''s MTU (e.g., if 'netdev' is internal). */
1148
void
1149
netdev_mtu_user_config(struct netdev *netdev, bool user_config)
1150
0
{
1151
0
    if (netdev->mtu_user_config != user_config) {
1152
0
        netdev_change_seq_changed(netdev);
1153
0
        netdev->mtu_user_config = user_config;
1154
0
    }
1155
0
}
1156
1157
/* Returns 'true' if the user explicitly specified an MTU value for 'netdev'.
1158
 * Otherwise, returns 'false', in which case we are allowed to adjust the
1159
 * device MTU. */
1160
bool
1161
netdev_mtu_is_user_config(struct netdev *netdev)
1162
0
{
1163
0
    return netdev->mtu_user_config;
1164
0
}
1165
1166
/* Returns the ifindex of 'netdev', if successful, as a positive number.  On
1167
 * failure, returns a negative errno value.
1168
 *
1169
 * The desired semantics of the ifindex value are a combination of those
1170
 * specified by POSIX for if_nametoindex() and by SNMP for ifIndex.  An ifindex
1171
 * value should be unique within a host and remain stable at least until
1172
 * reboot.  SNMP says an ifindex "ranges between 1 and the value of ifNumber"
1173
 * but many systems do not follow this rule anyhow.
1174
 *
1175
 * Some network devices may not implement support for this function.  In such
1176
 * cases this function will always return -EOPNOTSUPP.
1177
 */
1178
int
1179
netdev_get_ifindex(const struct netdev *netdev)
1180
0
{
1181
0
    int (*get_ifindex)(const struct netdev *);
1182
1183
0
    get_ifindex = netdev->netdev_class->get_ifindex;
1184
1185
0
    return get_ifindex ? get_ifindex(netdev) : -EOPNOTSUPP;
1186
0
}
1187
1188
/* Stores the features supported by 'netdev' into each of '*current',
1189
 * '*advertised', '*supported', and '*peer' that are non-null.  Each value is a
1190
 * bitmap of "enum ofp_port_features" bits, in host byte order.  Returns 0 if
1191
 * successful, otherwise a positive errno value.  On failure, all of the
1192
 * passed-in values are set to 0.
1193
 *
1194
 * Some network devices may not implement support for this function.  In such
1195
 * cases this function will always return EOPNOTSUPP. */
1196
int
1197
netdev_get_features(const struct netdev *netdev,
1198
                    enum netdev_features *current,
1199
                    enum netdev_features *advertised,
1200
                    enum netdev_features *supported,
1201
                    enum netdev_features *peer)
1202
0
{
1203
0
    int (*get_features)(const struct netdev *netdev,
1204
0
                        enum netdev_features *current,
1205
0
                        enum netdev_features *advertised,
1206
0
                        enum netdev_features *supported,
1207
0
                        enum netdev_features *peer);
1208
0
    enum netdev_features dummy[4];
1209
0
    int error;
1210
1211
0
    if (!current) {
1212
0
        current = &dummy[0];
1213
0
    }
1214
0
    if (!advertised) {
1215
0
        advertised = &dummy[1];
1216
0
    }
1217
0
    if (!supported) {
1218
0
        supported = &dummy[2];
1219
0
    }
1220
0
    if (!peer) {
1221
0
        peer = &dummy[3];
1222
0
    }
1223
1224
0
    get_features = netdev->netdev_class->get_features;
1225
0
    error = get_features
1226
0
                    ? get_features(netdev, current, advertised, supported,
1227
0
                                   peer)
1228
0
                    : EOPNOTSUPP;
1229
0
    if (error) {
1230
0
        *current = *advertised = *supported = *peer = 0;
1231
0
    }
1232
0
    return error;
1233
0
}
1234
1235
int
1236
netdev_get_speed(const struct netdev *netdev, uint32_t *current, uint32_t *max)
1237
0
{
1238
0
    uint32_t current_dummy, max_dummy;
1239
0
    int error;
1240
1241
0
    if (!current) {
1242
0
        current = &current_dummy;
1243
0
    }
1244
0
    if (!max) {
1245
0
        max = &max_dummy;
1246
0
    }
1247
1248
0
    error = netdev->netdev_class->get_speed
1249
0
            ? netdev->netdev_class->get_speed(netdev, current, max)
1250
0
            : EOPNOTSUPP;
1251
1252
0
    if (error == EOPNOTSUPP) {
1253
0
        enum netdev_features current_f, supported_f;
1254
1255
0
        error = netdev_get_features(netdev, &current_f, NULL,
1256
0
                                    &supported_f, NULL);
1257
0
        *current = netdev_features_to_bps(current_f, 0) / 1000000;
1258
0
        *max = netdev_features_to_bps(supported_f, 0) / 1000000;
1259
0
    } else if (error) {
1260
0
        *current = *max = 0;
1261
0
    }
1262
0
    return error;
1263
0
}
1264
1265
/* Returns the maximum speed of a network connection that has the NETDEV_F_*
1266
 * bits in 'features', in bits per second.  If no bits that indicate a speed
1267
 * are set in 'features', returns 'default_bps'. */
1268
uint64_t
1269
netdev_features_to_bps(enum netdev_features features,
1270
                       uint64_t default_bps)
1271
32.8k
{
1272
32.8k
    enum {
1273
32.8k
        F_1000000MB = NETDEV_F_1TB_FD,
1274
32.8k
        F_100000MB = NETDEV_F_100GB_FD,
1275
32.8k
        F_40000MB = NETDEV_F_40GB_FD,
1276
32.8k
        F_10000MB = NETDEV_F_10GB_FD,
1277
32.8k
        F_1000MB = NETDEV_F_1GB_HD | NETDEV_F_1GB_FD,
1278
32.8k
        F_100MB = NETDEV_F_100MB_HD | NETDEV_F_100MB_FD,
1279
32.8k
        F_10MB = NETDEV_F_10MB_HD | NETDEV_F_10MB_FD
1280
32.8k
    };
1281
1282
32.8k
    return (  features & F_1000000MB ? UINT64_C(1000000000000)
1283
32.8k
            : features & F_100000MB  ? UINT64_C(100000000000)
1284
32.8k
            : features & F_40000MB   ? UINT64_C(40000000000)
1285
32.8k
            : features & F_10000MB   ? UINT64_C(10000000000)
1286
32.8k
            : features & F_1000MB    ? UINT64_C(1000000000)
1287
17.0k
            : features & F_100MB     ? UINT64_C(100000000)
1288
12.4k
            : features & F_10MB      ? UINT64_C(10000000)
1289
7.29k
                                     : default_bps);
1290
32.8k
}
1291
1292
/* Stores the duplex capability of 'netdev' into 'full_duplex'.
1293
 *
1294
 * Some network devices may not implement support for this function.
1295
 * In such cases this function will always return EOPNOTSUPP. */
1296
int
1297
netdev_get_duplex(const struct netdev *netdev, bool *full_duplex)
1298
0
{
1299
0
    int error;
1300
1301
0
    *full_duplex = false;
1302
0
    error = netdev->netdev_class->get_duplex
1303
0
            ? netdev->netdev_class->get_duplex(netdev, full_duplex)
1304
0
            : EOPNOTSUPP;
1305
1306
0
    if (error == EOPNOTSUPP) {
1307
0
        enum netdev_features current;
1308
1309
0
        error = netdev_get_features(netdev, &current, NULL, NULL, NULL);
1310
0
        if (!error && (current & NETDEV_F_OTHER)) {
1311
0
             error = EOPNOTSUPP;
1312
0
        }
1313
0
        if (!error) {
1314
0
            *full_duplex = (current & (NETDEV_F_10MB_FD | NETDEV_F_100MB_FD
1315
0
                                        | NETDEV_F_1GB_FD | NETDEV_F_10GB_FD
1316
0
                                        | NETDEV_F_40GB_FD | NETDEV_F_100GB_FD
1317
0
                                        | NETDEV_F_1TB_FD)) != 0;
1318
0
        }
1319
0
    }
1320
0
    return error;
1321
0
}
1322
1323
/* Set the features advertised by 'netdev' to 'advertise'.  Returns 0 if
1324
 * successful, otherwise a positive errno value. */
1325
int
1326
netdev_set_advertisements(struct netdev *netdev,
1327
                          enum netdev_features advertise)
1328
0
{
1329
0
    return (netdev->netdev_class->set_advertisements
1330
0
            ? netdev->netdev_class->set_advertisements(
1331
0
                    netdev, advertise)
1332
0
            : EOPNOTSUPP);
1333
0
}
1334
1335
static const char *
1336
netdev_feature_to_name(uint32_t bit)
1337
492k
{
1338
492k
    enum netdev_features f = bit;
1339
1340
492k
    switch (f) {
1341
38.5k
    case NETDEV_F_10MB_HD:    return "10MB-HD";
1342
40.2k
    case NETDEV_F_10MB_FD:    return "10MB-FD";
1343
41.7k
    case NETDEV_F_100MB_HD:   return "100MB-HD";
1344
47.0k
    case NETDEV_F_100MB_FD:   return "100MB-FD";
1345
39.0k
    case NETDEV_F_1GB_HD:     return "1GB-HD";
1346
39.8k
    case NETDEV_F_1GB_FD:     return "1GB-FD";
1347
36.9k
    case NETDEV_F_10GB_FD:    return "10GB-FD";
1348
3.38k
    case NETDEV_F_40GB_FD:    return "40GB-FD";
1349
3.72k
    case NETDEV_F_100GB_FD:   return "100GB-FD";
1350
3.68k
    case NETDEV_F_1TB_FD:     return "1TB-FD";
1351
3.65k
    case NETDEV_F_OTHER:      return "OTHER";
1352
34.2k
    case NETDEV_F_COPPER:     return "COPPER";
1353
39.2k
    case NETDEV_F_FIBER:      return "FIBER";
1354
39.5k
    case NETDEV_F_AUTONEG:    return "AUTO_NEG";
1355
40.2k
    case NETDEV_F_PAUSE:      return "AUTO_PAUSE";
1356
40.9k
    case NETDEV_F_PAUSE_ASYM: return "AUTO_PAUSE_ASYM";
1357
492k
    }
1358
1359
0
    return NULL;
1360
492k
}
1361
1362
void
1363
netdev_features_format(struct ds *s, enum netdev_features features)
1364
63.3k
{
1365
63.3k
    ofp_print_bit_names(s, features, netdev_feature_to_name, ' ');
1366
63.3k
    ds_put_char(s, '\n');
1367
63.3k
}
1368
1369
/* Assigns 'addr' as 'netdev''s IPv4 address and 'mask' as its netmask.  If
1370
 * 'addr' is INADDR_ANY, 'netdev''s IPv4 address is cleared.  Returns a
1371
 * positive errno value. */
1372
int
1373
netdev_set_in4(struct netdev *netdev, struct in_addr addr, struct in_addr mask)
1374
0
{
1375
0
    return (netdev->netdev_class->set_in4
1376
0
            ? netdev->netdev_class->set_in4(netdev, addr, mask)
1377
0
            : EOPNOTSUPP);
1378
0
}
1379
1380
/* Opens the device named 'device_name' and retrieves its list of addresses
 * into '*addrsp' (caller-owned, freed with free()) with the count in
 * '*n_addrsp'.  On failure returns a positive errno value and sets '*addrsp'
 * to NULL and '*n_addrsp' to 0. */
static int
netdev_get_addresses_by_name(const char *device_name,
                             struct in6_addr **addrsp, int *n_addrsp)
{
    struct in6_addr *masks;
    struct netdev *netdev;
    int error;

    error = netdev_open(device_name, NULL, &netdev);
    if (error) {
        *addrsp = NULL;
        *n_addrsp = 0;
        return error;
    }

    /* Only the addresses are wanted; the netmasks are discarded. */
    error = netdev_get_addr_list(netdev, addrsp, &masks, n_addrsp);
    netdev_close(netdev);
    free(masks);
    return error;
}
1398
1399
/* Obtains an IPv4 address from 'device_name' and save the address in '*in4'.
 * Returns 0 if successful, otherwise a positive errno value (ENOENT when the
 * device has no IPv4 address). */
int
netdev_get_in4_by_name(const char *device_name, struct in_addr *in4)
{
    struct in6_addr *addrs;
    int n_addrs;
    int error;

    error = netdev_get_addresses_by_name(device_name, &addrs, &n_addrs);
    in4->s_addr = 0;
    if (!error) {
        /* Use the first v4-mapped address, if any. */
        error = ENOENT;
        for (int i = 0; i < n_addrs; i++) {
            if (IN6_IS_ADDR_V4MAPPED(&addrs[i])) {
                in4->s_addr = in6_addr_get_mapped_ipv4(&addrs[i]);
                error = 0;
                break;
            }
        }
    }
    free(addrs);

    return error;
}
1423
1424
/* Obtains an IPv4 or IPv6 address from 'device_name' and save the address in
 * '*in6', representing IPv4 addresses as v6-mapped.  Returns 0 if successful,
 * otherwise a positive errno value (ENOENT when only link-local addresses
 * exist). */
int
netdev_get_ip_by_name(const char *device_name, struct in6_addr *in6)
{
    struct in6_addr *addrs;
    int n_addrs;
    int error;

    error = netdev_get_addresses_by_name(device_name, &addrs, &n_addrs);
    *in6 = in6addr_any;
    if (!error) {
        /* Use the first non-link-local address, if any. */
        error = ENOENT;
        for (int i = 0; i < n_addrs; i++) {
            if (!in6_is_lla(&addrs[i])) {
                *in6 = addrs[i];
                error = 0;
                break;
            }
        }
    }
    free(addrs);

    return error;
}
1449
1450
/* Adds 'router' as a default IP gateway for the TCP/IP stack that corresponds
1451
 * to 'netdev'. */
1452
int
1453
netdev_add_router(struct netdev *netdev, struct in_addr router)
1454
0
{
1455
0
    COVERAGE_INC(netdev_add_router);
1456
0
    return (netdev->netdev_class->add_router
1457
0
            ? netdev->netdev_class->add_router(netdev, router)
1458
0
            : EOPNOTSUPP);
1459
0
}
1460
1461
/* Looks up the next hop for 'host' for the TCP/IP stack that corresponds to
1462
 * 'netdev'.  If a route cannot not be determined, sets '*next_hop' to 0,
1463
 * '*netdev_name' to null, and returns a positive errno value.  Otherwise, if a
1464
 * next hop is found, stores the next hop gateway's address (0 if 'host' is on
1465
 * a directly connected network) in '*next_hop' and a copy of the name of the
1466
 * device to reach 'host' in '*netdev_name', and returns 0.  The caller is
1467
 * responsible for freeing '*netdev_name' (by calling free()). */
1468
int
1469
netdev_get_next_hop(const struct netdev *netdev,
1470
                    const struct in_addr *host, struct in_addr *next_hop,
1471
                    char **netdev_name)
1472
0
{
1473
0
    int error = (netdev->netdev_class->get_next_hop
1474
0
                 ? netdev->netdev_class->get_next_hop(
1475
0
                        host, next_hop, netdev_name)
1476
0
                 : EOPNOTSUPP);
1477
0
    if (error) {
1478
0
        next_hop->s_addr = 0;
1479
0
        *netdev_name = NULL;
1480
0
    }
1481
0
    return error;
1482
0
}
1483
1484
/* Populates 'smap' with status information.
 *
 * Populates 'smap' with 'netdev' specific status information.  This
 * information may be used to populate the status column of the Interface table
 * as defined in ovs-vswitchd.conf.db(5).
 *
 * Returns 0 if any status was added (either the offload flags below or the
 * provider's own status), otherwise EOPNOTSUPP. */
int
netdev_get_status(const struct netdev *netdev, struct smap *smap)
{
    int err = EOPNOTSUPP;

    /* Set offload status only if relevant: skipped for the "system"
     * datapath type (and for devices with no dpif type at all). */
    if (netdev_get_dpif_type(netdev) &&
        strcmp(netdev_get_dpif_type(netdev), "system")) {

/* Adds one "tx_<name>_offload" -> "true"/"false" entry reflecting whether
 * 'bit' is set in the device's offload flags. */
#define OL_ADD_STAT(name, bit) \
        smap_add(smap, "tx_" name "_offload", \
                 netdev->ol_flags & bit ? "true" : "false");

        OL_ADD_STAT("ip_csum", NETDEV_TX_OFFLOAD_IPV4_CKSUM);
        OL_ADD_STAT("tcp_csum", NETDEV_TX_OFFLOAD_TCP_CKSUM);
        OL_ADD_STAT("udp_csum", NETDEV_TX_OFFLOAD_UDP_CKSUM);
        OL_ADD_STAT("sctp_csum", NETDEV_TX_OFFLOAD_SCTP_CKSUM);
        OL_ADD_STAT("tcp_seg", NETDEV_TX_OFFLOAD_TCP_TSO);
        OL_ADD_STAT("vxlan_tso", NETDEV_TX_VXLAN_TNL_TSO);
        OL_ADD_STAT("gre_tso", NETDEV_TX_GRE_TNL_TSO);
        OL_ADD_STAT("geneve_tso", NETDEV_TX_GENEVE_TNL_TSO);
        OL_ADD_STAT("out_ip_csum", NETDEV_TX_OFFLOAD_OUTER_IP_CKSUM);
        OL_ADD_STAT("out_udp_csum", NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM);
#undef OL_ADD_STAT

        err = 0;
    }

    /* Providers without a get_status() callback report only the offload
     * flags (if any). */
    if (!netdev->netdev_class->get_status) {
        return err;
    }

    return netdev->netdev_class->get_status(netdev, smap);
}
1523
1524
/* Returns all assigned IP address to  'netdev' and returns 0.
1525
 * API allocates array of address and masks and set it to
1526
 * '*addr' and '*mask'.
1527
 * Otherwise, returns a positive errno value and sets '*addr', '*mask
1528
 * and '*n_addr' to NULL.
1529
 *
1530
 * The following error values have well-defined meanings:
1531
 *
1532
 *   - EADDRNOTAVAIL: 'netdev' has no assigned IPv6 address.
1533
 *
1534
 *   - EOPNOTSUPP: No IPv6 network stack attached to 'netdev'.
1535
 *
1536
 * 'addr' may be null, in which case the address itself is not reported. */
1537
int
1538
netdev_get_addr_list(const struct netdev *netdev, struct in6_addr **addr,
1539
                     struct in6_addr **mask, int *n_addr)
1540
0
{
1541
0
    int error;
1542
1543
0
    error = (netdev->netdev_class->get_addr_list
1544
0
             ? netdev->netdev_class->get_addr_list(netdev, addr, mask, n_addr): EOPNOTSUPP);
1545
0
    if (error && addr) {
1546
0
        *addr = NULL;
1547
0
        *mask = NULL;
1548
0
        *n_addr = 0;
1549
0
    }
1550
1551
0
    return error;
1552
0
}
1553
1554
/* On 'netdev', turns off the flags in 'off' and then turns on the flags in
 * 'on'.  Returns 0 if successful, otherwise a positive errno value.
 *
 * If 'old_flagsp' is nonnull, stores the device's previous flags there (0 on
 * failure).  If 'sfp' is nonnull and flags actually changed, stores a
 * newly allocated netdev_saved_flags (which also takes a reference on
 * 'netdev') that records the change so it can be restored later; otherwise
 * stores NULL. */
static int
do_update_flags(struct netdev *netdev, enum netdev_flags off,
                enum netdev_flags on, enum netdev_flags *old_flagsp,
                struct netdev_saved_flags **sfp)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev_saved_flags *sf = NULL;
    enum netdev_flags old_flags;
    int error;

    /* 'on' wins over 'off' for any flag present in both. */
    error = netdev->netdev_class->update_flags(netdev, off & ~on, on,
                                               &old_flags);
    if (error) {
        VLOG_WARN_RL(&rl, "failed to %s flags for network device %s: %s",
                     off || on ? "set" : "get", netdev_get_name(netdev),
                     ovs_strerror(error));
        old_flags = 0;
    } else if ((off || on) && sfp) {
        enum netdev_flags new_flags = (old_flags & ~off) | on;
        enum netdev_flags changed_flags = old_flags ^ new_flags;
        if (changed_flags) {
            /* Record which flags changed and what their new values are, so
             * that netdev_restore_flags() can later undo the change.  The
             * saved-flags node is tracked on the netdev under netdev_mutex
             * and pins the netdev via ref_cnt. */
            ovs_mutex_lock(&netdev_mutex);
            *sfp = sf = xmalloc(sizeof *sf);
            sf->netdev = netdev;
            ovs_list_push_front(&netdev->saved_flags_list, &sf->node);
            sf->saved_flags = changed_flags;
            sf->saved_values = changed_flags & new_flags;

            netdev->ref_cnt++;
            ovs_mutex_unlock(&netdev_mutex);
        }
    }

    if (old_flagsp) {
        *old_flagsp = old_flags;
    }
    if (sfp) {
        /* NULL unless a saved-flags node was created above. */
        *sfp = sf;
    }

    return error;
}
1598
1599
/* Obtains the current flags for 'netdev' and stores them into '*flagsp'.
1600
 * Returns 0 if successful, otherwise a positive errno value.  On failure,
1601
 * stores 0 into '*flagsp'. */
1602
int
1603
netdev_get_flags(const struct netdev *netdev_, enum netdev_flags *flagsp)
1604
0
{
1605
0
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
1606
0
    return do_update_flags(netdev, 0, 0, flagsp, NULL);
1607
0
}
1608
1609
/* Sets the flags for 'netdev' to 'flags'.
1610
 * Returns 0 if successful, otherwise a positive errno value. */
1611
int
1612
netdev_set_flags(struct netdev *netdev, enum netdev_flags flags,
1613
                 struct netdev_saved_flags **sfp)
1614
0
{
1615
0
    return do_update_flags(netdev, -1, flags, NULL, sfp);
1616
0
}
1617
1618
/* Turns on the specified 'flags' on 'netdev':
1619
 *
1620
 *    - On success, returns 0.  If 'sfp' is nonnull, sets '*sfp' to a newly
1621
 *      allocated 'struct netdev_saved_flags *' that may be passed to
1622
 *      netdev_restore_flags() to restore the original values of 'flags' on
1623
 *      'netdev' (this will happen automatically at program termination if
1624
 *      netdev_restore_flags() is never called) , or to NULL if no flags were
1625
 *      actually changed.
1626
 *
1627
 *    - On failure, returns a positive errno value.  If 'sfp' is nonnull, sets
1628
 *      '*sfp' to NULL. */
1629
int
1630
netdev_turn_flags_on(struct netdev *netdev, enum netdev_flags flags,
1631
                     struct netdev_saved_flags **sfp)
1632
0
{
1633
0
    return do_update_flags(netdev, 0, flags, NULL, sfp);
1634
0
}
1635
1636
/* Turns off the specified 'flags' on 'netdev'.  See netdev_turn_flags_on() for
1637
 * details of the interface. */
1638
int
1639
netdev_turn_flags_off(struct netdev *netdev, enum netdev_flags flags,
1640
                      struct netdev_saved_flags **sfp)
1641
0
{
1642
0
    return do_update_flags(netdev, flags, 0, NULL, sfp);
1643
0
}
1644
1645
/* Restores the flags that were saved in 'sf', and destroys 'sf'.
1646
 * Does nothing if 'sf' is NULL. */
1647
void
1648
netdev_restore_flags(struct netdev_saved_flags *sf)
1649
    OVS_EXCLUDED(netdev_mutex)
1650
0
{
1651
0
    if (sf) {
1652
0
        struct netdev *netdev = sf->netdev;
1653
0
        enum netdev_flags old_flags;
1654
1655
0
        netdev->netdev_class->update_flags(netdev,
1656
0
                                           sf->saved_flags & sf->saved_values,
1657
0
                                           sf->saved_flags & ~sf->saved_values,
1658
0
                                           &old_flags);
1659
1660
0
        ovs_mutex_lock(&netdev_mutex);
1661
0
        ovs_list_remove(&sf->node);
1662
0
        free(sf);
1663
0
        netdev_unref(netdev);
1664
0
    }
1665
0
}
1666
1667
/* Looks up the ARP table entry for 'ip' on 'netdev'.  If one exists and can be
1668
 * successfully retrieved, it stores the corresponding MAC address in 'mac' and
1669
 * returns 0.  Otherwise, it returns a positive errno value; in particular,
1670
 * ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
1671
int
1672
netdev_arp_lookup(const struct netdev *netdev,
1673
                  ovs_be32 ip, struct eth_addr *mac)
1674
0
{
1675
0
    int error = (netdev->netdev_class->arp_lookup
1676
0
                 ? netdev->netdev_class->arp_lookup(netdev, ip, mac)
1677
0
                 : EOPNOTSUPP);
1678
0
    if (error) {
1679
0
        *mac = eth_addr_zero;
1680
0
    }
1681
0
    return error;
1682
0
}
1683
1684
/* Returns true if carrier is active (link light is on) on 'netdev'. */
1685
bool
1686
netdev_get_carrier(const struct netdev *netdev)
1687
0
{
1688
0
    int error;
1689
0
    enum netdev_flags flags;
1690
0
    bool carrier;
1691
1692
0
    netdev_get_flags(netdev, &flags);
1693
0
    if (!(flags & NETDEV_UP)) {
1694
0
        return false;
1695
0
    }
1696
1697
0
    if (!netdev->netdev_class->get_carrier) {
1698
0
        return true;
1699
0
    }
1700
1701
0
    error = netdev->netdev_class->get_carrier(netdev, &carrier);
1702
0
    if (error) {
1703
0
        VLOG_DBG("%s: failed to get network device carrier status, assuming "
1704
0
                 "down: %s", netdev_get_name(netdev), ovs_strerror(error));
1705
0
        carrier = false;
1706
0
    }
1707
1708
0
    return carrier;
1709
0
}
1710
1711
/* Returns the number of times 'netdev''s carrier has changed. */
1712
long long int
1713
netdev_get_carrier_resets(const struct netdev *netdev)
1714
0
{
1715
0
    return (netdev->netdev_class->get_carrier_resets
1716
0
            ? netdev->netdev_class->get_carrier_resets(netdev)
1717
0
            : 0);
1718
0
}
1719
1720
/* Attempts to force netdev_get_carrier() to poll 'netdev''s MII registers for
1721
 * link status instead of checking 'netdev''s carrier.  'netdev''s MII
1722
 * registers will be polled once ever 'interval' milliseconds.  If 'netdev'
1723
 * does not support MII, another method may be used as a fallback.  If
1724
 * 'interval' is less than or equal to zero, reverts netdev_get_carrier() to
1725
 * its normal behavior.
1726
 *
1727
 * Returns 0 if successful, otherwise a positive errno value. */
1728
int
1729
netdev_set_miimon_interval(struct netdev *netdev, long long int interval)
1730
0
{
1731
0
    return (netdev->netdev_class->set_miimon_interval
1732
0
            ? netdev->netdev_class->set_miimon_interval(netdev, interval)
1733
0
            : EOPNOTSUPP);
1734
0
}
1735
1736
/* Retrieves current device stats for 'netdev'. */
1737
int
1738
netdev_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1739
0
{
1740
0
    int error;
1741
1742
    /* Statistics are initialized before passing it to particular device
1743
     * implementation so all values are filtered out by default. */
1744
0
    memset(stats, 0xFF, sizeof *stats);
1745
1746
0
    COVERAGE_INC(netdev_get_stats);
1747
0
    error = (netdev->netdev_class->get_stats
1748
0
             ? netdev->netdev_class->get_stats(netdev, stats)
1749
0
             : EOPNOTSUPP);
1750
0
    if (error) {
1751
        /* In case of error all statistics are filtered out */
1752
0
        memset(stats, 0xff, sizeof *stats);
1753
0
    }
1754
0
    return error;
1755
0
}
1756
1757
/* Retrieves current device custom stats for 'netdev'. */
1758
int
1759
netdev_get_custom_stats(const struct netdev *netdev,
1760
                        struct netdev_custom_stats *custom_stats)
1761
0
{
1762
0
    int error;
1763
0
    memset(custom_stats, 0, sizeof *custom_stats);
1764
0
    error = (netdev->netdev_class->get_custom_stats
1765
0
             ? netdev->netdev_class->get_custom_stats(netdev, custom_stats)
1766
0
             : EOPNOTSUPP);
1767
1768
0
    return error;
1769
0
}
1770
1771
/* Attempts to set input rate limiting (policing) policy, such that:
1772
 * - up to 'kbits_rate' kbps of traffic is accepted, with a maximum
1773
 *   accumulative burst size of 'kbits' kb; and
1774
 * - up to 'kpkts' kpps of traffic is accepted, with a maximum
1775
 *   accumulative burst size of 'kpkts' kilo packets.
1776
 */
1777
int
1778
netdev_set_policing(struct netdev *netdev, uint32_t kbits_rate,
1779
                    uint32_t kbits_burst, uint32_t kpkts_rate,
1780
                    uint32_t kpkts_burst)
1781
0
{
1782
0
    return (netdev->netdev_class->set_policing
1783
0
            ? netdev->netdev_class->set_policing(netdev,
1784
0
                    kbits_rate, kbits_burst, kpkts_rate, kpkts_burst)
1785
0
            : EOPNOTSUPP);
1786
0
}
1787
1788
/* Adds to 'types' all of the forms of QoS supported by 'netdev', or leaves it
1789
 * empty if 'netdev' does not support QoS.  Any names added to 'types' should
1790
 * be documented as valid for the "type" column in the "QoS" table in
1791
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1792
 *
1793
 * Every network device supports disabling QoS with a type of "", but this type
1794
 * will not be added to 'types'.
1795
 *
1796
 * The caller must initialize 'types' (e.g. with sset_init()) before calling
1797
 * this function.  The caller is responsible for destroying 'types' (e.g. with
1798
 * sset_destroy()) when it is no longer needed.
1799
 *
1800
 * Returns 0 if successful, otherwise a positive errno value. */
1801
int
1802
netdev_get_qos_types(const struct netdev *netdev, struct sset *types)
1803
0
{
1804
0
    const struct netdev_class *class = netdev->netdev_class;
1805
0
    return (class->get_qos_types
1806
0
            ? class->get_qos_types(netdev, types)
1807
0
            : 0);
1808
0
}
1809
1810
/* Queries 'netdev' for its capabilities regarding the specified 'type' of QoS,
1811
 * which should be "" or one of the types returned by netdev_get_qos_types()
1812
 * for 'netdev'.  Returns 0 if successful, otherwise a positive errno value.
1813
 * On success, initializes 'caps' with the QoS capabilities; on failure, clears
1814
 * 'caps' to all zeros. */
1815
int
1816
netdev_get_qos_capabilities(const struct netdev *netdev, const char *type,
1817
                            struct netdev_qos_capabilities *caps)
1818
0
{
1819
0
    const struct netdev_class *class = netdev->netdev_class;
1820
1821
0
    if (*type) {
1822
0
        int retval = (class->get_qos_capabilities
1823
0
                      ? class->get_qos_capabilities(netdev, type, caps)
1824
0
                      : EOPNOTSUPP);
1825
0
        if (retval) {
1826
0
            memset(caps, 0, sizeof *caps);
1827
0
        }
1828
0
        return retval;
1829
0
    } else {
1830
        /* Every netdev supports turning off QoS. */
1831
0
        memset(caps, 0, sizeof *caps);
1832
0
        return 0;
1833
0
    }
1834
0
}
1835
1836
/* Obtains the number of queues supported by 'netdev' for the specified 'type'
1837
 * of QoS.  Returns 0 if successful, otherwise a positive errno value.  Stores
1838
 * the number of queues (zero on failure) in '*n_queuesp'.
1839
 *
1840
 * This is just a simple wrapper around netdev_get_qos_capabilities(). */
1841
int
1842
netdev_get_n_queues(const struct netdev *netdev,
1843
                    const char *type, unsigned int *n_queuesp)
1844
0
{
1845
0
    struct netdev_qos_capabilities caps;
1846
0
    int retval;
1847
1848
0
    retval = netdev_get_qos_capabilities(netdev, type, &caps);
1849
0
    *n_queuesp = caps.n_queues;
1850
0
    return retval;
1851
0
}
1852
1853
/* Queries 'netdev' about its currently configured form of QoS.  If successful,
1854
 * stores the name of the current form of QoS into '*typep', stores any details
1855
 * of configuration as string key-value pairs in 'details', and returns 0.  On
1856
 * failure, sets '*typep' to NULL and returns a positive errno value.
1857
 *
1858
 * A '*typep' of "" indicates that QoS is currently disabled on 'netdev'.
1859
 *
1860
 * The caller must initialize 'details' as an empty smap (e.g. with
1861
 * smap_init()) before calling this function.  The caller must free 'details'
1862
 * when it is no longer needed (e.g. with smap_destroy()).
1863
 *
1864
 * The caller must not modify or free '*typep'.
1865
 *
1866
 * '*typep' will be one of the types returned by netdev_get_qos_types() for
1867
 * 'netdev'.  The contents of 'details' should be documented as valid for
1868
 * '*typep' in the "other_config" column in the "QoS" table in
1869
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)). */
1870
int
1871
netdev_get_qos(const struct netdev *netdev,
1872
               const char **typep, struct smap *details)
1873
0
{
1874
0
    const struct netdev_class *class = netdev->netdev_class;
1875
0
    int retval;
1876
1877
0
    if (class->get_qos) {
1878
0
        retval = class->get_qos(netdev, typep, details);
1879
0
        if (retval) {
1880
0
            *typep = NULL;
1881
0
            smap_clear(details);
1882
0
        }
1883
0
        return retval;
1884
0
    } else {
1885
        /* 'netdev' doesn't support QoS, so report that QoS is disabled. */
1886
0
        *typep = "";
1887
0
        return 0;
1888
0
    }
1889
0
}
1890
1891
/* Attempts to reconfigure QoS on 'netdev', changing the form of QoS to 'type'
1892
 * with details of configuration from 'details'.  Returns 0 if successful,
1893
 * otherwise a positive errno value.  On error, the previous QoS configuration
1894
 * is retained.
1895
 *
1896
 * When this function changes the type of QoS (not just 'details'), this also
1897
 * resets all queue configuration for 'netdev' to their defaults (which depend
1898
 * on the specific type of QoS).  Otherwise, the queue configuration for
1899
 * 'netdev' is unchanged.
1900
 *
1901
 * 'type' should be "" (to disable QoS) or one of the types returned by
1902
 * netdev_get_qos_types() for 'netdev'.  The contents of 'details' should be
1903
 * documented as valid for the given 'type' in the "other_config" column in the
1904
 * "QoS" table in vswitchd/vswitch.xml (which is built as
1905
 * ovs-vswitchd.conf.db(8)).
1906
 *
1907
 * NULL may be specified for 'details' if there are no configuration
1908
 * details. */
1909
int
1910
netdev_set_qos(struct netdev *netdev,
1911
               const char *type, const struct smap *details)
1912
0
{
1913
0
    const struct netdev_class *class = netdev->netdev_class;
1914
1915
0
    if (!type) {
1916
0
        type = "";
1917
0
    }
1918
1919
0
    if (class->set_qos) {
1920
0
        if (!details) {
1921
0
            static const struct smap empty = SMAP_INITIALIZER(&empty);
1922
0
            details = &empty;
1923
0
        }
1924
0
        return class->set_qos(netdev, type, details);
1925
0
    } else {
1926
0
        return *type ? EOPNOTSUPP : 0;
1927
0
    }
1928
0
}
1929
1930
/* Queries 'netdev' for information about the queue numbered 'queue_id'.  If
1931
 * successful, adds that information as string key-value pairs to 'details'.
1932
 * Returns 0 if successful, otherwise a positive errno value.
1933
 *
1934
 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1935
 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1936
 *
1937
 * The returned contents of 'details' should be documented as valid for the
1938
 * given 'type' in the "other_config" column in the "Queue" table in
1939
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1940
 *
1941
 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1942
 * this function.  The caller must free 'details' when it is no longer needed
1943
 * (e.g. with smap_destroy()). */
1944
int
1945
netdev_get_queue(const struct netdev *netdev,
1946
                 unsigned int queue_id, struct smap *details)
1947
0
{
1948
0
    const struct netdev_class *class = netdev->netdev_class;
1949
0
    int retval;
1950
1951
0
    retval = (class->get_queue
1952
0
              ? class->get_queue(netdev, queue_id, details)
1953
0
              : EOPNOTSUPP);
1954
0
    if (retval) {
1955
0
        smap_clear(details);
1956
0
    }
1957
0
    return retval;
1958
0
}
1959
1960
/* Configures the queue numbered 'queue_id' on 'netdev' with the key-value
1961
 * string pairs in 'details'.  The contents of 'details' should be documented
1962
 * as valid for the given 'type' in the "other_config" column in the "Queue"
1963
 * table in vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1964
 * Returns 0 if successful, otherwise a positive errno value.  On failure, the
1965
 * given queue's configuration should be unmodified.
1966
 *
1967
 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1968
 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1969
 *
1970
 * This function does not modify 'details', and the caller retains ownership of
1971
 * it. */
1972
int
1973
netdev_set_queue(struct netdev *netdev,
1974
                 unsigned int queue_id, const struct smap *details)
1975
0
{
1976
0
    const struct netdev_class *class = netdev->netdev_class;
1977
0
    return (class->set_queue
1978
0
            ? class->set_queue(netdev, queue_id, details)
1979
0
            : EOPNOTSUPP);
1980
0
}
1981
1982
/* Attempts to delete the queue numbered 'queue_id' from 'netdev'.  Some kinds
1983
 * of QoS may have a fixed set of queues, in which case attempts to delete them
1984
 * will fail with EOPNOTSUPP.
1985
 *
1986
 * Returns 0 if successful, otherwise a positive errno value.  On failure, the
1987
 * given queue will be unmodified.
1988
 *
1989
 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1990
 * the current form of QoS (e.g. as returned by
1991
 * netdev_get_n_queues(netdev)). */
1992
int
1993
netdev_delete_queue(struct netdev *netdev, unsigned int queue_id)
1994
0
{
1995
0
    const struct netdev_class *class = netdev->netdev_class;
1996
0
    return (class->delete_queue
1997
0
            ? class->delete_queue(netdev, queue_id)
1998
0
            : EOPNOTSUPP);
1999
0
}
2000
2001
/* Obtains statistics about 'queue_id' on 'netdev'.  On success, returns 0 and
2002
 * fills 'stats' with the queue's statistics; individual members of 'stats' may
2003
 * be set to all-1-bits if the statistic is unavailable.  On failure, returns a
2004
 * positive errno value and fills 'stats' with values indicating unsupported
2005
 * statistics. */
2006
int
2007
netdev_get_queue_stats(const struct netdev *netdev, unsigned int queue_id,
2008
                       struct netdev_queue_stats *stats)
2009
0
{
2010
0
    const struct netdev_class *class = netdev->netdev_class;
2011
0
    int retval;
2012
2013
0
    retval = (class->get_queue_stats
2014
0
              ? class->get_queue_stats(netdev, queue_id, stats)
2015
0
              : EOPNOTSUPP);
2016
0
    if (retval) {
2017
0
        stats->tx_bytes = UINT64_MAX;
2018
0
        stats->tx_packets = UINT64_MAX;
2019
0
        stats->tx_errors = UINT64_MAX;
2020
0
        stats->created = LLONG_MIN;
2021
0
    }
2022
0
    return retval;
2023
0
}
2024
2025
/* Initializes 'dump' to begin dumping the queues in a netdev.
2026
 *
2027
 * This function provides no status indication.  An error status for the entire
2028
 * dump operation is provided when it is completed by calling
2029
 * netdev_queue_dump_done().
2030
 */
2031
void
2032
netdev_queue_dump_start(struct netdev_queue_dump *dump,
2033
                        const struct netdev *netdev)
2034
0
{
2035
0
    dump->netdev = netdev_ref(netdev);
2036
0
    if (netdev->netdev_class->queue_dump_start) {
2037
0
        dump->error = netdev->netdev_class->queue_dump_start(netdev,
2038
0
                                                             &dump->state);
2039
0
    } else {
2040
0
        dump->error = EOPNOTSUPP;
2041
0
    }
2042
0
}
2043
2044
/* Attempts to retrieve another queue from 'dump', which must have been
2045
 * initialized with netdev_queue_dump_start().  On success, stores a new queue
2046
 * ID into '*queue_id', fills 'details' with configuration details for the
2047
 * queue, and returns true.  On failure, returns false.
2048
 *
2049
 * Queues are not necessarily dumped in increasing order of queue ID (or any
2050
 * other predictable order).
2051
 *
2052
 * Failure might indicate an actual error or merely that the last queue has
2053
 * been dumped.  An error status for the entire dump operation is provided when
2054
 * it is completed by calling netdev_queue_dump_done().
2055
 *
2056
 * The returned contents of 'details' should be documented as valid for the
2057
 * given 'type' in the "other_config" column in the "Queue" table in
2058
 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
2059
 *
2060
 * The caller must initialize 'details' (e.g. with smap_init()) before calling
2061
 * this function.  This function will clear and replace its contents.  The
2062
 * caller must free 'details' when it is no longer needed (e.g. with
2063
 * smap_destroy()). */
2064
bool
2065
netdev_queue_dump_next(struct netdev_queue_dump *dump,
2066
                       unsigned int *queue_id, struct smap *details)
2067
0
{
2068
0
    smap_clear(details);
2069
2070
0
    const struct netdev *netdev = dump->netdev;
2071
0
    if (dump->error) {
2072
0
        return false;
2073
0
    }
2074
2075
0
    dump->error = netdev->netdev_class->queue_dump_next(netdev, dump->state,
2076
0
                                                        queue_id, details);
2077
2078
0
    if (dump->error) {
2079
0
        netdev->netdev_class->queue_dump_done(netdev, dump->state);
2080
0
        return false;
2081
0
    }
2082
0
    return true;
2083
0
}
2084
2085
/* Completes queue table dump operation 'dump', which must have been
2086
 * initialized with netdev_queue_dump_start().  Returns 0 if the dump operation
2087
 * was error-free, otherwise a positive errno value describing the problem. */
2088
int
2089
netdev_queue_dump_done(struct netdev_queue_dump *dump)
2090
0
{
2091
0
    const struct netdev *netdev = dump->netdev;
2092
0
    if (!dump->error && netdev->netdev_class->queue_dump_done) {
2093
0
        dump->error = netdev->netdev_class->queue_dump_done(netdev,
2094
0
                                                            dump->state);
2095
0
    }
2096
0
    netdev_close(dump->netdev);
2097
0
    return dump->error == EOF ? 0 : dump->error;
2098
0
}
2099
2100
/* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
2101
 * its statistics, and the 'aux' specified by the caller.  The order of
2102
 * iteration is unspecified, but (when successful) each queue is visited
2103
 * exactly once.
2104
 *
2105
 * Calling this function may be more efficient than calling
2106
 * netdev_get_queue_stats() for every queue.
2107
 *
2108
 * 'cb' must not modify or free the statistics passed in.
2109
 *
2110
 * Returns 0 if successful, otherwise a positive errno value.  On error, some
2111
 * configured queues may not have been included in the iteration. */
2112
int
2113
netdev_dump_queue_stats(const struct netdev *netdev,
2114
                        netdev_dump_queue_stats_cb *cb, void *aux)
2115
0
{
2116
0
    const struct netdev_class *class = netdev->netdev_class;
2117
0
    return (class->dump_queue_stats
2118
0
            ? class->dump_queue_stats(netdev, cb, aux)
2119
0
            : EOPNOTSUPP);
2120
0
}
2121
2122

2123
/* Returns the class type of 'netdev'.
2124
 *
2125
 * The caller must not free the returned value. */
2126
const char *
2127
netdev_get_type(const struct netdev *netdev)
2128
0
{
2129
0
    return netdev->netdev_class->type;
2130
0
}
2131
2132
/* Returns the class associated with 'netdev'. */
2133
const struct netdev_class *
2134
netdev_get_class(const struct netdev *netdev)
2135
0
{
2136
0
    return netdev->netdev_class;
2137
0
}
2138
2139
/* Set the type of 'dpif' this 'netdev' belongs to. */
2140
void
2141
netdev_set_dpif_type(struct netdev *netdev, const char *type)
2142
0
{
2143
0
    netdev->dpif_type = type;
2144
0
}
2145
2146
/* Returns the type of 'dpif' this 'netdev' belongs to.
2147
 *
2148
 * The caller must not free the returned value. */
2149
const char *
2150
netdev_get_dpif_type(const struct netdev *netdev)
2151
0
{
2152
0
    return netdev->dpif_type;
2153
0
}
2154
2155
/* Returns the netdev with 'name' or NULL if there is none.
2156
 *
2157
 * The caller must free the returned netdev with netdev_close(). */
2158
struct netdev *
2159
netdev_from_name(const char *name)
2160
    OVS_EXCLUDED(netdev_mutex)
2161
0
{
2162
0
    struct netdev *netdev;
2163
2164
0
    ovs_mutex_lock(&netdev_mutex);
2165
0
    netdev = shash_find_data(&netdev_shash, name);
2166
0
    if (netdev) {
2167
0
        netdev->ref_cnt++;
2168
0
    }
2169
0
    ovs_mutex_unlock(&netdev_mutex);
2170
2171
0
    return netdev;
2172
0
}
2173
2174
/* Fills 'device_list' with devices that match 'netdev_class'.
2175
 *
2176
 * The caller is responsible for initializing and destroying 'device_list' and
2177
 * must close each device on the list. */
2178
void
2179
netdev_get_devices(const struct netdev_class *netdev_class,
2180
                   struct shash *device_list)
2181
    OVS_EXCLUDED(netdev_mutex)
2182
0
{
2183
0
    struct shash_node *node;
2184
2185
0
    ovs_mutex_lock(&netdev_mutex);
2186
0
    SHASH_FOR_EACH (node, &netdev_shash) {
2187
0
        struct netdev *dev = node->data;
2188
2189
0
        if (dev->netdev_class == netdev_class) {
2190
0
            dev->ref_cnt++;
2191
0
            shash_add(device_list, node->name, node->data);
2192
0
        }
2193
0
    }
2194
0
    ovs_mutex_unlock(&netdev_mutex);
2195
0
}
2196
2197
/* Extracts pointers to all 'netdev-vports' into an array 'vports'
2198
 * and returns it.  Stores the size of the array into '*size'.
2199
 *
2200
 * The caller is responsible for freeing 'vports' and must close
2201
 * each 'netdev-vport' in the list. */
2202
struct netdev **
2203
netdev_get_vports(size_t *size)
2204
    OVS_EXCLUDED(netdev_mutex)
2205
0
{
2206
0
    struct netdev **vports;
2207
0
    struct shash_node *node;
2208
0
    size_t n = 0;
2209
2210
0
    if (!size) {
2211
0
        return NULL;
2212
0
    }
2213
2214
    /* Explicitly allocates big enough chunk of memory. */
2215
0
    ovs_mutex_lock(&netdev_mutex);
2216
0
    vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
2217
0
    SHASH_FOR_EACH (node, &netdev_shash) {
2218
0
        struct netdev *dev = node->data;
2219
2220
0
        if (netdev_vport_is_vport_class(dev->netdev_class)) {
2221
0
            dev->ref_cnt++;
2222
0
            vports[n] = dev;
2223
0
            n++;
2224
0
        }
2225
0
    }
2226
0
    ovs_mutex_unlock(&netdev_mutex);
2227
0
    *size = n;
2228
2229
0
    return vports;
2230
0
}
2231
2232
/* Returns the type of the netdev named 'name', preferring the vport type
 * registry and falling back to an open netdev with that name, or NULL if
 * neither knows the name. */
const char *
netdev_get_type_from_name(const char *name)
{
    const char *type = netdev_vport_type_from_name(name);

    if (!type) {
        struct netdev *dev = netdev_from_name(name);

        if (dev) {
            type = netdev_get_type(dev);
        }
        netdev_close(dev);
    }
    return type;
}
2245

2246
struct netdev *
2247
netdev_rxq_get_netdev(const struct netdev_rxq *rx)
2248
0
{
2249
0
    ovs_assert(rx->netdev->ref_cnt > 0);
2250
0
    return rx->netdev;
2251
0
}
2252
2253
/* Returns the name of the netdev that 'rx' receives from. */
const char *
netdev_rxq_get_name(const struct netdev_rxq *rx)
{
    struct netdev *netdev = netdev_rxq_get_netdev(rx);

    return netdev_get_name(netdev);
}
2258
2259
int
2260
netdev_rxq_get_queue_id(const struct netdev_rxq *rx)
2261
0
{
2262
0
    return rx->queue_id;
2263
0
}
2264
2265
/* Restores the original values of all flags saved on every netdev (see
 * do_update_flags()'s saved-flags records).  Presumably registered as a
 * termination hook — the netdev_turn_flags_on() comment promises this
 * happens automatically at program exit; confirm where it is registered. */
static void
restore_all_flags(void *aux OVS_UNUSED)
{
    struct shash_node *node;

    SHASH_FOR_EACH (node, &netdev_shash) {
        struct netdev *netdev = node->data;
        const struct netdev_saved_flags *sf;
        enum netdev_flags saved_values;
        enum netdev_flags saved_flags;

        /* Merge all outstanding saved-flags records for this netdev.
         * do_update_flags() pushes new records at the front, so iterating
         * front-to-back visits newest first; because each record clears and
         * then re-sets its own bits, the LAST record processed (the oldest
         * save) wins, restoring each flag to its oldest saved value. */
        saved_values = saved_flags = 0;
        LIST_FOR_EACH (sf, node, &netdev->saved_flags_list) {
            saved_flags |= sf->saved_flags;
            saved_values &= ~sf->saved_flags;
            saved_values |= sf->saved_flags & sf->saved_values;
        }
        if (saved_flags) {
            enum netdev_flags old_flags;

            /* Flags whose oldest saved value was 1 are turned on, the rest
             * are turned off. */
            netdev->netdev_class->update_flags(netdev,
                                               saved_flags & saved_values,
                                               saved_flags & ~saved_values,
                                               &old_flags);
        }
#ifdef HAVE_AF_XDP
        if (netdev->netdev_class == &netdev_afxdp_class) {
            signal_remove_xdp(netdev);
        }
#endif
    }
}
2297
2298
uint64_t
2299
netdev_get_change_seq(const struct netdev *netdev)
2300
0
{
2301
0
    uint64_t change_seq;
2302
2303
0
    atomic_read_explicit(&CONST_CAST(struct netdev *, netdev)->change_seq,
2304
0
                        &change_seq, memory_order_acquire);
2305
2306
0
    return change_seq;
2307
0
}
2308
2309
#ifndef _WIN32
2310
/* This implementation is shared by Linux and BSD. */
2311
2312
static struct ifaddrs *if_addr_list;
2313
static struct ovs_mutex if_addr_list_lock = OVS_MUTEX_INITIALIZER;
2314
2315
void
2316
netdev_get_addrs_list_flush(void)
2317
0
{
2318
0
    ovs_mutex_lock(&if_addr_list_lock);
2319
0
    if (if_addr_list) {
2320
0
        freeifaddrs(if_addr_list);
2321
0
        if_addr_list = NULL;
2322
0
    }
2323
0
    ovs_mutex_unlock(&if_addr_list_lock);
2324
0
}
2325
2326
/* Retrieves all IPv4/IPv6 addresses (and netmasks) assigned to interface
 * 'dev', using a process-wide cached getifaddrs() dump protected by
 * 'if_addr_list_lock'.  On success returns 0 and, if 'paddr' is nonnull,
 * stores malloc'd arrays in '*paddr'/'*pmask' and the count in '*n_in'
 * (caller frees); if 'paddr' is null the arrays are freed internally.
 * Returns EADDRNOTAVAIL if 'dev' has no IP address, or a negated
 * getifaddrs() result on dump failure. */
int
netdev_get_addrs(const char dev[], struct in6_addr **paddr,
                 struct in6_addr **pmask, int *n_in)
{
    struct in6_addr *addr_array, *mask_array;
    const struct ifaddrs *ifa;
    int cnt = 0, i = 0;
    int retries = 3;

    ovs_mutex_lock(&if_addr_list_lock);
    if (!if_addr_list) {
        int err;

retry:
        err = getifaddrs(&if_addr_list);
        if (err) {
            ovs_mutex_unlock(&if_addr_list_lock);
            /* NOTE(review): getifaddrs() returns -1 and sets errno on
             * failure, so '-err' is 1 rather than the real errno value;
             * callers only learn success/failure here — confirm whether the
             * errno should be propagated instead. */
            return -err;
        }
        retries--;
    }

    /* First pass: count matching entries so the arrays can be sized. */
    for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
        if (!ifa->ifa_name) {
            if (retries) {
                /* Older versions of glibc have a bug on race condition with
                 * address addition which may cause one of the returned
                 * ifa_name values to be NULL. In such case, we know that we've
                 * got an inconsistent dump. Retry but beware of an endless
                 * loop. From glibc 2.28 and beyond, this workaround is not
                 * needed and should be eventually removed. */
                freeifaddrs(if_addr_list);
                goto retry;
            } else {
                VLOG_WARN("Proceeding with an inconsistent dump of "
                          "interfaces from the kernel. Some may be missing");
            }
        }
        if (ifa->ifa_addr && ifa->ifa_name && ifa->ifa_netmask) {
            int family;

            family = ifa->ifa_addr->sa_family;
            if (family == AF_INET || family == AF_INET6) {
                if (!strncmp(ifa->ifa_name, dev, IFNAMSIZ)) {
                    cnt++;
                }
            }
        }
    }

    if (!cnt) {
        ovs_mutex_unlock(&if_addr_list_lock);
        return EADDRNOTAVAIL;
    }
    addr_array = xzalloc(sizeof *addr_array * cnt);
    mask_array = xzalloc(sizeof *mask_array * cnt);
    /* Second pass: collect the addresses counted above.  sa_is_ip() repeats
     * the AF_INET/AF_INET6 check, so 'i' never exceeds 'cnt'. */
    for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
        if (ifa->ifa_name
            && ifa->ifa_addr
            && ifa->ifa_netmask
            && !strncmp(ifa->ifa_name, dev, IFNAMSIZ)
            && sa_is_ip(ifa->ifa_addr)) {
            addr_array[i] = sa_get_address(ifa->ifa_addr);
            mask_array[i] = sa_get_address(ifa->ifa_netmask);
            i++;
        }
    }
    ovs_mutex_unlock(&if_addr_list_lock);
    if (paddr) {
        *n_in = cnt;
        *paddr = addr_array;
        *pmask = mask_array;
    } else {
        free(addr_array);
        free(mask_array);
    }
    return 0;
}
2404
#endif
2405
2406
void
2407
netdev_wait_reconf_required(struct netdev *netdev)
2408
0
{
2409
0
    seq_wait(netdev->reconfigure_seq, netdev->last_reconfigure_seq);
2410
0
}
2411
2412
bool
2413
netdev_is_reconf_required(struct netdev *netdev)
2414
0
{
2415
0
    return seq_read(netdev->reconfigure_seq) != netdev->last_reconfigure_seq;
2416
0
}
2417
2418
/* Give a chance to 'netdev' to reconfigure some of its parameters.
2419
 *
2420
 * If a module uses netdev_send() and netdev_rxq_recv(), it must call this
2421
 * function when netdev_is_reconf_required() returns true.
2422
 *
2423
 * Return 0 if successful, otherwise a positive errno value.  If the
2424
 * reconfiguration fails the netdev will not be able to send or receive
2425
 * packets.
2426
 *
2427
 * When this function is called, no call to netdev_rxq_recv() or netdev_send()
2428
 * must be issued. */
2429
int
2430
netdev_reconfigure(struct netdev *netdev)
2431
0
{
2432
0
    const struct netdev_class *class = netdev->netdev_class;
2433
2434
0
    netdev->last_reconfigure_seq = seq_read(netdev->reconfigure_seq);
2435
2436
0
    return (class->reconfigure
2437
0
            ? class->reconfigure(netdev)
2438
0
            : EOPNOTSUPP);
2439
0
}
2440
2441
void
2442
netdev_free_custom_stats_counters(struct netdev_custom_stats *custom_stats)
2443
44.1k
{
2444
44.1k
    if (custom_stats) {
2445
44.1k
        if (custom_stats->counters) {
2446
9.38k
            free(custom_stats->counters);
2447
9.38k
            custom_stats->counters = NULL;
2448
9.38k
            custom_stats->size = 0;
2449
9.38k
        }
2450
44.1k
        free(custom_stats->label);
2451
44.1k
        custom_stats->label = NULL;
2452
44.1k
    }
2453
44.1k
}
2454
2455
uint32_t
2456
netdev_get_block_id(struct netdev *netdev)
2457
0
{
2458
0
    const struct netdev_class *class = netdev->netdev_class;
2459
2460
0
    return (class->get_block_id
2461
0
            ? class->get_block_id(netdev)
2462
0
            : 0);
2463
0
}
2464
2465
/*
2466
 * Get the value of the hw info parameter specified by type.
2467
 * Returns the value on success (>= 0).  Returns -1 on failure.
2468
 */
2469
int
2470
netdev_get_hw_info(struct netdev *netdev, int type)
2471
0
{
2472
0
    int val = -1;
2473
2474
0
    switch (type) {
2475
0
    case HW_INFO_TYPE_OOR:
2476
0
        val = netdev->hw_info.oor;
2477
0
        break;
2478
0
    case HW_INFO_TYPE_PEND_COUNT:
2479
0
        val = netdev->hw_info.pending_count;
2480
0
        break;
2481
0
    case HW_INFO_TYPE_OFFL_COUNT:
2482
0
        val = netdev->hw_info.offload_count;
2483
0
        break;
2484
0
    default:
2485
0
        break;
2486
0
    }
2487
2488
0
    return val;
2489
0
}
2490
2491
/*
2492
 * Set the value of the hw info parameter specified by type.
2493
 */
2494
void
2495
netdev_set_hw_info(struct netdev *netdev, int type, int val)
2496
0
{
2497
0
    switch (type) {
2498
0
    case HW_INFO_TYPE_OOR:
2499
0
        if (val == 0) {
2500
0
            VLOG_DBG("Offload rebalance: netdev: %s is not OOR", netdev->name);
2501
0
        }
2502
0
        netdev->hw_info.oor = val;
2503
0
        break;
2504
0
    case HW_INFO_TYPE_PEND_COUNT:
2505
0
        netdev->hw_info.pending_count = val;
2506
0
        break;
2507
0
    case HW_INFO_TYPE_OFFL_COUNT:
2508
0
        netdev->hw_info.offload_count = val;
2509
0
        break;
2510
0
    default:
2511
0
        break;
2512
0
    }
2513
0
}