Coverage Report

Created: 2026-01-17 06:55

/src/openvswitch/lib/dpif-provider.h
Line | Count | Source
1
/*
2
 * Copyright (c) 2009-2014, 2018 Nicira, Inc.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#ifndef DPIF_PROVIDER_H
18
#define DPIF_PROVIDER_H 1
19
20
/* Provider interface to dpifs, which provide an interface to an Open vSwitch
21
 * datapath.  A datapath is a collection of physical or virtual ports that are
22
 * exposed over OpenFlow as a single switch.  Datapaths and the collections of
23
 * ports that they contain may be fixed or dynamic. */
24
25
#include "openflow/openflow.h"
26
#include "ovs-thread.h"
27
#include "dpif.h"
28
#include "util.h"
29
30
#ifdef  __cplusplus
31
extern "C" {
32
#endif
33
34
/* Forward declarations of private structures. */
35
struct dpif_offload_provider_collection;
36
37
/* Open vSwitch datapath interface.
38
 *
39
 * This structure should be treated as opaque by dpif implementations. */
40
struct dpif {
41
    const struct dpif_class *dpif_class;
42
    char *base_name;
43
    char *full_name;
44
    uint8_t netflow_engine_type;
45
    uint8_t netflow_engine_id;
46
    long long int current_ms;
47
48
    /* dpif offload provider specific variables. */
49
    OVSRCU_TYPE(struct dpif_offload_provider_collection *)
50
        offload_provider_collection;
51
};
52
53
struct dpif_ipf_status;
54
struct ipf_dump_ctx;
55
56
void dpif_init(struct dpif *, const struct dpif_class *, const char *name,
57
               uint8_t netflow_engine_type, uint8_t netflow_engine_id);
58
void dpif_uninit(struct dpif *dpif, bool close);
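The sketch below is illustrative only and not part of this header: a hypothetical provider ("foo") embeds 'struct dpif' in its own private structure and calls dpif_init() from its open() callback. All "foo" names, the xzalloc() allocation, and the zero NetFlow engine type/id are assumptions.

struct dpif_foo {
    struct dpif dpif;            /* Common, opaque state; kept first here. */
    int dp_fd;                   /* Hypothetical provider-private handle. */
};

static int
dpif_foo_open(const struct dpif_class *class, const char *name,
              bool create OVS_UNUSED, struct dpif **dpifp)
{
    struct dpif_foo *foo = xzalloc(sizeof *foo);

    /* Initialize the opaque common part; 0/0 are placeholder NetFlow
     * engine type/id values. */
    dpif_init(&foo->dpif, class, name, 0, 0);
    *dpifp = &foo->dpif;
    return 0;
}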
59
60
static inline void dpif_assert_class(const struct dpif *dpif,
61
                                     const struct dpif_class *dpif_class)
62
0
{
63
0
    ovs_assert(dpif->dpif_class == dpif_class);
64
0
}
Unexecuted instantiation: dpif-offload.c:dpif_assert_class
Unexecuted instantiation: dpif-offload-dummy.c:dpif_assert_class
Unexecuted instantiation: dpif.c:dpif_assert_class
Unexecuted instantiation: dpif-netlink.c:dpif_assert_class
Unexecuted instantiation: dpif-offload-tc.c:dpif_assert_class
Unexecuted instantiation: dpif-offload-tc-netdev.c:dpif_assert_class
Unexecuted instantiation: ct-dpif.c:dpif_assert_class
Unexecuted instantiation: dpctl.c:dpif_assert_class
Unexecuted instantiation: dpif-netdev.c:dpif_assert_class
Unexecuted instantiation: dpif-netdev-lookup-generic.c:dpif_assert_class
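Continuing the hypothetical "foo" provider from the sketch above, this is the usual downcast helper that pairs with dpif_assert_class(); 'dpif_foo_class' is the provider's (hypothetical) class definition.

static struct dpif_foo *
dpif_foo_cast(const struct dpif *dpif)
{
    /* Trip an assertion if 'dpif' does not belong to this provider, then
     * recover the enclosing provider-private structure. */
    dpif_assert_class(dpif, &dpif_foo_class);
    return CONTAINER_OF(dpif, struct dpif_foo, dpif);
}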
65
66
struct dpif_flow_dump {
67
    struct dpif *dpif;
68
    bool terse;         /* If true, key/mask/actions may be omitted. */
69
70
    struct ovs_mutex offload_dump_mutex;
71
    struct dpif_offload_flow_dump **offload_dumps;
72
    size_t n_offload_dumps;
73
    size_t offload_dump_index;
74
};
75
76
void dpif_offload_flow_dump_create(struct dpif_flow_dump *,
77
                                   const struct dpif *, bool terse);
78
void dpif_offload_flow_dump_thread_create(struct dpif_flow_dump_thread *,
79
                                          struct dpif_flow_dump *);
80
81
static inline void
82
dpif_flow_dump_init(struct dpif_flow_dump *dump, const struct dpif *dpif,
83
                    bool terse, struct dpif_flow_dump_types *types)
84
0
{
85
0
    dump->dpif = CONST_CAST(struct dpif *, dpif);
86
0
    dump->terse = terse;
87
0
    dump->offload_dumps = NULL;
88
0
    dump->n_offload_dumps = 0;
89
0
    dump->offload_dump_index = 0;
90
0
    ovs_mutex_init(&dump->offload_dump_mutex);
91
0
    if (!types || types->offloaded_flows) {
92
0
        dpif_offload_flow_dump_create(dump, dpif, terse);
93
0
    }
94
0
}
Unexecuted instantiation: dpif-offload.c:dpif_flow_dump_init
Unexecuted instantiation: dpif-offload-dummy.c:dpif_flow_dump_init
Unexecuted instantiation: dpif.c:dpif_flow_dump_init
Unexecuted instantiation: dpif-netlink.c:dpif_flow_dump_init
Unexecuted instantiation: dpif-offload-tc.c:dpif_flow_dump_init
Unexecuted instantiation: dpif-offload-tc-netdev.c:dpif_flow_dump_init
Unexecuted instantiation: ct-dpif.c:dpif_flow_dump_init
Unexecuted instantiation: dpctl.c:dpif_flow_dump_init
Unexecuted instantiation: dpif-netdev.c:dpif_flow_dump_init
Unexecuted instantiation: dpif-netdev-lookup-generic.c:dpif_flow_dump_init
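As a sketch of how a provider is expected to use this helper (the flow dumping comments further down require the call), a hypothetical flow_dump_create() callback might look like this; 'foo_flow_dump' and the callback name are invented.

struct foo_flow_dump {
    struct dpif_flow_dump up;    /* Common state, set up by the helper. */
    /* Provider-private iteration state would follow here. */
};

static struct dpif_flow_dump *
dpif_foo_flow_dump_create(const struct dpif *dpif, bool terse,
                          struct dpif_flow_dump_types *types)
{
    struct foo_flow_dump *dump = xzalloc(sizeof *dump);

    dpif_flow_dump_init(&dump->up, dpif, terse, types);
    return &dump->up;
}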
95
96
struct dpif_flow_dump_thread {
97
    struct dpif_flow_dump *dump;
98
99
    struct dpif_offload_flow_dump_thread **offload_threads;
100
    size_t n_offload_threads;
101
    size_t offload_dump_index;
102
    bool offload_dump_done;
103
};
104
105
static inline void
106
dpif_flow_dump_thread_init(struct dpif_flow_dump_thread *thread,
107
                           struct dpif_flow_dump *dump)
108
0
{
109
0
    thread->dump = dump;
110
0
    thread->offload_threads = NULL;
111
0
    thread->n_offload_threads = 0;
112
0
    thread->offload_dump_index = 0;
113
    thread->offload_dump_done = true;
114
0
    dpif_offload_flow_dump_thread_create(thread, dump);
115
0
}
Unexecuted instantiation: dpif-offload.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpif-offload-dummy.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpif.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpif-netlink.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpif-offload-tc.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpif-offload-tc-netdev.c:dpif_flow_dump_thread_init
Unexecuted instantiation: ct-dpif.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpctl.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpif-netdev.c:dpif_flow_dump_thread_init
Unexecuted instantiation: dpif-netdev-lookup-generic.c:dpif_flow_dump_thread_init
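And the matching, equally hypothetical flow_dump_thread_create() callback, which must initialize the common part with dpif_flow_dump_thread_init():

struct foo_flow_dump_thread {
    struct dpif_flow_dump_thread up;   /* Common per-thread state. */
    /* Provider-private per-thread buffers would follow here. */
};

static struct dpif_flow_dump_thread *
dpif_foo_flow_dump_thread_create(struct dpif_flow_dump *dump)
{
    struct foo_flow_dump_thread *thread = xzalloc(sizeof *thread);

    dpif_flow_dump_thread_init(&thread->up, dump);
    return &thread->up;
}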
116
117
struct ct_dpif_dump_state;
118
struct ct_dpif_entry;
119
struct ct_dpif_exp;
120
struct ct_dpif_tuple;
121
struct ct_dpif_timeout_policy;
122
enum ct_features;
123
124
/* 'dpif_ipf_proto_status' and 'dpif_ipf_status' are presently in
125
 * sync with 'ipf_proto_status' and 'ipf_status', but more
126
 * generally represent a superset of present and future support. */
127
struct dpif_ipf_proto_status {
128
   uint64_t nfrag_accepted;
129
   uint64_t nfrag_completed_sent;
130
   uint64_t nfrag_expired_sent;
131
   uint64_t nfrag_too_small;
132
   uint64_t nfrag_overlap;
133
   uint64_t nfrag_purged;
134
   unsigned int min_frag_size;
135
   bool enabled;
136
};
137
138
struct dpif_ipf_status {
139
   struct dpif_ipf_proto_status v4;
140
   struct dpif_ipf_proto_status v6;
141
   unsigned int nfrag;
142
   unsigned int nfrag_max;
143
};
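A brief sketch of how a provider might fill these structures from its ipf_get_status() callback (see that member further down in struct dpif_class); every value on the right-hand side is a made-up placeholder rather than a real default.

static int
dpif_foo_ipf_get_status(struct dpif *dpif OVS_UNUSED,
                        struct dpif_ipf_status *status)
{
    *status = (struct dpif_ipf_status) {
        .v4 = { .enabled = true, .min_frag_size = 1200 },  /* Placeholder. */
        .v6 = { .enabled = true, .min_frag_size = 1280 },  /* Placeholder. */
        .nfrag = 0,          /* Fragments currently tracked (placeholder). */
        .nfrag_max = 1000,   /* Tracking limit (placeholder). */
    };
    return 0;
}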
144
145
/* Datapath interface class structure, to be defined by each implementation of
146
 * a datapath interface.
147
 *
148
 * These functions return 0 if successful or a positive errno value on failure,
149
 * except where otherwise noted.
150
 *
151
 * These functions are expected to execute synchronously, that is, to block as
152
 * necessary to obtain a result.  Thus, they may not return EAGAIN or
153
 * EWOULDBLOCK or EINPROGRESS.  We may relax this requirement in the future if
154
 * and when we encounter performance problems. */
155
struct dpif_class {
156
    /* Type of dpif in this class, e.g. "system", "netdev", etc.
157
     *
158
     * One of the providers should supply a "system" type, since this is
159
     * the type assumed if no type is specified when opening a dpif. */
160
    const char *type;
161
162
    /* If 'true', datapath ports should be destroyed on ofproto destruction.
163
     *
164
     * This is used by the vswitch at exit, so that it can clean up any
165
     * datapaths that cannot exist without it (e.g. netdev datapath).  */
166
    bool cleanup_required;
167
168
    /* Called when the dpif provider is registered, typically at program
169
     * startup.  Returning an error from this function will prevent any
170
     * datapath with this class from being created.
171
     *
172
     * This function may be set to null if a datapath class needs no
173
     * initialization at registration time. */
174
    int (*init)(void);
175
176
    /* Enumerates the names of all known created datapaths (of class
177
     * 'dpif_class'), if possible, into 'all_dps'.  The caller has already
178
     * initialized 'all_dps' and other dpif classes might already have added
179
     * names to it.
180
     *
181
     * This is used by the vswitch at startup, so that it can delete any
182
     * datapaths that are not configured.
183
     *
184
     * Some kinds of datapaths might not be practically enumerable, in which
185
     * case this function may be a null pointer. */
186
    int (*enumerate)(struct sset *all_dps, const struct dpif_class *dpif_class);
187
188
    /* Returns the type to pass to netdev_open() when a dpif of class
189
     * 'dpif_class' has a port of type 'type', for a few special cases
190
     * when a netdev type differs from a port type.  For example, when
191
     * using the userspace datapath, a port of type "internal" needs to
192
     * be opened as "tap".
193
     *
194
     * Returns either 'type' itself or a string literal, which must not
195
     * be freed. */
196
    const char *(*port_open_type)(const struct dpif_class *dpif_class,
197
                                  const char *type);
198
199
    /* Attempts to open an existing dpif called 'name', if 'create' is false,
200
     * or to open an existing dpif or create a new one, if 'create' is true.
201
     *
202
     * 'dpif_class' is the class of dpif to open.
203
     *
204
     * If successful, stores a pointer to the new dpif in '*dpifp', which must
205
     * have class 'dpif_class'.  On failure there are no requirements on what
206
     * is stored in '*dpifp'. */
207
    int (*open)(const struct dpif_class *dpif_class,
208
                const char *name, bool create, struct dpif **dpifp);
209
210
    /* Closes 'dpif' and frees associated memory. */
211
    void (*close)(struct dpif *dpif);
212
213
    /* Attempts to destroy the dpif underlying 'dpif'.
214
     *
215
     * If successful, 'dpif' will not be used again except as an argument for
216
     * the 'close' member function. */
217
    int (*destroy)(struct dpif *dpif);
218
219
    /* Performs periodic work needed by 'dpif', if any is necessary.
220
     * Returns true if revalidation is needed. */
221
    bool (*run)(struct dpif *dpif);
222
223
    /* Arranges for poll_block() to wake up if the "run" member function needs
224
     * to be called for 'dpif'. */
225
    void (*wait)(struct dpif *dpif);
226
227
    /* Retrieves statistics for 'dpif' into 'stats'. */
228
    int (*get_stats)(const struct dpif *dpif, struct dpif_dp_stats *stats);
229
230
    int (*set_features)(struct dpif *dpif, uint32_t user_features);
231
    uint32_t (*get_features)(struct dpif *dpif);
232
233
    /* Adds 'netdev' as a new port in 'dpif'.  If '*port_no' is not
234
     * ODPP_NONE, attempts to use that as the port's port number.
235
     *
236
     * If port is successfully added, sets '*port_no' to the new port's
237
     * port number.  Returns EBUSY if caller attempted to choose a port
238
     * number, and it was in use. */
239
    int (*port_add)(struct dpif *dpif, struct netdev *netdev,
240
                    odp_port_t *port_no);
241
242
    /* Removes port numbered 'port_no' from 'dpif'. */
243
    int (*port_del)(struct dpif *dpif, odp_port_t port_no);
244
245
    /* Refreshes configuration of 'dpif's port. The implementation might
246
     * postpone applying the changes until run() is called. */
247
    int (*port_set_config)(struct dpif *dpif, odp_port_t port_no,
248
                           const struct smap *cfg);
249
250
    /* Queries 'dpif' for a port with the given 'port_no' or 'devname'.
251
     * If 'port' is not null, stores information about the port into
252
     * '*port' if successful.
253
     *
254
     * If the port doesn't exist, the provider must return ENODEV.  Other
255
     * error numbers mean that something went wrong and will be
256
     * treated differently by upper layers.
257
     *
258
     * If 'port' is not null, the caller takes ownership of data in
259
     * 'port' and must free it with dpif_port_destroy() when it is no
260
     * longer needed. */
261
    int (*port_query_by_number)(const struct dpif *dpif, odp_port_t port_no,
262
                                struct dpif_port *port);
263
    int (*port_query_by_name)(const struct dpif *dpif, const char *devname,
264
                              struct dpif_port *port);
265
266
    /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
267
     * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
268
     * flows whose packets arrived on port 'port_no'.
269
     *
270
     * A 'port_no' of UINT32_MAX should be treated as a special case.  The
271
     * implementation should return a reserved PID, not allocated to any port,
272
     * that the client may use for special purposes.
273
     *
274
     * The return value only needs to be meaningful when DPIF_UC_ACTION has
275
     * been enabled in the 'dpif''s listen mask, and it is allowed to change
276
     * when DPIF_UC_ACTION is disabled and then re-enabled.
277
     *
278
     * A dpif provider that doesn't have meaningful Netlink PIDs can use NULL
279
     * for this function.  This is equivalent to always returning 0. */
280
    uint32_t (*port_get_pid)(const struct dpif *dpif, odp_port_t port_no);
281
282
    /* Attempts to begin dumping the ports in a dpif.  On success, returns 0
283
     * and initializes '*statep' with any data needed for iteration.  On
284
     * failure, returns a positive errno value. */
285
    int (*port_dump_start)(const struct dpif *dpif, void **statep);
286
287
    /* Attempts to retrieve another port from 'dpif' for 'state', which was
288
     * initialized by a successful call to the 'port_dump_start' function for
289
     * 'dpif'.  On success, stores a new dpif_port into 'port' and returns 0.
290
     * Returns EOF if the end of the port table has been reached, or a positive
291
     * errno value on error.  This function will not be called again once it
292
     * returns nonzero for a given iteration (but the 'port_dump_done'
293
     * function will be called afterward).
294
     *
295
     * The dpif provider retains ownership of the data stored in 'port'.  It
296
     * must remain valid until at least the next call to 'port_dump_next' or
297
     * 'port_dump_done' for 'state'. */
298
    int (*port_dump_next)(const struct dpif *dpif, void *state,
299
                          struct dpif_port *port);
300
301
    /* Releases resources from 'dpif' for 'state', which was initialized by a
302
     * successful call to the 'port_dump_start' function for 'dpif'.  */
303
    int (*port_dump_done)(const struct dpif *dpif, void *state);
304
305
    /* Polls for changes in the set of ports in 'dpif'.  If the set of ports in
306
     * 'dpif' has changed, then this function should do one of the
307
     * following:
308
     *
309
     * - Preferably: store the name of the device that was added to or deleted
310
     *   from 'dpif' in '*devnamep' and return 0.  The caller is responsible
311
     *   for freeing '*devnamep' (with free()) when it no longer needs it.
312
     *
313
     * - Alternatively: return ENOBUFS, without indicating the device that was
314
     *   added or deleted.
315
     *
316
     * Occasional 'false positives', in which the function returns 0 while
317
     * indicating a device that was not actually added or deleted or returns
318
     * ENOBUFS without any change, are acceptable.
319
     *
320
     * If the set of ports in 'dpif' has not changed, returns EAGAIN.  May also
321
     * return other positive errno values to indicate that something has gone
322
     * wrong. */
323
    int (*port_poll)(const struct dpif *dpif, char **devnamep);
324
325
    /* Arranges for the poll loop to wake up when 'port_poll' will return a
326
     * value other than EAGAIN. */
327
    void (*port_poll_wait)(const struct dpif *dpif);
328
329
    /* Deletes all flows from 'dpif' and clears all of its queues of received
330
     * packets. */
331
    int (*flow_flush)(struct dpif *dpif);
332
333
    /* Flow dumping interface.
334
     *
335
     * This is the back-end for the flow dumping interface described in
336
     * dpif.h.  Please read the comments there first, because this code
337
     * closely follows it.
338
     *
339
     * 'flow_dump_create' and 'flow_dump_thread_create' must always return an
340
     * initialized and usable data structure and defer error return until
341
     * flow_dump_destroy().  This hasn't been a problem for the dpifs that
342
     * exist so far.
343
     *
344
     * 'flow_dump_create' and 'flow_dump_thread_create' must initialize the
345
     * structures that they return with dpif_flow_dump_init() and
346
     * dpif_flow_dump_thread_init(), respectively.
347
     *
348
     * If 'terse' is true, then only UID and statistics will
349
     * be returned in the dump. Otherwise, all fields will be returned.
350
     *
351
     * If 'types' isn't null, dumps only the flows of the passed types. */
352
    struct dpif_flow_dump *(*flow_dump_create)(
353
        const struct dpif *dpif,
354
        bool terse,
355
        struct dpif_flow_dump_types *types);
356
    int (*flow_dump_destroy)(struct dpif_flow_dump *dump);
357
358
    struct dpif_flow_dump_thread *(*flow_dump_thread_create)(
359
        struct dpif_flow_dump *dump);
360
    void (*flow_dump_thread_destroy)(struct dpif_flow_dump_thread *thread);
361
362
    int (*flow_dump_next)(struct dpif_flow_dump_thread *thread,
363
                          struct dpif_flow *flows, int max_flows);
364
    /* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order
365
     * in which they are specified, placing each operation's results in the
366
     * "output" members documented in comments and the 'error' member of each
367
     * dpif_op. */
368
    void (*operate)(struct dpif *dpif, struct dpif_op **ops, size_t n_ops);
369
370
    /* Enables or disables receiving packets with dpif_recv() for 'dpif'.
371
     * Turning packet receive off and then back on is allowed to change Netlink
372
     * PID assignments (see ->port_get_pid()).  The client is responsible for
373
     * updating flows as necessary if it does this. */
374
    int (*recv_set)(struct dpif *dpif, bool enable);
375
376
    /* Attempts to refresh the poll loops and Netlink sockets used for handling
377
     * upcalls when the number of upcall handlers (upcall receiving thread) is
378
     * changed to 'n_handlers' and receiving packets for 'dpif' is enabled by
379
     * recv_set().
380
     *
381
     * A dpif implementation may choose to ignore 'n_handlers' while returning
382
     * success.
383
     *
384
     * The method for distribution of upcalls between handler threads is
385
     * specific to the dpif implementation.
386
     */
387
    int (*handlers_set)(struct dpif *dpif, uint32_t n_handlers);
388
389
    /* Queries 'dpif' to see if a certain number of handlers are required by
390
     * the implementation.
391
     *
392
     * If a certain number of handlers are required, returns 'true' and sets
393
     * 'n_handlers' to that number of handler threads.
394
     *
395
     * If not, returns 'false'.
396
     */
397
    bool (*number_handlers_required)(struct dpif *dpif, uint32_t *n_handlers);
398
399
    /* Pass custom configuration options to the datapath.  The implementation
400
     * might postpone applying the changes until run() is called. */
401
    int (*set_config)(struct dpif *dpif, const struct smap *other_config);
402
403
    /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a
404
     * priority value used for setting packet priority. */
405
    int (*queue_to_priority)(const struct dpif *dpif, uint32_t queue_id,
406
                             uint32_t *priority);
407
408
    /* Polls for an upcall from 'dpif' for an upcall handler.  Since there
409
     * can be multiple poll loops (see ->handlers_set()), 'handler_id' is
410
     * needed as index to identify the corresponding poll loop.  If
411
     * successful, stores the upcall into '*upcall', using 'buf' for
412
     * storage.  Should only be called if 'recv_set' has been used to enable
413
     * receiving packets from 'dpif'.
414
     *
415
     * The implementation should point 'upcall->key' and 'upcall->userdata'
416
     * (if any) into data in the caller-provided 'buf'.  The implementation may
417
     * also use 'buf' for storing the data of 'upcall->packet'.  If necessary
418
     * to make room, the implementation may reallocate the data in 'buf'.
419
     *
420
     * The caller owns the data of 'upcall->packet' and may modify it.  If
421
     * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
422
     * will be reallocated.  This requires the data of 'upcall->packet' to be
423
     * released with ofpbuf_uninit() before 'upcall' is destroyed.  However,
424
     * when an error is returned, the 'upcall->packet' may be uninitialized
425
     * and should not be released.
426
     *
427
     * This function must not block.  If no upcall is pending when it is
428
     * called, it should return EAGAIN without blocking. */
429
    int (*recv)(struct dpif *dpif, uint32_t handler_id,
430
                struct dpif_upcall *upcall, struct ofpbuf *buf);
431
432
    /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
433
     * has a message queued to be received with the recv member functions.
434
     * Since there can be multiple poll loops (see ->handlers_set()),
435
     * 'handler_id' is needed as index to identify the corresponding poll loop.
436
     * */
437
    void (*recv_wait)(struct dpif *dpif, uint32_t handler_id);
438
439
    /* Throws away any queued upcalls that 'dpif' currently has ready to
440
     * return. */
441
    void (*recv_purge)(struct dpif *dpif);
442
443
    /* When 'dpif' is about to purge the datapath, the higher layer may want
444
     * to be notified so that it could try reacting accordingly (e.g. grabbing
445
     * all flow stats before they are gone).
446
     *
447
     * Registers an upcall callback function with 'dpif'.  This is only used
448
     * if 'dpif' needs to notify about datapath purging.  'aux' is passed to
449
     * the callback on invocation. */
450
    void (*register_dp_purge_cb)(struct dpif *, dp_purge_callback *, void *aux);
451
452
    /* For datapaths that run in userspace (i.e. dpif-netdev), threads polling
453
     * for incoming packets can directly call upcall functions instead of
454
     * offloading packet processing to separate handler threads. Datapaths
455
     * that directly call upcall functions should use the functions below
456
     * to register an upcall function and enable / disable upcalls.
457
     *
458
     * Registers an upcall callback function with 'dpif'. This is only used
459
     * if 'dpif' directly executes upcall functions. 'aux' is passed to the
460
     * callback on invocation. */
461
    void (*register_upcall_cb)(struct dpif *, upcall_callback *, void *aux);
462
463
    /* Enables upcalls if 'dpif' directly executes upcall functions. */
464
    void (*enable_upcall)(struct dpif *);
465
466
    /* Disables upcalls if 'dpif' directly executes upcall functions. */
467
    void (*disable_upcall)(struct dpif *);
468
469
    /* Get datapath version. Caller is responsible for freeing the string
470
     * returned.  */
471
    char *(*get_datapath_version)(void);
472
473
    /* Conntrack entry dumping interface.
474
     *
475
     * These functions are used by ct-dpif.c to provide a datapath-agnostic
476
     * dumping interface to the connection trackers provided by the
477
     * datapaths.
478
     *
479
     * ct_dump_start() should put in '*state' a pointer to a newly allocated
480
     * structure that will be passed by the caller to ct_dump_next() and
481
     * ct_dump_done(). If 'zone' is not NULL, only the entries in '*zone'
482
     * should be dumped.
483
     *
484
     * ct_dump_next() should fill 'entry' with information from a connection
485
     * and prepare to dump the next one on a subsequent invocation.
486
     *
487
     * ct_dump_done() should perform any cleanup necessary (including
488
     * deallocating the 'state' structure, if applicable). */
489
    int (*ct_dump_start)(struct dpif *, struct ct_dpif_dump_state **state,
490
                         const uint16_t *zone, int *);
491
    int (*ct_dump_next)(struct dpif *, struct ct_dpif_dump_state *state,
492
                        struct ct_dpif_entry *entry);
493
    int (*ct_dump_done)(struct dpif *, struct ct_dpif_dump_state *state);
494
495
    /* Starts the dump, initializing the structures involved and the zone
496
     * filter. */
497
    int (*ct_exp_dump_start)(struct dpif *, struct ct_dpif_dump_state **state,
498
                             const uint16_t *zone);
499
    /* Fill the expectation 'entry' with the related information. */
500
    int (*ct_exp_dump_next)(struct dpif *, struct ct_dpif_dump_state *state,
501
                            struct ct_dpif_exp *entry);
502
    /* Ends the dump, cleaning up any pending state. */
503
    int (*ct_exp_dump_done)(struct dpif *, struct ct_dpif_dump_state *state);
504
505
    /* Flushes the connection tracking tables.  The arguments have the
506
     * following behavior:
507
     *
508
     *   - If both 'zone' and 'tuple' are NULL, flush all the conntrack
509
     *     entries.
510
     *   - If 'zone' is not NULL, and 'tuple' is NULL, flush all the
511
     *     conntrack entries in '*zone'.
512
     *   - If 'tuple' is not NULL, flush the conntrack entry specified by
513
     *     'tuple' in '*zone'. If 'zone' is NULL, use the default zone
514
     *     (zone 0). */
515
    int (*ct_flush)(struct dpif *, const uint16_t *zone,
516
                    const struct ct_dpif_tuple *tuple);
517
    /* Set max connections allowed. */
518
    int (*ct_set_maxconns)(struct dpif *, uint32_t maxconns);
519
    /* Get max connections allowed. */
520
    int (*ct_get_maxconns)(struct dpif *, uint32_t *maxconns);
521
    /* Get number of connections tracked. */
522
    int (*ct_get_nconns)(struct dpif *, uint32_t *nconns);
523
    /* Enable or disable TCP sequence checking. */
524
    int (*ct_set_tcp_seq_chk)(struct dpif *, bool enabled);
525
    /* Get the TCP sequence checking configuration. */
526
    int (*ct_get_tcp_seq_chk)(struct dpif *, bool *enabled);
527
    /* Updates the sweep interval for the CT sweeper. */
528
    int (*ct_set_sweep_interval)(struct dpif *, uint32_t ms);
529
    /* Get the current value of the sweep interval for the CT sweeper. */
530
    int (*ct_get_sweep_interval)(struct dpif *, uint32_t *ms);
531
532
533
    /* Connection tracking per zone limit */
534
535
    /* Per zone conntrack limit sets the maximum allowed connections in zones
536
     * to provide resource isolation.  If a per zone limit for a particular
537
     * zone is not available in the datapath, it defaults to the default
538
     * per zone limit.  Initially, the default per zone limit is
539
     * unlimited (0). */
540
541
    /* Sets the max connections allowed per zone according to 'zone_limits',
542
     * a list of 'struct ct_dpif_zone_limit' entries (the 'count' member
543
     * is not used when setting limits). */
544
    int (*ct_set_limits)(struct dpif *, const struct ovs_list *zone_limits);
545
546
    /* Looks up the per zone limits for all zones in the 'zone_limits_in' list
547
     * of 'struct ct_dpif_zone_limit' entries (the 'limit' and 'count' members
548
     * are not used), and stores the reply that includes the zone, the per
549
     * zone limit, and the number of connections in the zone into
550
     * 'zone_limits_out' list.  If the 'zone_limits_in' list is empty the
551
     * report will contain all previously set zone limits and the default
552
     * limit.  Note: The default zone limit "count" is not used. */
553
    int (*ct_get_limits)(struct dpif *, const struct ovs_list *zone_limits_in,
554
                         struct ovs_list *zone_limits_out);
555
556
    /* Deletes per zone limit of all zones specified in 'zone_limits', a
557
     * list of 'struct ct_dpif_zone_limit' entries. */
558
    int (*ct_del_limits)(struct dpif *, const struct ovs_list *zone_limits);
559
560
    /* Connection tracking timeout policy */
561
562
    /* A connection tracking timeout policy contains a list of timeout
563
     * attributes that specify timeout values on various connection states.
564
     * In a datapath, the timeout policy is identified by a 4-byte unsigned
565
     * integer.  Unsupported timeout attributes are ignored.  When a
566
     * connection is committed it can be associated with a timeout
567
     * policy, or it defaults to the datapath's default timeout policy. */
568
569
    /* Sets timeout policy '*tp' into the datapath. */
570
    int (*ct_set_timeout_policy)(struct dpif *,
571
                                 const struct ct_dpif_timeout_policy *tp);
572
    /* Gets a timeout policy specified by tp_id and stores it into '*tp'. */
573
    int (*ct_get_timeout_policy)(struct dpif *, uint32_t tp_id,
574
                                 struct ct_dpif_timeout_policy *tp);
575
    /* Deletes a timeout policy identified by 'tp_id'. */
576
    int (*ct_del_timeout_policy)(struct dpif *, uint32_t tp_id);
577
578
    /* Conntrack timeout policy dumping interface.
579
     *
580
     * These functions provide a datapath-agnostic dumping interface
581
     * to the conntrack timeout policy provided by the datapaths.
582
     *
583
     * ct_timeout_policy_dump_start() should put in '*statep' a pointer to
584
     * a newly allocated structure that will be passed by the caller to
585
     * ct_timeout_policy_dump_next() and ct_timeout_policy_dump_done().
586
     *
587
     * ct_timeout_policy_dump_next() attempts to retrieve another timeout
588
     * policy from 'dpif' for 'state', which was initialized by a successful
589
     * call to ct_timeout_policy_dump_start().  On success, stores a new
590
     * timeout policy into 'tp' and returns 0.  Returns EOF if the last
591
     * timeout policy has been dumped, or a positive errno value on error.
592
     * This function will not be called again once it returns nonzero
593
     * for a given iteration (but the ct_timeout_policy_dump_done() will
594
     * be called afterward).
595
     *
596
     * ct_timeout_policy_dump_done() should perform any cleanup necessary
597
     * (including deallocating the 'state' structure, if applicable). */
598
    int (*ct_timeout_policy_dump_start)(struct dpif *, void **statep);
599
    int (*ct_timeout_policy_dump_next)(struct dpif *, void *state,
600
                                       struct ct_dpif_timeout_policy *tp);
601
    int (*ct_timeout_policy_dump_done)(struct dpif *, void *state);
602
603
    /* Gets timeout policy based on 'tp_id', 'dl_type' and 'nw_proto'.
604
     * On success, returns 0, stores the timeout policy name in 'tp_name',
605
     * and sets 'is_generic'. 'is_generic' is false if the returned timeout
606
     * policy in the 'dpif' is specific to 'dl_type' and 'nw_proto' in the
607
     * datapath (e.g., the Linux kernel datapath).  Sets 'is_generic' to
608
     * true, if the timeout policy supports all OVS supported L3/L4
609
     * protocols.
610
     *
611
     * The caller is responsible for freeing 'tp_name'. */
612
    int (*ct_get_timeout_policy_name)(struct dpif *, uint32_t tp_id,
613
                                      uint16_t dl_type, uint8_t nw_proto,
614
                                      char **tp_name, bool *is_generic);
615
616
    /* Stores the conntrack features supported by 'dpif' into 'features'.
617
     * The value is a bitmap of CONNTRACK_F_* bits. */
618
    int (*ct_get_features)(struct dpif *, enum ct_features *features);
619
620
    /* IP Fragmentation. */
621
622
    /* Disables or enables conntrack fragment reassembly.  The default
623
     * setting is enabled. */
624
    int (*ipf_set_enabled)(struct dpif *, bool v6, bool enabled);
625
626
    /* Set the minimum fragment size allowed. */
627
    int (*ipf_set_min_frag)(struct dpif *, bool v6, uint32_t min_frag);
628
629
    /* Set maximum number of fragments tracked. */
630
    int (*ipf_set_max_nfrags)(struct dpif *, uint32_t max_nfrags);
631
632
    /* Get fragmentation configuration status and counters. */
633
    int (*ipf_get_status)(struct dpif *,
634
                          struct dpif_ipf_status *dpif_ipf_status);
635
636
    /* The following 3 APIs find and print ipf lists by creating a string
637
     * representation of the state of an ipf list, to which 'dump' points.
638
     * 'ipf_dump_start()' allocates memory for 'ipf_dump_ctx'.
639
     * 'ipf_dump_next()' finds the next ipf list and copies its
640
     * characteristics to a string, which is freed by the caller.
641
     * 'ipf_dump_done()' frees the 'ipf_dump_ctx' that was allocated in
642
     * 'ipf_dump_start'. */
643
    int (*ipf_dump_start)(struct dpif *, struct ipf_dump_ctx **ipf_dump_ctx);
644
    int (*ipf_dump_next)(struct dpif *, void *ipf_dump_ctx, char **dump);
645
    int (*ipf_dump_done)(struct dpif *, void *ipf_dump_ctx);
646
647
    /* Meters */
648
649
    /* Queries 'dpif' for supported meter features.
650
     * A NULL function pointer means no meter features are supported. */
651
    void (*meter_get_features)(const struct dpif *,
652
                               struct ofputil_meter_features *);
653
654
    /* Adds or modifies the meter in 'dpif' with the given 'meter_id'
655
     * and the configuration in 'config'.
656
     *
657
     * The meter id specified through 'config->meter_id' is ignored. */
658
    int (*meter_set)(struct dpif *, ofproto_meter_id meter_id,
659
                     struct ofputil_meter_config *);
660
661
    /* Queries 'dpif' for meter stats with the given 'meter_id'.  Stores
662
     * maximum of 'n_bands' meter statistics, returning the number of band
663
     * stats in 'stats->n_bands' if successful. */
664
    int (*meter_get)(const struct dpif *, ofproto_meter_id meter_id,
665
                     struct ofputil_meter_stats *, uint16_t n_bands);
666
667
    /* Removes meter 'meter_id' from 'dpif'. Stores meter and band statistics
668
     * (for a maximum of 'n_bands'), returning the number of band stats
669
     * in 'stats->n_bands' if successful.  'stats' may be passed in as NULL if
670
     * no stats are needed, in which case 'n_bands' must be passed in as
671
     * zero. */
672
    int (*meter_del)(struct dpif *, ofproto_meter_id meter_id,
673
                     struct ofputil_meter_stats *, uint16_t n_bands);
674
675
    /* Adds a bond with 'bond_id' and the member-map to 'dpif'. */
676
    int (*bond_add)(struct dpif *dpif, uint32_t bond_id,
677
                    odp_port_t *member_map);
678
679
    /* Removes bond identified by 'bond_id' from 'dpif'. */
680
    int (*bond_del)(struct dpif *dpif, uint32_t bond_id);
681
682
    /* Reads bond stats from 'dpif'.  'n_bytes' should be an array with size
683
     * sufficient to store BOND_BUCKETS number of elements. */
684
    int (*bond_stats_get)(struct dpif *dpif, uint32_t bond_id,
685
                          uint64_t *n_bytes);
686
687
    /* Cache configuration
688
     *
689
     * Multiple levels of cache can exist in a given datapath implementation.
690
     * An API has been provided to get the number of supported caches, which
691
     * can then be used to get/set specific configuration. Cache level is 0
692
     * indexed, i.e. if 1 level is supported, the level value to use is 0.
693
     *
694
     * Get the number of cache levels supported. */
695
    int (*cache_get_supported_levels)(struct dpif *dpif, uint32_t *levels);
696
697
    /* Get the cache name for the given level. */
698
    int (*cache_get_name)(struct dpif *dpif, uint32_t level,
699
                          const char **name);
700
701
    /* Get currently configured cache size. */
702
    int (*cache_get_size)(struct dpif *dpif, uint32_t level, uint32_t *size);
703
704
    /* Set cache size. */
705
    int (*cache_set_size)(struct dpif *dpif, uint32_t level, uint32_t size);
706
};
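To tie the pieces together, here is a minimal, hypothetical class definition for the "foo" provider sketched earlier; members documented above as optional are simply left NULL (designated initializers zero the rest), and a real provider would plug in its close, destroy, run/wait, port, flow, and upcall callbacks. How the class then gets registered is outside the scope of this header excerpt.

static const struct dpif_class dpif_foo_class = {
    .type = "foo",               /* Hypothetical datapath type. */
    .cleanup_required = true,    /* Destroy ports on ofproto destruction. */
    .open = dpif_foo_open,       /* The hypothetical open() sketched above. */
    /* close, destroy, run/wait, port_*, flow_*, operate, recv_* and the
     * remaining callbacks would be filled in here; optional members such
     * as init or enumerate may stay NULL. */
};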
707
708
extern const struct dpif_class dpif_netlink_class;
709
extern const struct dpif_class dpif_netdev_class;
710
711
#ifdef  __cplusplus
712
}
713
#endif
714
715
#endif /* dpif-provider.h */