Coverage Report

Created: 2025-07-01 06:50

/src/openvswitch/lib/dpif-netdev-perf.h
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (c) 2017 Ericsson AB.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#ifndef DPIF_NETDEV_PERF_H
18
#define DPIF_NETDEV_PERF_H 1
19
20
#include <stdbool.h>
21
#include <stddef.h>
22
#include <stdint.h>
23
#include <string.h>
24
#include <time.h>
25
#include <math.h>
26
27
#ifdef DPDK_NETDEV
28
#include <rte_config.h>
29
#include <rte_cycles.h>
30
#endif
31
32
#include "openvswitch/vlog.h"
33
#include "ovs-atomic.h"
34
#include "timeval.h"
35
#include "unixctl.h"
36
#include "util.h"
37
38
#ifdef  __cplusplus
39
extern "C" {
40
#endif
41
42
/* This module encapsulates data structures and functions to maintain basic PMD
43
 * performance metrics such as packet counters, execution cycles as well as
44
 * histograms and time series recording for more detailed PMD metrics.
45
 *
46
 * It provides a clean API for dpif-netdev to initialize, update and read and
47
 * reset these metrics.
48
 *
49
 * The basic set of PMD counters is implemented as atomic_uint64_t variables
50
 * to guarantee correct read also in 32-bit systems.
51
 *
52
 * The detailed PMD performance metrics are only supported on 64-bit systems
53
 * with atomic 64-bit read and store semantics for plain uint64_t counters.
54
 */
55
56
/* Set of counter types maintained in pmd_perf_stats. */
57
58
enum pmd_stat_type {
    PMD_STAT_PHWOL_HIT,     /* Packets that had a partial HWOL hit (phwol). */
    PMD_STAT_MFEX_OPT_HIT,  /* Packets that had miniflow optimized match. */
    PMD_STAT_SIMPLE_HIT,    /* Packets that had a simple match hit. */
    PMD_STAT_EXACT_HIT,     /* Packets that had an exact match (emc). */
    PMD_STAT_SMC_HIT,       /* Packets that had a sig match hit (SMC). */
    PMD_STAT_MASKED_HIT,    /* Packets that matched in the flow table. */
    PMD_STAT_MISS,          /* Packets that did not match and upcall was ok. */
    PMD_STAT_LOST,          /* Packets that did not match and upcall failed. */
                            /* The above statistics account for the total
                             * number of packet passes through the datapath
                             * pipeline and should not be overlapping with each
                             * other. */
    PMD_STAT_MASKED_LOOKUP, /* Number of subtable lookups for flow table
                               hits. Each MASKED_HIT hit will have >= 1
                               MASKED_LOOKUP(s). */
    PMD_STAT_RECV,          /* Packets entering the datapath pipeline from an
                             * interface. */
    PMD_STAT_RECIRC,        /* Packets reentering the datapath pipeline due to
                             * recirculation. */
    PMD_STAT_SENT_PKTS,     /* Packets that have been sent. */
    PMD_STAT_SENT_BATCHES,  /* Number of batches sent. */
    PMD_CYCLES_ITER_IDLE,   /* Cycles spent in idle iterations. */
    PMD_CYCLES_ITER_BUSY,   /* Cycles spent in busy iterations. */
    PMD_CYCLES_UPCALL,      /* Cycles spent processing upcalls. */
    PMD_SLEEP_ITER,         /* Iterations where a sleep has taken place. */
    PMD_CYCLES_SLEEP,       /* Total cycles slept to save power. */
    PMD_N_STATS             /* Number of counter types; used as the size of
                             * the counter arrays below, must remain last. */
};
87
88
/* Array of PMD counters indexed by enum pmd_stat_type.
89
 * The n[] array contains the actual counter values since initialization
90
 * of the PMD. Counters are atomically updated from the PMD but are
91
 * read and cleared also from other processes. To clear the counters at
92
 * PMD run-time, the current counter values are copied over to the zero[]
93
 * array. To read counters we subtract zero[] value from n[]. */
94
95
struct pmd_counters {
    /* n[] is updated with atomic 64-bit stores; a "clear" copies the current
     * n[] values into zero[], and readers report n[i] - zero[i]. */
    atomic_uint64_t n[PMD_N_STATS];     /* Value since _init(). */
    uint64_t zero[PMD_N_STATS];         /* Value at last _clear().  */
};
99
100
/* Data structure to collect statistical distribution of an integer measurement
101
 * type in form of a histogram. The wall[] array contains the inclusive
102
 * upper boundaries of the bins, while the bin[] array contains the actual
103
 * counters per bin. The histogram walls are typically set automatically
104
 * using the functions provided below. */
105
106
0
#define NUM_BINS 32             /* Number of histogram bins. */

struct histogram {
    /* Inclusive upper boundary of each bin. */
    uint32_t wall[NUM_BINS];
    /* Number of samples that fell into each bin. */
    uint64_t bin[NUM_BINS];
};
112
113
/* Data structure to record detailed PMD execution metrics per iteration for
114
 * a history period of up to HISTORY_LEN iterations in a circular buffer.
115
 * Also used to record up to HISTORY_LEN millisecond averages/totals of these
116
 * metrics.*/
117
118
/* One record of PMD metrics, used both for a single iteration and for the
 * totals/averages of one millisecond (see struct history below). */
struct iter_stats {
    uint64_t timestamp;         /* Iteration no. or millisecond. */
    uint64_t cycles;            /* Number of TSC cycles spent in it. or ms. */
    uint64_t busy_cycles;       /* Cycles spent in busy iterations or ms. */
    uint32_t iterations;        /* Iterations in ms. */
    uint32_t pkts;              /* Packets processed in iteration or ms. */
    uint32_t upcalls;           /* Number of upcalls in iteration or ms. */
    uint32_t upcall_cycles;     /* Cycles spent in upcalls in it. or ms. */
    uint32_t batches;           /* Number of rx batches in iteration or ms. */
    uint32_t max_vhost_qfill;   /* Maximum fill level in iteration or ms. */
};
129
130
0
#define HISTORY_LEN 1000        /* Length of recorded history
                                   (iterations and ms). */
#define DEF_HIST_SHOW 20        /* Default number of history samples to
                                   display. */

/* Circular buffer of HISTORY_LEN iter_stats samples. */
struct history {
    size_t idx;                 /* Slot to which next call to history_store()
                                   will write. */
    struct iter_stats sample[HISTORY_LEN];
};
140
141
/* Container for all performance metrics of a PMD within the struct
142
 * dp_netdev_pmd_thread. The metrics must be updated from within the PMD
143
 * thread but can be read from any thread. The basic PMD counters in
144
 * struct pmd_counters can be read without protection against concurrent
145
 * clearing. The other metrics may only be safely read with the clear_mutex
146
 * held to protect against concurrent clearing. */
147
148
struct pmd_perf_stats {
    /* Prevents interference between PMD polling and stats clearing. */
    struct ovs_mutex stats_mutex;
    /* Set by CLI thread to order clearing of PMD stats. */
    volatile bool clear;
    /* Prevents stats retrieval while clearing is in progress. */
    struct ovs_mutex clear_mutex;
    /* Start of the current performance measurement period. */
    uint64_t start_ms;
    /* Counter for PMD iterations. */
    uint64_t iteration_cnt;
    /* Start of the current iteration. */
    uint64_t start_tsc;
    /* Latest TSC time stamp taken in PMD.  Cached by
     * cycles_counter_update() and read by cycles_counter_get(). */
    uint64_t last_tsc;
    /* Used to space certain checks in time. */
    uint64_t next_check_tsc;
    /* If non-NULL, outermost cycle timer currently running in PMD. */
    struct cycle_timer *cur_timer;
    /* Set of PMD counters with their zero offsets. */
    struct pmd_counters counters;
    /* Statistics of the current iteration. */
    struct iter_stats current;
    /* Totals for the current millisecond. */
    struct iter_stats totals;
    /* Histograms for the PMD metrics. */
    struct histogram cycles;
    struct histogram pkts;
    struct histogram cycles_per_pkt;
    struct histogram upcalls;
    struct histogram cycles_per_upcall;
    struct histogram pkts_per_batch;
    struct histogram max_vhost_qfill;
    /* Iteration history buffer. */
    struct history iterations;
    /* Millisecond history buffer. */
    struct history milliseconds;
    /* Suspicious iteration log. */
    uint32_t log_susp_it;
    /* Start of iteration range to log. */
    uint32_t log_begin_it;
    /* End of iteration range to log. */
    uint32_t log_end_it;
    /* Reason for logging suspicious iteration. */
    char *log_reason;
};
194
195
#ifdef __linux__
196
/* Cycle-counter fallback used by cycles_counter_update() on Linux builds
 * that have neither DPDK nor an inline TSC read for the architecture:
 * returns the raw monotonic clock in nanoseconds and caches it in
 * s->last_tsc.  If clock_gettime() fails, the previously cached value is
 * returned unchanged. */
static inline uint64_t
rdtsc_syscall(struct pmd_perf_stats *s)
{
    struct timespec val;
    uint64_t v;

    if (clock_gettime(CLOCK_MONOTONIC_RAW, &val) != 0) {
       return s->last_tsc;
    }

    v  = val.tv_sec * UINT64_C(1000000000) + val.tv_nsec;
    return s->last_tsc = v;
}
Unexecuted instantiation: dpif-netdev.c:rdtsc_syscall
Unexecuted instantiation: dpif-netdev-private-dpif.c:rdtsc_syscall
Unexecuted instantiation: dpif-netdev-private-extract.c:rdtsc_syscall
Unexecuted instantiation: dpif-netdev-perf.c:rdtsc_syscall
Unexecuted instantiation: dpif-netdev-extract-study.c:rdtsc_syscall
Unexecuted instantiation: dpif-netdev-lookup.c:rdtsc_syscall
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:rdtsc_syscall
Unexecuted instantiation: dpif-netdev-lookup-generic.c:rdtsc_syscall
209
#endif
210
211
/* Support for accurate timing of PMD execution on TSC clock cycle level.
212
 * These functions are intended to be invoked in the context of pmd threads. */
213
214
/* Read the TSC cycle register and cache it. Any function not requiring clock
215
 * cycle accuracy should read the cached value using cycles_counter_get() to
216
 * avoid the overhead of reading the TSC register. */
217
218
/* Read the best available cycle counter for this platform, cache the value
 * in s->last_tsc and return it. */
static inline uint64_t
cycles_counter_update(struct pmd_perf_stats *s)
{
#ifdef DPDK_NETDEV
    /* With DPDK, use its TSC abstraction. */
    return s->last_tsc = rte_get_tsc_cycles();
#elif !defined(_MSC_VER) && defined(__x86_64__)
    /* Direct rdtsc: EDX:EAX receive the high and low 32 bits. */
    uint32_t h, l;
    asm volatile("rdtsc" : "=a" (l), "=d" (h));

    return s->last_tsc = ((uint64_t) h << 32) | l;
#elif !defined(_MSC_VER) && defined(__aarch64__)
    /* ARMv8: read the virtual counter register. */
    asm volatile("mrs %0, cntvct_el0" : "=r" (s->last_tsc));

    return s->last_tsc;
#elif defined(__linux__)
    /* Fall back to clock_gettime() (see rdtsc_syscall() above). */
    return rdtsc_syscall(s);
#else
    /* No cycle counter available on this platform. */
    return s->last_tsc = 0;
#endif
}
Unexecuted instantiation: dpif-netdev.c:cycles_counter_update
Unexecuted instantiation: dpif-netdev-private-dpif.c:cycles_counter_update
Unexecuted instantiation: dpif-netdev-private-extract.c:cycles_counter_update
Unexecuted instantiation: dpif-netdev-perf.c:cycles_counter_update
Unexecuted instantiation: dpif-netdev-extract-study.c:cycles_counter_update
Unexecuted instantiation: dpif-netdev-lookup.c:cycles_counter_update
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:cycles_counter_update
Unexecuted instantiation: dpif-netdev-lookup-generic.c:cycles_counter_update
238
239
/* Return the cached TSC value from the last cycles_counter_update() call
 * without the cost of reading the TSC register again. */
static inline uint64_t
cycles_counter_get(struct pmd_perf_stats *s)
{
    return s->last_tsc;
}
Unexecuted instantiation: dpif-netdev.c:cycles_counter_get
Unexecuted instantiation: dpif-netdev-private-dpif.c:cycles_counter_get
Unexecuted instantiation: dpif-netdev-private-extract.c:cycles_counter_get
Unexecuted instantiation: dpif-netdev-perf.c:cycles_counter_get
Unexecuted instantiation: dpif-netdev-extract-study.c:cycles_counter_get
Unexecuted instantiation: dpif-netdev-lookup.c:cycles_counter_get
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:cycles_counter_get
Unexecuted instantiation: dpif-netdev-lookup-generic.c:cycles_counter_get
244
245
void pmd_perf_estimate_tsc_frequency(void);
246
247
/* A nestable timer for measuring execution time in TSC cycles.
248
 *
249
 * Usage:
250
 * struct cycle_timer timer;
251
 *
252
 * cycle_timer_start(pmd, &timer);
253
 * <Timed execution>
254
 * uint64_t cycles = cycle_timer_stop(pmd, &timer);
255
 *
256
 * The caller must guarantee that a call to cycle_timer_start() is always
257
 * paired with a call to cycle_timer_stop().
258
 *
259
 * It is possible to have nested cycle timers within the timed code. The
260
 * execution time measured by the nested timers is excluded from the time
261
 * measured by the embracing timer.
262
 */
263
264
struct cycle_timer {
    uint64_t start;                  /* TSC at cycle_timer_start(), adjusted
                                      * for time spent in nested timers. */
    uint64_t suspended;              /* TSC at which this timer was suspended
                                      * by a nested timer, 0 if none. */
    struct cycle_timer *interrupted; /* Enclosing timer that this timer
                                      * interrupted, or NULL. */
};
269
270
/* Start nestable cycle timer 'timer' on PMD stats 's', suspending the
 * currently running timer (if any) until this one is stopped. */
static inline void
cycle_timer_start(struct pmd_perf_stats *s,
                  struct cycle_timer *timer)
{
    struct cycle_timer *cur_timer = s->cur_timer;
    uint64_t now = cycles_counter_update(s);

    if (cur_timer) {
        /* Record when the enclosing timer was suspended so that
         * cycle_timer_stop() can exclude the nested time from it. */
        cur_timer->suspended = now;
    }
    timer->interrupted = cur_timer;
    timer->start = now;
    timer->suspended = 0;
    s->cur_timer = timer;
}
Unexecuted instantiation: dpif-netdev.c:cycle_timer_start
Unexecuted instantiation: dpif-netdev-private-dpif.c:cycle_timer_start
Unexecuted instantiation: dpif-netdev-private-extract.c:cycle_timer_start
Unexecuted instantiation: dpif-netdev-perf.c:cycle_timer_start
Unexecuted instantiation: dpif-netdev-extract-study.c:cycle_timer_start
Unexecuted instantiation: dpif-netdev-lookup.c:cycle_timer_start
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:cycle_timer_start
Unexecuted instantiation: dpif-netdev-lookup-generic.c:cycle_timer_start
285
286
/* Stop 'timer' and return the TSC cycles it measured, excluding time spent
 * in nested timers.  Resumes the interrupted enclosing timer, if any. */
static inline uint64_t
cycle_timer_stop(struct pmd_perf_stats *s,
                 struct cycle_timer *timer)
{
    /* Assert that this is the current cycle timer. */
    ovs_assert(s->cur_timer == timer);
    uint64_t now = cycles_counter_update(s);
    struct cycle_timer *intr_timer = timer->interrupted;

    if (intr_timer) {
        /* Adjust the start offset by the suspended cycles. */
        intr_timer->start += now - intr_timer->suspended;
    }
    /* Restore suspended timer, if any. */
    s->cur_timer = intr_timer;
    return now - timer->start;
}
Unexecuted instantiation: dpif-netdev.c:cycle_timer_stop
Unexecuted instantiation: dpif-netdev-private-dpif.c:cycle_timer_stop
Unexecuted instantiation: dpif-netdev-private-extract.c:cycle_timer_stop
Unexecuted instantiation: dpif-netdev-perf.c:cycle_timer_stop
Unexecuted instantiation: dpif-netdev-extract-study.c:cycle_timer_stop
Unexecuted instantiation: dpif-netdev-lookup.c:cycle_timer_stop
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:cycle_timer_stop
Unexecuted instantiation: dpif-netdev-lookup-generic.c:cycle_timer_stop
303
304
/* Functions to initialize and reset the PMD performance metrics. */
305
306
void pmd_perf_stats_init(struct pmd_perf_stats *s);
307
void pmd_perf_stats_clear(struct pmd_perf_stats *s);
308
void pmd_perf_stats_clear_lock(struct pmd_perf_stats *s);
309
310
/* Functions to read and update PMD counters. */
311
312
void pmd_perf_read_counters(struct pmd_perf_stats *s,
313
                            uint64_t stats[PMD_N_STATS]);
314
315
/* PMD performance counters are updated lock-less. For real PMDs
316
 * they are only updated from the PMD thread itself. In the case of the
317
 * NON-PMD they might be updated from multiple threads, but we can live
318
 * with losing a rare update as 100% accuracy is not required.
319
 * However, as counters are read for display from outside the PMD thread
320
 * with e.g. pmd-stats-show, we make sure that the 64-bit read and store
321
 * operations are atomic also on 32-bit systems so that readers cannot
322
 * read garbage. On 64-bit systems this incurs no overhead. */
323
324
/* Add 'delta' to counter 'counter' of PMD stats 's'. */
static inline void
pmd_perf_update_counter(struct pmd_perf_stats *s,
                        enum pmd_stat_type counter, int delta)
{
    uint64_t tmp;
    /* The relaxed atomic load and store keep the 64-bit accesses
     * indivisible on 32-bit platforms; the read-modify-write as a whole is
     * not atomic, which is acceptable per the comment above (updates
     * normally come only from the owning PMD thread). */
    atomic_read_relaxed(&s->counters.n[counter], &tmp);
    tmp += delta;
    atomic_store_relaxed(&s->counters.n[counter], tmp);
}
Unexecuted instantiation: dpif-netdev.c:pmd_perf_update_counter
Unexecuted instantiation: dpif-netdev-private-dpif.c:pmd_perf_update_counter
Unexecuted instantiation: dpif-netdev-private-extract.c:pmd_perf_update_counter
Unexecuted instantiation: dpif-netdev-perf.c:pmd_perf_update_counter
Unexecuted instantiation: dpif-netdev-extract-study.c:pmd_perf_update_counter
Unexecuted instantiation: dpif-netdev-lookup.c:pmd_perf_update_counter
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:pmd_perf_update_counter
Unexecuted instantiation: dpif-netdev-lookup-generic.c:pmd_perf_update_counter
333
334
/* Functions to manipulate a sample history. */
335
336
static inline void
337
histogram_add_sample(struct histogram *hist, uint32_t val)
338
0
{
339
    /* TODO: Can do better with binary search? */
340
0
    for (int i = 0; i < NUM_BINS-1; i++) {
341
0
        if (val <= hist->wall[i]) {
342
0
            hist->bin[i]++;
343
0
            return;
344
0
        }
345
0
    }
346
0
    hist->bin[NUM_BINS-1]++;
347
0
}
Unexecuted instantiation: dpif-netdev.c:histogram_add_sample
Unexecuted instantiation: dpif-netdev-private-dpif.c:histogram_add_sample
Unexecuted instantiation: dpif-netdev-private-extract.c:histogram_add_sample
Unexecuted instantiation: dpif-netdev-perf.c:histogram_add_sample
Unexecuted instantiation: dpif-netdev-extract-study.c:histogram_add_sample
Unexecuted instantiation: dpif-netdev-lookup.c:histogram_add_sample
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:histogram_add_sample
Unexecuted instantiation: dpif-netdev-lookup-generic.c:histogram_add_sample
348
349
uint64_t histogram_samples(const struct histogram *hist);
350
351
/* This function is used to advance the given history index by positive
352
 * offset in the circular history buffer. */
353
static inline uint32_t
354
history_add(uint32_t idx, uint32_t offset)
355
0
{
356
0
    return (idx + offset) % HISTORY_LEN;
357
0
}
Unexecuted instantiation: dpif-netdev.c:history_add
Unexecuted instantiation: dpif-netdev-private-dpif.c:history_add
Unexecuted instantiation: dpif-netdev-private-extract.c:history_add
Unexecuted instantiation: dpif-netdev-perf.c:history_add
Unexecuted instantiation: dpif-netdev-extract-study.c:history_add
Unexecuted instantiation: dpif-netdev-lookup.c:history_add
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:history_add
Unexecuted instantiation: dpif-netdev-lookup-generic.c:history_add
358
359
/* This function computes the difference between two indices into the
360
 * circular history buffer. The result is always positive in the range
361
 * 0 .. HISTORY_LEN-1 and specifies the number of steps to reach idx1
362
 * starting from idx2. It can also be used to retreat the history index
363
 * idx1 by idx2 steps. */
364
static inline uint32_t
365
history_sub(uint32_t idx1, uint32_t idx2)
366
0
{
367
0
    return (idx1 + HISTORY_LEN - idx2) % HISTORY_LEN;
368
0
}
Unexecuted instantiation: dpif-netdev.c:history_sub
Unexecuted instantiation: dpif-netdev-private-dpif.c:history_sub
Unexecuted instantiation: dpif-netdev-private-extract.c:history_sub
Unexecuted instantiation: dpif-netdev-perf.c:history_sub
Unexecuted instantiation: dpif-netdev-extract-study.c:history_sub
Unexecuted instantiation: dpif-netdev-lookup.c:history_sub
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:history_sub
Unexecuted instantiation: dpif-netdev-lookup-generic.c:history_sub
369
370
static inline struct iter_stats *
371
history_current(struct history *h)
372
0
{
373
0
    return &h->sample[h->idx];
374
0
}
Unexecuted instantiation: dpif-netdev.c:history_current
Unexecuted instantiation: dpif-netdev-private-dpif.c:history_current
Unexecuted instantiation: dpif-netdev-private-extract.c:history_current
Unexecuted instantiation: dpif-netdev-perf.c:history_current
Unexecuted instantiation: dpif-netdev-extract-study.c:history_current
Unexecuted instantiation: dpif-netdev-lookup.c:history_current
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:history_current
Unexecuted instantiation: dpif-netdev-lookup-generic.c:history_current
375
376
static inline struct iter_stats *
377
history_next(struct history *h)
378
0
{
379
0
    size_t next_idx = history_add(h->idx, 1);
380
0
    struct iter_stats *next = &h->sample[next_idx];
381
382
0
    memset(next, 0, sizeof(*next));
383
0
    h->idx = next_idx;
384
0
    return next;
385
0
}
Unexecuted instantiation: dpif-netdev.c:history_next
Unexecuted instantiation: dpif-netdev-private-dpif.c:history_next
Unexecuted instantiation: dpif-netdev-private-extract.c:history_next
Unexecuted instantiation: dpif-netdev-perf.c:history_next
Unexecuted instantiation: dpif-netdev-extract-study.c:history_next
Unexecuted instantiation: dpif-netdev-lookup.c:history_next
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:history_next
Unexecuted instantiation: dpif-netdev-lookup-generic.c:history_next
386
387
static inline struct iter_stats *
388
history_store(struct history *h, struct iter_stats *is)
389
0
{
390
0
    if (is) {
391
0
        h->sample[h->idx] = *is;
392
0
    }
393
    /* Advance the history pointer */
394
0
    return history_next(h);
395
0
}
Unexecuted instantiation: dpif-netdev.c:history_store
Unexecuted instantiation: dpif-netdev-private-dpif.c:history_store
Unexecuted instantiation: dpif-netdev-private-extract.c:history_store
Unexecuted instantiation: dpif-netdev-perf.c:history_store
Unexecuted instantiation: dpif-netdev-extract-study.c:history_store
Unexecuted instantiation: dpif-netdev-lookup.c:history_store
Unexecuted instantiation: dpif-netdev-lookup-autovalidator.c:history_store
Unexecuted instantiation: dpif-netdev-lookup-generic.c:history_store
396
397
/* Data and function related to logging of suspicious iterations. */
398
399
extern bool log_enabled;
400
extern bool log_extend;
401
extern uint32_t log_q_thr;
402
extern uint64_t iter_cycle_threshold;
403
404
void pmd_perf_set_log_susp_iteration(struct pmd_perf_stats *s, char *reason);
405
void pmd_perf_log_susp_iteration_neighborhood(struct pmd_perf_stats *s);
406
407
/* Functions recording PMD metrics per iteration. */
408
409
void
410
pmd_perf_start_iteration(struct pmd_perf_stats *s);
411
void
412
pmd_perf_end_iteration(struct pmd_perf_stats *s, int rx_packets,
413
                       int tx_packets, uint64_t sleep_cycles,
414
                       bool full_metrics);
415
416
/* Formatting the output of commands. */
417
418
/* Parameters controlling the formatting of the pmd-perf output commands. */
struct pmd_perf_params {
    int command_type;           /* Which output to produce.
                                 * NOTE(review): value set assumed to be
                                 * defined by the caller — confirm. */
    bool histograms;            /* If true, include histogram output. */
    size_t iter_hist_len;       /* Number of iteration history samples to
                                 * format. */
    size_t ms_hist_len;         /* Number of millisecond history samples to
                                 * format. */
};
424
425
void pmd_perf_format_overall_stats(struct ds *str, struct pmd_perf_stats *s,
426
                                   double duration);
427
void pmd_perf_format_histograms(struct ds *str, struct pmd_perf_stats *s);
428
void pmd_perf_format_iteration_history(struct ds *str,
429
                                       struct pmd_perf_stats *s,
430
                                       int n_iter);
431
void pmd_perf_format_ms_history(struct ds *str, struct pmd_perf_stats *s,
432
                                int n_ms);
433
void pmd_perf_log_set_cmd(struct unixctl_conn *conn,
434
                          int argc, const char *argv[],
435
                          void *aux OVS_UNUSED);
436
437
#ifdef  __cplusplus
438
}
439
#endif
440
441
#endif /* DPIF_NETDEV_PERF_H */