Coverage Report

Created: 2025-07-11 06:12

/src/openvswitch/lib/dpif-netdev-private-dfc.c
Every executable line in this file has an execution count of 0; the file is entirely uncovered.

Source:
/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2015 Nicira, Inc.
 * Copyright (c) 2019, 2020, 2021 Intel Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "dpif-netdev-private-dfc.h"

static void
emc_clear_entry(struct emc_entry *ce)
{
    if (ce->flow) {
        dp_netdev_flow_unref(ce->flow);
        ce->flow = NULL;
    }
}

static void
smc_clear_entry(struct smc_bucket *b, int idx)
{
    b->flow_idx[idx] = UINT16_MAX;
}

static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}

static void
smc_cache_init(struct smc_cache *smc_cache)
{
    int i, j;
    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_cache->buckets[i].flow_idx[j] = UINT16_MAX;
        }
    }
}

void
dfc_cache_init(struct dfc_cache *flow_cache)
{
    emc_cache_init(&flow_cache->emc_cache);
    smc_cache_init(&flow_cache->smc_cache);
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

static void
smc_cache_uninit(struct smc_cache *smc)
{
    int i, j;

    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_clear_entry(&(smc->buckets[i]), j);
        }
    }
}

void
dfc_cache_uninit(struct dfc_cache *flow_cache)
{
    smc_cache_uninit(&flow_cache->smc_cache);
    emc_cache_uninit(&flow_cache->emc_cache);
}

/* Check and clear dead flow references slowly (one entry at each
 * invocation).  */
void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
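
The uncovered functions above are reachable through the three public entry points, dfc_cache_init(), emc_cache_slow_sweep() and dfc_cache_uninit(), which in turn call the static init/uninit/clear helpers. The following is a minimal, hypothetical harness sketch (not part of the OVS tree) that would exercise those paths; it assumes an OVS build environment where dpif-netdev-private-dfc.h is on the include path, and the main() wrapper and file layout are illustrative only.

/* Hypothetical coverage harness; an illustrative sketch, not OVS code. */
#include <config.h>
#include <stdlib.h>

#include "dpif-netdev-private-dfc.h"

int
main(void)
{
    /* struct dfc_cache embeds the full EMC and SMC arrays, so allocate it
     * on the heap rather than the stack. */
    struct dfc_cache *cache = malloc(sizeof *cache);

    if (!cache) {
        return 1;
    }

    /* Reaches emc_cache_init() and smc_cache_init(). */
    dfc_cache_init(cache);

    /* Reaches emc_cache_slow_sweep(); with no live flows installed,
     * emc_entry_alive() is false, so emc_clear_entry() is reached too
     * (its flow-unref branch stays untaken because ce->flow is NULL). */
    emc_cache_slow_sweep(&cache->emc_cache);

    /* Reaches emc_cache_uninit(), smc_cache_uninit() and smc_clear_entry(). */
    dfc_cache_uninit(cache);

    free(cache);
    return 0;
}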