/src/openvswitch/lib/classifier-private.h
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2014, 2015, 2016 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #ifndef CLASSIFIER_PRIVATE_H |
18 | | #define CLASSIFIER_PRIVATE_H 1 |
19 | | |
20 | | #include "ccmap.h" |
21 | | #include "cmap.h" |
22 | | #include "flow.h" |
23 | | #include "hash.h" |
24 | | #include "rculist.h" |
25 | | |
26 | | /* Classifier internal definitions, subject to change at any time. */ |
27 | | |
28 | | /* A set of rules that all have the same fields wildcarded. */ |
/* A set of rules that all have the same fields wildcarded.
 *
 * Fields are grouped by which class of readers accesses them; this keeps
 * reader-hot data apart from writer-only bookkeeping. */
struct cls_subtable {
    struct cmap_node cmap_node; /* Within classifier's 'subtables_map'. */

    /* These fields are only used by writers. */
    int max_priority;           /* Max priority of any rule in subtable. */
    unsigned int max_count;     /* Count of max_priority rules. */

    /* Accessed by iterators. */
    struct rculist rules_list;  /* Unordered. */

    /* Identical, but lower priority rules are not inserted to any of the
     * following data structures. */

    /* These fields are accessed by readers who care about wildcarding. */
    const uint8_t n_indices;    /* How many indices to use. */
    const struct flowmap index_maps[CLS_MAX_INDICES + 1]; /* Stage maps. */
    unsigned int trie_plen[CLS_MAX_TRIES];  /* Trie prefix length in 'mask'
                                             * (runtime configurable). */
    const int ports_mask_len;   /* NOTE(review): presumably the prefix length
                                 * used with 'ports_trie' — confirm against
                                 * classifier.c. */
    struct ccmap indices[CLS_MAX_INDICES];  /* Staged lookup indices. */
    rcu_trie_ptr ports_trie;    /* NULL if none. */

    /* These fields are accessed by all readers. */
    struct cmap rules;          /* Contains 'cls_match'es. */
    const struct minimask mask; /* Wildcards for fields. */
    /* 'mask' must be the last field. */
};
56 | | |
/* Internal representation of a rule in a "struct cls_subtable".
 *
 * The 'next' member is an element in a singly linked, null-terminated list.
 * This list links together identical "cls_match"es in order of decreasing
 * priority.  The classifier code maintains the invariant that at most one rule
 * of a given priority is visible for any given lookup version.
 */
struct cls_match {
    /* Accessed by everybody. */
    OVSRCU_TYPE(struct cls_match *) next; /* Equal, lower-priority matches. */
    OVSRCU_TYPE(struct cls_conjunction_set *) conj_set;

    /* Accessed by readers interested in wildcarding. */
    const int priority;         /* Larger numbers are higher priorities. */

    /* Accessed by all readers. */
    struct cmap_node cmap_node; /* Within struct cls_subtable 'rules'. */

    /* Rule versioning. */
    struct versions versions;

    const struct cls_rule *cls_rule;   /* The public rule this match backs. */
    const struct miniflow flow; /* Matching rule. Mask is in the subtable. */
    /* 'flow' must be the last field. */
};
82 | | |
83 | | /* Utilities for accessing the 'cls_match' member of struct cls_rule. */ |
84 | | static inline struct cls_match * |
85 | | get_cls_match_protected(const struct cls_rule *rule) |
86 | 0 | { |
87 | 0 | return ovsrcu_get_protected(struct cls_match *, &rule->cls_match); |
88 | 0 | } Unexecuted instantiation: flow_extract_target.c:get_cls_match_protected Unexecuted instantiation: classifier.c:get_cls_match_protected |
89 | | |
90 | | static inline struct cls_match * |
91 | | get_cls_match(const struct cls_rule *rule) |
92 | 0 | { |
93 | 0 | return ovsrcu_get(struct cls_match *, &rule->cls_match); |
94 | 0 | } Unexecuted instantiation: flow_extract_target.c:get_cls_match Unexecuted instantiation: classifier.c:get_cls_match |
95 | | |
96 | | /* Must be RCU postponed. */ |
97 | | void cls_match_free_cb(struct cls_match *); |
98 | | |
99 | | static inline void |
100 | | cls_match_set_remove_version(struct cls_match *rule, ovs_version_t version) |
101 | 0 | { |
102 | 0 | versions_set_remove_version(&rule->versions, version); |
103 | 0 | } Unexecuted instantiation: flow_extract_target.c:cls_match_set_remove_version Unexecuted instantiation: classifier.c:cls_match_set_remove_version |
104 | | |
105 | | static inline bool |
106 | | cls_match_visible_in_version(const struct cls_match *rule, |
107 | | ovs_version_t version) |
108 | 0 | { |
109 | 0 | return versions_visible_in_version(&rule->versions, version); |
110 | 0 | } Unexecuted instantiation: flow_extract_target.c:cls_match_visible_in_version Unexecuted instantiation: classifier.c:cls_match_visible_in_version |
111 | | |
112 | | static inline bool |
113 | | cls_match_is_eventually_invisible(const struct cls_match *rule) |
114 | 0 | { |
115 | 0 | return versions_is_eventually_invisible(&rule->versions); |
116 | 0 | } Unexecuted instantiation: flow_extract_target.c:cls_match_is_eventually_invisible Unexecuted instantiation: classifier.c:cls_match_is_eventually_invisible |
117 | | |
118 | | |
119 | | /* cls_match 'next' */ |
120 | | |
121 | | static inline const struct cls_match * |
122 | | cls_match_next(const struct cls_match *rule) |
123 | 0 | { |
124 | 0 | return ovsrcu_get(struct cls_match *, &rule->next); |
125 | 0 | } Unexecuted instantiation: flow_extract_target.c:cls_match_next Unexecuted instantiation: classifier.c:cls_match_next |
126 | | |
127 | | static inline struct cls_match * |
128 | | cls_match_next_protected(const struct cls_match *rule) |
129 | 0 | { |
130 | 0 | return ovsrcu_get_protected(struct cls_match *, &rule->next); |
131 | 0 | } Unexecuted instantiation: flow_extract_target.c:cls_match_next_protected Unexecuted instantiation: classifier.c:cls_match_next_protected |
132 | | |
133 | | /* Puts 'rule' in the position between 'prev' and 'next'. If 'prev' == NULL, |
134 | | * then the 'rule' is the new list head, and if 'next' == NULL, the rule is the |
135 | | * new list tail. |
136 | | * If there are any nodes between 'prev' and 'next', they are dropped from the |
137 | | * list. */ |
138 | | static inline void |
139 | | cls_match_insert(struct cls_match *prev, struct cls_match *next, |
140 | | struct cls_match *rule) |
141 | 0 | { |
142 | 0 | ovsrcu_set_hidden(&rule->next, next); |
143 | |
|
144 | 0 | if (prev) { |
145 | 0 | ovsrcu_set(&prev->next, rule); |
146 | 0 | } |
147 | 0 | } Unexecuted instantiation: flow_extract_target.c:cls_match_insert Unexecuted instantiation: classifier.c:cls_match_insert |
148 | | |
/* Puts 'new_rule' in the position of 'old_rule', which is the next node after
 * 'prev'.  If 'prev' == NULL, then the 'new_rule' is the new list head.
 *
 * The replaced cls_match still links to the later rules, and may still be
 * referenced by other threads until all other threads quiesce.  The replaced
 * rule may not be re-inserted, re-initialized, or deleted until after all
 * other threads have quiesced (use ovsrcu_postpone). */
static inline void
cls_match_replace(struct cls_match *prev,
                  struct cls_match *old_rule, struct cls_match *new_rule)
{
    struct cls_match *follower = cls_match_next_protected(old_rule);

    cls_match_insert(prev, follower, new_rule);
}
162 | | |
163 | | /* Removes 'rule' following 'prev' from the list. If 'prev' is NULL, then the |
164 | | * 'rule' is a list head, and the caller is responsible for maintaining its |
165 | | * list head pointer (if any). |
166 | | * |
167 | | * Afterward, the removed rule is not linked to any more, but still links to |
168 | | * the following rules, and may still be referenced by other threads until all |
169 | | * other threads quiesce. The removed rule may not be re-inserted, |
170 | | * re-initialized, or deleted until after all other threads have quiesced (use |
171 | | * ovsrcu_postpone). |
172 | | */ |
173 | | static inline void |
174 | | cls_match_remove(struct cls_match *prev, struct cls_match *rule) |
175 | 0 | { |
176 | 0 | if (prev) { |
177 | 0 | ovsrcu_set(&prev->next, cls_match_next_protected(rule)); |
178 | 0 | } |
179 | 0 | } Unexecuted instantiation: flow_extract_target.c:cls_match_remove Unexecuted instantiation: classifier.c:cls_match_remove |
180 | | |
/* Iterates reader 'ITER' over the cls_match list starting at 'HEAD',
 * following RCU 'next' pointers via cls_match_next(). */
#define CLS_MATCH_FOR_EACH(ITER, HEAD) \
    for ((ITER) = (HEAD); (ITER); (ITER) = cls_match_next(ITER))

/* Like CLS_MATCH_FOR_EACH, but starts from the node after 'HEAD'. */
#define CLS_MATCH_FOR_EACH_AFTER_HEAD(ITER, HEAD) \
    CLS_MATCH_FOR_EACH(ITER, cls_match_next(HEAD))

/* Iterate cls_matches keeping the previous pointer for modifications.
 * 'PREV' is NULL while 'ITER' is the head; uses the writer-side
 * cls_match_next_protected(). */
#define FOR_EACH_RULE_IN_LIST_PROTECTED(ITER, PREV, HEAD) \
    for ((PREV) = NULL, (ITER) = (HEAD); \
         (ITER); \
         (PREV) = (ITER), (ITER) = cls_match_next_protected(ITER))
192 | | |
193 | | |
/* A longest-prefix match tree. */
struct trie_node {
    uint32_t prefix;           /* Prefix bits for this node, MSB first. */
    uint8_t  n_bits;           /* Never zero, except for the root node. */
    unsigned int n_rules;      /* Number of rules that have this prefix. */
    rcu_trie_ptr edges[2];     /* Both NULL if leaf. */
};

/* Max bits per node.  Must fit in struct trie_node's 'prefix'.
 * Also tested with 16, 8, and 5 to stress the implementation. */
#define TRIE_PREFIX_BITS 32
205 | | |
206 | | /* flow/miniflow/minimask/minimatch utilities. |
207 | | * These are only used by the classifier, so place them here to allow |
208 | | * for better optimization. */ |
209 | | |
210 | | /* Returns a hash value for the bits of 'flow' where there are 1-bits in |
211 | | * 'mask', given 'basis'. |
212 | | * |
213 | | * The hash values returned by this function are the same as those returned by |
214 | | * miniflow_hash_in_minimask(), only the form of the arguments differ. */ |
215 | | static inline uint32_t |
216 | | flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask, |
217 | | uint32_t basis) |
218 | 0 | { |
219 | 0 | const uint64_t *mask_values = miniflow_get_values(&mask->masks); |
220 | 0 | const uint64_t *flow_u64 = (const uint64_t *)flow; |
221 | 0 | const uint64_t *p = mask_values; |
222 | 0 | uint32_t hash = basis; |
223 | 0 | map_t map; |
224 | |
|
225 | 0 | FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) { |
226 | 0 | size_t idx; |
227 | |
|
228 | 0 | MAP_FOR_EACH_INDEX (idx, map) { |
229 | 0 | hash = hash_add64(hash, flow_u64[idx] & *p++); |
230 | 0 | } |
231 | 0 | flow_u64 += MAP_T_BITS; |
232 | 0 | } |
233 | |
|
234 | 0 | return hash_finish(hash, (p - mask_values) * 8); |
235 | 0 | } Unexecuted instantiation: flow_extract_target.c:flow_hash_in_minimask Unexecuted instantiation: classifier.c:flow_hash_in_minimask |
236 | | |
237 | | /* Returns a hash value for the bits of 'flow' where there are 1-bits in |
238 | | * 'mask', given 'basis'. |
239 | | * |
240 | | * The hash values returned by this function are the same as those returned by |
241 | | * flow_hash_in_minimask(), only the form of the arguments differ. */ |
242 | | static inline uint32_t |
243 | | miniflow_hash_in_minimask(const struct miniflow *flow, |
244 | | const struct minimask *mask, uint32_t basis) |
245 | 0 | { |
246 | 0 | const uint64_t *mask_values = miniflow_get_values(&mask->masks); |
247 | 0 | const uint64_t *p = mask_values; |
248 | 0 | uint32_t hash = basis; |
249 | 0 | uint64_t value; |
250 | |
|
251 | 0 | MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, mask->masks.map) { |
252 | 0 | hash = hash_add64(hash, value & *p++); |
253 | 0 | } |
254 | |
|
255 | 0 | return hash_finish(hash, (p - mask_values) * 8); |
256 | 0 | } Unexecuted instantiation: flow_extract_target.c:miniflow_hash_in_minimask Unexecuted instantiation: classifier.c:miniflow_hash_in_minimask |
257 | | |
258 | | /* Returns a hash value for the values of 'flow', indicated by 'range', where |
259 | | * there are 1-bits in 'mask', given 'basis'. 'range' must be a continuous |
260 | | * subset of the bits in 'mask''s map, representing a continuous range of the |
261 | | * minimask's mask data. '*offset' must be the number of 64-bit units of the |
262 | | * minimask's data to skip to get to the first unit covered by 'range'. On |
263 | | * return '*offset' is updated with the number of 64-bit units of the minimask |
264 | | * consumed. |
265 | | * |
266 | | * Typically this function is called for successive ranges of minimask's masks, |
267 | | * and the first invocation passes '*offset' as zero. |
268 | | * |
269 | | * The hash values returned by this function are the same as those returned by |
270 | | * minimatch_hash_range(), only the form of the arguments differ. */ |
271 | | static inline uint32_t |
272 | | flow_hash_in_minimask_range(const struct flow *flow, |
273 | | const struct minimask *mask, |
274 | | const struct flowmap range, |
275 | | unsigned int *offset, |
276 | | uint32_t *basis) |
277 | 0 | { |
278 | 0 | const uint64_t *mask_values = miniflow_get_values(&mask->masks); |
279 | 0 | const uint64_t *flow_u64 = (const uint64_t *)flow; |
280 | 0 | const uint64_t *p = mask_values + *offset; |
281 | 0 | uint32_t hash = *basis; |
282 | 0 | map_t map; |
283 | |
|
284 | 0 | FLOWMAP_FOR_EACH_MAP (map, range) { |
285 | 0 | size_t idx; |
286 | |
|
287 | 0 | MAP_FOR_EACH_INDEX (idx, map) { |
288 | 0 | hash = hash_add64(hash, flow_u64[idx] & *p++); |
289 | 0 | } |
290 | 0 | flow_u64 += MAP_T_BITS; |
291 | 0 | } |
292 | |
|
293 | 0 | *basis = hash; /* Allow continuation from the unfinished value. */ |
294 | 0 | *offset = p - mask_values; |
295 | 0 | return hash_finish(hash, *offset * 8); |
296 | 0 | } Unexecuted instantiation: flow_extract_target.c:flow_hash_in_minimask_range Unexecuted instantiation: classifier.c:flow_hash_in_minimask_range |
297 | | |
298 | | /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */ |
299 | | static inline void |
300 | | flow_wildcards_fold_minimask(struct flow_wildcards *wc, |
301 | | const struct minimask *mask) |
302 | 0 | { |
303 | 0 | flow_union_with_miniflow(&wc->masks, &mask->masks); |
304 | 0 | } Unexecuted instantiation: flow_extract_target.c:flow_wildcards_fold_minimask Unexecuted instantiation: classifier.c:flow_wildcards_fold_minimask |
305 | | |
306 | | /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask for bits in |
307 | | * 'fmap'. 1-bits in 'fmap' are a subset of 1-bits in 'mask''s map. */ |
308 | | static inline void |
309 | | flow_wildcards_fold_minimask_in_map(struct flow_wildcards *wc, |
310 | | const struct minimask *mask, |
311 | | const struct flowmap fmap) |
312 | 0 | { |
313 | 0 | flow_union_with_miniflow_subset(&wc->masks, &mask->masks, fmap); |
314 | 0 | } Unexecuted instantiation: flow_extract_target.c:flow_wildcards_fold_minimask_in_map Unexecuted instantiation: classifier.c:flow_wildcards_fold_minimask_in_map |
315 | | |
316 | | /* Returns a hash value for 'mask', given 'basis'. */ |
317 | | static inline uint32_t |
318 | | minimask_hash(const struct minimask *mask, uint32_t basis) |
319 | 0 | { |
320 | 0 | const uint64_t *p = miniflow_get_values(&mask->masks); |
321 | 0 | size_t n_values = miniflow_n_values(&mask->masks); |
322 | 0 | uint32_t hash = basis; |
323 | |
|
324 | 0 | for (size_t i = 0; i < n_values; i++) { |
325 | 0 | hash = hash_add64(hash, *p++); |
326 | 0 | } |
327 | |
|
328 | 0 | map_t map; |
329 | 0 | FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) { |
330 | 0 | hash = hash_add64(hash, map); |
331 | 0 | } |
332 | |
|
333 | 0 | return hash_finish(hash, n_values); |
334 | 0 | } Unexecuted instantiation: flow_extract_target.c:minimask_hash Unexecuted instantiation: classifier.c:minimask_hash |
335 | | |
336 | | /* Returns a hash value for the values of 'match->flow', indicated by 'range', |
337 | | * where there are 1-bits in 'match->mask', given 'basis'. 'range' must be a |
338 | | * continuous subset of the bits in the map of 'match', representing a |
339 | | * continuous range of the mask data of 'match'. '*offset' must be the number |
340 | | * of 64-bit units of the match data to skip to get to the first unit covered |
341 | | * by 'range'. On return '*offset' is updated with the number of 64-bit units |
342 | | * of the match consumed. |
343 | | * |
344 | | * Typically this function is called for successive ranges of minimask's masks, |
345 | | * and the first invocation passes '*offset' as zero. |
346 | | * |
347 | | * The hash values returned by this function are the same as those returned by |
348 | | * flow_hash_in_minimask_range(), only the form of the arguments differ. */ |
349 | | static inline uint32_t |
350 | | minimatch_hash_range(const struct minimatch *match, |
351 | | const struct flowmap range, unsigned int *offset, |
352 | | uint32_t *basis) |
353 | 0 | { |
354 | 0 | const uint64_t *p = miniflow_get_values(match->flow) + *offset; |
355 | 0 | const uint64_t *q = miniflow_get_values(&match->mask->masks) + *offset; |
356 | 0 | unsigned int n = flowmap_n_1bits(range); |
357 | 0 | uint32_t hash = *basis; |
358 | |
|
359 | 0 | for (unsigned int i = 0; i < n; i++) { |
360 | 0 | hash = hash_add64(hash, p[i] & q[i]); |
361 | 0 | } |
362 | 0 | *basis = hash; /* Allow continuation from the unfinished value. */ |
363 | 0 | *offset += n; |
364 | 0 | return hash_finish(hash, *offset * 8); |
365 | 0 | } Unexecuted instantiation: flow_extract_target.c:minimatch_hash_range Unexecuted instantiation: classifier.c:minimatch_hash_range |
366 | | |
367 | | #endif |