/src/openvswitch/lib/netdev-offload-tc.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2016 Mellanox Technologies, Ltd. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | |
19 | | #include <errno.h> |
20 | | #include <linux/if_ether.h> |
21 | | |
22 | | #include "dpif.h" |
23 | | #include "hash.h" |
24 | | #include "id-pool.h" |
25 | | #include "openvswitch/hmap.h" |
26 | | #include "openvswitch/match.h" |
27 | | #include "openvswitch/ofpbuf.h" |
28 | | #include "openvswitch/thread.h" |
29 | | #include "openvswitch/types.h" |
30 | | #include "openvswitch/util.h" |
31 | | #include "openvswitch/vlog.h" |
32 | | #include "netdev-linux.h" |
33 | | #include "netdev-offload-provider.h" |
34 | | #include "netdev-provider.h" |
35 | | #include "netdev-vport.h" |
36 | | #include "netlink.h" |
37 | | #include "netlink-socket.h" |
38 | | #include "odp-netlink.h" |
39 | | #include "odp-util.h" |
40 | | #include "tc.h" |
41 | | #include "unaligned.h" |
42 | | #include "util.h" |
43 | | #include "dpif-provider.h" |
44 | | |
45 | | VLOG_DEFINE_THIS_MODULE(netdev_offload_tc); |
46 | | |
47 | | static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5); |
48 | | static struct vlog_rate_limit warn_rl = VLOG_RATE_LIMIT_INIT(10, 2); |
49 | | |
50 | | static struct hmap ufid_to_tc = HMAP_INITIALIZER(&ufid_to_tc); |
51 | | static struct hmap tc_to_ufid = HMAP_INITIALIZER(&tc_to_ufid); |
52 | | static bool multi_mask_per_prio = false; |
53 | | static bool block_support = false; |
54 | | static uint16_t ct_state_support; |
55 | | |
/* Mapping of one rewrite field: its offset within the OVS key attribute
 * struct, the matching offset within struct tc_flower_key, and the field's
 * size in bytes.  Used by set_flower_map below. */
struct netlink_field {
    int offset;
    int flower_offset;
    int size;
};

/* Hashmap node holding a single tc chain id. */
struct chain_node {
    struct hmap_node node;
    uint32_t chain;
};

/* One meter-id <-> tc police-index association; linked into both
 * meter_id_to_police_idx and police_idx_to_meter_id so the mapping can be
 * looked up in either direction. */
struct meter_police_mapping_data {
    struct hmap_node meter_id_node;
    struct hmap_node police_idx_node;
    uint32_t meter_id;
    uint32_t police_idx;
};

/* Hashmap node holding a single tc police action index. */
struct policer_node {
    struct hmap_node node;
    uint32_t police_idx;
};
78 | | |
79 | | /* Protects below meter police ids pool. */ |
80 | | static struct ovs_mutex meter_police_ids_mutex = OVS_MUTEX_INITIALIZER; |
81 | | static struct id_pool *meter_police_ids OVS_GUARDED_BY(meter_police_ids_mutex); |
82 | | /* Protects below meter hashmaps. */ |
83 | | static struct ovs_mutex meter_mutex = OVS_MUTEX_INITIALIZER; |
84 | | static struct hmap meter_id_to_police_idx OVS_GUARDED_BY(meter_mutex) |
85 | | = HMAP_INITIALIZER(&meter_id_to_police_idx); |
86 | | static struct hmap police_idx_to_meter_id OVS_GUARDED_BY(meter_mutex) |
87 | | = HMAP_INITIALIZER(&police_idx_to_meter_id); |
88 | | |
89 | | static int meter_id_lookup(uint32_t meter_id, uint32_t *police_idx); |
90 | | static int police_idx_lookup(uint32_t police_idx, uint32_t *meter_id); |
91 | | |
92 | | static int netdev_tc_parse_nl_actions(struct netdev *netdev, |
93 | | struct tc_flower *flower, |
94 | | struct offload_info *info, |
95 | | const struct nlattr *actions, |
96 | | size_t actions_len, |
97 | | bool *recirc_act, bool more_actions, |
98 | | struct tc_action **need_jump_update); |
99 | | |
100 | | static void parse_tc_flower_to_stats(struct tc_flower *flower, |
101 | | struct dpif_flow_stats *stats); |
102 | | |
103 | | static int get_ufid_adjust_stats(const ovs_u128 *ufid, |
104 | | struct dpif_flow_stats *stats); |
105 | | |
/* Returns true if 'type' names an OVS internal port ("internal"). */
static bool
is_internal_port(const char *type)
{
    return strcmp(type, "internal") == 0;
}
111 | | |
112 | | static enum tc_qdisc_hook |
113 | | get_tc_qdisc_hook(struct netdev *netdev) |
114 | 0 | { |
115 | 0 | return is_internal_port(netdev_get_type(netdev)) ? TC_EGRESS : TC_INGRESS; |
116 | 0 | } |
117 | | |
/* Translation table between OVS_KEY_ATTR_* rewrite attributes and the
 * corresponding fields of struct tc_flower_key.  Each row holds up to four
 * field mappings; unused trailing slots are zero-filled, and a zero 'size'
 * terminates iteration (see parse_flower_rewrite_to_netlink_action()). */
static struct netlink_field set_flower_map[][4] = {
    [OVS_KEY_ATTR_IPV4] = {
        { offsetof(struct ovs_key_ipv4, ipv4_src),
          offsetof(struct tc_flower_key, ipv4.ipv4_src),
          MEMBER_SIZEOF(struct tc_flower_key, ipv4.ipv4_src)
        },
        { offsetof(struct ovs_key_ipv4, ipv4_dst),
          offsetof(struct tc_flower_key, ipv4.ipv4_dst),
          MEMBER_SIZEOF(struct tc_flower_key, ipv4.ipv4_dst)
        },
        { offsetof(struct ovs_key_ipv4, ipv4_ttl),
          offsetof(struct tc_flower_key, ipv4.rewrite_ttl),
          MEMBER_SIZEOF(struct tc_flower_key, ipv4.rewrite_ttl)
        },
        { offsetof(struct ovs_key_ipv4, ipv4_tos),
          offsetof(struct tc_flower_key, ipv4.rewrite_tos),
          MEMBER_SIZEOF(struct tc_flower_key, ipv4.rewrite_tos)
        },
    },
    [OVS_KEY_ATTR_IPV6] = {
        { offsetof(struct ovs_key_ipv6, ipv6_src),
          offsetof(struct tc_flower_key, ipv6.ipv6_src),
          MEMBER_SIZEOF(struct tc_flower_key, ipv6.ipv6_src)
        },
        { offsetof(struct ovs_key_ipv6, ipv6_dst),
          offsetof(struct tc_flower_key, ipv6.ipv6_dst),
          MEMBER_SIZEOF(struct tc_flower_key, ipv6.ipv6_dst)
        },
        { offsetof(struct ovs_key_ipv6, ipv6_hlimit),
          offsetof(struct tc_flower_key, ipv6.rewrite_hlimit),
          MEMBER_SIZEOF(struct tc_flower_key, ipv6.rewrite_hlimit)
        },
        { offsetof(struct ovs_key_ipv6, ipv6_tclass),
          offsetof(struct tc_flower_key, ipv6.rewrite_tclass),
          MEMBER_SIZEOF(struct tc_flower_key, ipv6.rewrite_tclass)
        },
    },
    [OVS_KEY_ATTR_ETHERNET] = {
        { offsetof(struct ovs_key_ethernet, eth_src),
          offsetof(struct tc_flower_key, src_mac),
          MEMBER_SIZEOF(struct tc_flower_key, src_mac)
        },
        { offsetof(struct ovs_key_ethernet, eth_dst),
          offsetof(struct tc_flower_key, dst_mac),
          MEMBER_SIZEOF(struct tc_flower_key, dst_mac)
        },
    },
    [OVS_KEY_ATTR_ETHERTYPE] = {
        { 0,
          offsetof(struct tc_flower_key, eth_type),
          MEMBER_SIZEOF(struct tc_flower_key, eth_type)
        },
    },
    [OVS_KEY_ATTR_TCP] = {
        { offsetof(struct ovs_key_tcp, tcp_src),
          offsetof(struct tc_flower_key, tcp_src),
          MEMBER_SIZEOF(struct tc_flower_key, tcp_src)
        },
        { offsetof(struct ovs_key_tcp, tcp_dst),
          offsetof(struct tc_flower_key, tcp_dst),
          MEMBER_SIZEOF(struct tc_flower_key, tcp_dst)
        },
    },
    [OVS_KEY_ATTR_UDP] = {
        { offsetof(struct ovs_key_udp, udp_src),
          offsetof(struct tc_flower_key, udp_src),
          MEMBER_SIZEOF(struct tc_flower_key, udp_src)
        },
        { offsetof(struct ovs_key_udp, udp_dst),
          offsetof(struct tc_flower_key, udp_dst),
          MEMBER_SIZEOF(struct tc_flower_key, udp_dst)
        },
    },
};
192 | | |
193 | | static struct ovs_mutex ufid_lock = OVS_MUTEX_INITIALIZER; |
194 | | |
195 | | /** |
196 | | * struct ufid_tc_data - data entry for ufid-tc hashmaps. |
197 | | * @ufid_to_tc_node: Element in @ufid_to_tc hash table by ufid key. |
198 | | * @tc_to_ufid_node: Element in @tc_to_ufid hash table by tcf_id key. |
199 | | * @ufid: ufid assigned to the flow |
200 | | * @id: tc filter id (tcf_id) |
201 | | * @netdev: netdev associated with the tc rule |
202 | | * @adjust_stats: When flow gets updated with new actions, we need to adjust |
203 | | * the reported stats to include previous values as the hardware |
204 | | * rule is removed and re-added. This stats copy is used for it. |
205 | | */ |
206 | | struct ufid_tc_data { |
207 | | struct hmap_node ufid_to_tc_node; |
208 | | struct hmap_node tc_to_ufid_node; |
209 | | ovs_u128 ufid; |
210 | | struct tcf_id id; |
211 | | struct netdev *netdev; |
212 | | struct dpif_flow_stats adjust_stats; |
213 | | }; |
214 | | |
/* Removes the ufid<->tc-filter mapping for 'ufid' from both global hashmaps,
 * releases its netdev reference, and frees the entry.  No-op if 'ufid' has
 * no mapping.  Caller must hold 'ufid_lock'. */
static void
del_ufid_tc_mapping_unlocked(const ovs_u128 *ufid)
{
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    struct ufid_tc_data *data;

    HMAP_FOR_EACH_WITH_HASH (data, ufid_to_tc_node, ufid_hash, &ufid_to_tc) {
        if (ovs_u128_equals(*ufid, data->ufid)) {
            break;
        }
    }

    /* OVS's HMAP_FOR_EACH_WITH_HASH leaves 'data' NULL when the loop ends
     * without a 'break', i.e. when no entry matched. */
    if (!data) {
        return;
    }

    hmap_remove(&ufid_to_tc, &data->ufid_to_tc_node);
    hmap_remove(&tc_to_ufid, &data->tc_to_ufid_node);
    netdev_close(data->netdev);
    free(data);
}
236 | | |
/* Removes the matching ufid entry from the ufid-tc hashmaps.
 * Thread-safe wrapper around del_ufid_tc_mapping_unlocked(). */
static void
del_ufid_tc_mapping(const ovs_u128 *ufid)
{
    ovs_mutex_lock(&ufid_lock);
    del_ufid_tc_mapping_unlocked(ufid);
    ovs_mutex_unlock(&ufid_lock);
}
245 | | |
246 | | static void |
247 | | netdev_tc_adjust_stats(struct dpif_flow_stats *stats, |
248 | | const struct dpif_flow_stats *adjust_stats) |
249 | 0 | { |
250 | | /* Do not try to restore the stats->used, as in terse mode dumps TC doesn't |
251 | | * report TCA_ACT_OPTIONS, so the 'lastused' value is not available, hence |
252 | | * we report used as 0. |
253 | | * tcp_flags is not collected by tc, so no need to update it. */ |
254 | 0 | stats->n_bytes += adjust_stats->n_bytes; |
255 | 0 | stats->n_packets += adjust_stats->n_packets; |
256 | 0 | } |
257 | | |
/* Deletes the tc flower filter identified by 'id' and, on success, drops the
 * corresponding ufid<->tc mapping.
 *
 * If 'stats' is non-NULL, it is filled with the rule's final stats (queried
 * before deletion), combined with the saved adjust_stats so counters survive
 * rule re-installation.  Returns 0 on success, otherwise the error from the
 * tc delete operation. */
static int
del_filter_and_ufid_mapping(struct tcf_id *id, const ovs_u128 *ufid,
                            struct dpif_flow_stats *stats)
{
    struct tc_flower flower;
    int err;

    if (stats) {
        memset(stats, 0, sizeof *stats);
        /* Best effort: if the final query fails, report zeroed stats. */
        if (!tc_get_flower(id, &flower)) {
            struct dpif_flow_stats adjust_stats;

            parse_tc_flower_to_stats(&flower, stats);
            if (!get_ufid_adjust_stats(ufid, &adjust_stats)) {
                netdev_tc_adjust_stats(stats, &adjust_stats);
            }
        }
    }

    err = tc_del_flower_filter(id);
    if (!err) {
        /* Only drop the mapping once the kernel rule is actually gone. */
        del_ufid_tc_mapping(ufid);
    }
    return err;
}
284 | | |
/* Adds a ufid entry to the ufid_to_tc hashmap (and the reverse tc_to_ufid
 * map).
 *
 * Takes a reference on 'netdev' that is released when the mapping is
 * deleted.  If 'stats' is non-NULL it is stored as the entry's
 * adjust_stats, to be folded into later stats queries (the hardware rule
 * was re-added, so its counters restarted from zero). */
static void
add_ufid_tc_mapping(struct netdev *netdev, const ovs_u128 *ufid,
                    struct tcf_id *id, struct dpif_flow_stats *stats)
{
    struct ufid_tc_data *new_data = xzalloc(sizeof *new_data);
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    size_t tc_hash;

    /* Must match the hash computed in find_ufid(). */
    tc_hash = hash_int(hash_int(id->prio, id->handle), id->ifindex);
    tc_hash = hash_int(id->chain, tc_hash);

    new_data->ufid = *ufid;
    new_data->id = *id;
    new_data->netdev = netdev_ref(netdev);
    if (stats) {
        new_data->adjust_stats = *stats;
    }

    ovs_mutex_lock(&ufid_lock);
    hmap_insert(&ufid_to_tc, &new_data->ufid_to_tc_node, ufid_hash);
    hmap_insert(&tc_to_ufid, &new_data->tc_to_ufid_node, tc_hash);
    ovs_mutex_unlock(&ufid_lock);
}
309 | | |
310 | | /* Get tc id from ufid_to_tc hashmap. |
311 | | * |
312 | | * Returns 0 if successful and fills id. |
313 | | * Otherwise returns the error. |
314 | | */ |
315 | | static int |
316 | | get_ufid_tc_mapping(const ovs_u128 *ufid, struct tcf_id *id) |
317 | 0 | { |
318 | 0 | size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0); |
319 | 0 | struct ufid_tc_data *data; |
320 | |
|
321 | 0 | ovs_mutex_lock(&ufid_lock); |
322 | 0 | HMAP_FOR_EACH_WITH_HASH (data, ufid_to_tc_node, ufid_hash, &ufid_to_tc) { |
323 | 0 | if (ovs_u128_equals(*ufid, data->ufid)) { |
324 | 0 | *id = data->id; |
325 | 0 | ovs_mutex_unlock(&ufid_lock); |
326 | 0 | return 0; |
327 | 0 | } |
328 | 0 | } |
329 | 0 | ovs_mutex_unlock(&ufid_lock); |
330 | |
|
331 | 0 | return ENOENT; |
332 | 0 | } |
333 | | |
334 | | /* Get adjust_stats from ufid_to_tc hashmap. |
335 | | * |
336 | | * Returns 0 if successful and fills stats with adjust_stats. |
337 | | * Otherwise returns the error. |
338 | | */ |
339 | | static int |
340 | | get_ufid_adjust_stats(const ovs_u128 *ufid, struct dpif_flow_stats *stats) |
341 | 0 | { |
342 | 0 | size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0); |
343 | 0 | struct ufid_tc_data *data; |
344 | |
|
345 | 0 | ovs_mutex_lock(&ufid_lock); |
346 | 0 | HMAP_FOR_EACH_WITH_HASH (data, ufid_to_tc_node, ufid_hash, &ufid_to_tc) { |
347 | 0 | if (ovs_u128_equals(*ufid, data->ufid)) { |
348 | 0 | *stats = data->adjust_stats; |
349 | 0 | ovs_mutex_unlock(&ufid_lock); |
350 | 0 | return 0; |
351 | 0 | } |
352 | 0 | } |
353 | 0 | ovs_mutex_unlock(&ufid_lock); |
354 | |
|
355 | 0 | return ENOENT; |
356 | 0 | } |
357 | | |
/* Finds the ufid entry in the tc_to_ufid hashmap matching 'netdev' and
 * tcf_id 'id'.  The result is saved in 'ufid'.
 *
 * Returns true on success. */
static bool
find_ufid(struct netdev *netdev, struct tcf_id *id, ovs_u128 *ufid)
{
    struct ufid_tc_data *data;
    size_t tc_hash;

    /* Must match the hash computed in add_ufid_tc_mapping(). */
    tc_hash = hash_int(hash_int(id->prio, id->handle), id->ifindex);
    tc_hash = hash_int(id->chain, tc_hash);

    ovs_mutex_lock(&ufid_lock);
    HMAP_FOR_EACH_WITH_HASH (data, tc_to_ufid_node, tc_hash, &tc_to_ufid) {
        if (netdev == data->netdev && is_tcf_id_eq(&data->id, id)) {
            *ufid = data->ufid;
            break;
        }
    }
    ovs_mutex_unlock(&ufid_lock);

    /* OVS's HMAP_FOR_EACH_WITH_HASH sets 'data' to NULL when the loop ends
     * without a 'break', i.e. when no entry matched. */
    return (data != NULL);
}
383 | | |
/* Entry in the static 'prios' map inside get_prio_for_tc_flower():
 * remembers the prio assigned to a given mask/ethertype combination. */
struct prio_map_data {
    struct hmap_node node;
    struct tc_flower_key mask;
    ovs_be16 protocol;
    uint16_t prio;
};
390 | | |
391 | | static uint16_t |
392 | | get_next_available_prio(ovs_be16 protocol) |
393 | 0 | { |
394 | 0 | static uint16_t last_prio = TC_RESERVED_PRIORITY_MAX; |
395 | |
|
396 | 0 | if (multi_mask_per_prio) { |
397 | 0 | if (protocol == htons(ETH_P_IP)) { |
398 | 0 | return TC_RESERVED_PRIORITY_IPV4; |
399 | 0 | } else if (protocol == htons(ETH_P_IPV6)) { |
400 | 0 | return TC_RESERVED_PRIORITY_IPV6; |
401 | 0 | } |
402 | 0 | } |
403 | | |
404 | | /* last_prio can overflow if there will be many different kinds of |
405 | | * flows which shouldn't happen organically. */ |
406 | 0 | if (last_prio == UINT16_MAX) { |
407 | 0 | return TC_RESERVED_PRIORITY_NONE; |
408 | 0 | } |
409 | | |
410 | 0 | return ++last_prio; |
411 | 0 | } |
412 | | |
/* Gets a free prio for a tc flower rule.
 * If a prio is already allocated for the mask/eth_type combination then it
 * is returned.  If not, a new prio is assigned and cached.
 *
 * Returns the prio on success, or 0 (TC_RESERVED_PRIORITY_NONE) if we are
 * out of prios. */
static uint16_t
get_prio_for_tc_flower(struct tc_flower *flower)
{
    static struct hmap prios = HMAP_INITIALIZER(&prios);
    static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
    size_t key_len = sizeof(struct tc_flower_key);
    size_t hash = hash_int((OVS_FORCE uint32_t) flower->key.eth_type, 0);
    struct prio_map_data *data;
    struct prio_map_data *new_data;
    uint16_t prio;

    /* Without multi-mask-per-prio support, the full mask participates in
     * the lookup hash; otherwise the ethertype alone is enough. */
    if (!multi_mask_per_prio) {
        hash = hash_bytes(&flower->mask, key_len, hash);
    }

    /* We can use the same prio for same mask/eth combination but must have
     * different prio if not. Flower classifier will reject same prio for
     * different mask combination unless multi mask per prio is supported. */
    ovs_mutex_lock(&prios_lock);
    HMAP_FOR_EACH_WITH_HASH (data, node, hash, &prios) {
        if ((multi_mask_per_prio
             || !memcmp(&flower->mask, &data->mask, key_len))
            && data->protocol == flower->key.eth_type) {
            ovs_mutex_unlock(&prios_lock);
            return data->prio;
        }
    }

    prio = get_next_available_prio(flower->key.eth_type);
    if (prio == TC_RESERVED_PRIORITY_NONE) {
        /* Out of prios. */
        ovs_mutex_unlock(&prios_lock);
        return prio;
    }

    /* Cache the new mask/ethertype -> prio assignment. */
    new_data = xzalloc(sizeof *new_data);
    memcpy(&new_data->mask, &flower->mask, key_len);
    new_data->prio = prio;
    new_data->protocol = flower->key.eth_type;
    hmap_insert(&prios, &new_data->node, hash);
    ovs_mutex_unlock(&prios_lock);

    return prio;
}
462 | | |
463 | | static uint32_t |
464 | | get_block_id_from_netdev(struct netdev *netdev) |
465 | 0 | { |
466 | 0 | if (block_support) { |
467 | 0 | return netdev_get_block_id(netdev); |
468 | 0 | } |
469 | | |
470 | 0 | return 0; |
471 | 0 | } |
472 | | |
/* Dumps all tc chains attached to the qdisc identified by 'id' and inserts
 * one 'struct chain_node' per parsed chain into 'map' (hashed by chain id).
 *
 * Returns 0 on success, otherwise the error from finishing the netlink
 * dump.  The caller owns the nodes inserted into 'map' and must free them. */
static int
get_chains_from_netdev(struct netdev *netdev, struct tcf_id *id,
                       struct hmap *map)
{
    struct netdev_flow_dump *dump;
    struct chain_node *chain_node;
    struct ofpbuf rbuffer, reply;
    uint32_t chain;
    size_t hash;
    int err;

    dump = xzalloc(sizeof *dump);
    dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
    dump->netdev = netdev_ref(netdev);

    ofpbuf_init(&rbuffer, NL_DUMP_BUFSIZE);
    tc_dump_tc_chain_start(id, dump->nl_dump);

    while (nl_dump_next(dump->nl_dump, &reply, &rbuffer)) {
        if (parse_netlink_to_tc_chain(&reply, &chain)) {
            /* Skip replies that do not parse as a chain. */
            continue;
        }

        chain_node = xzalloc(sizeof *chain_node);
        chain_node->chain = chain;
        hash = hash_int(chain, 0);
        hmap_insert(map, &chain_node->node, hash);
    }

    err = nl_dump_done(dump->nl_dump);
    ofpbuf_uninit(&rbuffer);
    /* Releases the reference taken via netdev_ref() above. */
    netdev_close(netdev);
    free(dump->nl_dump);
    free(dump);

    return err;
}
510 | | |
511 | | static int |
512 | | delete_chains_from_netdev(struct netdev *netdev, struct tcf_id *id) |
513 | 0 | { |
514 | 0 | struct chain_node *chain_node; |
515 | 0 | struct hmap map; |
516 | 0 | int error; |
517 | |
|
518 | 0 | hmap_init(&map); |
519 | 0 | error = get_chains_from_netdev(netdev, id, &map); |
520 | |
|
521 | 0 | if (!error) { |
522 | | /* Flush rules explicitly needed when we work with ingress_block, |
523 | | * so we will not fail with reattaching block to bond iface, for ex. |
524 | | */ |
525 | 0 | HMAP_FOR_EACH_POP (chain_node, node, &map) { |
526 | 0 | id->chain = chain_node->chain; |
527 | 0 | tc_del_flower_filter(id); |
528 | 0 | free(chain_node); |
529 | 0 | } |
530 | 0 | } |
531 | |
|
532 | 0 | hmap_destroy(&map); |
533 | 0 | return error; |
534 | 0 | } |
535 | | |
536 | | static int |
537 | | netdev_tc_flow_flush(struct netdev *netdev) |
538 | 0 | { |
539 | 0 | struct ufid_tc_data *data; |
540 | 0 | int err; |
541 | |
|
542 | 0 | ovs_mutex_lock(&ufid_lock); |
543 | 0 | HMAP_FOR_EACH_SAFE (data, tc_to_ufid_node, &tc_to_ufid) { |
544 | 0 | if (data->netdev != netdev) { |
545 | 0 | continue; |
546 | 0 | } |
547 | | |
548 | 0 | err = tc_del_flower_filter(&data->id); |
549 | 0 | if (!err) { |
550 | 0 | del_ufid_tc_mapping_unlocked(&data->ufid); |
551 | 0 | } |
552 | 0 | } |
553 | 0 | ovs_mutex_unlock(&ufid_lock); |
554 | |
|
555 | 0 | return 0; |
556 | 0 | } |
557 | | |
/* Starts a tc flower dump for 'netdev' and returns it in 'dump_out'.
 *
 * With 'terse' set, the kernel is asked for an abbreviated dump (stats
 * only, no full match/action details).  Returns 0 on success, or a
 * positive errno if the device's ifindex cannot be obtained. */
static int
netdev_tc_flow_dump_create(struct netdev *netdev,
                           struct netdev_flow_dump **dump_out,
                           bool terse)
{
    enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
    struct netdev_flow_dump *dump;
    uint32_t block_id = 0;
    struct tcf_id id;
    int prio = 0;
    int ifindex;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        /* netdev_get_ifindex() returns -errno on failure. */
        VLOG_ERR_RL(&error_rl, "dump_create: failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    block_id = get_block_id_from_netdev(netdev);
    dump = xzalloc(sizeof *dump);
    dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
    dump->netdev = netdev_ref(netdev);
    dump->terse = terse;

    /* prio 0: no specific priority is requested for the dump --
     * presumably matches filters at all priorities; confirm in tc.c. */
    id = tc_make_tcf_id(ifindex, block_id, prio, hook);
    tc_dump_flower_start(&id, dump->nl_dump, terse);

    *dump_out = dump;

    return 0;
}
590 | | |
/* Finishes the netlink dump and releases everything owned by 'dump',
 * including its netdev reference.  Always returns 0. */
static int
netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
{
    nl_dump_done(dump->nl_dump);
    netdev_close(dump->netdev);
    free(dump->nl_dump);
    free(dump);
    return 0;
}
600 | | |
/* Translates a flower pedit (rewrite) action back into an OVS
 * OVS_ACTION_ATTR_SET_MASKED netlink action appended to 'buf'.
 *
 * Walks set_flower_map: for every mapped field whose rewrite mask is
 * non-zero, emits the key bytes followed by the mask bytes inside one
 * attribute per OVS key type (the masked-set layout: value then mask). */
static void
parse_flower_rewrite_to_netlink_action(struct ofpbuf *buf,
                                       struct tc_action *action)
{
    char *mask = (char *) &action->rewrite.mask;
    char *data = (char *) &action->rewrite.key;

    for (int type = 0; type < ARRAY_SIZE(set_flower_map); type++) {
        char *put = NULL;
        size_t nested = 0;
        int len = ovs_flow_key_attr_lens[type].len;

        if (len <= 0) {
            /* Attribute types without a fixed length are not in the map. */
            continue;
        }

        for (int j = 0; j < ARRAY_SIZE(set_flower_map[type]); j++) {
            struct netlink_field *f = &set_flower_map[type][j];

            if (!f->size) {
                /* Unused trailing map slots have size 0. */
                break;
            }

            if (!is_all_zeros(mask + f->flower_offset, f->size)) {
                if (!put) {
                    /* First rewritten field of this key type: open the
                     * nested attribute and reserve room for key + mask. */
                    nested = nl_msg_start_nested(buf,
                                                 OVS_ACTION_ATTR_SET_MASKED);
                    put = nl_msg_put_unspec_zero(buf, type, len * 2);
                }

                memcpy(put + f->offset, data + f->flower_offset, f->size);
                memcpy(put + len + f->offset,
                       mask + f->flower_offset, f->size);
            }
        }

        if (put) {
            nl_msg_end_nested(buf, nested);
        }
    }
}
642 | | |
643 | | static void parse_tc_flower_geneve_opts(struct tc_action *action, |
644 | | struct ofpbuf *buf) |
645 | 0 | { |
646 | 0 | int tun_opt_len = action->encap.data.present.len; |
647 | 0 | size_t geneve_off; |
648 | 0 | int idx = 0; |
649 | |
|
650 | 0 | if (!tun_opt_len) { |
651 | 0 | return; |
652 | 0 | } |
653 | | |
654 | 0 | geneve_off = nl_msg_start_nested(buf, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS); |
655 | 0 | while (tun_opt_len) { |
656 | 0 | struct geneve_opt *opt; |
657 | |
|
658 | 0 | opt = &action->encap.data.opts.gnv[idx]; |
659 | 0 | nl_msg_put(buf, opt, sizeof(struct geneve_opt) + opt->length * 4); |
660 | 0 | idx += sizeof(struct geneve_opt) / 4 + opt->length; |
661 | 0 | tun_opt_len -= sizeof(struct geneve_opt) + opt->length * 4; |
662 | 0 | } |
663 | 0 | nl_msg_end_nested(buf, geneve_off); |
664 | 0 | } |
665 | | |
/* Copies geneve tunnel option data from 'flower' into 'match', converting
 * flower's length fields (which hold actual lengths) into the exact-match
 * masks that the 'match' representation expects. */
static void
flower_tun_opt_to_match(struct match *match, struct tc_flower *flower)
{
    struct geneve_opt *opt, *opt_mask;
    int len, cnt = 0;

    /* Options are always in UDPIF format in the 'flower'. */
    match->flow.tunnel.flags |= FLOW_TNL_F_UDPIF;
    match->wc.masks.tunnel.flags |= FLOW_TNL_F_UDPIF;

    match->flow.tunnel.metadata.present.len =
        flower->key.tunnel.metadata.present.len;
    /* In the 'flower' mask len is an actual length, not a mask. But in the
     * 'match' it is an actual mask, so should be an exact match, because TC
     * will always match on the exact value. */
    match->wc.masks.tunnel.metadata.present.len = 0xff;

    if (!flower->key.tunnel.metadata.present.len) {
        /* No options present. */
        return;
    }

    memcpy(match->flow.tunnel.metadata.opts.gnv,
           flower->key.tunnel.metadata.opts.gnv,
           flower->key.tunnel.metadata.present.len);
    memcpy(match->wc.masks.tunnel.metadata.opts.gnv,
           flower->mask.tunnel.metadata.opts.gnv,
           flower->mask.tunnel.metadata.present.len);

    /* Fixing up 'length' fields of particular options, since these are
     * also not masks, but actual lengths in the 'flower' structure. */
    len = flower->key.tunnel.metadata.present.len;
    while (len) {
        opt = &match->flow.tunnel.metadata.opts.gnv[cnt];
        opt_mask = &match->wc.masks.tunnel.metadata.opts.gnv[cnt];

        /* "Exact" match as set in tun_metadata_to_geneve_mask__(). */
        opt_mask->length = 0x1f;

        cnt += sizeof(struct geneve_opt) / 4 + opt->length;
        len -= sizeof(struct geneve_opt) + opt->length * 4;
    }
}
709 | | |
710 | | static void |
711 | | parse_tc_flower_to_stats(struct tc_flower *flower, |
712 | | struct dpif_flow_stats *stats) |
713 | 0 | { |
714 | 0 | if (!stats) { |
715 | 0 | return; |
716 | 0 | } |
717 | | |
718 | 0 | memset(stats, 0, sizeof *stats); |
719 | 0 | stats->n_packets = get_32aligned_u64(&flower->stats_sw.n_packets); |
720 | 0 | stats->n_packets += get_32aligned_u64(&flower->stats_hw.n_packets); |
721 | 0 | stats->n_bytes = get_32aligned_u64(&flower->stats_sw.n_bytes); |
722 | 0 | stats->n_bytes += get_32aligned_u64(&flower->stats_hw.n_bytes); |
723 | 0 | stats->used = flower->lastused; |
724 | 0 | } |
725 | | |
726 | | static void |
727 | | parse_tc_flower_to_attrs(struct tc_flower *flower, |
728 | | struct dpif_flow_attrs *attrs) |
729 | 0 | { |
730 | 0 | attrs->offloaded = (flower->offloaded_state == TC_OFFLOADED_STATE_IN_HW || |
731 | 0 | flower->offloaded_state == |
732 | 0 | TC_OFFLOADED_STATE_UNDEFINED); |
733 | 0 | attrs->dp_layer = "tc"; |
734 | 0 | attrs->dp_extra_info = NULL; |
735 | 0 | } |
736 | | |
/* Fills 'match', 'stats' and 'attrs' for a terse flower dump entry.  Terse
 * dumps carry no match data, so 'match' becomes a catch-all.  Always
 * returns 0. */
static int
parse_tc_flower_terse_to_match(struct tc_flower *flower,
                               struct match *match,
                               struct dpif_flow_stats *stats,
                               struct dpif_flow_attrs *attrs)
{
    match_init_catchall(match);

    parse_tc_flower_to_attrs(flower, attrs);
    parse_tc_flower_to_stats(flower, stats);

    return 0;
}
750 | | |
751 | | static int |
752 | | parse_tc_flower_to_actions__(struct tc_flower *flower, struct ofpbuf *buf, |
753 | | int start_index, int max_index) |
754 | 0 | { |
755 | 0 | struct tc_action *action; |
756 | 0 | int i; |
757 | |
|
758 | 0 | if (max_index <= 0 || max_index > flower->action_count) { |
759 | 0 | max_index = flower->action_count; |
760 | 0 | } |
761 | |
|
762 | 0 | for (i = start_index; i < max_index; i++) { |
763 | 0 | action = &flower->actions[i]; |
764 | |
|
765 | 0 | switch (action->type) { |
766 | 0 | case TC_ACT_VLAN_POP: { |
767 | 0 | nl_msg_put_flag(buf, OVS_ACTION_ATTR_POP_VLAN); |
768 | 0 | } |
769 | 0 | break; |
770 | 0 | case TC_ACT_VLAN_PUSH: { |
771 | 0 | struct ovs_action_push_vlan *push; |
772 | |
|
773 | 0 | push = nl_msg_put_unspec_zero(buf, OVS_ACTION_ATTR_PUSH_VLAN, |
774 | 0 | sizeof *push); |
775 | 0 | push->vlan_tpid = action->vlan.vlan_push_tpid; |
776 | 0 | push->vlan_tci = htons(action->vlan.vlan_push_id |
777 | 0 | | (action->vlan.vlan_push_prio << 13) |
778 | 0 | | VLAN_CFI); |
779 | 0 | } |
780 | 0 | break; |
781 | 0 | case TC_ACT_MPLS_POP: { |
782 | 0 | nl_msg_put_be16(buf, OVS_ACTION_ATTR_POP_MPLS, |
783 | 0 | action->mpls.proto); |
784 | 0 | } |
785 | 0 | break; |
786 | 0 | case TC_ACT_MPLS_PUSH: { |
787 | 0 | struct ovs_action_push_mpls *push; |
788 | 0 | ovs_be32 mpls_lse = 0; |
789 | |
|
790 | 0 | flow_set_mpls_lse_label(&mpls_lse, action->mpls.label); |
791 | 0 | flow_set_mpls_lse_tc(&mpls_lse, action->mpls.tc); |
792 | 0 | flow_set_mpls_lse_ttl(&mpls_lse, action->mpls.ttl); |
793 | 0 | flow_set_mpls_lse_bos(&mpls_lse, action->mpls.bos); |
794 | |
|
795 | 0 | push = nl_msg_put_unspec_zero(buf, OVS_ACTION_ATTR_PUSH_MPLS, |
796 | 0 | sizeof *push); |
797 | 0 | push->mpls_ethertype = action->mpls.proto; |
798 | 0 | push->mpls_lse = mpls_lse; |
799 | 0 | } |
800 | 0 | break; |
801 | 0 | case TC_ACT_MPLS_SET: { |
802 | 0 | size_t set_offset = nl_msg_start_nested(buf, |
803 | 0 | OVS_ACTION_ATTR_SET); |
804 | 0 | struct ovs_key_mpls *set_mpls; |
805 | 0 | ovs_be32 mpls_lse = 0; |
806 | |
|
807 | 0 | flow_set_mpls_lse_label(&mpls_lse, action->mpls.label); |
808 | 0 | flow_set_mpls_lse_tc(&mpls_lse, action->mpls.tc); |
809 | 0 | flow_set_mpls_lse_ttl(&mpls_lse, action->mpls.ttl); |
810 | 0 | flow_set_mpls_lse_bos(&mpls_lse, action->mpls.bos); |
811 | |
|
812 | 0 | set_mpls = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_MPLS, |
813 | 0 | sizeof *set_mpls); |
814 | 0 | set_mpls->mpls_lse = mpls_lse; |
815 | 0 | nl_msg_end_nested(buf, set_offset); |
816 | 0 | } |
817 | 0 | break; |
818 | 0 | case TC_ACT_PEDIT: { |
819 | 0 | parse_flower_rewrite_to_netlink_action(buf, action); |
820 | 0 | } |
821 | 0 | break; |
822 | 0 | case TC_ACT_ENCAP: { |
823 | 0 | size_t set_offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SET); |
824 | 0 | size_t tunnel_offset = |
825 | 0 | nl_msg_start_nested(buf, OVS_KEY_ATTR_TUNNEL); |
826 | |
|
827 | 0 | if (action->encap.id_present) { |
828 | 0 | nl_msg_put_be64(buf, OVS_TUNNEL_KEY_ATTR_ID, action->encap.id); |
829 | 0 | } |
830 | 0 | if (action->encap.ipv4.ipv4_src) { |
831 | 0 | nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, |
832 | 0 | action->encap.ipv4.ipv4_src); |
833 | 0 | } |
834 | 0 | if (action->encap.ipv4.ipv4_dst) { |
835 | 0 | nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_DST, |
836 | 0 | action->encap.ipv4.ipv4_dst); |
837 | 0 | } |
838 | 0 | if (ipv6_addr_is_set(&action->encap.ipv6.ipv6_src)) { |
839 | 0 | nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, |
840 | 0 | &action->encap.ipv6.ipv6_src); |
841 | 0 | } |
842 | 0 | if (ipv6_addr_is_set(&action->encap.ipv6.ipv6_dst)) { |
843 | 0 | nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_DST, |
844 | 0 | &action->encap.ipv6.ipv6_dst); |
845 | 0 | } |
846 | 0 | if (action->encap.tos) { |
847 | 0 | nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_TOS, |
848 | 0 | action->encap.tos); |
849 | 0 | } |
850 | 0 | if (action->encap.ttl) { |
851 | 0 | nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_TTL, |
852 | 0 | action->encap.ttl); |
853 | 0 | } |
854 | 0 | if (action->encap.tp_dst) { |
855 | 0 | nl_msg_put_be16(buf, OVS_TUNNEL_KEY_ATTR_TP_DST, |
856 | 0 | action->encap.tp_dst); |
857 | 0 | } |
858 | 0 | if (!action->encap.no_csum) { |
859 | 0 | nl_msg_put_flag(buf, OVS_TUNNEL_KEY_ATTR_CSUM); |
860 | 0 | } |
861 | |
|
862 | 0 | parse_tc_flower_geneve_opts(action, buf); |
863 | 0 | nl_msg_end_nested(buf, tunnel_offset); |
864 | 0 | nl_msg_end_nested(buf, set_offset); |
865 | 0 | } |
866 | 0 | break; |
867 | 0 | case TC_ACT_OUTPUT: { |
868 | 0 | odp_port_t outport = 0; |
869 | |
|
870 | 0 | if (action->out.ifindex_out) { |
871 | 0 | outport = |
872 | 0 | netdev_ifindex_to_odp_port(action->out.ifindex_out); |
873 | 0 | if (!outport) { |
874 | 0 | return -ENOENT; |
875 | 0 | } |
876 | 0 | } |
877 | 0 | nl_msg_put_u32(buf, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(outport)); |
878 | 0 | } |
879 | 0 | break; |
880 | 0 | case TC_ACT_CT: { |
881 | 0 | size_t ct_offset; |
882 | |
|
883 | 0 | if (action->ct.clear) { |
884 | 0 | nl_msg_put_flag(buf, OVS_ACTION_ATTR_CT_CLEAR); |
885 | 0 | break; |
886 | 0 | } |
887 | | |
888 | 0 | ct_offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_CT); |
889 | |
|
890 | 0 | if (action->ct.commit) { |
891 | 0 | if (action->ct.force) { |
892 | 0 | nl_msg_put_flag(buf, OVS_CT_ATTR_FORCE_COMMIT); |
893 | 0 | } else { |
894 | 0 | nl_msg_put_flag(buf, OVS_CT_ATTR_COMMIT); |
895 | 0 | } |
896 | 0 | } |
897 | |
|
898 | 0 | if (action->ct.zone) { |
899 | 0 | nl_msg_put_u16(buf, OVS_CT_ATTR_ZONE, action->ct.zone); |
900 | 0 | } |
901 | |
|
902 | 0 | if (action->ct.mark_mask) { |
903 | 0 | uint32_t mark_and_mask[2] = { action->ct.mark, |
904 | 0 | action->ct.mark_mask }; |
905 | 0 | nl_msg_put_unspec(buf, OVS_CT_ATTR_MARK, &mark_and_mask, |
906 | 0 | sizeof mark_and_mask); |
907 | 0 | } |
908 | |
|
909 | 0 | if (!ovs_u128_is_zero(action->ct.label_mask)) { |
910 | 0 | struct { |
911 | 0 | ovs_u128 key; |
912 | 0 | ovs_u128 mask; |
913 | 0 | } ct_label = { |
914 | 0 | .key = action->ct.label, |
915 | 0 | .mask = action->ct.label_mask, |
916 | 0 | }; |
917 | |
|
918 | 0 | nl_msg_put_unspec(buf, OVS_CT_ATTR_LABELS, |
919 | 0 | &ct_label, sizeof ct_label); |
920 | 0 | } |
921 | |
|
922 | 0 | if (action->ct.nat_type) { |
923 | 0 | size_t nat_offset = nl_msg_start_nested(buf, |
924 | 0 | OVS_CT_ATTR_NAT); |
925 | |
|
926 | 0 | if (action->ct.nat_type == TC_NAT_SRC) { |
927 | 0 | nl_msg_put_flag(buf, OVS_NAT_ATTR_SRC); |
928 | 0 | } else if (action->ct.nat_type == TC_NAT_DST) { |
929 | 0 | nl_msg_put_flag(buf, OVS_NAT_ATTR_DST); |
930 | 0 | } |
931 | |
|
932 | 0 | if (action->ct.range.ip_family == AF_INET) { |
933 | 0 | nl_msg_put_be32(buf, OVS_NAT_ATTR_IP_MIN, |
934 | 0 | action->ct.range.ipv4.min); |
935 | 0 | nl_msg_put_be32(buf, OVS_NAT_ATTR_IP_MAX, |
936 | 0 | action->ct.range.ipv4.max); |
937 | 0 | } else if (action->ct.range.ip_family == AF_INET6) { |
938 | 0 | nl_msg_put_in6_addr(buf, OVS_NAT_ATTR_IP_MIN, |
939 | 0 | &action->ct.range.ipv6.min); |
940 | 0 | nl_msg_put_in6_addr(buf, OVS_NAT_ATTR_IP_MAX, |
941 | 0 | &action->ct.range.ipv6.max); |
942 | 0 | } |
943 | |
|
944 | 0 | if (action->ct.range.port.min) { |
945 | 0 | nl_msg_put_u16(buf, OVS_NAT_ATTR_PROTO_MIN, |
946 | 0 | ntohs(action->ct.range.port.min)); |
947 | 0 | if (action->ct.range.port.max) { |
948 | 0 | nl_msg_put_u16(buf, OVS_NAT_ATTR_PROTO_MAX, |
949 | 0 | ntohs(action->ct.range.port.max)); |
950 | 0 | } |
951 | 0 | } |
952 | |
|
953 | 0 | nl_msg_end_nested(buf, nat_offset); |
954 | 0 | } |
955 | |
|
956 | 0 | nl_msg_end_nested(buf, ct_offset); |
957 | 0 | } |
958 | 0 | break; |
959 | 0 | case TC_ACT_GOTO: { |
960 | 0 | nl_msg_put_u32(buf, OVS_ACTION_ATTR_RECIRC, action->chain); |
961 | 0 | } |
962 | 0 | break; |
963 | 0 | case TC_ACT_POLICE: { |
964 | 0 | uint32_t meter_id; |
965 | |
|
966 | 0 | if (police_idx_lookup(action->police.index, &meter_id)) { |
967 | 0 | return -ENOENT; |
968 | 0 | } |
969 | 0 | nl_msg_put_u32(buf, OVS_ACTION_ATTR_METER, meter_id); |
970 | 0 | } |
971 | 0 | break; |
972 | 0 | case TC_ACT_POLICE_MTU: { |
973 | 0 | size_t offset, act_offset; |
974 | 0 | uint32_t jump; |
975 | |
|
976 | 0 | offset = nl_msg_start_nested(buf, |
977 | 0 | OVS_ACTION_ATTR_CHECK_PKT_LEN); |
978 | |
|
979 | 0 | nl_msg_put_u16(buf, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, |
980 | 0 | action->police.mtu); |
981 | |
|
982 | 0 | act_offset = nl_msg_start_nested( |
983 | 0 | buf, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER); |
984 | 0 | i = parse_tc_flower_to_actions__(flower, buf, i + 1, |
985 | 0 | action->police.result_jump); |
986 | 0 | if (i < 0) { |
987 | 0 | return i; |
988 | 0 | } |
989 | 0 | nl_msg_end_nested(buf, act_offset); |
990 | |
|
991 | 0 | act_offset = nl_msg_start_nested( |
992 | 0 | buf, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL); |
993 | |
|
994 | 0 | jump = flower->actions[i - 1].jump_action; |
995 | 0 | if (jump == JUMP_ACTION_STOP) { |
996 | 0 | jump = max_index; |
997 | 0 | } |
998 | 0 | if (jump != 0) { |
999 | 0 | i = parse_tc_flower_to_actions__(flower, buf, i, jump); |
1000 | 0 | if (i < 0) { |
1001 | 0 | return i; |
1002 | 0 | } |
1003 | 0 | } |
1004 | 0 | nl_msg_end_nested(buf, act_offset); |
1005 | |
|
1006 | 0 | i--; |
1007 | 0 | nl_msg_end_nested(buf, offset); |
1008 | 0 | } |
1009 | 0 | break; |
1010 | 0 | } |
1011 | | |
1012 | 0 | if (action->jump_action && action->type != TC_ACT_POLICE_MTU) { |
1013 | | /* If there is a jump, it means this was the end of an action |
1014 | | * set and we need to end this branch. */ |
1015 | 0 | i++; |
1016 | 0 | break; |
1017 | 0 | } |
1018 | 0 | } |
1019 | 0 | return i; |
1020 | 0 | } |
1021 | | |
static int
parse_tc_flower_to_actions(struct tc_flower *flower,
                           struct ofpbuf *buf)
{
    /* Translate all TC flower actions into OVS netlink actions appended to
     * 'buf', starting at action index 0 with no upper action limit (0).
     * Returns the number of actions consumed, or a negative errno. */
    return parse_tc_flower_to_actions__(flower, buf, 0, 0);
}
1028 | | |
/* Translates a dumped TC flower rule back into an OVS 'match', netlink
 * 'actions', 'stats' and 'attrs'.  'buf' is cleared and used as backing
 * storage for the generated actions; '*actions' points into it afterwards.
 * With 'terse' set, only stats/attrs are populated.  Returns 0 on success
 * or a positive errno. */
static int
parse_tc_flower_to_match(const struct netdev *netdev,
                         struct tc_flower *flower,
                         struct match *match,
                         struct nlattr **actions,
                         struct dpif_flow_stats *stats,
                         struct dpif_flow_attrs *attrs,
                         struct ofpbuf *buf,
                         bool terse)
{
    struct tc_flower_key *key = &flower->key;
    struct tc_flower_key *mask = &flower->mask;
    size_t act_off;
    int err;

    if (terse) {
        return parse_tc_flower_terse_to_match(flower, match, stats, attrs);
    }

    ofpbuf_clear(buf);

    match_init_catchall(match);
    match_set_dl_src_masked(match, key->src_mac, mask->src_mac);
    match_set_dl_dst_masked(match, key->dst_mac, mask->dst_mac);

    /* Ethertype dispatch: VLAN (possibly double-tagged), MPLS, ARP, or
     * plain dl_type. */
    if (eth_type_vlan(key->eth_type)) {
        match->flow.vlans[0].tpid = key->eth_type;
        match->wc.masks.vlans[0].tpid = OVS_BE16_MAX;
        match_set_dl_vlan(match, htons(key->vlan_id[0]), 0);
        match_set_dl_vlan_pcp(match, key->vlan_prio[0], 0);

        if (eth_type_vlan(key->encap_eth_type[0])) {
            /* QinQ: inner tag in vlans[1], true ethertype behind it. */
            match_set_dl_vlan(match, htons(key->vlan_id[1]), 1);
            match_set_dl_vlan_pcp(match, key->vlan_prio[1], 1);
            match_set_dl_type(match, key->encap_eth_type[1]);
            match->flow.vlans[1].tpid = key->encap_eth_type[0];
            match->wc.masks.vlans[1].tpid = OVS_BE16_MAX;
        } else {
            match_set_dl_type(match, key->encap_eth_type[0]);
        }
        flow_fix_vlan_tpid(&match->flow);
    } else if (eth_type_mpls(key->eth_type)) {
        /* Only the first MPLS LSE is supported by flower. */
        match->flow.mpls_lse[0] = key->mpls_lse & mask->mpls_lse;
        match->wc.masks.mpls_lse[0] = mask->mpls_lse;
        match_set_dl_type(match, key->encap_eth_type[0]);
    } else if (key->eth_type == htons(ETH_TYPE_ARP)) {
        match_set_arp_sha_masked(match, key->arp.sha, mask->arp.sha);
        match_set_arp_tha_masked(match, key->arp.tha, mask->arp.tha);
        match_set_arp_spa_masked(match, key->arp.spa, mask->arp.spa);
        match_set_arp_tpa_masked(match, key->arp.tpa, mask->arp.tpa);
        match_set_arp_opcode_masked(match, key->arp.opcode,
                                    mask->arp.opcode);
        match_set_dl_type(match, key->eth_type);
    } else {
        match_set_dl_type(match, key->eth_type);
    }

    if (is_ip_any(&match->flow)) {
        if (key->ip_proto) {
            match_set_nw_proto(match, key->ip_proto);
        }

        match_set_nw_tos_masked(match, key->ip_tos, mask->ip_tos);
        match_set_nw_ttl_masked(match, key->ip_ttl, mask->ip_ttl);

        /* TC fragment flags -> OVS nw_frag bits.  Note FRAG_IS_FIRST is
         * inverted: "not first" maps to FLOW_NW_FRAG_LATER. */
        if (mask->flags) {
            uint8_t flags = 0;
            uint8_t flags_mask = 0;

            if (mask->flags & TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT) {
                if (key->flags & TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT) {
                    flags |= FLOW_NW_FRAG_ANY;
                }
                flags_mask |= FLOW_NW_FRAG_ANY;
            }

            if (mask->flags & TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST) {
                if (!(key->flags & TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)) {
                    flags |= FLOW_NW_FRAG_LATER;
                }
                flags_mask |= FLOW_NW_FRAG_LATER;
            }

            match_set_nw_frag_masked(match, flags, flags_mask);
        }

        match_set_nw_src_masked(match, key->ipv4.ipv4_src, mask->ipv4.ipv4_src);
        match_set_nw_dst_masked(match, key->ipv4.ipv4_dst, mask->ipv4.ipv4_dst);

        match_set_ipv6_src_masked(match,
                                  &key->ipv6.ipv6_src, &mask->ipv6.ipv6_src);
        match_set_ipv6_dst_masked(match,
                                  &key->ipv6.ipv6_dst, &mask->ipv6.ipv6_dst);

        if (key->ip_proto == IPPROTO_TCP) {
            match_set_tp_dst_masked(match, key->tcp_dst, mask->tcp_dst);
            match_set_tp_src_masked(match, key->tcp_src, mask->tcp_src);
            match_set_tcp_flags_masked(match, key->tcp_flags, mask->tcp_flags);
        } else if (key->ip_proto == IPPROTO_UDP) {
            match_set_tp_dst_masked(match, key->udp_dst, mask->udp_dst);
            match_set_tp_src_masked(match, key->udp_src, mask->udp_src);
        } else if (key->ip_proto == IPPROTO_SCTP) {
            match_set_tp_dst_masked(match, key->sctp_dst, mask->sctp_dst);
            match_set_tp_src_masked(match, key->sctp_src, mask->sctp_src);
        } else if (key->ip_proto == IPPROTO_ICMP ||
                   key->ip_proto == IPPROTO_ICMPV6) {
            /* ICMP type/code are stored as u8 but matched through the
             * 16-bit transport-port fields. */
            match_set_tp_dst_masked(match, htons(key->icmp_code),
                                    htons(mask->icmp_code));
            match_set_tp_src_masked(match, htons(key->icmp_type),
                                    htons(mask->icmp_type));
        }

        /* TC conntrack state flags -> OVS ct_state bits, one flag at a
         * time so partial masks survive the translation. */
        if (mask->ct_state) {
            uint8_t ct_statev = 0, ct_statem = 0;

            if (mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_NEW) {
                if (key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_NEW) {
                    ct_statev |= OVS_CS_F_NEW;
                }
                ct_statem |= OVS_CS_F_NEW;
            }

            if (mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
                if (key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
                    ct_statev |= OVS_CS_F_ESTABLISHED;
                }
                ct_statem |= OVS_CS_F_ESTABLISHED;
            }

            if (mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED) {
                if (key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED) {
                    ct_statev |= OVS_CS_F_TRACKED;
                }
                ct_statem |= OVS_CS_F_TRACKED;
            }

            if (mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
                if (key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
                    ct_statev |= OVS_CS_F_REPLY_DIR;
                }
                ct_statem |= OVS_CS_F_REPLY_DIR;
            }

            if (mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_INVALID) {
                if (key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_INVALID) {
                    ct_statev |= OVS_CS_F_INVALID;
                }
                ct_statem |= OVS_CS_F_INVALID;
            }

            if (mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_RELATED) {
                if (key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_RELATED) {
                    ct_statev |= OVS_CS_F_RELATED;
                }
                ct_statem |= OVS_CS_F_RELATED;
            }

            match_set_ct_state_masked(match, ct_statev, ct_statem);
        }

        match_set_ct_zone_masked(match, key->ct_zone, mask->ct_zone);
        match_set_ct_mark_masked(match, key->ct_mark, mask->ct_mark);
        match_set_ct_label_masked(match, key->ct_label, mask->ct_label);
    }

    /* Tunnel metadata (only present for flows on tunnel devices). */
    if (flower->tunnel) {
        if (flower->mask.tunnel.id) {
            match_set_tun_id(match, flower->key.tunnel.id);
            match->flow.tunnel.flags |= FLOW_TNL_F_KEY;
        }
        if (flower->mask.tunnel.ipv4.ipv4_dst ||
            flower->mask.tunnel.ipv4.ipv4_src) {
            match_set_tun_dst_masked(match,
                                     flower->key.tunnel.ipv4.ipv4_dst,
                                     flower->mask.tunnel.ipv4.ipv4_dst);
            match_set_tun_src_masked(match,
                                     flower->key.tunnel.ipv4.ipv4_src,
                                     flower->mask.tunnel.ipv4.ipv4_src);
        } else if (ipv6_addr_is_set(&flower->mask.tunnel.ipv6.ipv6_dst) ||
                   ipv6_addr_is_set(&flower->mask.tunnel.ipv6.ipv6_src)) {
            match_set_tun_ipv6_dst_masked(match,
                                          &flower->key.tunnel.ipv6.ipv6_dst,
                                          &flower->mask.tunnel.ipv6.ipv6_dst);
            match_set_tun_ipv6_src_masked(match,
                                          &flower->key.tunnel.ipv6.ipv6_src,
                                          &flower->mask.tunnel.ipv6.ipv6_src);
        }
        if (flower->mask.tunnel.tos) {
            match_set_tun_tos_masked(match, flower->key.tunnel.tos,
                                     flower->mask.tunnel.tos);
        }
        if (flower->mask.tunnel.ttl) {
            match_set_tun_ttl_masked(match, flower->key.tunnel.ttl,
                                     flower->mask.tunnel.ttl);
        }
        if (flower->mask.tunnel.tp_src) {
            /* NOTE(review): the tunnel tp_src key/mask is folded into the
             * tun_tp_dst match field here, and is overwritten by the
             * tp_dst branch below when both are masked.  Looks like a
             * copy-paste of the tp_dst case -- confirm against the
             * intended datapath semantics before relying on it. */
            match_set_tun_tp_dst_masked(match, flower->key.tunnel.tp_src,
                                        flower->mask.tunnel.tp_src);
        }
        if (flower->mask.tunnel.tp_dst) {
            match_set_tun_tp_dst_masked(match, flower->key.tunnel.tp_dst,
                                        flower->mask.tunnel.tp_dst);
        }

        if (!strcmp(netdev_get_type(netdev), "geneve")) {
            flower_tun_opt_to_match(match, flower);
        }
    }

    /* Regenerate the OVS action list from the TC actions into 'buf'. */
    act_off = nl_msg_start_nested(buf, OVS_FLOW_ATTR_ACTIONS);
    err = parse_tc_flower_to_actions(flower, buf);
    if (err < 0) {
        /* Negative return is -errno; surface it as positive errno. */
        return -err;
    }
    nl_msg_end_nested(buf, act_off);

    *actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));

    parse_tc_flower_to_stats(flower, stats);
    parse_tc_flower_to_attrs(flower, attrs);

    return 0;
}
1252 | | |
/* Advances a TC flow dump: fetches the next flower rule from the netlink
 * dump, converts it into 'match'/'actions'/'stats'/'attrs' and recovers
 * its 'ufid'.  Entries that fail any conversion step are skipped.
 * 'rbuffer' backs the raw netlink message, 'wbuffer' backs the generated
 * actions.  Returns true if a flow was produced, false when the dump is
 * exhausted. */
static bool
netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
                         struct match *match,
                         struct nlattr **actions,
                         struct dpif_flow_stats *stats,
                         struct dpif_flow_attrs *attrs,
                         ovs_u128 *ufid,
                         struct ofpbuf *rbuffer,
                         struct ofpbuf *wbuffer)
{
    struct netdev *netdev = dump->netdev;
    struct ofpbuf nl_flow;
    struct tcf_id id;

    /* Template id for this netdev; per-rule prio/handle/chain are filled
     * in by parse_netlink_to_tc_flower(). */
    id = tc_make_tcf_id(netdev_get_ifindex(netdev),
                        get_block_id_from_netdev(netdev),
                        0, /* prio */
                        get_tc_qdisc_hook(netdev));

    while (nl_dump_next(dump->nl_dump, &nl_flow, rbuffer)) {
        struct dpif_flow_stats adjust_stats;
        struct tc_flower flower;

        if (parse_netlink_to_tc_flower(&nl_flow, &id, &flower, dump->terse)) {
            continue;
        }

        if (parse_tc_flower_to_match(netdev, &flower, match, actions,
                                     stats, attrs, wbuffer, dump->terse)) {
            continue;
        }

        /* Prefer the ufid embedded in the TC action cookie; otherwise fall
         * back to the local id->ufid mapping, skipping unknown rules. */
        if (flower.act_cookie.len) {
            *ufid = *((ovs_u128 *) flower.act_cookie.data);
        } else if (!find_ufid(netdev, &id, ufid)) {
            continue;
        }

        /* Fold in stats accumulated before the flow was (re)offloaded. */
        if (!get_ufid_adjust_stats(ufid, &adjust_stats)) {
            netdev_tc_adjust_stats(stats, &adjust_stats);
        }

        match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
        match->flow.in_port.odp_port = dump->port;
        match_set_recirc_id(match, id.chain);

        return true;
    }

    return false;
}
1304 | | |
1305 | | static int |
1306 | | parse_mpls_set_action(struct tc_flower *flower, struct tc_action *action, |
1307 | | const struct nlattr *set) |
1308 | 0 | { |
1309 | 0 | const struct ovs_key_mpls *mpls_set = nl_attr_get(set); |
1310 | |
|
1311 | 0 | action->mpls.label = mpls_lse_to_label(mpls_set->mpls_lse); |
1312 | 0 | action->mpls.tc = mpls_lse_to_tc(mpls_set->mpls_lse); |
1313 | 0 | action->mpls.ttl = mpls_lse_to_ttl(mpls_set->mpls_lse); |
1314 | 0 | action->mpls.bos = mpls_lse_to_bos(mpls_set->mpls_lse); |
1315 | 0 | action->type = TC_ACT_MPLS_SET; |
1316 | 0 | flower->action_count++; |
1317 | |
|
1318 | 0 | return 0; |
1319 | 0 | } |
1320 | | |
1321 | | static int |
1322 | | parse_put_flow_nat_action(struct tc_action *action, |
1323 | | const struct nlattr *nat, |
1324 | | size_t nat_len) |
1325 | 0 | { |
1326 | 0 | const struct nlattr *nat_attr; |
1327 | 0 | size_t nat_left; |
1328 | |
|
1329 | 0 | action->ct.nat_type = TC_NAT_RESTORE; |
1330 | 0 | NL_ATTR_FOR_EACH_UNSAFE (nat_attr, nat_left, nat, nat_len) { |
1331 | 0 | switch (nl_attr_type(nat_attr)) { |
1332 | 0 | case OVS_NAT_ATTR_SRC: { |
1333 | 0 | action->ct.nat_type = TC_NAT_SRC; |
1334 | 0 | }; |
1335 | 0 | break; |
1336 | 0 | case OVS_NAT_ATTR_DST: { |
1337 | 0 | action->ct.nat_type = TC_NAT_DST; |
1338 | 0 | }; |
1339 | 0 | break; |
1340 | 0 | case OVS_NAT_ATTR_IP_MIN: { |
1341 | 0 | if (nl_attr_get_size(nat_attr) == sizeof(ovs_be32)) { |
1342 | 0 | ovs_be32 addr = nl_attr_get_be32(nat_attr); |
1343 | |
|
1344 | 0 | action->ct.range.ipv4.min = addr; |
1345 | 0 | action->ct.range.ip_family = AF_INET; |
1346 | 0 | } else { |
1347 | 0 | struct in6_addr addr = nl_attr_get_in6_addr(nat_attr); |
1348 | |
|
1349 | 0 | action->ct.range.ipv6.min = addr; |
1350 | 0 | action->ct.range.ip_family = AF_INET6; |
1351 | 0 | } |
1352 | 0 | }; |
1353 | 0 | break; |
1354 | 0 | case OVS_NAT_ATTR_IP_MAX: { |
1355 | 0 | if (nl_attr_get_size(nat_attr) == sizeof(ovs_be32)) { |
1356 | 0 | ovs_be32 addr = nl_attr_get_be32(nat_attr); |
1357 | |
|
1358 | 0 | action->ct.range.ipv4.max = addr; |
1359 | 0 | action->ct.range.ip_family = AF_INET; |
1360 | 0 | } else { |
1361 | 0 | struct in6_addr addr = nl_attr_get_in6_addr(nat_attr); |
1362 | |
|
1363 | 0 | action->ct.range.ipv6.max = addr; |
1364 | 0 | action->ct.range.ip_family = AF_INET6; |
1365 | 0 | } |
1366 | 0 | }; |
1367 | 0 | break; |
1368 | 0 | case OVS_NAT_ATTR_PROTO_MIN: { |
1369 | 0 | action->ct.range.port.min = htons(nl_attr_get_u16(nat_attr)); |
1370 | 0 | }; |
1371 | 0 | break; |
1372 | 0 | case OVS_NAT_ATTR_PROTO_MAX: { |
1373 | 0 | action->ct.range.port.max = htons(nl_attr_get_u16(nat_attr)); |
1374 | 0 | }; |
1375 | 0 | break; |
1376 | 0 | } |
1377 | 0 | } |
1378 | 0 | return 0; |
1379 | 0 | } |
1380 | | |
1381 | | static int |
1382 | | parse_put_flow_ct_action(struct tc_flower *flower, |
1383 | | struct tc_action *action, |
1384 | | const struct nlattr *ct, |
1385 | | size_t ct_len) |
1386 | 0 | { |
1387 | 0 | const struct nlattr *ct_attr; |
1388 | 0 | size_t ct_left; |
1389 | 0 | int err; |
1390 | |
|
1391 | 0 | NL_ATTR_FOR_EACH_UNSAFE (ct_attr, ct_left, ct, ct_len) { |
1392 | 0 | switch (nl_attr_type(ct_attr)) { |
1393 | 0 | case OVS_CT_ATTR_COMMIT: { |
1394 | 0 | action->ct.commit = true; |
1395 | 0 | } |
1396 | 0 | break; |
1397 | 0 | case OVS_CT_ATTR_FORCE_COMMIT: { |
1398 | 0 | action->ct.commit = true; |
1399 | 0 | action->ct.force = true; |
1400 | 0 | } |
1401 | 0 | break; |
1402 | 0 | case OVS_CT_ATTR_ZONE: { |
1403 | 0 | action->ct.zone = nl_attr_get_u16(ct_attr); |
1404 | 0 | } |
1405 | 0 | break; |
1406 | 0 | case OVS_CT_ATTR_NAT: { |
1407 | 0 | const struct nlattr *nat = nl_attr_get(ct_attr); |
1408 | 0 | const size_t nat_len = nl_attr_get_size(ct_attr); |
1409 | |
|
1410 | 0 | err = parse_put_flow_nat_action(action, nat, nat_len); |
1411 | 0 | if (err) { |
1412 | 0 | return err; |
1413 | 0 | } |
1414 | 0 | } |
1415 | 0 | break; |
1416 | 0 | case OVS_CT_ATTR_MARK: { |
1417 | 0 | const struct { |
1418 | 0 | uint32_t key; |
1419 | 0 | uint32_t mask; |
1420 | 0 | } *ct_mark; |
1421 | |
|
1422 | 0 | ct_mark = nl_attr_get_unspec(ct_attr, sizeof *ct_mark); |
1423 | 0 | action->ct.mark = ct_mark->key; |
1424 | 0 | action->ct.mark_mask = ct_mark->mask; |
1425 | 0 | } |
1426 | 0 | break; |
1427 | 0 | case OVS_CT_ATTR_LABELS: { |
1428 | 0 | const struct { |
1429 | 0 | ovs_32aligned_u128 key; |
1430 | 0 | ovs_32aligned_u128 mask; |
1431 | 0 | } *ct_label; |
1432 | |
|
1433 | 0 | ct_label = nl_attr_get_unspec(ct_attr, sizeof *ct_label); |
1434 | 0 | action->ct.label = get_32aligned_u128(&ct_label->key); |
1435 | 0 | action->ct.label_mask = |
1436 | 0 | get_32aligned_u128(&ct_label->mask); |
1437 | 0 | } |
1438 | 0 | break; |
1439 | | /* The following option we do not support in tc-ct, and should |
1440 | | * not be ignored for proper operation. */ |
1441 | 0 | case OVS_CT_ATTR_HELPER: |
1442 | 0 | return EOPNOTSUPP; |
1443 | 0 | } |
1444 | 0 | } |
1445 | | |
1446 | 0 | action->type = TC_ACT_CT; |
1447 | 0 | flower->action_count++; |
1448 | 0 | return 0; |
1449 | 0 | } |
1450 | | |
/* Translates a masked (or unmasked, with 'hasmask' false) OVS set()
 * action into a TC pedit rewrite.  The attribute payload is the key
 * followed by, when 'hasmask', an equally-sized mask.  Field offsets are
 * mapped through 'set_flower_map'.  Returns 0 on success or EOPNOTSUPP
 * when the attribute type or some masked sub-field is not mapped. */
static int
parse_put_flow_set_masked_action(struct tc_flower *flower,
                                 struct tc_action *action,
                                 const struct nlattr *set,
                                 size_t set_len,
                                 bool hasmask)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    uint64_t set_stub[1024 / 8];
    struct ofpbuf set_buf = OFPBUF_STUB_INITIALIZER(set_stub);
    char *set_data, *set_mask;
    char *key = (char *) &action->rewrite.key;
    char *mask = (char *) &action->rewrite.mask;
    const struct nlattr *attr;
    int i, j, type;
    size_t size;

    /* copy so we can set attr mask to 0 for used ovs key struct members */
    attr = ofpbuf_put(&set_buf, set, set_len);

    type = nl_attr_type(attr);
    /* With a mask present, the payload is key bytes then mask bytes. */
    size = nl_attr_get_size(attr) / 2;
    set_data = CONST_CAST(char *, nl_attr_get(attr));
    set_mask = set_data + size;

    if (type >= ARRAY_SIZE(set_flower_map)
        || !set_flower_map[type][0].size) {
        VLOG_DBG_RL(&rl, "unsupported set action type: %d", type);
        ofpbuf_uninit(&set_buf);
        return EOPNOTSUPP;
    }

    /* Copy each mapped field from the netlink layout into the flower
     * rewrite key/mask layout. */
    for (i = 0; i < ARRAY_SIZE(set_flower_map[type]); i++) {
        struct netlink_field *f = &set_flower_map[type][i];

        if (!f->size) {
            break;
        }

        /* copy masked value; an unmasked set() rewrites all bits (0xFF). */
        for (j = 0; j < f->size; j++) {
            char maskval = hasmask ? set_mask[f->offset + j] : 0xFF;

            key[f->flower_offset + j] = maskval & set_data[f->offset + j];
            mask[f->flower_offset + j] = maskval;

        }

        /* set its mask to 0 to show it's been used. */
        if (hasmask) {
            memset(set_mask + f->offset, 0, f->size);
        }
    }

    /* Any mask bits left over belong to fields the map cannot express. */
    if (hasmask && !is_all_zeros(set_mask, size)) {
        VLOG_DBG_RL(&rl, "unsupported sub attribute of set action type %d",
                    type);
        ofpbuf_uninit(&set_buf);
        return EOPNOTSUPP;
    }

    ofpbuf_uninit(&set_buf);
    action->type = TC_ACT_PEDIT;
    flower->action_count++;
    return 0;
}
1517 | | |
1518 | | static int |
1519 | | parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action, |
1520 | | const struct nlattr *set, size_t set_len) |
1521 | 0 | { |
1522 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20); |
1523 | 0 | const struct nlattr *tunnel; |
1524 | 0 | const struct nlattr *tun_attr; |
1525 | 0 | size_t tun_left, tunnel_len; |
1526 | |
|
1527 | 0 | if (nl_attr_type(set) == OVS_KEY_ATTR_MPLS) { |
1528 | 0 | return parse_mpls_set_action(flower, action, set); |
1529 | 0 | } |
1530 | | |
1531 | 0 | if (nl_attr_type(set) != OVS_KEY_ATTR_TUNNEL) { |
1532 | 0 | return parse_put_flow_set_masked_action(flower, action, set, |
1533 | 0 | set_len, false); |
1534 | 0 | } |
1535 | | |
1536 | 0 | tunnel = nl_attr_get(set); |
1537 | 0 | tunnel_len = nl_attr_get_size(set); |
1538 | |
|
1539 | 0 | action->type = TC_ACT_ENCAP; |
1540 | 0 | action->encap.id_present = false; |
1541 | 0 | action->encap.no_csum = 1; |
1542 | 0 | flower->action_count++; |
1543 | 0 | NL_ATTR_FOR_EACH_UNSAFE(tun_attr, tun_left, tunnel, tunnel_len) { |
1544 | 0 | switch (nl_attr_type(tun_attr)) { |
1545 | 0 | case OVS_TUNNEL_KEY_ATTR_ID: { |
1546 | 0 | action->encap.id = nl_attr_get_be64(tun_attr); |
1547 | 0 | action->encap.id_present = true; |
1548 | 0 | } |
1549 | 0 | break; |
1550 | 0 | case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: { |
1551 | 0 | action->encap.ipv4.ipv4_src = nl_attr_get_be32(tun_attr); |
1552 | 0 | } |
1553 | 0 | break; |
1554 | 0 | case OVS_TUNNEL_KEY_ATTR_IPV4_DST: { |
1555 | 0 | action->encap.ipv4.ipv4_dst = nl_attr_get_be32(tun_attr); |
1556 | 0 | } |
1557 | 0 | break; |
1558 | 0 | case OVS_TUNNEL_KEY_ATTR_TOS: { |
1559 | 0 | action->encap.tos = nl_attr_get_u8(tun_attr); |
1560 | 0 | } |
1561 | 0 | break; |
1562 | 0 | case OVS_TUNNEL_KEY_ATTR_TTL: { |
1563 | 0 | action->encap.ttl = nl_attr_get_u8(tun_attr); |
1564 | 0 | } |
1565 | 0 | break; |
1566 | 0 | case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: { |
1567 | | /* XXX: This is wrong! We're ignoring the DF flag configuration |
1568 | | * requested by the user. However, TC for now has no way to pass |
1569 | | * that flag and it is set by default, meaning tunnel offloading |
1570 | | * will not work if 'options:df_default=false' is not set. |
1571 | | * Keeping incorrect behavior for now. */ |
1572 | 0 | } |
1573 | 0 | break; |
1574 | 0 | case OVS_TUNNEL_KEY_ATTR_CSUM: { |
1575 | 0 | action->encap.no_csum = 0; |
1576 | 0 | } |
1577 | 0 | break; |
1578 | 0 | case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: { |
1579 | 0 | action->encap.ipv6.ipv6_src = |
1580 | 0 | nl_attr_get_in6_addr(tun_attr); |
1581 | 0 | } |
1582 | 0 | break; |
1583 | 0 | case OVS_TUNNEL_KEY_ATTR_IPV6_DST: { |
1584 | 0 | action->encap.ipv6.ipv6_dst = |
1585 | 0 | nl_attr_get_in6_addr(tun_attr); |
1586 | 0 | } |
1587 | 0 | break; |
1588 | 0 | case OVS_TUNNEL_KEY_ATTR_TP_SRC: { |
1589 | 0 | action->encap.tp_src = nl_attr_get_be16(tun_attr); |
1590 | 0 | } |
1591 | 0 | break; |
1592 | 0 | case OVS_TUNNEL_KEY_ATTR_TP_DST: { |
1593 | 0 | action->encap.tp_dst = nl_attr_get_be16(tun_attr); |
1594 | 0 | } |
1595 | 0 | break; |
1596 | 0 | case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: { |
1597 | 0 | memcpy(action->encap.data.opts.gnv, nl_attr_get(tun_attr), |
1598 | 0 | nl_attr_get_size(tun_attr)); |
1599 | 0 | action->encap.data.present.len = nl_attr_get_size(tun_attr); |
1600 | 0 | } |
1601 | 0 | break; |
1602 | 0 | default: |
1603 | 0 | VLOG_DBG_RL(&rl, "unsupported tunnel key attribute %d", |
1604 | 0 | nl_attr_type(tun_attr)); |
1605 | 0 | return EOPNOTSUPP; |
1606 | 0 | } |
1607 | 0 | } |
1608 | | |
1609 | 0 | return 0; |
1610 | 0 | } |
1611 | | |
1612 | | static bool |
1613 | | is_ipv6_fragment_and_masked(const struct flow *key, const struct flow *mask) |
1614 | 0 | { |
1615 | 0 | if (key->dl_type != htons(ETH_P_IPV6)) { |
1616 | 0 | return false; |
1617 | 0 | } |
1618 | 0 | if (mask->nw_proto && key->nw_proto == IPPROTO_FRAGMENT) { |
1619 | 0 | return true; |
1620 | 0 | } |
1621 | 0 | if (key->nw_frag & (mask->nw_frag & FLOW_NW_FRAG_ANY)) { |
1622 | 0 | return true; |
1623 | 0 | } |
1624 | 0 | return false; |
1625 | 0 | } |
1626 | | |
/* Verifies that every masked field in 'match' is one that the TC flower
 * translation understands, clearing mask bits as features are accounted
 * for elsewhere (packet_type here; the rest by the caller's flower
 * construction).  Returns 0 when offloadable, EOPNOTSUPP otherwise.  The
 * final is_all_zeros() check catches any masked field not explicitly
 * handled by the offload path. */
static int
test_key_and_mask(struct match *match)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct flow *key = &match->flow;
    struct flow *mask = &match->wc.masks;

    if (mask->pkt_mark) {
        VLOG_DBG_RL(&rl, "offloading attribute pkt_mark isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->dp_hash) {
        VLOG_DBG_RL(&rl, "offloading attribute dp_hash isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->conj_id) {
        VLOG_DBG_RL(&rl, "offloading attribute conj_id isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->skb_priority) {
        VLOG_DBG_RL(&rl, "offloading attribute skb_priority isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->actset_output) {
        VLOG_DBG_RL(&rl,
                    "offloading attribute actset_output isn't supported");
        return EOPNOTSUPP;
    }

    /* A match on packet_type is unsupported only when the key is nonzero
     * (i.e. not an Ethernet packet); either way the mask is consumed. */
    if (mask->packet_type && key->packet_type) {
        VLOG_DBG_RL(&rl, "offloading attribute packet_type isn't supported");
        return EOPNOTSUPP;
    }
    mask->packet_type = 0;

    for (int i = 0; i < FLOW_N_REGS; i++) {
        if (mask->regs[i]) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute regs[%d] isn't supported", i);
            return EOPNOTSUPP;
        }
    }

    if (mask->metadata) {
        VLOG_DBG_RL(&rl, "offloading attribute metadata isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->nw_tos) {
        VLOG_DBG_RL(&rl, "offloading attribute nw_tos isn't supported");
        return EOPNOTSUPP;
    }

    /* Flower can only match the first MPLS label stack entry. */
    for (int i = 1; i < FLOW_MAX_MPLS_LABELS; i++) {
        if (mask->mpls_lse[i]) {
            VLOG_DBG_RL(&rl, "offloading multiple mpls_lses isn't supported");
            return EOPNOTSUPP;
        }
    }

    if (key->dl_type == htons(ETH_TYPE_IP) &&
        key->nw_proto == IPPROTO_IGMP) {
        /* For IGMP, tp_src/tp_dst carry the IGMP type/code. */
        if (mask->tp_src) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute igmp_type isn't supported");
            return EOPNOTSUPP;
        }
        if (mask->tp_dst) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute igmp_code isn't supported");
            return EOPNOTSUPP;
        }
    } else if (key->dl_type == htons(OFP_DL_TYPE_NOT_ETH_TYPE)) {
        VLOG_DBG_RL(&rl,
                    "offloading of non-ethernet packets isn't supported");
        return EOPNOTSUPP;
    }

    if (is_ipv6_fragment_and_masked(key, mask)) {
        VLOG_DBG_RL(&rl, "offloading of IPv6 fragments isn't supported");
        return EOPNOTSUPP;
    }

    /* Anything still masked at this point was not translated to flower. */
    if (!is_all_zeros(mask, sizeof *mask)) {
        if (!VLOG_DROP_DBG(&rl)) {
            struct ds ds = DS_EMPTY_INITIALIZER;

            ds_put_cstr(&ds,
                        "offloading isn't supported, unknown attribute\n"
                        "Unused mask bits:\n");
            ds_put_sparse_hex_dump(&ds, mask, sizeof *mask, 0, false);

            VLOG_DBG("%s", ds_cstr(&ds));
            ds_destroy(&ds);
        }
        return EOPNOTSUPP;
    }

    return 0;
}
1731 | | |
/* Translates Geneve tunnel options from the OVS tunnel match ('tnl' key and
 * 'tnl_mask' mask) into 'flower', consuming the translated bits from
 * 'tnl_mask' as it goes.
 *
 * 'flower' expresses metadata lengths as exact values rather than masks, so
 * both the overall 'present.len' and each option's 'length' field in the
 * flower mask are set from the key, not from the mask.  Options not in UDPIF
 * format cannot be translated (unless empty) and are left untouched in
 * 'tnl_mask', which will later make the flow fail the leftover-mask check. */
static void
flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl,
                        struct flow_tnl *tnl_mask)
{
    struct geneve_opt *opt, *opt_mask;
    int len, cnt = 0;

    /* 'flower' always has an exact match on tunnel metadata length, so having
     * it in a wrong format is not acceptable unless it is empty. */
    if (!(tnl->flags & FLOW_TNL_F_UDPIF)) {
        if (tnl->metadata.present.map) {
            /* XXX: Add non-UDPIF format parsing here? */
            VLOG_WARN_RL(&warn_rl, "Tunnel options are in the wrong format.");
        } else {
            /* There are no options, that equals for them to be in UDPIF format
             * with a zero 'len'. Clearing the 'map' mask as consumed.
             * No need to explicitly set 'len' to zero in the 'flower'. */
            tnl_mask->flags &= ~FLOW_TNL_F_UDPIF;
            memset(&tnl_mask->metadata.present.map, 0,
                   sizeof tnl_mask->metadata.present.map);
        }
        return;
    }

    /* Options are in UDPIF format; mark the flag as consumed. */
    tnl_mask->flags &= ~FLOW_TNL_F_UDPIF;

    flower->key.tunnel.metadata.present.len = tnl->metadata.present.len;
    /* Copying from the key and not from the mask, since in the 'flower'
     * the length for a mask is not a mask, but the actual length. TC
     * will use an exact match for the length. */
    flower->mask.tunnel.metadata.present.len = tnl->metadata.present.len;
    memset(&tnl_mask->metadata.present.len, 0,
           sizeof tnl_mask->metadata.present.len);

    if (!tnl->metadata.present.len) {
        return;
    }

    memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv,
           tnl->metadata.present.len);
    memcpy(flower->mask.tunnel.metadata.opts.gnv, tnl_mask->metadata.opts.gnv,
           tnl->metadata.present.len);

    /* Option data has been copied into the flower; clear it as consumed. */
    memset(tnl_mask->metadata.opts.gnv, 0, tnl->metadata.present.len);

    /* Fixing up 'length' fields of particular options, since these are
     * also not masks, but actual lengths in the 'flower' structure. */
    len = flower->key.tunnel.metadata.present.len;
    while (len) {
        opt = &flower->key.tunnel.metadata.opts.gnv[cnt];
        opt_mask = &flower->mask.tunnel.metadata.opts.gnv[cnt];

        opt_mask->length = opt->length;

        /* 'cnt' advances in 4-byte units (the geneve_opt header is one unit,
         * 'opt->length' counts 4-byte words of payload); 'len' counts the
         * same span in bytes. */
        cnt += sizeof(struct geneve_opt) / 4 + opt->length;
        len -= sizeof(struct geneve_opt) + opt->length * 4;
    }
}
1790 | | |
1791 | | static void |
1792 | | parse_match_ct_state_to_flower(struct tc_flower *flower, struct match *match) |
1793 | 0 | { |
1794 | 0 | const struct flow *key = &match->flow; |
1795 | 0 | struct flow *mask = &match->wc.masks; |
1796 | |
|
1797 | 0 | if (!ct_state_support) { |
1798 | 0 | return; |
1799 | 0 | } |
1800 | | |
1801 | 0 | if ((ct_state_support & mask->ct_state) == mask->ct_state) { |
1802 | 0 | if (mask->ct_state & OVS_CS_F_NEW) { |
1803 | 0 | if (key->ct_state & OVS_CS_F_NEW) { |
1804 | 0 | flower->key.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_NEW; |
1805 | 0 | } |
1806 | 0 | flower->mask.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_NEW; |
1807 | 0 | mask->ct_state &= ~OVS_CS_F_NEW; |
1808 | 0 | } |
1809 | |
|
1810 | 0 | if (mask->ct_state & OVS_CS_F_ESTABLISHED) { |
1811 | 0 | if (key->ct_state & OVS_CS_F_ESTABLISHED) { |
1812 | 0 | flower->key.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; |
1813 | 0 | } |
1814 | 0 | flower->mask.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; |
1815 | 0 | mask->ct_state &= ~OVS_CS_F_ESTABLISHED; |
1816 | 0 | } |
1817 | |
|
1818 | 0 | if (mask->ct_state & OVS_CS_F_TRACKED) { |
1819 | 0 | if (key->ct_state & OVS_CS_F_TRACKED) { |
1820 | 0 | flower->key.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_TRACKED; |
1821 | 0 | } |
1822 | 0 | flower->mask.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_TRACKED; |
1823 | 0 | mask->ct_state &= ~OVS_CS_F_TRACKED; |
1824 | 0 | } |
1825 | |
|
1826 | 0 | if (mask->ct_state & OVS_CS_F_REPLY_DIR) { |
1827 | 0 | if (key->ct_state & OVS_CS_F_REPLY_DIR) { |
1828 | 0 | flower->key.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_REPLY; |
1829 | 0 | } |
1830 | 0 | flower->mask.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_REPLY; |
1831 | 0 | mask->ct_state &= ~OVS_CS_F_REPLY_DIR; |
1832 | 0 | } |
1833 | |
|
1834 | 0 | if (mask->ct_state & OVS_CS_F_INVALID) { |
1835 | 0 | if (key->ct_state & OVS_CS_F_INVALID) { |
1836 | 0 | flower->key.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_INVALID; |
1837 | 0 | } |
1838 | 0 | flower->mask.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_INVALID; |
1839 | 0 | mask->ct_state &= ~OVS_CS_F_INVALID; |
1840 | 0 | } |
1841 | |
|
1842 | 0 | if (mask->ct_state & OVS_CS_F_RELATED) { |
1843 | 0 | if (key->ct_state & OVS_CS_F_RELATED) { |
1844 | 0 | flower->key.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_RELATED; |
1845 | 0 | } |
1846 | 0 | flower->mask.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_RELATED; |
1847 | 0 | mask->ct_state &= ~OVS_CS_F_RELATED; |
1848 | 0 | } |
1849 | |
|
1850 | 0 | if (flower->key.ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) { |
1851 | 0 | flower->key.ct_state &= ~(TCA_FLOWER_KEY_CT_FLAGS_NEW); |
1852 | 0 | flower->mask.ct_state &= ~(TCA_FLOWER_KEY_CT_FLAGS_NEW); |
1853 | 0 | } |
1854 | |
|
1855 | 0 | if (flower->key.ct_state && |
1856 | 0 | !(flower->key.ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) { |
1857 | 0 | flower->key.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_TRACKED; |
1858 | 0 | flower->mask.ct_state |= TCA_FLOWER_KEY_CT_FLAGS_TRACKED; |
1859 | 0 | } |
1860 | 0 | } |
1861 | |
|
1862 | 0 | if (mask->ct_zone) { |
1863 | 0 | flower->key.ct_zone = key->ct_zone; |
1864 | 0 | flower->mask.ct_zone = mask->ct_zone; |
1865 | 0 | mask->ct_zone = 0; |
1866 | 0 | } |
1867 | |
|
1868 | 0 | if (mask->ct_mark) { |
1869 | 0 | flower->key.ct_mark = key->ct_mark; |
1870 | 0 | flower->mask.ct_mark = mask->ct_mark; |
1871 | 0 | mask->ct_mark = 0; |
1872 | 0 | } |
1873 | |
|
1874 | 0 | if (!ovs_u128_is_zero(mask->ct_label)) { |
1875 | 0 | flower->key.ct_label = key->ct_label; |
1876 | 0 | flower->mask.ct_label = mask->ct_label; |
1877 | 0 | mask->ct_label = OVS_U128_ZERO; |
1878 | 0 | } |
1879 | 0 | } |
1880 | | |
1881 | | |
/* Translates an OVS_ACTION_ATTR_CHECK_PKT_LEN action into TC actions
 * appended to 'flower': a police action carrying the MTU check, followed by
 * the "greater" action list and then the "less or equal" list.  The police
 * action's 'result_jump' skips the "greater" actions when the packet fits,
 * and the last "greater" action jumps over (or stops before, when this is
 * the final action) the "less or equal" ones.
 *
 * 'last_action' is true when no actions follow this one in the enclosing
 * action list.  On return, '*need_jump_update' (if non-NULL) points to an
 * action whose jump offset the caller must still fix up once it knows where
 * its own action list ends; see the comment in the body below.
 *
 * Returns 0 on success, a positive errno value otherwise. */
static int
parse_check_pkt_len_action(struct netdev *netdev, struct tc_flower *flower,
                           struct offload_info *info, struct tc_action *action,
                           const struct nlattr *nla, bool last_action,
                           struct tc_action **need_jump_update,
                           bool *recirc_act)
{
    struct tc_action *ge_jump_update = NULL, *le_jump_update = NULL;
    const struct nlattr *nl_actions;
    int err, le_offset, gt_offset;
    uint16_t pkt_len;

    static const struct nl_policy ovs_cpl_policy[] = {
        [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = { .type = NL_A_U16 },
        [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = { .type = NL_A_NESTED },
        [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]
            = { .type = NL_A_NESTED },
    };
    struct nlattr *a[ARRAY_SIZE(ovs_cpl_policy)];

    if (!nl_parse_nested(nla, ovs_cpl_policy, a, ARRAY_SIZE(a))) {
        VLOG_INFO("Received invalid formatted OVS_ACTION_ATTR_CHECK_PKT_LEN!");
        return EOPNOTSUPP;
    }

    if (!a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] ||
        !a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]) {
        VLOG_INFO("Received invalid OVS_CHECK_PKT_LEN_ATTR_ACTION_IF_*!");
        return EOPNOTSUPP;
    }

    pkt_len = nl_attr_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);

    /* Add the police mtu action first in the allocated slot. */
    action->police.mtu = pkt_len;
    action->type = TC_ACT_POLICE_MTU;
    le_offset = flower->action_count++;

    /* Parse and add the greater than action(s).
     * NOTE: The last_action parameter means that there are no more actions
     * after the if () then ... else () case. */
    nl_actions = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
    err = netdev_tc_parse_nl_actions(netdev, flower, info,
                                     nl_attr_get(nl_actions),
                                     nl_attr_get_size(nl_actions),
                                     recirc_act, !last_action,
                                     &ge_jump_update);
    if (err) {
        return err;
    }

    /* Update goto offset for le actions. */
    flower->actions[le_offset].police.result_jump = flower->action_count;

    gt_offset = flower->action_count;

    /* Parse and add the less than action(s). */
    nl_actions = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
    err = netdev_tc_parse_nl_actions(netdev, flower, info,
                                     nl_attr_get(nl_actions),
                                     nl_attr_get_size(nl_actions),
                                     recirc_act, !last_action,
                                     &le_jump_update);

    if (gt_offset == flower->action_count && last_action) {
        /* No le actions where added, fix gt offset. */
        flower->actions[le_offset].police.result_jump = JUMP_ACTION_STOP;
    }

    /* Update goto offset for gt actions to skip the le ones. */
    if (last_action) {
        flower->actions[gt_offset - 1].jump_action = JUMP_ACTION_STOP;

        if (need_jump_update) {
            *need_jump_update = NULL;
        }
    } else {
        if (gt_offset == flower->action_count) {
            /* No le actions were added; nothing for the gt branch to skip. */
            flower->actions[gt_offset - 1].jump_action = 0;
        } else {
            flower->actions[gt_offset - 1].jump_action = flower->action_count;
        }
        /* If we have nested if() else () the if actions jump over the else
         * and will end-up in the outer else () case, which it should have
         * skipped. To void this we return the "potential" inner if() goto to
         * need_jump_update, so it can be updated on return!
         */
        if (need_jump_update) {
            *need_jump_update = &flower->actions[gt_offset - 1];
        }
    }

    /* Propagate the jump target chosen above into any nested jumps that the
     * recursive parses handed back. */
    if (le_jump_update != NULL) {
        le_jump_update->jump_action =
            flower->actions[gt_offset - 1].jump_action;
    }
    if (ge_jump_update != NULL) {
        ge_jump_update->jump_action =
            flower->actions[gt_offset - 1].jump_action;
    }

    /* The error from the 'le' parse is checked only after the jump fixups
     * above have been applied. */
    if (err) {
        return err;
    }

    return 0;
}
1989 | | |
1990 | | static int |
1991 | | netdev_tc_parse_nl_actions(struct netdev *netdev, struct tc_flower *flower, |
1992 | | struct offload_info *info, |
1993 | | const struct nlattr *actions, size_t actions_len, |
1994 | | bool *recirc_act, bool more_actions, |
1995 | | struct tc_action **need_jump_update) |
1996 | 0 | { |
1997 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20); |
1998 | 0 | const struct nlattr *nla; |
1999 | 0 | size_t left; |
2000 | |
|
2001 | 0 | NL_ATTR_FOR_EACH (nla, left, actions, actions_len) { |
2002 | 0 | struct tc_action *action; |
2003 | 0 | int err; |
2004 | |
|
2005 | 0 | if (flower->action_count >= TCA_ACT_MAX_NUM) { |
2006 | 0 | VLOG_DBG_RL(&rl, "Can only support %d actions", TCA_ACT_MAX_NUM); |
2007 | 0 | return EOPNOTSUPP; |
2008 | 0 | } |
2009 | | |
2010 | 0 | action = &flower->actions[flower->action_count]; |
2011 | |
|
2012 | 0 | if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) { |
2013 | 0 | odp_port_t port = nl_attr_get_odp_port(nla); |
2014 | 0 | struct netdev *outdev = netdev_ports_get( |
2015 | 0 | port, netdev_get_dpif_type(netdev)); |
2016 | |
|
2017 | 0 | if (!outdev) { |
2018 | 0 | VLOG_DBG_RL(&rl, "Can't find netdev for output port %d", port); |
2019 | 0 | return ENODEV; |
2020 | 0 | } |
2021 | | |
2022 | 0 | if (!netdev_flow_api_equals(netdev, outdev)) { |
2023 | 0 | VLOG_DBG_RL(&rl, |
2024 | 0 | "Flow API provider mismatch between ingress (%s) " |
2025 | 0 | "and egress (%s) ports", |
2026 | 0 | netdev_get_name(netdev), netdev_get_name(outdev)); |
2027 | 0 | netdev_close(outdev); |
2028 | 0 | return EOPNOTSUPP; |
2029 | 0 | } |
2030 | | |
2031 | 0 | action->out.ifindex_out = netdev_get_ifindex(outdev); |
2032 | 0 | if (action->out.ifindex_out < 0) { |
2033 | 0 | VLOG_DBG_RL(&rl, |
2034 | 0 | "Can't find ifindex for output port %s, error %d", |
2035 | 0 | netdev_get_name(outdev), action->out.ifindex_out); |
2036 | 0 | netdev_close(outdev); |
2037 | 0 | return -action->out.ifindex_out; |
2038 | 0 | } |
2039 | | |
2040 | 0 | action->out.ingress = is_internal_port(netdev_get_type(outdev)); |
2041 | 0 | action->type = TC_ACT_OUTPUT; |
2042 | 0 | flower->action_count++; |
2043 | 0 | netdev_close(outdev); |
2044 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_VLAN) { |
2045 | 0 | const struct ovs_action_push_vlan *vlan_push = nl_attr_get(nla); |
2046 | |
|
2047 | 0 | action->vlan.vlan_push_tpid = vlan_push->vlan_tpid; |
2048 | 0 | action->vlan.vlan_push_id = vlan_tci_to_vid(vlan_push->vlan_tci); |
2049 | 0 | action->vlan.vlan_push_prio = vlan_tci_to_pcp(vlan_push->vlan_tci); |
2050 | 0 | action->type = TC_ACT_VLAN_PUSH; |
2051 | 0 | flower->action_count++; |
2052 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) { |
2053 | 0 | action->type = TC_ACT_VLAN_POP; |
2054 | 0 | flower->action_count++; |
2055 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_MPLS) { |
2056 | 0 | const struct ovs_action_push_mpls *mpls_push = nl_attr_get(nla); |
2057 | |
|
2058 | 0 | action->mpls.proto = mpls_push->mpls_ethertype; |
2059 | 0 | action->mpls.label = mpls_lse_to_label(mpls_push->mpls_lse); |
2060 | 0 | action->mpls.tc = mpls_lse_to_tc(mpls_push->mpls_lse); |
2061 | 0 | action->mpls.ttl = mpls_lse_to_ttl(mpls_push->mpls_lse); |
2062 | 0 | action->mpls.bos = mpls_lse_to_bos(mpls_push->mpls_lse); |
2063 | 0 | action->type = TC_ACT_MPLS_PUSH; |
2064 | 0 | flower->action_count++; |
2065 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_MPLS) { |
2066 | 0 | action->mpls.proto = nl_attr_get_be16(nla); |
2067 | 0 | action->type = TC_ACT_MPLS_POP; |
2068 | 0 | flower->action_count++; |
2069 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET) { |
2070 | 0 | const struct nlattr *set = nl_attr_get(nla); |
2071 | 0 | const size_t set_len = nl_attr_get_size(nla); |
2072 | |
|
2073 | 0 | err = parse_put_flow_set_action(flower, action, set, set_len); |
2074 | 0 | if (err) { |
2075 | 0 | return err; |
2076 | 0 | } |
2077 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) { |
2078 | 0 | const struct nlattr *set = nl_attr_get(nla); |
2079 | 0 | const size_t set_len = nl_attr_get_size(nla); |
2080 | |
|
2081 | 0 | err = parse_put_flow_set_masked_action(flower, action, set, |
2082 | 0 | set_len, true); |
2083 | 0 | if (err) { |
2084 | 0 | return err; |
2085 | 0 | } |
2086 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_CT) { |
2087 | 0 | const struct nlattr *ct = nl_attr_get(nla); |
2088 | 0 | const size_t ct_len = nl_attr_get_size(nla); |
2089 | |
|
2090 | 0 | if (!ct_state_support) { |
2091 | 0 | return -EOPNOTSUPP; |
2092 | 0 | } |
2093 | | |
2094 | 0 | err = parse_put_flow_ct_action(flower, action, ct, ct_len); |
2095 | 0 | if (err) { |
2096 | 0 | return err; |
2097 | 0 | } |
2098 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_CT_CLEAR) { |
2099 | 0 | action->type = TC_ACT_CT; |
2100 | 0 | action->ct.clear = true; |
2101 | 0 | flower->action_count++; |
2102 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_RECIRC) { |
2103 | 0 | action->type = TC_ACT_GOTO; |
2104 | 0 | action->chain = nl_attr_get_u32(nla); |
2105 | 0 | flower->action_count++; |
2106 | 0 | *recirc_act = true; |
2107 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_DROP) { |
2108 | 0 | action->type = TC_ACT_GOTO; |
2109 | 0 | action->chain = 0; /* 0 is reserved and not used by recirc. */ |
2110 | 0 | flower->action_count++; |
2111 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_METER) { |
2112 | 0 | uint32_t police_index, meter_id; |
2113 | |
|
2114 | 0 | meter_id = nl_attr_get_u32(nla); |
2115 | 0 | if (meter_id_lookup(meter_id, &police_index)) { |
2116 | 0 | return EOPNOTSUPP; |
2117 | 0 | } |
2118 | 0 | action->type = TC_ACT_POLICE; |
2119 | 0 | action->police.index = police_index; |
2120 | 0 | flower->action_count++; |
2121 | 0 | } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_CHECK_PKT_LEN) { |
2122 | 0 | err = parse_check_pkt_len_action(netdev, flower, info, action, nla, |
2123 | 0 | nl_attr_len_pad(nla, |
2124 | 0 | left) >= left |
2125 | 0 | && !more_actions, |
2126 | 0 | need_jump_update, |
2127 | 0 | recirc_act); |
2128 | 0 | if (err) { |
2129 | 0 | return err; |
2130 | 0 | } |
2131 | 0 | } else { |
2132 | 0 | VLOG_DBG_RL(&rl, "unsupported put action type: %d", |
2133 | 0 | nl_attr_type(nla)); |
2134 | 0 | return EOPNOTSUPP; |
2135 | 0 | } |
2136 | 0 | } |
2137 | 0 | return 0; |
2138 | 0 | } |
2139 | | |
/* Offloads the flow described by 'match', 'actions'/'actions_len' and 'ufid'
 * as a TC flower filter on 'netdev', replacing any existing filter for the
 * same 'ufid'.  Fields of the match mask are cleared as they are translated
 * into the flower key/mask; test_key_and_mask() then rejects the flow if any
 * unsupported (non-zero) mask bits remain.
 *
 * On success, '*stats' (if non-null) is zeroed and adjusted for stats
 * carried over from a replaced filter.  Returns 0 on success, a positive
 * errno value otherwise. */
static int
netdev_tc_flow_put(struct netdev *netdev, struct match *match,
                   struct nlattr *actions, size_t actions_len,
                   const ovs_u128 *ufid, struct offload_info *info,
                   struct dpif_flow_stats *stats)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
    struct tc_flower flower;
    const struct flow *key = &match->flow;
    struct flow *mask = &match->wc.masks;
    const struct flow_tnl *tnl = &match->flow.tunnel;
    struct flow_tnl *tnl_mask = &mask->tunnel;
    struct dpif_flow_stats adjust_stats;
    bool recirc_act = false;
    uint32_t block_id = 0;
    struct tcf_id id;
    uint32_t chain;
    int prio = 0;
    int ifindex;
    int err;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        /* 'ifindex' holds a negative errno on failure. */
        VLOG_ERR_RL(&error_rl, "flow_put: failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    memset(&flower, 0, sizeof flower);

    /* The recirculation id maps to the TC chain. */
    chain = key->recirc_id;
    mask->recirc_id = 0;

    if (flow_tnl_dst_is_set(&key->tunnel) ||
        flow_tnl_src_is_set(&key->tunnel)) {
        VLOG_DBG_RL(&rl,
                    "tunnel: id %#" PRIx64 " src " IP_FMT
                    " dst " IP_FMT " tp_src %d tp_dst %d",
                    ntohll(tnl->tun_id),
                    IP_ARGS(tnl->ip_src), IP_ARGS(tnl->ip_dst),
                    ntohs(tnl->tp_src), ntohs(tnl->tp_dst));
        flower.key.tunnel.id = tnl->tun_id;
        flower.key.tunnel.ipv4.ipv4_src = tnl->ip_src;
        flower.key.tunnel.ipv4.ipv4_dst = tnl->ip_dst;
        flower.key.tunnel.ipv6.ipv6_src = tnl->ipv6_src;
        flower.key.tunnel.ipv6.ipv6_dst = tnl->ipv6_dst;
        flower.key.tunnel.tos = tnl->ip_tos;
        flower.key.tunnel.ttl = tnl->ip_ttl;
        flower.key.tunnel.tp_src = tnl->tp_src;
        flower.key.tunnel.tp_dst = tnl->tp_dst;

        flower.mask.tunnel.ipv4.ipv4_src = tnl_mask->ip_src;
        flower.mask.tunnel.ipv4.ipv4_dst = tnl_mask->ip_dst;
        flower.mask.tunnel.ipv6.ipv6_src = tnl_mask->ipv6_src;
        flower.mask.tunnel.ipv6.ipv6_dst = tnl_mask->ipv6_dst;
        flower.mask.tunnel.tos = tnl_mask->ip_tos;
        flower.mask.tunnel.ttl = tnl_mask->ip_ttl;
        flower.mask.tunnel.tp_src = tnl_mask->tp_src;
        /* XXX: We should be setting the mask from 'tnl_mask->tp_dst' here, but
         * some hardware drivers (mlx5) doesn't support masked matches and will
         * refuse to offload such flows keeping them in software path.
         * Degrading the flow down to exact match for now as a workaround. */
        flower.mask.tunnel.tp_dst = OVS_BE16_MAX;
        /* Only match on the tunnel id when the flow actually keys on it. */
        flower.mask.tunnel.id = (tnl->flags & FLOW_TNL_F_KEY) ? tnl_mask->tun_id : 0;

        /* All of the above tunnel fields have been consumed into the
         * flower; clear their masks. */
        memset(&tnl_mask->ip_src, 0, sizeof tnl_mask->ip_src);
        memset(&tnl_mask->ip_dst, 0, sizeof tnl_mask->ip_dst);
        memset(&tnl_mask->ipv6_src, 0, sizeof tnl_mask->ipv6_src);
        memset(&tnl_mask->ipv6_dst, 0, sizeof tnl_mask->ipv6_dst);
        memset(&tnl_mask->ip_tos, 0, sizeof tnl_mask->ip_tos);
        memset(&tnl_mask->ip_ttl, 0, sizeof tnl_mask->ip_ttl);
        memset(&tnl_mask->tp_src, 0, sizeof tnl_mask->tp_src);
        memset(&tnl_mask->tp_dst, 0, sizeof tnl_mask->tp_dst);

        memset(&tnl_mask->tun_id, 0, sizeof tnl_mask->tun_id);
        tnl_mask->flags &= ~FLOW_TNL_F_KEY;

        /* XXX: This is wrong! We're ignoring DF and CSUM flags configuration
         * requested by the user. However, TC for now has no way to pass
         * these flags in a flower key and their masks are set by default,
         * meaning tunnel offloading will not work at all if not cleared.
         * Keeping incorrect behavior for now. */
        tnl_mask->flags &= ~(FLOW_TNL_F_DONT_FRAGMENT | FLOW_TNL_F_CSUM);

        if (!strcmp(netdev_get_type(netdev), "geneve")) {
            flower_match_to_tun_opt(&flower, tnl, tnl_mask);
        }
        flower.tunnel = true;
    } else {
        /* There is no tunnel metadata to match on, but there could be some
         * mask bits set due to flow translation artifacts. Clear them. */
        memset(&mask->tunnel, 0, sizeof mask->tunnel);
    }

    flower.key.eth_type = key->dl_type;
    flower.mask.eth_type = mask->dl_type;
    if (mask->mpls_lse[0]) {
        flower.key.mpls_lse = key->mpls_lse[0];
        flower.mask.mpls_lse = mask->mpls_lse[0];
        flower.key.encap_eth_type[0] = flower.key.eth_type;
    }
    mask->mpls_lse[0] = 0;

    /* Outer VLAN: flower keys on the outer ethertype (the TPID), with the
     * inner ethertype moved to encap_eth_type[0]. */
    if (mask->vlans[0].tpid && eth_type_vlan(key->vlans[0].tpid)) {
        flower.key.encap_eth_type[0] = flower.key.eth_type;
        flower.mask.encap_eth_type[0] = CONSTANT_HTONS(0xffff);
        flower.key.eth_type = key->vlans[0].tpid;
        flower.mask.eth_type = mask->vlans[0].tpid;
    }
    if (mask->vlans[0].tci) {
        ovs_be16 vid_mask = mask->vlans[0].tci & htons(VLAN_VID_MASK);
        ovs_be16 pcp_mask = mask->vlans[0].tci & htons(VLAN_PCP_MASK);
        ovs_be16 cfi = mask->vlans[0].tci & htons(VLAN_CFI);

        /* Flower only supports exact matches on VID and PCP, so only fully
         * masked (or fully wildcarded) sub-fields are acceptable. */
        if (cfi && key->vlans[0].tci & htons(VLAN_CFI)
            && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
            && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
            && (vid_mask || pcp_mask)) {
            if (vid_mask) {
                flower.key.vlan_id[0] = vlan_tci_to_vid(key->vlans[0].tci);
                flower.mask.vlan_id[0] = vlan_tci_to_vid(mask->vlans[0].tci);
                VLOG_DBG_RL(&rl, "vlan_id[0]: %d\n", flower.key.vlan_id[0]);
            }
            if (pcp_mask) {
                flower.key.vlan_prio[0] = vlan_tci_to_pcp(key->vlans[0].tci);
                flower.mask.vlan_prio[0] = vlan_tci_to_pcp(mask->vlans[0].tci);
                VLOG_DBG_RL(&rl, "vlan_prio[0]: %d\n",
                            flower.key.vlan_prio[0]);
            }
        } else if (mask->vlans[0].tci == htons(0xffff) &&
                   ntohs(key->vlans[0].tci) == 0) {
            /* exact && no vlan */
        } else {
            /* partial mask */
            return EOPNOTSUPP;
        }
    }

    /* Inner (QinQ) VLAN: same scheme, shifting ethertypes one level down. */
    if (mask->vlans[1].tpid && eth_type_vlan(key->vlans[1].tpid)) {
        flower.key.encap_eth_type[1] = flower.key.encap_eth_type[0];
        flower.mask.encap_eth_type[1] = flower.mask.encap_eth_type[0];
        flower.key.encap_eth_type[0] = key->vlans[1].tpid;
        flower.mask.encap_eth_type[0] = mask->vlans[1].tpid;
    }
    if (mask->vlans[1].tci) {
        ovs_be16 vid_mask = mask->vlans[1].tci & htons(VLAN_VID_MASK);
        ovs_be16 pcp_mask = mask->vlans[1].tci & htons(VLAN_PCP_MASK);
        ovs_be16 cfi = mask->vlans[1].tci & htons(VLAN_CFI);

        if (cfi && key->vlans[1].tci & htons(VLAN_CFI)
            && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
            && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
            && (vid_mask || pcp_mask)) {
            if (vid_mask) {
                flower.key.vlan_id[1] = vlan_tci_to_vid(key->vlans[1].tci);
                flower.mask.vlan_id[1] = vlan_tci_to_vid(mask->vlans[1].tci);
                VLOG_DBG_RL(&rl, "vlan_id[1]: %d", flower.key.vlan_id[1]);
            }
            if (pcp_mask) {
                flower.key.vlan_prio[1] = vlan_tci_to_pcp(key->vlans[1].tci);
                flower.mask.vlan_prio[1] = vlan_tci_to_pcp(mask->vlans[1].tci);
                VLOG_DBG_RL(&rl, "vlan_prio[1]: %d", flower.key.vlan_prio[1]);
            }
        } else if (mask->vlans[1].tci == htons(0xffff) &&
                   ntohs(key->vlans[1].tci) == 0) {
            /* exact && no vlan */
        } else {
            /* partial mask */
            return EOPNOTSUPP;
        }
    }
    memset(mask->vlans, 0, sizeof mask->vlans);

    flower.key.dst_mac = key->dl_dst;
    flower.mask.dst_mac = mask->dl_dst;
    flower.key.src_mac = key->dl_src;
    flower.mask.src_mac = mask->dl_src;
    memset(&mask->dl_dst, 0, sizeof mask->dl_dst);
    memset(&mask->dl_src, 0, sizeof mask->dl_src);
    mask->dl_type = 0;
    mask->in_port.odp_port = 0;

    if (key->dl_type == htons(ETH_P_ARP)) {
        flower.key.arp.spa = key->nw_src;
        flower.key.arp.tpa = key->nw_dst;
        flower.key.arp.sha = key->arp_sha;
        flower.key.arp.tha = key->arp_tha;
        flower.key.arp.opcode = key->nw_proto;
        flower.mask.arp.spa = mask->nw_src;
        flower.mask.arp.tpa = mask->nw_dst;
        flower.mask.arp.sha = mask->arp_sha;
        flower.mask.arp.tha = mask->arp_tha;
        flower.mask.arp.opcode = mask->nw_proto;

        mask->nw_src = 0;
        mask->nw_dst = 0;
        mask->nw_proto = 0;
        memset(&mask->arp_sha, 0, sizeof mask->arp_sha);
        memset(&mask->arp_tha, 0, sizeof mask->arp_tha);
    }

    /* IPv6 fragments are not offloadable; leave their masks untouched so
     * the flow fails the leftover-mask check later. */
    if (is_ip_any(key) && !is_ipv6_fragment_and_masked(key, mask)) {
        flower.key.ip_proto = key->nw_proto;
        flower.mask.ip_proto = mask->nw_proto;
        mask->nw_proto = 0;
        flower.key.ip_tos = key->nw_tos;
        flower.mask.ip_tos = mask->nw_tos;
        mask->nw_tos = 0;
        flower.key.ip_ttl = key->nw_ttl;
        flower.mask.ip_ttl = mask->nw_ttl;
        mask->nw_ttl = 0;

        if (mask->nw_frag & FLOW_NW_FRAG_ANY) {
            flower.mask.flags |= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT;

            if (key->nw_frag & FLOW_NW_FRAG_ANY) {
                flower.key.flags |= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT;

                if (mask->nw_frag & FLOW_NW_FRAG_LATER) {
                    flower.mask.flags |= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST;

                    /* OVS "not later" corresponds to flower "first". */
                    if (!(key->nw_frag & FLOW_NW_FRAG_LATER)) {
                        flower.key.flags |= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST;
                    }
                }
            }

            mask->nw_frag = 0;
        }

        if (key->nw_proto == IPPROTO_TCP) {
            flower.key.tcp_dst = key->tp_dst;
            flower.mask.tcp_dst = mask->tp_dst;
            flower.key.tcp_src = key->tp_src;
            flower.mask.tcp_src = mask->tp_src;
            flower.key.tcp_flags = key->tcp_flags;
            flower.mask.tcp_flags = mask->tcp_flags;
            mask->tp_src = 0;
            mask->tp_dst = 0;
            mask->tcp_flags = 0;
        } else if (key->nw_proto == IPPROTO_UDP) {
            flower.key.udp_dst = key->tp_dst;
            flower.mask.udp_dst = mask->tp_dst;
            flower.key.udp_src = key->tp_src;
            flower.mask.udp_src = mask->tp_src;
            mask->tp_src = 0;
            mask->tp_dst = 0;
        } else if (key->nw_proto == IPPROTO_SCTP) {
            flower.key.sctp_dst = key->tp_dst;
            flower.mask.sctp_dst = mask->tp_dst;
            flower.key.sctp_src = key->tp_src;
            flower.mask.sctp_src = mask->tp_src;
            mask->tp_src = 0;
            mask->tp_dst = 0;
        } else if (key->nw_proto == IPPROTO_ICMP ||
                   key->nw_proto == IPPROTO_ICMPV6) {
            /* OVS stores ICMP type/code in the transport port fields. */
            flower.key.icmp_code = (uint8_t) ntohs(key->tp_dst);
            flower.mask.icmp_code = (uint8_t) ntohs (mask->tp_dst);
            flower.key.icmp_type = (uint8_t) ntohs(key->tp_src);
            flower.mask.icmp_type = (uint8_t) ntohs(mask->tp_src);
            mask->tp_src = 0;
            mask->tp_dst = 0;
        }

        if (key->dl_type == htons(ETH_P_IP)) {
            flower.key.ipv4.ipv4_src = key->nw_src;
            flower.mask.ipv4.ipv4_src = mask->nw_src;
            flower.key.ipv4.ipv4_dst = key->nw_dst;
            flower.mask.ipv4.ipv4_dst = mask->nw_dst;
            mask->nw_src = 0;
            mask->nw_dst = 0;
        } else if (key->dl_type == htons(ETH_P_IPV6)) {
            flower.key.ipv6.ipv6_src = key->ipv6_src;
            flower.mask.ipv6.ipv6_src = mask->ipv6_src;
            flower.key.ipv6.ipv6_dst = key->ipv6_dst;
            flower.mask.ipv6.ipv6_dst = mask->ipv6_dst;
            memset(&mask->ipv6_src, 0, sizeof mask->ipv6_src);
            memset(&mask->ipv6_dst, 0, sizeof mask->ipv6_dst);
        }
    }

    parse_match_ct_state_to_flower(&flower, match);

    /* ignore exact match on skb_mark of 0. */
    if (mask->pkt_mark == UINT32_MAX && !key->pkt_mark) {
        mask->pkt_mark = 0;
    }

    /* Reject the flow if any mask bits were not consumed above. */
    err = test_key_and_mask(match);
    if (err) {
        return err;
    }

    /* Parse all (nested) actions. */
    err = netdev_tc_parse_nl_actions(netdev, &flower, info,
                                     actions, actions_len, &recirc_act,
                                     false, NULL);
    if (err) {
        return err;
    }

    if ((chain || recirc_act) && !info->recirc_id_shared_with_tc) {
        VLOG_DBG_RL(&rl, "flow_put: recirc_id sharing not supported");
        return EOPNOTSUPP;
    }

    /* If this replaces an existing filter, delete it first and carry its
     * statistics over into 'adjust_stats'. */
    memset(&adjust_stats, 0, sizeof adjust_stats);
    if (get_ufid_tc_mapping(ufid, &id) == 0) {
        VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d",
                    id.handle, id.prio);
        info->tc_modify_flow_deleted = !del_filter_and_ufid_mapping(
            &id, ufid, &adjust_stats);
    }

    prio = get_prio_for_tc_flower(&flower);
    if (prio == 0) {
        VLOG_ERR_RL(&rl, "couldn't get tc prio: %s", ovs_strerror(ENOSPC));
        return ENOSPC;
    }

    /* Store the ufid in the action cookie so it can be recovered when the
     * filter is dumped back from the kernel. */
    flower.act_cookie.data = ufid;
    flower.act_cookie.len = sizeof *ufid;

    block_id = get_block_id_from_netdev(netdev);
    id = tc_make_tcf_id_chain(ifindex, block_id, chain, prio, hook);
    err = tc_replace_flower(&id, &flower);
    if (!err) {
        if (stats) {
            memset(stats, 0, sizeof *stats);
            netdev_tc_adjust_stats(stats, &adjust_stats);
        }
        add_ufid_tc_mapping(netdev, ufid, &id, &adjust_stats);
    }

    return err;
}
2477 | | |
2478 | | static int |
2479 | | netdev_tc_flow_get(struct netdev *netdev, |
2480 | | struct match *match, |
2481 | | struct nlattr **actions, |
2482 | | const ovs_u128 *ufid, |
2483 | | struct dpif_flow_stats *stats, |
2484 | | struct dpif_flow_attrs *attrs, |
2485 | | struct ofpbuf *buf) |
2486 | 0 | { |
2487 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20); |
2488 | 0 | struct tc_flower flower; |
2489 | 0 | odp_port_t in_port; |
2490 | 0 | struct tcf_id id; |
2491 | 0 | int err; |
2492 | |
|
2493 | 0 | err = get_ufid_tc_mapping(ufid, &id); |
2494 | 0 | if (err) { |
2495 | 0 | return err; |
2496 | 0 | } |
2497 | | |
2498 | 0 | VLOG_DBG_RL(&rl, "flow get (dev %s prio %d handle %d block_id %d)", |
2499 | 0 | netdev_get_name(netdev), id.prio, id.handle, id.block_id); |
2500 | |
|
2501 | 0 | err = tc_get_flower(&id, &flower); |
2502 | 0 | if (err) { |
2503 | 0 | VLOG_ERR_RL(&error_rl, |
2504 | 0 | "flow get failed (dev %s prio %d handle %d): %s", |
2505 | 0 | netdev_get_name(netdev), id.prio, id.handle, |
2506 | 0 | ovs_strerror(err)); |
2507 | 0 | return err; |
2508 | 0 | } |
2509 | | |
2510 | 0 | in_port = netdev_ifindex_to_odp_port(id.ifindex); |
2511 | 0 | err = parse_tc_flower_to_match(netdev, &flower, match, actions, |
2512 | 0 | stats, attrs, buf, false); |
2513 | 0 | if (err) { |
2514 | 0 | VLOG_ERR_RL(&error_rl, |
2515 | 0 | "flow get parse failed (dev %s prio %d handle %d): %s", |
2516 | 0 | netdev_get_name(netdev), id.prio, id.handle, |
2517 | 0 | ovs_strerror(err)); |
2518 | 0 | return err; |
2519 | 0 | } |
2520 | | |
2521 | 0 | if (stats) { |
2522 | 0 | struct dpif_flow_stats adjust_stats; |
2523 | |
|
2524 | 0 | if (!get_ufid_adjust_stats(ufid, &adjust_stats)) { |
2525 | 0 | netdev_tc_adjust_stats(stats, &adjust_stats); |
2526 | 0 | } |
2527 | 0 | } |
2528 | 0 | match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX); |
2529 | 0 | match->flow.in_port.odp_port = in_port; |
2530 | 0 | match_set_recirc_id(match, id.chain); |
2531 | |
|
2532 | 0 | return 0; |
2533 | 0 | } |
2534 | | |
2535 | | static int |
2536 | | netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED, |
2537 | | const ovs_u128 *ufid, |
2538 | | struct dpif_flow_stats *stats) |
2539 | 0 | { |
2540 | 0 | struct tcf_id id; |
2541 | 0 | int error; |
2542 | |
|
2543 | 0 | error = get_ufid_tc_mapping(ufid, &id); |
2544 | 0 | if (error) { |
2545 | 0 | return error; |
2546 | 0 | } |
2547 | | |
2548 | 0 | return del_filter_and_ufid_mapping(&id, ufid, stats); |
2549 | 0 | } |
2550 | | |
2551 | | static int |
2552 | | netdev_tc_get_n_flows(struct netdev *netdev, uint64_t *n_flows) |
2553 | 0 | { |
2554 | 0 | struct ufid_tc_data *data; |
2555 | 0 | uint64_t total = 0; |
2556 | |
|
2557 | 0 | ovs_mutex_lock(&ufid_lock); |
2558 | 0 | HMAP_FOR_EACH (data, tc_to_ufid_node, &tc_to_ufid) { |
2559 | 0 | if (data->netdev == netdev) { |
2560 | 0 | total++; |
2561 | 0 | } |
2562 | 0 | } |
2563 | 0 | ovs_mutex_unlock(&ufid_lock); |
2564 | |
|
2565 | 0 | *n_flows = total; |
2566 | 0 | return 0; |
2567 | 0 | } |
2568 | | |
/* Probes whether the kernel's tc flower classifier accepts two filters with
 * different masks at the same priority, and on success sets the
 * 'multi_mask_per_prio' flag (declared elsewhere in this file).  All probe
 * filters use TC_POLICY_SKIP_HW, so no hardware offload is attempted. */
static void
probe_multi_mask_per_prio(int ifindex)
{
    struct tc_flower flower;
    struct tcf_id id1, id2;
    int block_id = 0;
    int prio = 1;
    int error;

    /* Probe filters need an ingress qdisc to attach to. */
    error = tc_add_del_qdisc(ifindex, true, block_id, TC_INGRESS);
    if (error) {
        return;
    }

    memset(&flower, 0, sizeof flower);

    flower.tc_policy = TC_POLICY_SKIP_HW;
    flower.key.eth_type = htons(ETH_P_IP);
    flower.mask.eth_type = OVS_BE16_MAX;
    memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac);
    memset(&flower.mask.dst_mac, 0xff, sizeof flower.mask.dst_mac);

    /* First filter: masks eth_type + dst_mac only. */
    id1 = tc_make_tcf_id(ifindex, block_id, prio, TC_INGRESS);
    error = tc_replace_flower(&id1, &flower);
    if (error) {
        goto out;
    }

    /* Second filter at the same prio with a different mask (adds src_mac). */
    memset(&flower.key.src_mac, 0x11, sizeof flower.key.src_mac);
    memset(&flower.mask.src_mac, 0xff, sizeof flower.mask.src_mac);

    id2 = tc_make_tcf_id(ifindex, block_id, prio, TC_INGRESS);
    error = tc_replace_flower(&id2, &flower);
    tc_del_flower_filter(&id1);

    if (error) {
        goto out;
    }

    tc_del_flower_filter(&id2);

    multi_mask_per_prio = true;
    VLOG_INFO("probe tc: multiple masks on single tc prio is supported.");

out:
    /* Remove the qdisc added for the probe. */
    tc_add_del_qdisc(ifindex, false, block_id, TC_INGRESS);
}
2616 | | |
2617 | | |
2618 | | static int |
2619 | | probe_insert_ct_state_rule(int ifindex, uint16_t ct_state, struct tcf_id *id) |
2620 | 0 | { |
2621 | 0 | int prio = TC_RESERVED_PRIORITY_MAX + 1; |
2622 | 0 | struct tc_flower flower; |
2623 | |
|
2624 | 0 | memset(&flower, 0, sizeof flower); |
2625 | 0 | flower.key.ct_state = ct_state; |
2626 | 0 | flower.mask.ct_state = ct_state; |
2627 | 0 | flower.tc_policy = TC_POLICY_SKIP_HW; |
2628 | 0 | flower.key.eth_type = htons(ETH_P_IP); |
2629 | 0 | flower.mask.eth_type = OVS_BE16_MAX; |
2630 | |
|
2631 | 0 | *id = tc_make_tcf_id(ifindex, 0, prio, TC_INGRESS); |
2632 | 0 | return tc_replace_flower(id, &flower); |
2633 | 0 | } |
2634 | | |
/* Probes which conntrack state (ct_state) flags the kernel tc flower
 * classifier can match on, accumulating the result as OVS_CS_F_* bits in
 * 'ct_state_support' (declared elsewhere in this file).  Each capability is
 * tested by inserting a skip_hw probe rule and observing whether the kernel
 * accepts it (or, for the invalid-flags test, rejects it). */
static void
probe_ct_state_support(int ifindex)
{
    struct tc_flower flower;
    uint16_t ct_state;
    struct tcf_id id;
    int error;

    /* Probe filters need an ingress qdisc to attach to. */
    error = tc_add_del_qdisc(ifindex, true, 0, TC_INGRESS);
    if (error) {
        return;
    }

    /* Test for base ct_state match support */
    ct_state = TCA_FLOWER_KEY_CT_FLAGS_NEW | TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
    error = probe_insert_ct_state_rule(ifindex, ct_state, &id);
    if (error) {
        goto out;
    }

    /* Read the rule back to verify the kernel really installed the full
     * ct_state mask rather than silently dropping part of it. */
    error = tc_get_flower(&id, &flower);
    if (error || flower.mask.ct_state != ct_state) {
        goto out_del;
    }

    tc_del_flower_filter(&id);
    ct_state_support = OVS_CS_F_NEW |
                       OVS_CS_F_ESTABLISHED |
                       OVS_CS_F_TRACKED |
                       OVS_CS_F_RELATED;

    /* Test for reject, ct_state >= MAX.  A kernel that validates flags
     * must refuse this all-ones value; insertion succeeding means further
     * per-flag probes would be meaningless. */
    ct_state = ~0;
    error = probe_insert_ct_state_rule(ifindex, ct_state, &id);
    if (!error) {
        /* No reject, can't continue probing other flags */
        goto out_del;
    }

    tc_del_flower_filter(&id);

    /* Test for ct_state INVALID support */
    memset(&flower, 0, sizeof flower);
    ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
               TCA_FLOWER_KEY_CT_FLAGS_INVALID;
    error = probe_insert_ct_state_rule(ifindex, ct_state, &id);
    if (error) {
        goto out;
    }

    tc_del_flower_filter(&id);
    ct_state_support |= OVS_CS_F_INVALID;

    /* Test for ct_state REPLY support */
    memset(&flower, 0, sizeof flower);
    ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
               TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
               TCA_FLOWER_KEY_CT_FLAGS_REPLY;
    error = probe_insert_ct_state_rule(ifindex, ct_state, &id);
    if (error) {
        goto out;
    }

    ct_state_support |= OVS_CS_F_REPLY_DIR;

out_del:
    tc_del_flower_filter(&id);
out:
    /* Remove the qdisc added for the probe and report what was found. */
    tc_add_del_qdisc(ifindex, false, 0, TC_INGRESS);
    VLOG_INFO("probe tc: supported ovs ct_state bits: 0x%x", ct_state_support);
}
2706 | | |
2707 | | static void |
2708 | | probe_tc_block_support(int ifindex) |
2709 | 0 | { |
2710 | 0 | struct tc_flower flower; |
2711 | 0 | uint32_t block_id = 1; |
2712 | 0 | struct tcf_id id; |
2713 | 0 | int prio = 0; |
2714 | 0 | int error; |
2715 | |
|
2716 | 0 | error = tc_add_del_qdisc(ifindex, true, block_id, TC_INGRESS); |
2717 | 0 | if (error) { |
2718 | 0 | return; |
2719 | 0 | } |
2720 | | |
2721 | 0 | memset(&flower, 0, sizeof flower); |
2722 | |
|
2723 | 0 | flower.tc_policy = TC_POLICY_SKIP_HW; |
2724 | 0 | flower.key.eth_type = htons(ETH_P_IP); |
2725 | 0 | flower.mask.eth_type = OVS_BE16_MAX; |
2726 | 0 | memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac); |
2727 | 0 | memset(&flower.mask.dst_mac, 0xff, sizeof flower.mask.dst_mac); |
2728 | |
|
2729 | 0 | id = tc_make_tcf_id(ifindex, block_id, prio, TC_INGRESS); |
2730 | 0 | error = tc_replace_flower(&id, &flower); |
2731 | |
|
2732 | 0 | tc_add_del_qdisc(ifindex, false, block_id, TC_INGRESS); |
2733 | |
|
2734 | 0 | if (!error) { |
2735 | 0 | block_support = true; |
2736 | 0 | VLOG_INFO("probe tc: block offload is supported."); |
2737 | 0 | } |
2738 | 0 | } |
2739 | | |
2740 | | static int |
2741 | | tc_get_policer_action_ids(struct hmap *map) |
2742 | 0 | { |
2743 | 0 | uint32_t police_idx[TCA_ACT_MAX_PRIO]; |
2744 | 0 | struct policer_node *policer_node; |
2745 | 0 | struct netdev_flow_dump *dump; |
2746 | 0 | struct ofpbuf rbuffer, reply; |
2747 | 0 | size_t hash; |
2748 | 0 | int i, err; |
2749 | |
|
2750 | 0 | dump = xzalloc(sizeof *dump); |
2751 | 0 | dump->nl_dump = xzalloc(sizeof *dump->nl_dump); |
2752 | |
|
2753 | 0 | ofpbuf_init(&rbuffer, NL_DUMP_BUFSIZE); |
2754 | 0 | tc_dump_tc_action_start("police", dump->nl_dump); |
2755 | |
|
2756 | 0 | while (nl_dump_next(dump->nl_dump, &reply, &rbuffer)) { |
2757 | 0 | memset(police_idx, 0, sizeof police_idx); |
2758 | 0 | if (parse_netlink_to_tc_policer(&reply, police_idx)) { |
2759 | 0 | continue; |
2760 | 0 | } |
2761 | | |
2762 | 0 | for (i = 0; i < TCA_ACT_MAX_PRIO; i++) { |
2763 | 0 | if (!police_idx[i]) { |
2764 | 0 | break; |
2765 | 0 | } |
2766 | 0 | policer_node = xzalloc(sizeof *policer_node); |
2767 | 0 | policer_node->police_idx = police_idx[i]; |
2768 | 0 | hash = hash_int(police_idx[i], 0); |
2769 | 0 | hmap_insert(map, &policer_node->node, hash); |
2770 | 0 | } |
2771 | 0 | } |
2772 | |
|
2773 | 0 | err = nl_dump_done(dump->nl_dump); |
2774 | 0 | ofpbuf_uninit(&rbuffer); |
2775 | 0 | free(dump->nl_dump); |
2776 | 0 | free(dump); |
2777 | |
|
2778 | 0 | return err; |
2779 | 0 | } |
2780 | | |
/* Deletes leftover tc police actions whose indexes fall in
 * [id_min, id_max] -- e.g. remnants of a previous OVS run -- so those
 * indexes can be handed out again from 'police_ids'.  An index whose
 * deletion fails with anything other than ENOENT is pre-allocated in the
 * pool instead, so a policer that may still be installed is never reused. */
static void
tc_cleanup_policer_actions(struct id_pool *police_ids,
                           uint32_t id_min, uint32_t id_max)
{
    struct policer_node *policer_node;
    unsigned int unusable_ids = 0;
    uint32_t police_idx;
    struct hmap map;
    int err;

    /* Collect all currently installed police action indexes. */
    hmap_init(&map);
    tc_get_policer_action_ids(&map);

    HMAP_FOR_EACH_POP (policer_node, node, &map) {
        police_idx = policer_node->police_idx;
        if (police_idx >= id_min && police_idx <= id_max) {
            err = tc_del_policer_action(police_idx, NULL);
            if (err && err != ENOENT) {
                /* Don't use this police any more. */
                id_pool_add(police_ids, police_idx);

                unusable_ids++;
                VLOG_DBG("Policer index %u could not be freed for OVS, "
                         "error %d", police_idx, err);
            }
        }
        free(policer_node);
    }

    if (unusable_ids) {
        VLOG_WARN("Full policer index pool allocation failed, "
                  "%u indexes are unavailable", unusable_ids);
    }

    hmap_destroy(&map);
}
2817 | | |
/* Prepares 'netdev' for tc flow offloading.  Rejects vports that do not
 * belong to the "system" datapath, clears stale tc state left on the
 * device, runs one-time kernel capability probes (tc blocks, multiple
 * masks per prio, ct_state matching) and meter police-index pool setup,
 * and finally adds the qdisc (ingress or egress, per 'hook') that
 * offloaded filters attach to.  Returns 0 on success or a positive errno
 * value. */
static int
netdev_tc_init_flow_api(struct netdev *netdev)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
    static bool get_chain_supported = true;
    uint32_t block_id = 0;
    struct tcf_id id;
    int ifindex;
    int error;

    if (netdev_vport_is_vport_class(netdev->netdev_class)
        && strcmp(netdev_get_dpif_type(netdev), "system")) {
        VLOG_DBG("%s: vport doesn't belong to the system datapath. Skipping.",
                 netdev_get_name(netdev));
        return EOPNOTSUPP;
    }

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        VLOG_INFO("init: failed to get ifindex for %s: %s",
                  netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    block_id = get_block_id_from_netdev(netdev);
    id = tc_make_tcf_id(ifindex, block_id, 0, hook);

    /* Remove chains (and their filters) left over from a previous run,
     * if the kernel supports dumping/deleting chains. */
    if (get_chain_supported) {
        if (delete_chains_from_netdev(netdev, &id)) {
            get_chain_supported = false;
        }
    }

    /* fallback here if delete chains fail */
    if (!get_chain_supported) {
        tc_del_flower_filter(&id);
    }

    /* make sure there is no ingress/egress qdisc */
    tc_add_del_qdisc(ifindex, false, 0, hook);

    if (ovsthread_once_start(&once)) {
        probe_tc_block_support(ifindex);
        /* Need to re-fetch block id as it depends on feature availability. */
        block_id = get_block_id_from_netdev(netdev);

        probe_multi_mask_per_prio(ifindex);
        probe_ct_state_support(ifindex);

        /* Create the meter police index pool and reclaim any policer
         * actions left behind by a previous run. */
        ovs_mutex_lock(&meter_police_ids_mutex);
        meter_police_ids = id_pool_create(METER_POLICE_IDS_BASE,
                            METER_POLICE_IDS_MAX - METER_POLICE_IDS_BASE + 1);
        tc_cleanup_policer_actions(meter_police_ids, METER_POLICE_IDS_BASE,
                                   METER_POLICE_IDS_MAX);
        ovs_mutex_unlock(&meter_police_ids_mutex);

        ovsthread_once_done(&once);
    }

    error = tc_add_del_qdisc(ifindex, true, block_id, hook);

    /* EEXIST means the qdisc is already in place, which is fine. */
    if (error && error != EEXIST) {
        VLOG_INFO("failed adding ingress qdisc required for offloading: %s",
                  ovs_strerror(error));
        return error;
    }

    VLOG_INFO("added ingress qdisc to %s", netdev_get_name(netdev));

    return 0;
}
2890 | | |
2891 | | static struct meter_police_mapping_data * |
2892 | | meter_id_find_locked(uint32_t meter_id) |
2893 | | OVS_REQUIRES(meter_mutex) |
2894 | 0 | { |
2895 | 0 | struct meter_police_mapping_data *data; |
2896 | 0 | size_t hash = hash_int(meter_id, 0); |
2897 | |
|
2898 | 0 | HMAP_FOR_EACH_WITH_HASH (data, meter_id_node, hash, |
2899 | 0 | &meter_id_to_police_idx) { |
2900 | 0 | if (data->meter_id == meter_id) { |
2901 | 0 | return data; |
2902 | 0 | } |
2903 | 0 | } |
2904 | | |
2905 | 0 | return NULL; |
2906 | 0 | } |
2907 | | |
2908 | | static int |
2909 | | meter_id_lookup(uint32_t meter_id, uint32_t *police_idx) |
2910 | 0 | { |
2911 | 0 | struct meter_police_mapping_data *data; |
2912 | |
|
2913 | 0 | ovs_mutex_lock(&meter_mutex); |
2914 | 0 | data = meter_id_find_locked(meter_id); |
2915 | 0 | if (data) { |
2916 | 0 | *police_idx = data->police_idx; |
2917 | 0 | } |
2918 | 0 | ovs_mutex_unlock(&meter_mutex); |
2919 | |
|
2920 | 0 | return data ? 0 : ENOENT; |
2921 | 0 | } |
2922 | | |
2923 | | static int |
2924 | | police_idx_lookup(uint32_t police_idx, uint32_t *meter_id) |
2925 | 0 | { |
2926 | 0 | struct meter_police_mapping_data *data; |
2927 | 0 | size_t hash = hash_int(police_idx, 0); |
2928 | 0 | int err = ENOENT; |
2929 | |
|
2930 | 0 | ovs_mutex_lock(&meter_mutex); |
2931 | 0 | HMAP_FOR_EACH_WITH_HASH (data, police_idx_node, hash, |
2932 | 0 | &police_idx_to_meter_id) { |
2933 | 0 | if (data->police_idx == police_idx) { |
2934 | 0 | *meter_id = data->meter_id; |
2935 | 0 | err = 0; |
2936 | 0 | break; |
2937 | 0 | } |
2938 | 0 | } |
2939 | 0 | ovs_mutex_unlock(&meter_mutex); |
2940 | |
|
2941 | 0 | return err; |
2942 | 0 | } |
2943 | | |
2944 | | static void |
2945 | | meter_id_insert(uint32_t meter_id, uint32_t police_idx) |
2946 | 0 | { |
2947 | 0 | struct meter_police_mapping_data *data; |
2948 | |
|
2949 | 0 | ovs_mutex_lock(&meter_mutex); |
2950 | 0 | data = xzalloc(sizeof *data); |
2951 | 0 | data->meter_id = meter_id; |
2952 | 0 | data->police_idx = police_idx; |
2953 | 0 | hmap_insert(&meter_id_to_police_idx, &data->meter_id_node, |
2954 | 0 | hash_int(meter_id, 0)); |
2955 | 0 | hmap_insert(&police_idx_to_meter_id, &data->police_idx_node, |
2956 | 0 | hash_int(police_idx, 0)); |
2957 | 0 | ovs_mutex_unlock(&meter_mutex); |
2958 | 0 | } |
2959 | | |
2960 | | static void |
2961 | | meter_id_remove(uint32_t meter_id) |
2962 | 0 | { |
2963 | 0 | struct meter_police_mapping_data *data; |
2964 | |
|
2965 | 0 | ovs_mutex_lock(&meter_mutex); |
2966 | 0 | data = meter_id_find_locked(meter_id); |
2967 | 0 | if (data) { |
2968 | 0 | hmap_remove(&meter_id_to_police_idx, &data->meter_id_node); |
2969 | 0 | hmap_remove(&police_idx_to_meter_id, &data->police_idx_node); |
2970 | 0 | free(data); |
2971 | 0 | } |
2972 | 0 | ovs_mutex_unlock(&meter_mutex); |
2973 | 0 | } |
2974 | | |
2975 | | static bool |
2976 | | meter_alloc_police_index(uint32_t *police_index) |
2977 | 0 | { |
2978 | 0 | bool ret; |
2979 | |
|
2980 | 0 | ovs_mutex_lock(&meter_police_ids_mutex); |
2981 | 0 | ret = id_pool_alloc_id(meter_police_ids, police_index); |
2982 | 0 | ovs_mutex_unlock(&meter_police_ids_mutex); |
2983 | |
|
2984 | 0 | return ret; |
2985 | 0 | } |
2986 | | |
/* Returns 'police_index' to the shared meter police index pool. */
static void
meter_free_police_index(uint32_t police_index)
{
    ovs_mutex_lock(&meter_police_ids_mutex);
    id_pool_free_id(meter_police_ids, police_index);
    ovs_mutex_unlock(&meter_police_ids_mutex);
}
2994 | | |
/* Creates or updates the tc police action implementing OpenFlow meter
 * 'meter_id' from 'config'.  Only a single DROP band is supported; any
 * other configuration is accepted as a no-op (returns 0 without
 * installing anything).  Returns 0 on success or a positive errno value. */
static int
meter_tc_set_policer(ofproto_meter_id meter_id,
                     struct ofputil_meter_config *config)
{
    uint32_t police_index;
    uint32_t rate, burst;
    bool add_policer;
    int err;

    if (!config->bands || config->n_bands < 1 ||
        config->bands[0].type != OFPMBT13_DROP) {
        return 0;
    }

    rate = config->bands[0].rate;
    if (config->flags & OFPMF13_BURST) {
        burst = config->bands[0].burst_size;
    } else {
        /* No explicit burst configured: default the burst to the rate. */
        burst = config->bands[0].rate;
    }

    /* A meter with no existing mapping needs a fresh police index. */
    add_policer = (meter_id_lookup(meter_id.uint32, &police_index) == ENOENT);
    if (add_policer) {
        if (!meter_alloc_police_index(&police_index)) {
            VLOG_WARN_RL(&warn_rl, "No free police index for meter id %u",
                         meter_id.uint32);
            return ENOENT;
        }
    }

    /* Pass rate/burst as the bytes or packets pair depending on the meter
     * flags; the unused pair is zero. */
    err = tc_add_policer_action(police_index,
                                (config->flags & OFPMF13_KBPS) ? rate : 0,
                                (config->flags & OFPMF13_KBPS) ? burst : 0,
                                (config->flags & OFPMF13_PKTPS) ? rate : 0,
                                (config->flags & OFPMF13_PKTPS) ? burst : 0,
                                !add_policer);
    if (err) {
        VLOG_WARN_RL(&warn_rl,
                     "Failed to %s police %u for meter id %u: %s",
                     add_policer ? "add" : "modify",
                     police_index, meter_id.uint32, ovs_strerror(err));
    }

    if (add_policer) {
        if (!err) {
            meter_id_insert(meter_id.uint32, police_index);
        } else {
            /* The freshly allocated index was never installed; return it
             * to the pool. */
            meter_free_police_index(police_index);
        }
    }

    return err;
}
3048 | | |
3049 | | static int |
3050 | | meter_tc_get_policer(ofproto_meter_id meter_id, |
3051 | | struct ofputil_meter_stats *stats) |
3052 | 0 | { |
3053 | 0 | uint32_t police_index; |
3054 | 0 | int err = ENOENT; |
3055 | |
|
3056 | 0 | if (!meter_id_lookup(meter_id.uint32, &police_index)) { |
3057 | 0 | err = tc_get_policer_action(police_index, stats); |
3058 | 0 | if (err) { |
3059 | 0 | VLOG_WARN_RL(&warn_rl, |
3060 | 0 | "Failed to get police %u stats for meter %u: %s", |
3061 | 0 | police_index, meter_id.uint32, ovs_strerror(err)); |
3062 | 0 | } |
3063 | 0 | } |
3064 | |
|
3065 | 0 | return err; |
3066 | 0 | } |
3067 | | |
/* Deletes the tc police action backing meter 'meter_id', passing 'stats'
 * through to tc_del_policer_action() to capture its final statistics.
 * The meter <-> police mapping is always removed, but the police index is
 * only returned to the free pool when the kernel delete succeeded (or the
 * action was already gone), so an index whose policer may still be
 * installed is never reused.  Returns 0 on success or a positive errno
 * value (ENOENT if no mapping existed). */
static int
meter_tc_del_policer(ofproto_meter_id meter_id,
                     struct ofputil_meter_stats *stats)
{
    uint32_t police_index;
    int err = ENOENT;

    if (!meter_id_lookup(meter_id.uint32, &police_index)) {
        err = tc_del_policer_action(police_index, stats);
        if (err && err != ENOENT) {
            VLOG_ERR_RL(&error_rl,
                        "Failed to del police %u for meter %u: %s",
                        police_index, meter_id.uint32, ovs_strerror(err));
        } else {
            meter_free_police_index(police_index);
        }
        meter_id_remove(meter_id.uint32);
    }

    return err;
}
3089 | | |
/* The tc-based implementation of the netdev flow offload API, registered
 * under the type "linux_tc". */
const struct netdev_flow_api netdev_offload_tc = {
   .type = "linux_tc",
   .flow_flush = netdev_tc_flow_flush,
   .flow_dump_create = netdev_tc_flow_dump_create,
   .flow_dump_destroy = netdev_tc_flow_dump_destroy,
   .flow_dump_next = netdev_tc_flow_dump_next,
   .flow_put = netdev_tc_flow_put,
   .flow_get = netdev_tc_flow_get,
   .flow_del = netdev_tc_flow_del,
   .flow_get_n_flows = netdev_tc_get_n_flows,
   .meter_set = meter_tc_set_policer,
   .meter_get = meter_tc_get_policer,
   .meter_del = meter_tc_del_policer,
   .init_flow_api = netdev_tc_init_flow_api,
};