/src/openvswitch/lib/odp-execute.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc. |
3 | | * Copyright (c) 2013 Simon Horman |
4 | | * |
5 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
6 | | * you may not use this file except in compliance with the License. |
7 | | * You may obtain a copy of the License at: |
8 | | * |
9 | | * http://www.apache.org/licenses/LICENSE-2.0 |
10 | | * |
11 | | * Unless required by applicable law or agreed to in writing, software |
12 | | * distributed under the License is distributed on an "AS IS" BASIS, |
13 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
14 | | * See the License for the specific language governing permissions and |
15 | | * limitations under the License. |
16 | | */ |
17 | | |
18 | | #include <config.h> |
19 | | #include "odp-execute.h" |
20 | | #include "odp-execute-private.h" |
21 | | #include <sys/types.h> |
22 | | #include <netinet/in.h> |
23 | | #include <arpa/inet.h> |
24 | | #include <netinet/icmp6.h> |
25 | | #include <netinet/ip6.h> |
26 | | #include <stdlib.h> |
27 | | #include <string.h> |
28 | | |
29 | | #include "coverage.h" |
30 | | #include "dp-packet.h" |
31 | | #include "dpif.h" |
32 | | #include "netlink.h" |
33 | | #include "odp-netlink.h" |
34 | | #include "odp-util.h" |
35 | | #include "packets.h" |
36 | | #include "flow.h" |
37 | | #include "unaligned.h" |
38 | | #include "util.h" |
39 | | #include "csum.h" |
40 | | #include "conntrack.h" |
41 | | #include "openvswitch/vlog.h" |
42 | | #include "unixctl.h" |
43 | | |
VLOG_DEFINE_THIS_MODULE(odp_execute);
/* datapath_drop_* counters track packets dropped due to execution errors
 * (datapath_drop_sample_error is bumped in odp_execute_sample()). */
COVERAGE_DEFINE(datapath_drop_sample_error);
COVERAGE_DEFINE(datapath_drop_nsh_decap_error);
/* drop_action_* counters record why an explicit drop action fired; they are
 * selected by xlate_error in dp_update_drop_action_counter(). */
COVERAGE_DEFINE(drop_action_of_pipeline);
COVERAGE_DEFINE(drop_action_bridge_not_found);
COVERAGE_DEFINE(drop_action_recursion_too_deep);
COVERAGE_DEFINE(drop_action_too_many_resubmit);
COVERAGE_DEFINE(drop_action_stack_too_deep);
COVERAGE_DEFINE(drop_action_no_recirculation_context);
COVERAGE_DEFINE(drop_action_recirculation_conflict);
COVERAGE_DEFINE(drop_action_too_many_mpls_labels);
COVERAGE_DEFINE(drop_action_invalid_tunnel_metadata);
COVERAGE_DEFINE(drop_action_unsupported_packet_type);
COVERAGE_DEFINE(drop_action_congestion);
COVERAGE_DEFINE(drop_action_forwarding_disabled);
COVERAGE_DEFINE(drop_action_tunnel_routing_failed);
COVERAGE_DEFINE(drop_action_tunnel_output_no_ethernet);
COVERAGE_DEFINE(drop_action_tunnel_neigh_cache_miss);
COVERAGE_DEFINE(drop_action_tunnel_header_build_failed);
64 | | static void |
65 | | dp_update_drop_action_counter(enum xlate_error drop_reason, |
66 | | int delta) |
67 | 0 | { |
68 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
69 | |
|
70 | 0 | switch (drop_reason) { |
71 | 0 | case XLATE_OK: |
72 | 0 | COVERAGE_ADD(drop_action_of_pipeline, delta); |
73 | 0 | break; |
74 | 0 | case XLATE_BRIDGE_NOT_FOUND: |
75 | 0 | COVERAGE_ADD(drop_action_bridge_not_found, delta); |
76 | 0 | break; |
77 | 0 | case XLATE_RECURSION_TOO_DEEP: |
78 | 0 | COVERAGE_ADD(drop_action_recursion_too_deep, delta); |
79 | 0 | break; |
80 | 0 | case XLATE_TOO_MANY_RESUBMITS: |
81 | 0 | COVERAGE_ADD(drop_action_too_many_resubmit, delta); |
82 | 0 | break; |
83 | 0 | case XLATE_STACK_TOO_DEEP: |
84 | 0 | COVERAGE_ADD(drop_action_stack_too_deep, delta); |
85 | 0 | break; |
86 | 0 | case XLATE_NO_RECIRCULATION_CONTEXT: |
87 | 0 | COVERAGE_ADD(drop_action_no_recirculation_context, delta); |
88 | 0 | break; |
89 | 0 | case XLATE_RECIRCULATION_CONFLICT: |
90 | 0 | COVERAGE_ADD(drop_action_recirculation_conflict, delta); |
91 | 0 | break; |
92 | 0 | case XLATE_TOO_MANY_MPLS_LABELS: |
93 | 0 | COVERAGE_ADD(drop_action_too_many_mpls_labels, delta); |
94 | 0 | break; |
95 | 0 | case XLATE_INVALID_TUNNEL_METADATA: |
96 | 0 | COVERAGE_ADD(drop_action_invalid_tunnel_metadata, delta); |
97 | 0 | break; |
98 | 0 | case XLATE_UNSUPPORTED_PACKET_TYPE: |
99 | 0 | COVERAGE_ADD(drop_action_unsupported_packet_type, delta); |
100 | 0 | break; |
101 | 0 | case XLATE_CONGESTION_DROP: |
102 | 0 | COVERAGE_ADD(drop_action_congestion, delta); |
103 | 0 | break; |
104 | 0 | case XLATE_FORWARDING_DISABLED: |
105 | 0 | COVERAGE_ADD(drop_action_forwarding_disabled, delta); |
106 | 0 | break; |
107 | 0 | case XLATE_TUNNEL_ROUTING_FAILED: |
108 | 0 | COVERAGE_ADD(drop_action_tunnel_routing_failed, delta); |
109 | 0 | break; |
110 | 0 | case XLATE_TUNNEL_OUTPUT_NO_ETHERNET: |
111 | 0 | COVERAGE_ADD(drop_action_tunnel_output_no_ethernet, delta); |
112 | 0 | break; |
113 | 0 | case XLATE_TUNNEL_NEIGH_CACHE_MISS: |
114 | 0 | COVERAGE_ADD(drop_action_tunnel_neigh_cache_miss, delta); |
115 | 0 | break; |
116 | 0 | case XLATE_TUNNEL_HEADER_BUILD_FAILED: |
117 | 0 | COVERAGE_ADD(drop_action_tunnel_header_build_failed, delta); |
118 | 0 | break; |
119 | 0 | case XLATE_MAX: |
120 | 0 | default: |
121 | 0 | VLOG_ERR_RL(&rl, "Invalid Drop reason type: %d", drop_reason); |
122 | 0 | } |
123 | 0 | } |
124 | | |
125 | | /* Masked copy of an ethernet address. 'src' is already properly masked. */ |
126 | | static void |
127 | | ether_addr_copy_masked(struct eth_addr *dst, const struct eth_addr src, |
128 | | const struct eth_addr mask) |
129 | 0 | { |
130 | 0 | int i; |
131 | |
|
132 | 0 | for (i = 0; i < ARRAY_SIZE(dst->be16); i++) { |
133 | 0 | dst->be16[i] = src.be16[i] | (dst->be16[i] & ~mask.be16[i]); |
134 | 0 | } |
135 | 0 | } |
136 | | |
137 | | static void |
138 | | odp_eth_set_addrs(struct dp_packet *packet, const struct ovs_key_ethernet *key, |
139 | | const struct ovs_key_ethernet *mask) |
140 | 0 | { |
141 | 0 | struct eth_header *eh = dp_packet_eth(packet); |
142 | |
|
143 | 0 | if (eh) { |
144 | 0 | if (!mask) { |
145 | 0 | eh->eth_src = key->eth_src; |
146 | 0 | eh->eth_dst = key->eth_dst; |
147 | 0 | } else { |
148 | 0 | ether_addr_copy_masked(&eh->eth_src, key->eth_src, mask->eth_src); |
149 | 0 | ether_addr_copy_masked(&eh->eth_dst, key->eth_dst, mask->eth_dst); |
150 | 0 | } |
151 | 0 | } |
152 | 0 | } |
153 | | |
154 | | static void |
155 | | odp_set_ipv4(struct dp_packet *packet, const struct ovs_key_ipv4 *key, |
156 | | const struct ovs_key_ipv4 *mask) |
157 | 0 | { |
158 | 0 | struct ip_header *nh = dp_packet_l3(packet); |
159 | 0 | ovs_be32 ip_src_nh; |
160 | 0 | ovs_be32 ip_dst_nh; |
161 | 0 | ovs_be32 new_ip_src; |
162 | 0 | ovs_be32 new_ip_dst; |
163 | 0 | uint8_t new_tos; |
164 | 0 | uint8_t new_ttl; |
165 | |
|
166 | 0 | ovs_assert(nh); |
167 | |
|
168 | 0 | if (mask->ipv4_src) { |
169 | 0 | ip_src_nh = get_16aligned_be32(&nh->ip_src); |
170 | 0 | new_ip_src = key->ipv4_src | (ip_src_nh & ~mask->ipv4_src); |
171 | |
|
172 | 0 | if (ip_src_nh != new_ip_src) { |
173 | 0 | packet_set_ipv4_addr(packet, &nh->ip_src, new_ip_src); |
174 | 0 | } |
175 | 0 | } |
176 | |
|
177 | 0 | if (mask->ipv4_dst) { |
178 | 0 | ip_dst_nh = get_16aligned_be32(&nh->ip_dst); |
179 | 0 | new_ip_dst = key->ipv4_dst | (ip_dst_nh & ~mask->ipv4_dst); |
180 | |
|
181 | 0 | if (ip_dst_nh != new_ip_dst) { |
182 | 0 | packet_set_ipv4_addr(packet, &nh->ip_dst, new_ip_dst); |
183 | 0 | } |
184 | 0 | } |
185 | |
|
186 | 0 | if (mask->ipv4_tos) { |
187 | 0 | new_tos = key->ipv4_tos | (nh->ip_tos & ~mask->ipv4_tos); |
188 | |
|
189 | 0 | if (nh->ip_tos != new_tos) { |
190 | 0 | if (dp_packet_ip_checksum_valid(packet)) { |
191 | 0 | dp_packet_ip_checksum_set_partial(packet); |
192 | 0 | } else { |
193 | 0 | nh->ip_csum = recalc_csum16(nh->ip_csum, |
194 | 0 | htons((uint16_t) nh->ip_tos), |
195 | 0 | htons((uint16_t) new_tos)); |
196 | 0 | } |
197 | |
|
198 | 0 | nh->ip_tos = new_tos; |
199 | 0 | } |
200 | 0 | } |
201 | |
|
202 | 0 | if (OVS_LIKELY(mask->ipv4_ttl)) { |
203 | 0 | new_ttl = key->ipv4_ttl | (nh->ip_ttl & ~mask->ipv4_ttl); |
204 | |
|
205 | 0 | if (OVS_LIKELY(nh->ip_ttl != new_ttl)) { |
206 | 0 | if (dp_packet_ip_checksum_valid(packet)) { |
207 | 0 | dp_packet_ip_checksum_set_partial(packet); |
208 | 0 | } else { |
209 | 0 | nh->ip_csum = recalc_csum16(nh->ip_csum, |
210 | 0 | htons(nh->ip_ttl << 8), |
211 | 0 | htons(new_ttl << 8)); |
212 | 0 | } |
213 | |
|
214 | 0 | nh->ip_ttl = new_ttl; |
215 | 0 | } |
216 | 0 | } |
217 | 0 | } |
218 | | |
219 | | static struct in6_addr * |
220 | | mask_ipv6_addr(const ovs_16aligned_be32 *old, const struct in6_addr *addr, |
221 | | const struct in6_addr *mask, struct in6_addr *masked) |
222 | 0 | { |
223 | 0 | #ifdef s6_addr32 |
224 | 0 | for (int i = 0; i < 4; i++) { |
225 | 0 | masked->s6_addr32[i] = addr->s6_addr32[i] |
226 | 0 | | (get_16aligned_be32(&old[i]) & ~mask->s6_addr32[i]); |
227 | 0 | } |
228 | | #else |
229 | | const uint8_t *old8 = (const uint8_t *)old; |
230 | | for (int i = 0; i < 16; i++) { |
231 | | masked->s6_addr[i] = addr->s6_addr[i] | (old8[i] & ~mask->s6_addr[i]); |
232 | | } |
233 | | #endif |
234 | 0 | return masked; |
235 | 0 | } |
236 | | |
237 | | static void |
238 | | odp_set_ipv6(struct dp_packet *packet, const struct ovs_key_ipv6 *key, |
239 | | const struct ovs_key_ipv6 *mask) |
240 | 0 | { |
241 | 0 | struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(packet); |
242 | 0 | struct in6_addr sbuf, dbuf; |
243 | 0 | uint8_t old_tc = ntohl(get_16aligned_be32(&nh->ip6_flow)) >> 20; |
244 | 0 | ovs_be32 old_fl = get_16aligned_be32(&nh->ip6_flow) & htonl(0xfffff); |
245 | |
|
246 | 0 | packet_set_ipv6( |
247 | 0 | packet, |
248 | 0 | mask_ipv6_addr(nh->ip6_src.be32, &key->ipv6_src, &mask->ipv6_src, |
249 | 0 | &sbuf), |
250 | 0 | mask_ipv6_addr(nh->ip6_dst.be32, &key->ipv6_dst, &mask->ipv6_dst, |
251 | 0 | &dbuf), |
252 | 0 | key->ipv6_tclass | (old_tc & ~mask->ipv6_tclass), |
253 | 0 | key->ipv6_label | (old_fl & ~mask->ipv6_label), |
254 | 0 | key->ipv6_hlimit | (nh->ip6_hlim & ~mask->ipv6_hlimit)); |
255 | 0 | } |
256 | | |
257 | | static void |
258 | | odp_set_tcp(struct dp_packet *packet, const struct ovs_key_tcp *key, |
259 | | const struct ovs_key_tcp *mask) |
260 | 0 | { |
261 | 0 | struct tcp_header *th = dp_packet_l4(packet); |
262 | |
|
263 | 0 | if (OVS_LIKELY(th && dp_packet_get_tcp_payload(packet))) { |
264 | 0 | packet_set_tcp_port(packet, |
265 | 0 | key->tcp_src | (th->tcp_src & ~mask->tcp_src), |
266 | 0 | key->tcp_dst | (th->tcp_dst & ~mask->tcp_dst)); |
267 | 0 | } |
268 | 0 | } |
269 | | |
270 | | static void |
271 | | odp_set_udp(struct dp_packet *packet, const struct ovs_key_udp *key, |
272 | | const struct ovs_key_udp *mask) |
273 | 0 | { |
274 | 0 | struct udp_header *uh = dp_packet_l4(packet); |
275 | |
|
276 | 0 | if (OVS_LIKELY(uh && dp_packet_get_udp_payload(packet))) { |
277 | 0 | packet_set_udp_port(packet, |
278 | 0 | key->udp_src | (uh->udp_src & ~mask->udp_src), |
279 | 0 | key->udp_dst | (uh->udp_dst & ~mask->udp_dst)); |
280 | 0 | } |
281 | 0 | } |
282 | | |
283 | | static void |
284 | | odp_set_sctp(struct dp_packet *packet, const struct ovs_key_sctp *key, |
285 | | const struct ovs_key_sctp *mask) |
286 | 0 | { |
287 | 0 | struct sctp_header *sh = dp_packet_l4(packet); |
288 | |
|
289 | 0 | if (OVS_LIKELY(sh && dp_packet_get_sctp_payload(packet))) { |
290 | 0 | packet_set_sctp_port(packet, |
291 | 0 | key->sctp_src | (sh->sctp_src & ~mask->sctp_src), |
292 | 0 | key->sctp_dst | (sh->sctp_dst & ~mask->sctp_dst)); |
293 | 0 | } |
294 | 0 | } |
295 | | |
/* Parses the tunnel attribute 'a' into 'tun_key'.  The attribute is
 * expected to have been validated already, so a parse failure here is a
 * programming error and aborts.
 *
 * NOTE(review): the parse happens inside ovs_assert(), so correctness
 * relies on ovs_assert always evaluating its argument (i.e. not being
 * compiled out in release builds) -- confirm against util.h. */
static void
odp_set_tunnel_action(const struct nlattr *a, struct flow_tnl *tun_key)
{
    ovs_assert(odp_tun_key_from_attr(a, tun_key, NULL) != ODP_FIT_ERROR);
}
301 | | |
302 | | static void |
303 | | set_arp(struct dp_packet *packet, const struct ovs_key_arp *key, |
304 | | const struct ovs_key_arp *mask) |
305 | 0 | { |
306 | 0 | struct arp_eth_header *arp = dp_packet_l3(packet); |
307 | |
|
308 | 0 | ovs_assert(arp); |
309 | |
|
310 | 0 | if (!mask) { |
311 | 0 | arp->ar_op = key->arp_op; |
312 | 0 | arp->ar_sha = key->arp_sha; |
313 | 0 | put_16aligned_be32(&arp->ar_spa, key->arp_sip); |
314 | 0 | arp->ar_tha = key->arp_tha; |
315 | 0 | put_16aligned_be32(&arp->ar_tpa, key->arp_tip); |
316 | 0 | } else { |
317 | 0 | ovs_be32 ar_spa = get_16aligned_be32(&arp->ar_spa); |
318 | 0 | ovs_be32 ar_tpa = get_16aligned_be32(&arp->ar_tpa); |
319 | |
|
320 | 0 | arp->ar_op = key->arp_op | (arp->ar_op & ~mask->arp_op); |
321 | 0 | ether_addr_copy_masked(&arp->ar_sha, key->arp_sha, mask->arp_sha); |
322 | 0 | put_16aligned_be32(&arp->ar_spa, |
323 | 0 | key->arp_sip | (ar_spa & ~mask->arp_sip)); |
324 | 0 | ether_addr_copy_masked(&arp->ar_tha, key->arp_tha, mask->arp_tha); |
325 | 0 | put_16aligned_be32(&arp->ar_tpa, |
326 | 0 | key->arp_tip | (ar_tpa & ~mask->arp_tip)); |
327 | 0 | } |
328 | 0 | } |
329 | | |
330 | | static void |
331 | | odp_set_nd_ext(struct dp_packet *packet, const struct ovs_key_nd_extensions |
332 | | *key, const struct ovs_key_nd_extensions *mask) |
333 | 0 | { |
334 | 0 | const struct ovs_nd_msg *ns = dp_packet_l4(packet); |
335 | 0 | ovs_16aligned_be32 reserved = ns->rso_flags; |
336 | 0 | uint8_t opt_type = ns->options[0].type; |
337 | |
|
338 | 0 | if (mask->nd_reserved) { |
339 | 0 | put_16aligned_be32(&reserved, key->nd_reserved); |
340 | 0 | } |
341 | 0 | if (mask->nd_options_type) { |
342 | 0 | opt_type = key->nd_options_type; |
343 | 0 | } |
344 | 0 | packet_set_nd_ext(packet, reserved, opt_type); |
345 | 0 | } |
346 | | |
347 | | static void |
348 | | odp_set_nd(struct dp_packet *packet, const struct ovs_key_nd *key, |
349 | | const struct ovs_key_nd *mask) |
350 | 0 | { |
351 | 0 | const struct ovs_nd_msg *ns = dp_packet_l4(packet); |
352 | 0 | const struct ovs_nd_lla_opt *lla_opt = dp_packet_get_nd_payload(packet); |
353 | |
|
354 | 0 | if (OVS_LIKELY(ns && lla_opt)) { |
355 | 0 | int bytes_remain = dp_packet_l4_size(packet) - sizeof(*ns); |
356 | 0 | struct in6_addr tgt_buf; |
357 | 0 | struct eth_addr sll_buf = eth_addr_zero; |
358 | 0 | struct eth_addr tll_buf = eth_addr_zero; |
359 | |
|
360 | 0 | while (bytes_remain >= ND_LLA_OPT_LEN && lla_opt->len != 0) { |
361 | 0 | if (lla_opt->type == ND_OPT_SOURCE_LINKADDR |
362 | 0 | && lla_opt->len == 1) { |
363 | 0 | sll_buf = lla_opt->mac; |
364 | 0 | ether_addr_copy_masked(&sll_buf, key->nd_sll, mask->nd_sll); |
365 | | |
366 | | /* A packet can only contain one SLL or TLL option */ |
367 | 0 | break; |
368 | 0 | } else if (lla_opt->type == ND_OPT_TARGET_LINKADDR |
369 | 0 | && lla_opt->len == 1) { |
370 | 0 | tll_buf = lla_opt->mac; |
371 | 0 | ether_addr_copy_masked(&tll_buf, key->nd_tll, mask->nd_tll); |
372 | | |
373 | | /* A packet can only contain one SLL or TLL option */ |
374 | 0 | break; |
375 | 0 | } |
376 | | |
377 | 0 | lla_opt += lla_opt->len; |
378 | 0 | bytes_remain -= lla_opt->len * ND_LLA_OPT_LEN; |
379 | 0 | } |
380 | |
|
381 | 0 | packet_set_nd(packet, |
382 | 0 | mask_ipv6_addr(ns->target.be32, &key->nd_target, |
383 | 0 | &mask->nd_target, &tgt_buf), |
384 | 0 | sll_buf, |
385 | 0 | tll_buf); |
386 | 0 | } |
387 | 0 | } |
388 | | |
/* Set the NSH header. Assumes the NSH header is present and matches the
 * MD format of the key. The slow path must take care of that.
 *
 * With 'has_mask' false the keyed fields overwrite the header outright;
 * otherwise each field keeps the bits its mask leaves clear
 * (new = key | (old & ~mask)).  Only MD type 1 context data can be set;
 * other metadata formats are left unchanged.
 *
 * NOTE(review): the return value of odp_nsh_key_from_attr() is ignored --
 * presumably the attribute was validated earlier; confirm at the caller. */
static void
odp_set_nsh(struct dp_packet *packet, const struct nlattr *a, bool has_mask)
{
    struct ovs_key_nsh key, mask;
    struct nsh_hdr *nsh = dp_packet_l3(packet);
    uint8_t mdtype = nsh_md_type(nsh);
    ovs_be32 path_hdr;

    if (has_mask) {
        odp_nsh_key_from_attr(a, &key, &mask, NULL);
    } else {
        odp_nsh_key_from_attr(a, &key, NULL, NULL);
    }

    if (!has_mask) {
        /* Unmasked set: copy flags/TTL, path header, and (for MD1) the
         * four context words straight from the key. */
        nsh_set_flags_and_ttl(nsh, key.flags, key.ttl);
        put_16aligned_be32(&nsh->path_hdr, key.path_hdr);
        switch (mdtype) {
        case NSH_M_TYPE1:
            for (int i = 0; i < 4; i++) {
                put_16aligned_be32(&nsh->md1.context[i], key.context[i]);
            }
            break;
        case NSH_M_TYPE2:
        default:
            /* No support for setting any other metadata format yet. */
            break;
        }
    } else {
        /* Masked set: merge each field with the packet's current value. */
        uint8_t flags = nsh_get_flags(nsh);
        uint8_t ttl = nsh_get_ttl(nsh);

        flags = key.flags | (flags & ~mask.flags);
        ttl = key.ttl | (ttl & ~mask.ttl);
        nsh_set_flags_and_ttl(nsh, flags, ttl);

        /* SPI and SI are sub-fields of the 32-bit path header and are
         * merged separately. */
        uint32_t spi = ntohl(nsh_get_spi(nsh));
        uint8_t si = nsh_get_si(nsh);
        uint32_t spi_mask = nsh_path_hdr_to_spi_uint32(mask.path_hdr);
        uint8_t si_mask = nsh_path_hdr_to_si(mask.path_hdr);
        /* The SPI occupies only 24 bits of the path header, so an
         * exact-match SPI mask is widened to cover the full 32-bit
         * working value. */
        if (spi_mask == 0x00ffffff) {
            spi_mask = UINT32_MAX;
        }
        spi = nsh_path_hdr_to_spi_uint32(key.path_hdr) | (spi & ~spi_mask);
        si = nsh_path_hdr_to_si(key.path_hdr) | (si & ~si_mask);
        path_hdr = nsh_get_path_hdr(nsh);
        nsh_path_hdr_set_spi(&path_hdr, htonl(spi));
        nsh_path_hdr_set_si(&path_hdr, si);
        put_16aligned_be32(&nsh->path_hdr, path_hdr);
        switch (mdtype) {
        case NSH_M_TYPE1:
            for (int i = 0; i < 4; i++) {
                ovs_be32 p = get_16aligned_be32(&nsh->md1.context[i]);
                ovs_be32 k = key.context[i];
                ovs_be32 m = mask.context[i];
                put_16aligned_be32(&nsh->md1.context[i], k | (p & ~m));
            }
            break;
        case NSH_M_TYPE2:
        default:
            /* No support for setting any other metadata format yet. */
            break;
        }
    }
}
456 | | |
/* Executes an (unmasked) OVS_ACTION_ATTR_SET: applies the single ovs_key_*
 * attribute 'a' to 'packet' or to its metadata, overwriting the targeted
 * field(s) completely.  Masked sets go through
 * odp_execute_masked_set_action() instead. */
static void
odp_execute_set_action(struct dp_packet *packet, const struct nlattr *a)
{
    enum ovs_key_attr type = nl_attr_type(a);
    const struct ovs_key_ipv4 *ipv4_key;
    const struct ovs_key_ipv6 *ipv6_key;
    struct pkt_metadata *md = &packet->md;

    switch (type) {
    case OVS_KEY_ATTR_PRIORITY:
        md->skb_priority = nl_attr_get_u32(a);
        break;

    case OVS_KEY_ATTR_TUNNEL:
        odp_set_tunnel_action(a, &md->tunnel);
        break;

    case OVS_KEY_ATTR_SKB_MARK:
        md->pkt_mark = nl_attr_get_u32(a);
        break;

    case OVS_KEY_ATTR_ETHERNET:
        /* NULL mask selects the full-overwrite path. */
        odp_eth_set_addrs(packet, nl_attr_get(a), NULL);
        break;

    case OVS_KEY_ATTR_NSH: {
        odp_set_nsh(packet, a, false);
        break;
    }

    case OVS_KEY_ATTR_IPV4:
        ipv4_key = nl_attr_get_unspec(a, sizeof(struct ovs_key_ipv4));
        packet_set_ipv4(packet, ipv4_key->ipv4_src,
                        ipv4_key->ipv4_dst, ipv4_key->ipv4_tos,
                        ipv4_key->ipv4_ttl);
        break;

    case OVS_KEY_ATTR_IPV6:
        ipv6_key = nl_attr_get_unspec(a, sizeof(struct ovs_key_ipv6));
        packet_set_ipv6(packet, &ipv6_key->ipv6_src, &ipv6_key->ipv6_dst,
                        ipv6_key->ipv6_tclass, ipv6_key->ipv6_label,
                        ipv6_key->ipv6_hlimit);
        break;

    /* The L4 cases only rewrite ports when the header and payload are
     * actually present in the packet. */
    case OVS_KEY_ATTR_TCP:
        if (OVS_LIKELY(dp_packet_get_tcp_payload(packet))) {
            const struct ovs_key_tcp *tcp_key
                = nl_attr_get_unspec(a, sizeof(struct ovs_key_tcp));

            packet_set_tcp_port(packet, tcp_key->tcp_src,
                                tcp_key->tcp_dst);
        }
        break;

    case OVS_KEY_ATTR_UDP:
        if (OVS_LIKELY(dp_packet_get_udp_payload(packet))) {
            const struct ovs_key_udp *udp_key
                = nl_attr_get_unspec(a, sizeof(struct ovs_key_udp));

            packet_set_udp_port(packet, udp_key->udp_src,
                                udp_key->udp_dst);
        }
        break;

    case OVS_KEY_ATTR_SCTP:
        if (OVS_LIKELY(dp_packet_get_sctp_payload(packet))) {
            const struct ovs_key_sctp *sctp_key
                = nl_attr_get_unspec(a, sizeof(struct ovs_key_sctp));

            packet_set_sctp_port(packet, sctp_key->sctp_src,
                                 sctp_key->sctp_dst);
        }
        break;

    case OVS_KEY_ATTR_MPLS:
        set_mpls_lse(packet, nl_attr_get_be32(a));
        break;

    case OVS_KEY_ATTR_ARP:
        set_arp(packet, nl_attr_get(a), NULL);
        break;

    case OVS_KEY_ATTR_ICMP:
    case OVS_KEY_ATTR_ICMPV6:
        if (OVS_LIKELY(dp_packet_get_icmp_payload(packet))) {
            const struct ovs_key_icmp *icmp_key
                = nl_attr_get_unspec(a, sizeof(struct ovs_key_icmp));

            packet_set_icmp(packet, icmp_key->icmp_type, icmp_key->icmp_code);
        }
        break;

    case OVS_KEY_ATTR_ND:
        if (OVS_LIKELY(dp_packet_get_nd_payload(packet))) {
            const struct ovs_key_nd *nd_key
                = nl_attr_get_unspec(a, sizeof(struct ovs_key_nd));
            packet_set_nd(packet, &nd_key->nd_target, nd_key->nd_sll,
                          nd_key->nd_tll);
        }
        break;

    case OVS_KEY_ATTR_ND_EXTENSIONS:
        if (OVS_LIKELY(dp_packet_get_nd_payload(packet))) {
            const struct ovs_key_nd_extensions *nd_ext_key
                = nl_attr_get_unspec(a, sizeof(struct ovs_key_nd_extensions));
            ovs_16aligned_be32 rso_flags;
            put_16aligned_be32(&rso_flags, nd_ext_key->nd_reserved);
            packet_set_nd_ext(packet, rso_flags, nd_ext_key->nd_options_type);
        }
        break;

    case OVS_KEY_ATTR_DP_HASH:
        md->dp_hash = nl_attr_get_u32(a);
        break;

    case OVS_KEY_ATTR_RECIRC_ID:
        md->recirc_id = nl_attr_get_u32(a);
        break;

    /* The remaining key attributes are never emitted as 'set' actions,
     * so reaching them indicates a programming error. */
    case OVS_KEY_ATTR_UNSPEC:
    case OVS_KEY_ATTR_PACKET_TYPE:
    case OVS_KEY_ATTR_ENCAP:
    case OVS_KEY_ATTR_ETHERTYPE:
    case OVS_KEY_ATTR_IN_PORT:
    case OVS_KEY_ATTR_VLAN:
    case OVS_KEY_ATTR_TCP_FLAGS:
    case OVS_KEY_ATTR_CT_STATE:
    case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
    case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
    case OVS_KEY_ATTR_CT_ZONE:
    case OVS_KEY_ATTR_CT_MARK:
    case OVS_KEY_ATTR_CT_LABELS:
    case OVS_KEY_ATTR_TUNNEL_INFO:
    case __OVS_KEY_ATTR_MAX:
    default:
        OVS_NOT_REACHED();
    }
}
595 | | |
/* Executes an OVS_ACTION_ATTR_SET_MASKED: the attribute 'a' carries a key
 * immediately followed by a mask of the same type (fetched with
 * odp_get_key_mask()).  Each field is merged with the packet's current
 * value as new = key | (old & ~mask). */
static void
odp_execute_masked_set_action(struct dp_packet *packet,
                              const struct nlattr *a)
{
    struct pkt_metadata *md = &packet->md;
    enum ovs_key_attr type = nl_attr_type(a);
    struct mpls_hdr *mh;

    switch (type) {
    case OVS_KEY_ATTR_PRIORITY:
        md->skb_priority = nl_attr_get_u32(a)
            | (md->skb_priority & ~*odp_get_key_mask(a, uint32_t));
        break;

    case OVS_KEY_ATTR_SKB_MARK:
        md->pkt_mark = nl_attr_get_u32(a)
            | (md->pkt_mark & ~*odp_get_key_mask(a, uint32_t));
        break;

    case OVS_KEY_ATTR_ETHERNET:
        odp_eth_set_addrs(packet, nl_attr_get(a),
                          odp_get_key_mask(a, struct ovs_key_ethernet));
        break;

    case OVS_KEY_ATTR_NSH: {
        odp_set_nsh(packet, a, true);
        break;
    }

    case OVS_KEY_ATTR_IPV4:
        odp_set_ipv4(packet, nl_attr_get(a),
                     odp_get_key_mask(a, struct ovs_key_ipv4));
        break;

    case OVS_KEY_ATTR_IPV6:
        odp_set_ipv6(packet, nl_attr_get(a),
                     odp_get_key_mask(a, struct ovs_key_ipv6));
        break;

    case OVS_KEY_ATTR_TCP:
        odp_set_tcp(packet, nl_attr_get(a),
                    odp_get_key_mask(a, struct ovs_key_tcp));
        break;

    case OVS_KEY_ATTR_UDP:
        odp_set_udp(packet, nl_attr_get(a),
                    odp_get_key_mask(a, struct ovs_key_udp));
        break;

    case OVS_KEY_ATTR_SCTP:
        odp_set_sctp(packet, nl_attr_get(a),
                     odp_get_key_mask(a, struct ovs_key_sctp));
        break;

    case OVS_KEY_ATTR_MPLS:
        /* Packets without an MPLS label stack are silently skipped. */
        mh = dp_packet_l2_5(packet);
        if (mh) {
            put_16aligned_be32(&mh->mpls_lse, nl_attr_get_be32(a)
                               | (get_16aligned_be32(&mh->mpls_lse)
                                  & ~*odp_get_key_mask(a, ovs_be32)));
        }
        break;

    case OVS_KEY_ATTR_ARP:
        set_arp(packet, nl_attr_get(a),
                odp_get_key_mask(a, struct ovs_key_arp));
        break;

    case OVS_KEY_ATTR_ND:
        odp_set_nd(packet, nl_attr_get(a),
                   odp_get_key_mask(a, struct ovs_key_nd));
        break;

    case OVS_KEY_ATTR_ND_EXTENSIONS:
        odp_set_nd_ext(packet, nl_attr_get(a),
                       odp_get_key_mask(a, struct ovs_key_nd_extensions));
        break;

    case OVS_KEY_ATTR_DP_HASH:
        md->dp_hash = nl_attr_get_u32(a)
            | (md->dp_hash & ~*odp_get_key_mask(a, uint32_t));
        break;

    case OVS_KEY_ATTR_RECIRC_ID:
        md->recirc_id = nl_attr_get_u32(a)
            | (md->recirc_id & ~*odp_get_key_mask(a, uint32_t));
        break;

    /* No masked 'set' is ever generated for these attributes; reaching
     * them indicates a programming error. */
    case OVS_KEY_ATTR_TUNNEL: /* Masked data not supported for tunnel. */
    case OVS_KEY_ATTR_PACKET_TYPE:
    case OVS_KEY_ATTR_UNSPEC:
    case OVS_KEY_ATTR_CT_STATE:
    case OVS_KEY_ATTR_CT_ZONE:
    case OVS_KEY_ATTR_CT_MARK:
    case OVS_KEY_ATTR_CT_LABELS:
    case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
    case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
    case OVS_KEY_ATTR_ENCAP:
    case OVS_KEY_ATTR_ETHERTYPE:
    case OVS_KEY_ATTR_IN_PORT:
    case OVS_KEY_ATTR_VLAN:
    case OVS_KEY_ATTR_ICMP:
    case OVS_KEY_ATTR_ICMPV6:
    case OVS_KEY_ATTR_TCP_FLAGS:
    case OVS_KEY_ATTR_TUNNEL_INFO:
    case __OVS_KEY_ATTR_MAX:
    default:
        OVS_NOT_REACHED();
    }
}
706 | | |
/* Executes an OVS_ACTION_ATTR_SAMPLE on 'packet': with probability
 * OVS_SAMPLE_ATTR_PROBABILITY / UINT32_MAX runs the nested
 * OVS_SAMPLE_ATTR_ACTIONS, otherwise does nothing (freeing 'packet' if
 * 'steal' says the caller gave up ownership).
 *
 * NOTE(review): 'subactions' remains NULL if OVS_SAMPLE_ATTR_ACTIONS is
 * absent, and nl_attr_get(NULL) below would then misbehave -- presumably
 * action validation guarantees the attribute is present; confirm. */
static void
odp_execute_sample(void *dp, struct dp_packet *packet, bool steal,
                   const struct nlattr *action,
                   odp_execute_cb dp_execute_action)
{
    const struct nlattr *subactions = NULL;
    const struct nlattr *a;
    struct dp_packet_batch pb;
    size_t left;

    NL_NESTED_FOR_EACH_UNSAFE (a, left, action) {
        int type = nl_attr_type(a);

        switch ((enum ovs_sample_attr) type) {
        case OVS_SAMPLE_ATTR_PROBABILITY:
            /* Sample lost the coin toss: drop (and free, when stolen). */
            if (random_uint32() >= nl_attr_get_u32(a)) {
                if (steal) {
                    COVERAGE_INC(datapath_drop_sample_error);
                    dp_packet_delete(packet);
                }
                return;
            }
            break;

        case OVS_SAMPLE_ATTR_ACTIONS:
            subactions = a;
            break;

        case OVS_SAMPLE_ATTR_UNSPEC:
        case __OVS_SAMPLE_ATTR_MAX:
        default:
            OVS_NOT_REACHED();
        }
    }

    if (!steal) {
        /* The 'subactions' may modify the packet, but the modification
         * should not propagate beyond this sample action. Make a copy of
         * the packet in case we don't own the packet, so that the
         * 'subactions' are only applied to the clone. 'odp_execute_actions'
         * will free the clone. */
        packet = dp_packet_clone(packet);
    }
    dp_packet_batch_init_packet(&pb, packet);
    odp_execute_actions(dp, &pb, true, nl_attr_get(subactions),
                        nl_attr_get_size(subactions), dp_execute_action);
}
754 | | |
755 | | static void |
756 | | odp_execute_clone(void *dp, struct dp_packet_batch *batch, bool steal, |
757 | | const struct nlattr *actions, |
758 | | odp_execute_cb dp_execute_action) |
759 | 0 | { |
760 | 0 | if (!steal) { |
761 | | /* The 'actions' may modify the packet, but the modification |
762 | | * should not propagate beyond this clone action. Make a copy |
763 | | * the packet in case we don't own the packet, so that the |
764 | | * 'actions' are only applied to the clone. 'odp_execute_actions' |
765 | | * will free the clone. */ |
766 | 0 | struct dp_packet_batch clone_pkt_batch; |
767 | 0 | dp_packet_batch_clone(&clone_pkt_batch, batch); |
768 | 0 | dp_packet_batch_reset_cutlen(batch); |
769 | 0 | odp_execute_actions(dp, &clone_pkt_batch, true, nl_attr_get(actions), |
770 | 0 | nl_attr_get_size(actions), dp_execute_action); |
771 | 0 | } |
772 | 0 | else { |
773 | 0 | odp_execute_actions(dp, batch, true, nl_attr_get(actions), |
774 | 0 | nl_attr_get_size(actions), dp_execute_action); |
775 | 0 | } |
776 | 0 | } |
777 | | |
/* Executes an OVS_ACTION_ATTR_CHECK_PKT_LEN: compares the packet's send
 * length (minus L2 padding) against the configured threshold and runs the
 * matching nested action list (greater, or less-or-equal). */
static void
odp_execute_check_pkt_len(void *dp, struct dp_packet *packet, bool steal,
                          const struct nlattr *action,
                          odp_execute_cb dp_execute_action)
{
    static const struct nl_policy ovs_cpl_policy[] = {
        [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = { .type = NL_A_U16 },
        [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = { .type = NL_A_NESTED },
        [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]
            = { .type = NL_A_NESTED },
    };
    struct nlattr *attrs[ARRAY_SIZE(ovs_cpl_policy)];

    /* The action was validated at install time; a parse failure here is a
     * programming error. */
    if (!nl_parse_nested(action, ovs_cpl_policy, attrs, ARRAY_SIZE(attrs))) {
        OVS_NOT_REACHED();
    }

    const struct nlattr *a;
    struct dp_packet_batch pb;
    /* Compare the bytes that will actually go on the wire, excluding any
     * L2 padding. */
    uint32_t size = dp_packet_get_send_len(packet)
                    - dp_packet_l2_pad_size(packet);

    a = attrs[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN];
    if (size > nl_attr_get_u16(a)) {
        a = attrs[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
    } else {
        a = attrs[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
    }

    if (!steal) {
        /* The 'subactions' may modify the packet, but the modification
         * should not propagate beyond this action. Make a copy of the
         * packet in case we don't own the packet, so that the 'subactions'
         * are only applied to the clone. 'odp_execute_actions' will free
         * the clone. */
        packet = dp_packet_clone(packet);
    }
    /* If nl_attr_get(a) is NULL, the packet will be freed by
     * odp_execute_actions. */
    dp_packet_batch_init_packet(&pb, packet);
    odp_execute_actions(dp, &pb, true, nl_attr_get(a), nl_attr_get_size(a),
                        dp_execute_action);
}
821 | | |
/* Returns true if action 'a' can only be executed by the datapath itself
 * (output, tunneling, userspace upcall, conntrack, ...), false if this
 * module can execute it directly on the packet. */
static bool
requires_datapath_assistance(const struct nlattr *a)
{
    enum ovs_action_attr type = nl_attr_type(a);

    switch (type) {
    /* These only make sense in the context of a datapath. */
    case OVS_ACTION_ATTR_OUTPUT:
    case OVS_ACTION_ATTR_LB_OUTPUT:
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
    case OVS_ACTION_ATTR_TUNNEL_POP:
    case OVS_ACTION_ATTR_USERSPACE:
    case OVS_ACTION_ATTR_RECIRC:
    case OVS_ACTION_ATTR_CT:
    case OVS_ACTION_ATTR_METER:
    case OVS_ACTION_ATTR_PSAMPLE:
        return true;

    /* Pure packet/metadata transformations that this module executes
     * itself. */
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_PUSH_ETH:
    case OVS_ACTION_ATTR_POP_ETH:
    case OVS_ACTION_ATTR_CLONE:
    case OVS_ACTION_ATTR_PUSH_NSH:
    case OVS_ACTION_ATTR_POP_NSH:
    case OVS_ACTION_ATTR_CT_CLEAR:
    case OVS_ACTION_ATTR_CHECK_PKT_LEN:
    case OVS_ACTION_ATTR_ADD_MPLS:
    case OVS_ACTION_ATTR_DEC_TTL:
    case OVS_ACTION_ATTR_DROP:
        return false;

    case OVS_ACTION_ATTR_SAMPLE: {
        /* Nested "psample" actions rely on the datapath executing the
         * parent "sample", storing the probability and making it available
         * when the nested "psample" is run. */
        const struct nlattr *attr;
        unsigned int left;

        NL_NESTED_FOR_EACH (attr, left, a) {
            if (nl_attr_type(attr) == OVS_SAMPLE_ATTR_ACTIONS) {
                const struct nlattr *act;
                unsigned int act_left;

                NL_NESTED_FOR_EACH (act, act_left, attr) {
                    if (nl_attr_type(act) == OVS_ACTION_ATTR_PSAMPLE) {
                        return true;
                    }
                }
            }
        }
        return false;
    }

    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    return false;
}
889 | | |
890 | | static void |
891 | | action_pop_vlan(struct dp_packet_batch *batch, |
892 | | const struct nlattr *a OVS_UNUSED) |
893 | 0 | { |
894 | 0 | struct dp_packet *packet; |
895 | |
|
896 | 0 | DP_PACKET_BATCH_FOR_EACH (i, packet, batch) { |
897 | 0 | eth_pop_vlan(packet); |
898 | 0 | } |
899 | 0 | } |
900 | | |
901 | | static void |
902 | | action_push_vlan(struct dp_packet_batch *batch, const struct nlattr *a) |
903 | 0 | { |
904 | 0 | struct dp_packet *packet; |
905 | 0 | const struct ovs_action_push_vlan *vlan = nl_attr_get(a); |
906 | |
|
907 | 0 | DP_PACKET_BATCH_FOR_EACH (i, packet, batch) { |
908 | 0 | eth_push_vlan(packet, vlan->vlan_tpid, vlan->vlan_tci); |
909 | 0 | } |
910 | 0 | } |
911 | | |
912 | | static void |
913 | | action_set_masked(struct dp_packet_batch *batch, const struct nlattr *a) |
914 | 0 | { |
915 | 0 | const struct nlattr *key = nl_attr_get(a); |
916 | 0 | struct dp_packet *packet; |
917 | |
|
918 | 0 | DP_PACKET_BATCH_FOR_EACH (i, packet, batch) { |
919 | 0 | odp_execute_masked_set_action(packet, key); |
920 | 0 | } |
921 | 0 | } |
922 | | |
923 | | /* Implementation of the scalar actions impl init function. Build up the |
924 | | * array of func ptrs here. */ |
925 | | int |
926 | | odp_action_scalar_init(struct odp_execute_action_impl *self) |
927 | 0 | { |
928 | | /* Set function pointers for actions that can be applied directly, these |
929 | | * are identified by OVS_ACTION_ATTR_*. */ |
930 | 0 | self->funcs[OVS_ACTION_ATTR_POP_VLAN] = action_pop_vlan; |
931 | 0 | self->funcs[OVS_ACTION_ATTR_PUSH_VLAN] = action_push_vlan; |
932 | 0 | self->funcs[OVS_ACTION_ATTR_SET_MASKED] = action_set_masked; |
933 | |
|
934 | 0 | return 0; |
935 | 0 | } |
936 | | |
937 | | /* The active function pointers on the datapath. ISA optimized implementations |
938 | | * are enabled by plugging them into this static arary, which is consulted when |
939 | | * applying actions on the datapath. */ |
940 | | static ATOMIC(struct odp_execute_action_impl *) actions_active_impl; |
941 | | |
942 | | static int |
943 | | odp_actions_impl_set(const char *name) |
944 | 0 | { |
945 | 0 | struct odp_execute_action_impl *active; |
946 | 0 | active = odp_execute_action_set(name); |
947 | 0 | if (!active) { |
948 | 0 | VLOG_ERR("Failed setting action implementation to %s", name); |
949 | 0 | return 1; |
950 | 0 | } |
951 | | |
952 | 0 | atomic_store_relaxed(&actions_active_impl, active); |
953 | 0 | return 0; |
954 | 0 | } |
955 | | |
956 | | static void |
957 | | action_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED, |
958 | | const char *argv[], void *aux OVS_UNUSED) |
959 | 0 | { |
960 | 0 | struct ds reply = DS_EMPTY_INITIALIZER; |
961 | |
|
962 | 0 | int err = odp_actions_impl_set(argv[1]); |
963 | 0 | if (err) { |
964 | 0 | ds_put_format(&reply, |
965 | 0 | "Error: unknown action implementation, %s, specified!", |
966 | 0 | argv[1]); |
967 | 0 | unixctl_command_reply_error(conn, ds_cstr(&reply)); |
968 | 0 | } else { |
969 | 0 | ds_put_format(&reply, "Action implementation set to %s.", argv[1]); |
970 | 0 | unixctl_command_reply(conn, ds_cstr(&reply)); |
971 | 0 | } |
972 | |
|
973 | 0 | ds_destroy(&reply); |
974 | 0 | } |
975 | | |
976 | | static void |
977 | | action_impl_show(struct unixctl_conn *conn, int argc OVS_UNUSED, |
978 | | const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) |
979 | 0 | { |
980 | 0 | struct ds reply = DS_EMPTY_INITIALIZER; |
981 | |
|
982 | 0 | odp_execute_action_get_info(&reply); |
983 | 0 | unixctl_command_reply(conn, ds_cstr(&reply)); |
984 | 0 | ds_destroy(&reply); |
985 | 0 | } |
986 | | |
987 | | static void |
988 | | odp_execute_unixctl_init(void) |
989 | 0 | { |
990 | 0 | unixctl_command_register("odp-execute/action-impl-set", "name", |
991 | 0 | 1, 1, action_impl_set, |
992 | 0 | NULL); |
993 | 0 | unixctl_command_register("odp-execute/action-impl-show", "", |
994 | 0 | 0, 0, action_impl_show, |
995 | 0 | NULL); |
996 | 0 | } |
997 | | |
/* One-time module initialization: registers the available action
 * implementations, selects the default one (the autovalidator when built
 * with ACTIONS_AUTOVALIDATOR_DEFAULT, otherwise the scalar C version), and
 * registers the unixctl commands.  Safe to call from multiple threads; the
 * once-guard ensures the body runs exactly once. */
void
odp_execute_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    if (ovsthread_once_start(&once)) {
        odp_execute_action_init();
#ifdef ACTIONS_AUTOVALIDATOR_DEFAULT
        odp_actions_impl_set("autovalidator");
#else
        odp_actions_impl_set("scalar");
#endif
        odp_execute_unixctl_init();
        ovsthread_once_done(&once);
    }
}
1013 | | |
/* Executes all of the 'actions_len' bytes of datapath actions in 'actions' on
 * the packets in 'batch'.  If 'steal' is true, possibly modifies and
 * definitely frees the packets in 'batch', otherwise leaves 'batch' unchanged.
 *
 * Some actions (e.g. output actions) can only be executed by a datapath. This
 * function implements those actions by passing the action and the packets to
 * 'dp_execute_action' (along with 'dp'). If 'dp_execute_action' is passed a
 * true 'steal' parameter then it must definitely free the packets passed into
 * it. The packet can be modified whether 'steal' is false or true. If a
 * packet is removed from the batch, then the fate of the packet is determined
 * by the code that does this removal, irrespective of the value of 'steal'.
 * Otherwise, if the packet is not removed from the batch and 'steal' is false
 * then the packet could either be cloned or not. */
void
odp_execute_actions(void *dp, struct dp_packet_batch *batch, bool steal,
                    const struct nlattr *actions, size_t actions_len,
                    odp_execute_cb dp_execute_action)
{
    struct dp_packet *packet;
    const struct nlattr *a;
    unsigned int left;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, actions, actions_len) {
        int type = nl_attr_type(a);
        enum ovs_action_attr attr_type = (enum ovs_action_attr) type;
        /* 'left' still includes the current attribute, so this holds only
         * when 'a' is the final action in the list. */
        bool last_action = (left <= NLA_ALIGN(a->nla_len));

        if (requires_datapath_assistance(a)) {
            if (dp_execute_action) {
                /* Allow 'dp_execute_action' to steal the packet data if we do
                 * not need it any more. */
                bool should_steal = steal && last_action;

                dp_execute_action(dp, batch, a, should_steal);

                if (last_action || dp_packet_batch_is_empty(batch)) {
                    /* We do not need to free the packets.
                     * Either dp_execute_actions() has stolen them
                     * or the batch is freed due to errors. In either
                     * case we do not need to execute further actions.
                     */
                    return;
                }
            }
            continue;
        }

        /* If type is set in the active actions implementation, call the
         * function-pointer and continue to the next action. */
        if (attr_type <= OVS_ACTION_ATTR_MAX) {
            /* Read the action implementation pointer atomically to avoid
             * non-atomic read causing corruption if being written by another
             * thread simultaneously. */
            struct odp_execute_action_impl *actions_impl;
            atomic_read_relaxed(&actions_active_impl, &actions_impl);

            if (actions_impl && actions_impl->funcs[attr_type]) {
                actions_impl->funcs[attr_type](batch, a);
                continue;
            }
        }

        /* If the action was not handled by the active function pointers above,
         * process them by switching on the type below. */

        switch (attr_type) {
        case OVS_ACTION_ATTR_HASH: {
            const struct ovs_action_hash *hash_act = nl_attr_get(a);

            /* Calculate a hash value directly. This might not match the
             * value computed by the datapath, but it is much less expensive,
             * and the current use case (bonding) does not require a strict
             * match to work properly. */
            switch (hash_act->hash_alg) {
            case OVS_HASH_ALG_L4: {
                struct flow flow;
                uint32_t hash;

                DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                    /* RSS hash can be used here instead of 5tuple for
                     * performance reasons. */
                    if (dp_packet_rss_valid(packet)) {
                        hash = dp_packet_get_rss_hash(packet);
                        hash = hash_int(hash, hash_act->hash_basis);
                    } else {
                        flow_extract(packet, &flow);
                        hash = flow_hash_5tuple(&flow, hash_act->hash_basis);
                    }
                    packet->md.dp_hash = hash;
                }
                break;
            }
            case OVS_HASH_ALG_SYM_L4: {
                struct flow flow;
                uint32_t hash;

                DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                    flow_extract(packet, &flow);
                    hash = flow_hash_symmetric_l3l4(&flow,
                                                    hash_act->hash_basis,
                                                    false);
                    packet->md.dp_hash = hash;
                }
                break;
            }
            default:
                /* Assert on unknown hash algorithm. */
                OVS_NOT_REACHED();
            }
            break;
        }

        case OVS_ACTION_ATTR_PUSH_MPLS: {
            const struct ovs_action_push_mpls *mpls = nl_attr_get(a);

            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                push_mpls(packet, mpls->mpls_ethertype, mpls->mpls_lse);
            }
            break;
        }

        case OVS_ACTION_ATTR_POP_MPLS:
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                pop_mpls(packet, nl_attr_get_be16(a));
            }
            break;

        case OVS_ACTION_ATTR_SET:
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                odp_execute_set_action(packet, nl_attr_get(a));
            }
            break;

        case OVS_ACTION_ATTR_SAMPLE:
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                odp_execute_sample(dp, packet, steal && last_action, a,
                                   dp_execute_action);
            }

            if (last_action) {
                /* We do not need to free the packets. odp_execute_sample()
                 * has stolen them. */
                return;
            }
            break;

        case OVS_ACTION_ATTR_TRUNC: {
            const struct ovs_action_trunc *trunc =
                nl_attr_get_unspec(a, sizeof *trunc);

            batch->trunc = true;
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                dp_packet_set_cutlen(packet, trunc->max_len);
            }
            break;
        }

        case OVS_ACTION_ATTR_CLONE:
            odp_execute_clone(dp, batch, steal && last_action, a,
                              dp_execute_action);
            if (last_action) {
                /* We do not need to free the packets. odp_execute_clone() has
                 * stolen them. */
                return;
            }
            break;
        case OVS_ACTION_ATTR_METER:
            /* Not implemented yet. */
            break;
        case OVS_ACTION_ATTR_PUSH_ETH: {
            const struct ovs_action_push_eth *eth = nl_attr_get(a);

            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                push_eth(packet, &eth->addresses.eth_dst,
                         &eth->addresses.eth_src);
            }
            break;
        }

        case OVS_ACTION_ATTR_POP_ETH:
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                pop_eth(packet);
            }
            break;

        case OVS_ACTION_ATTR_PUSH_NSH: {
            /* Build the NSH header once in a stack buffer, then push the
             * same header onto every packet in the batch. */
            uint32_t buffer[NSH_HDR_MAX_LEN / 4];
            struct nsh_hdr *nsh_hdr = ALIGNED_CAST(struct nsh_hdr *, buffer);
            nsh_reset_ver_flags_ttl_len(nsh_hdr);
            odp_nsh_hdr_from_attr(nl_attr_get(a), nsh_hdr, NSH_HDR_MAX_LEN);
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                push_nsh(packet, nsh_hdr);
            }
            break;
        }
        case OVS_ACTION_ATTR_POP_NSH: {
            size_t i;
            const size_t num = dp_packet_batch_size(batch);

            /* Refill pattern: packets whose NSH header pops cleanly are put
             * back into the batch; the rest are counted and dropped. */
            DP_PACKET_BATCH_REFILL_FOR_EACH (i, num, packet, batch) {
                if (pop_nsh(packet)) {
                    dp_packet_batch_refill(batch, packet, i);
                } else {
                    COVERAGE_INC(datapath_drop_nsh_decap_error);
                    dp_packet_delete(packet);
                }
            }
            break;
        }
        case OVS_ACTION_ATTR_CT_CLEAR:
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                conntrack_clear(packet);
            }
            break;

        case OVS_ACTION_ATTR_CHECK_PKT_LEN:
            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                odp_execute_check_pkt_len(dp, packet, steal && last_action, a,
                                          dp_execute_action);
            }

            if (last_action) {
                /* We do not need to free the packets.
                 * odp_execute_check_pkt_len() has stolen them. */
                return;
            }
            break;

        case OVS_ACTION_ATTR_ADD_MPLS: {
            const struct ovs_action_add_mpls *mpls = nl_attr_get(a);
            bool l3_flag = mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK;

            DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                add_mpls(packet, mpls->mpls_ethertype, mpls->mpls_lse,
                         l3_flag);
            }
            break;
        }

        case OVS_ACTION_ATTR_DROP:{
            const enum xlate_error *drop_reason = nl_attr_get(a);

            /* Account the whole batch against the drop reason, then free it
             * (respecting 'steal') and stop processing. */
            dp_update_drop_action_counter(*drop_reason,
                                          dp_packet_batch_size(batch));
            dp_packet_delete_batch(batch, steal);
            return;
        }
        case OVS_ACTION_ATTR_OUTPUT:
        case OVS_ACTION_ATTR_LB_OUTPUT:
        case OVS_ACTION_ATTR_TUNNEL_PUSH:
        case OVS_ACTION_ATTR_TUNNEL_POP:
        case OVS_ACTION_ATTR_USERSPACE:
        case OVS_ACTION_ATTR_RECIRC:
        case OVS_ACTION_ATTR_CT:
        case OVS_ACTION_ATTR_UNSPEC:
        case OVS_ACTION_ATTR_DEC_TTL:
        case OVS_ACTION_ATTR_PSAMPLE:
        case __OVS_ACTION_ATTR_MAX:
        /* The following actions are handled by the scalar implementation. */
        case OVS_ACTION_ATTR_POP_VLAN:
        case OVS_ACTION_ATTR_PUSH_VLAN:
        case OVS_ACTION_ATTR_SET_MASKED:
            OVS_NOT_REACHED();
        }

        /* Do not add any generic processing here, as it won't be executed when
         * an ISA-specific action implementation exists. */
    }

    dp_packet_delete_batch(batch, steal);
}