/src/openvswitch/lib/netdev-dummy.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | |
19 | | #include "dummy.h" |
20 | | |
21 | | #include <errno.h> |
22 | | #include <unistd.h> |
23 | | |
24 | | #include "dp-packet.h" |
25 | | #include "dpif-netdev.h" |
26 | | #include "flow.h" |
27 | | #include "netdev-provider.h" |
28 | | #include "netdev-vport.h" |
29 | | #include "odp-util.h" |
30 | | #include "openvswitch/dynamic-string.h" |
31 | | #include "openvswitch/list.h" |
32 | | #include "openvswitch/match.h" |
33 | | #include "openvswitch/ofp-print.h" |
34 | | #include "openvswitch/ofpbuf.h" |
35 | | #include "openvswitch/vlog.h" |
36 | | #include "ovs-atomic.h" |
37 | | #include "packets.h" |
38 | | #include "pcap-file.h" |
39 | | #include "openvswitch/poll-loop.h" |
40 | | #include "openvswitch/shash.h" |
41 | | #include "ovs-router.h" |
42 | | #include "sset.h" |
43 | | #include "stream.h" |
44 | | #include "unaligned.h" |
45 | | #include "timeval.h" |
46 | | #include "unixctl.h" |
47 | | #include "userspace-tso.h" |
48 | | #include "reconnect.h" |
49 | | |
50 | | VLOG_DEFINE_THIS_MODULE(netdev_dummy); |
51 | | |
52 | 0 | #define C_STATS_SIZE 2 |
53 | | |
54 | | struct reconnect; |
55 | | |
56 | | struct dummy_packet_stream { |
57 | | struct stream *stream; |
58 | | struct ovs_list txq; |
59 | | struct dp_packet rxbuf; |
60 | | }; |
61 | | |
62 | | enum dummy_packet_conn_type { |
63 | | NONE, /* No connection is configured. */ |
64 | | PASSIVE, /* Listener. */ |
65 | | ACTIVE /* Connect to listener. */ |
66 | | }; |
67 | | |
68 | | enum dummy_netdev_conn_state { |
69 | | CONN_STATE_CONNECTED, /* Listener connected. */ |
70 | | CONN_STATE_NOT_CONNECTED, /* Listener not connected. */ |
71 | | CONN_STATE_UNKNOWN, /* No relevant information. */ |
72 | | }; |
73 | | |
74 | | struct dummy_packet_pconn { |
75 | | struct pstream *pstream; |
76 | | struct dummy_packet_stream **streams; |
77 | | size_t n_streams; |
78 | | }; |
79 | | |
80 | | struct dummy_packet_rconn { |
81 | | struct dummy_packet_stream *rstream; |
82 | | struct reconnect *reconnect; |
83 | | }; |
84 | | |
85 | | struct dummy_packet_conn { |
86 | | enum dummy_packet_conn_type type; |
87 | | union { |
88 | | struct dummy_packet_pconn pconn; |
89 | | struct dummy_packet_rconn rconn; |
90 | | }; |
91 | | }; |
92 | | |
93 | | struct pkt_list_node { |
94 | | struct dp_packet *pkt; |
95 | | struct ovs_list list_node; |
96 | | }; |
97 | | |
98 | | struct offloaded_flow { |
99 | | struct hmap_node node; |
100 | | ovs_u128 ufid; |
101 | | struct match match; |
102 | | uint32_t mark; |
103 | | }; |
104 | | |
105 | | struct netdev_dummy_q_stats { |
106 | | uint64_t bytes; |
107 | | uint64_t packets; |
108 | | }; |
109 | | |
110 | | /* Protects 'dummy_list'. */ |
111 | | static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER; |
112 | | |
113 | | /* Contains all 'struct netdev_dummy's. */ |
114 | | static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex) |
115 | | = OVS_LIST_INITIALIZER(&dummy_list); |
116 | | |
117 | | struct netdev_dummy { |
118 | | struct netdev up; |
119 | | |
120 | | /* In dummy_list. */ |
121 | | struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex); |
122 | | |
123 | | /* Protects all members below. */ |
124 | | struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex); |
125 | | |
126 | | struct eth_addr hwaddr OVS_GUARDED; |
127 | | int mtu OVS_GUARDED; |
128 | | struct netdev_stats stats OVS_GUARDED; |
129 | | struct netdev_custom_counter custom_stats[C_STATS_SIZE] OVS_GUARDED; |
130 | | struct netdev_dummy_q_stats *rxq_stats OVS_GUARDED; |
131 | | struct netdev_dummy_q_stats *txq_stats OVS_GUARDED; |
132 | | enum netdev_flags flags OVS_GUARDED; |
133 | | int ifindex OVS_GUARDED; |
134 | | int numa_id OVS_GUARDED; |
135 | | |
136 | | struct dummy_packet_conn conn OVS_GUARDED; |
137 | | |
138 | | struct pcap_file *tx_pcap, *rxq_pcap OVS_GUARDED; |
139 | | |
140 | | struct ovs_list addrs OVS_GUARDED; |
141 | | struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */ |
142 | | |
143 | | /* The following properties are for dummy-pmd and they cannot be changed |
144 | | * when a device is running, so we remember the request and update them |
145 | | * next time netdev_dummy_reconfigure() is called. */ |
146 | | int requested_n_txq OVS_GUARDED; |
147 | | int requested_n_rxq OVS_GUARDED; |
148 | | int requested_numa_id OVS_GUARDED; |
149 | | |
150 | | /* Force IP Rx csum good. */ |
151 | | bool ol_ip_rx_csum_set_good OVS_GUARDED; |
152 | | /* Force IP Rx csum bad. */ |
153 | | bool ol_ip_rx_csum_set_bad OVS_GUARDED; |
154 | | /* Force IP Rx csum partial. */ |
155 | | bool ol_ip_rx_csum_set_partial OVS_GUARDED; |
156 | | /* Announce netdev IP Tx csum offload. */ |
157 | | bool ol_ip_tx_csum OVS_GUARDED; |
158 | | /* Disable IP Tx csum offload. */ |
159 | | bool ol_ip_tx_csum_disabled OVS_GUARDED; |
160 | | |
161 | | /* Force L4 Rx csum good. */ |
162 | | bool ol_l4_rx_csum_set_good OVS_GUARDED; |
163 | | /* Force L4 Rx csum bad. */ |
164 | | bool ol_l4_rx_csum_set_bad OVS_GUARDED; |
165 | | /* Force L4 Rx csum partial. */ |
166 | | bool ol_l4_rx_csum_set_partial OVS_GUARDED; |
167 | | /* Announce netdev L4 Tx csum offload. */ |
168 | | bool ol_l4_tx_csum OVS_GUARDED; |
169 | | /* Disable L4 Tx csum offload. */ |
170 | | bool ol_l4_tx_csum_disabled OVS_GUARDED; |
171 | | |
172 | | /* Announce netdev outer IP Tx csum offload. */ |
173 | | bool ol_out_ip_tx_csum OVS_GUARDED; |
174 | | /* Disable outer IP Tx csum offload. */ |
175 | | bool ol_out_ip_tx_csum_disabled OVS_GUARDED; |
176 | | |
177 | | /* Announce netdev outer UDP Tx csum offload. */ |
178 | | bool ol_out_udp_tx_csum OVS_GUARDED; |
179 | | /* Disable outer UDP Tx csum offload. */ |
180 | | bool ol_out_udp_tx_csum_disabled OVS_GUARDED; |
181 | | |
182 | | /* Set the segment size for netdev TSO support. */ |
183 | | int ol_tso_segsz OVS_GUARDED; |
184 | | }; |
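
Editor's note: the requested_n_rxq / requested_n_txq / requested_numa_id fields above follow the netdev-provider "remember now, apply later" pattern: set_config() only records the request, and the datapath later stops the port and calls the provider's reconfigure() callback to apply it. A minimal sketch of the pattern, assuming only the public netdev_request_reconfigure() helper (the sketch_* function is hypothetical, not part of this file):

static int
sketch_request_rxq_change(struct netdev_dummy *dev, int n_rxq)
    OVS_REQUIRES(dev->mutex)
{
    if (n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = n_rxq;          /* Remember the request. */
        netdev_request_reconfigure(&dev->up);  /* Ask the datapath to quiesce
                                                * the port and invoke
                                                * netdev_dummy_reconfigure(). */
    }
    return 0;
}
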
185 | | |
186 | | /* Max 'recv_queue_len' in struct netdev_rxq_dummy. */ |
187 | 0 | #define NETDEV_DUMMY_MAX_QUEUE 100 |
188 | | |
189 | | struct netdev_rxq_dummy { |
190 | | struct netdev_rxq up; |
191 | | struct ovs_list node; /* In netdev_dummy's "rxes" list. */ |
192 | | struct ovs_list recv_queue; |
193 | | int recv_queue_len; /* ovs_list_size(&recv_queue). */ |
194 | | struct seq *seq; /* Reports newly queued packets. */ |
195 | | }; |
196 | | |
197 | | struct netdev_addr_dummy { |
198 | | struct in6_addr address; |
199 | | struct in6_addr netmask; |
200 | | struct ovs_list node; /* In netdev_dummy's "addrs" list. */ |
201 | | }; |
202 | | |
203 | | static unixctl_cb_func netdev_dummy_set_admin_state; |
204 | | static int netdev_dummy_construct(struct netdev *); |
205 | | static void netdev_dummy_queue_packet(struct netdev_dummy *, |
206 | | struct dp_packet *, struct flow *, int); |
207 | | |
208 | | static void dummy_packet_stream_close(struct dummy_packet_stream *); |
209 | | |
210 | | static void pkt_list_delete(struct ovs_list *); |
211 | | static void addr_list_delete(struct ovs_list *); |
212 | | |
213 | | bool |
214 | | is_dummy_netdev_class(const struct netdev_class *class) |
215 | 0 | { |
216 | 0 | return class->construct == netdev_dummy_construct; |
217 | 0 | } |
218 | | |
219 | | static struct netdev_dummy * |
220 | | netdev_dummy_cast(const struct netdev *netdev) |
221 | 0 | { |
222 | 0 | ovs_assert(is_dummy_netdev_class(netdev_get_class(netdev))); |
223 | 0 | return CONTAINER_OF(netdev, struct netdev_dummy, up); |
224 | 0 | } |
225 | | |
226 | | static struct netdev_rxq_dummy * |
227 | | netdev_rxq_dummy_cast(const struct netdev_rxq *rx) |
228 | 0 | { |
229 | 0 | ovs_assert(is_dummy_netdev_class(netdev_get_class(rx->netdev))); |
230 | 0 | return CONTAINER_OF(rx, struct netdev_rxq_dummy, up); |
231 | 0 | } |
232 | | |
233 | | static void |
234 | | dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream) |
235 | 0 | { |
236 | 0 | int rxbuf_size = stream ? 2048 : 0; |
237 | 0 | s->stream = stream; |
238 | 0 | dp_packet_init(&s->rxbuf, rxbuf_size); |
239 | 0 | ovs_list_init(&s->txq); |
240 | 0 | } |
241 | | |
242 | | static struct dummy_packet_stream * |
243 | | dummy_packet_stream_create(struct stream *stream) |
244 | 0 | { |
245 | 0 | struct dummy_packet_stream *s; |
246 | |
247 | 0 | s = xzalloc_cacheline(sizeof *s); |
248 | 0 | dummy_packet_stream_init(s, stream); |
249 | |
250 | 0 | return s; |
251 | 0 | } |
252 | | |
253 | | static void |
254 | | dummy_packet_stream_wait(struct dummy_packet_stream *s) |
255 | 0 | { |
256 | 0 | stream_run_wait(s->stream); |
257 | 0 | if (!ovs_list_is_empty(&s->txq)) { |
258 | 0 | stream_send_wait(s->stream); |
259 | 0 | } |
260 | 0 | stream_recv_wait(s->stream); |
261 | 0 | } |
262 | | |
263 | | static void |
264 | | dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size) |
265 | 0 | { |
266 | 0 | if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) { |
267 | 0 | struct dp_packet *b; |
268 | 0 | struct pkt_list_node *node; |
269 | |
270 | 0 | b = dp_packet_clone_data_with_headroom(buffer, size, 2); |
271 | 0 | put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size)); |
272 | |
273 | 0 | node = xmalloc(sizeof *node); |
274 | 0 | node->pkt = b; |
275 | 0 | ovs_list_push_back(&s->txq, &node->list_node); |
276 | 0 | } |
277 | 0 | } |
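
Editor's note: dummy_packet_stream_send() above frames each packet with a 2-byte network-order length prefix, and dummy_packet_stream_run() below reverses the process on receive. A minimal, self-contained sketch (not part of this file) of parsing that wire format:

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Returns the frame length and sets '*frame' if 'buf' holds a complete
 * length-prefixed frame; returns -1 if more bytes are needed or if the
 * frame is shorter than an Ethernet header (14 bytes, ETH_HEADER_LEN). */
static int
sketch_parse_frame(const uint8_t *buf, size_t avail, const uint8_t **frame)
{
    uint16_t len;

    if (avail < 2) {
        return -1;            /* The 2-byte length header is incomplete. */
    }
    memcpy(&len, buf, 2);     /* Unaligned-safe read of the prefix. */
    len = ntohs(len);
    if (len < 14 || avail < 2u + len) {
        return -1;            /* Runt frame, or body not fully received. */
    }
    *frame = buf + 2;         /* The Ethernet frame follows the header. */
    return len;
}
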
278 | | |
279 | | static int |
280 | | dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s) |
281 | 0 | { |
282 | 0 | int error = 0; |
283 | 0 | size_t n = 0; |
284 | |
285 | 0 | stream_run(s->stream); |
286 | |
287 | 0 | if (!ovs_list_is_empty(&s->txq)) { |
288 | 0 | struct pkt_list_node *txbuf_node; |
289 | 0 | struct dp_packet *txbuf; |
290 | 0 | int retval; |
291 | |
292 | 0 | ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node); |
293 | 0 | txbuf = txbuf_node->pkt; |
294 | 0 | retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf)); |
295 | |
296 | 0 | if (retval > 0) { |
297 | 0 | dp_packet_pull(txbuf, retval); |
298 | 0 | if (!dp_packet_size(txbuf)) { |
299 | 0 | ovs_list_remove(&txbuf_node->list_node); |
300 | 0 | free(txbuf_node); |
301 | 0 | dp_packet_delete(txbuf); |
302 | 0 | } |
303 | 0 | } else if (retval != -EAGAIN) { |
304 | 0 | error = -retval; |
305 | 0 | } |
306 | 0 | } |
307 | |
308 | 0 | if (!error) { |
309 | 0 | if (dp_packet_size(&s->rxbuf) < 2) { |
310 | 0 | n = 2 - dp_packet_size(&s->rxbuf); |
311 | 0 | } else { |
312 | 0 | uint16_t frame_len; |
313 | |
314 | 0 | frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf))); |
315 | 0 | if (frame_len < ETH_HEADER_LEN) { |
316 | 0 | error = EPROTO; |
317 | 0 | n = 0; |
318 | 0 | } else { |
319 | 0 | n = (2 + frame_len) - dp_packet_size(&s->rxbuf); |
320 | 0 | } |
321 | 0 | } |
322 | 0 | } |
323 | 0 | if (!error) { |
324 | 0 | int retval; |
325 | |
326 | 0 | dp_packet_prealloc_tailroom(&s->rxbuf, n); |
327 | 0 | retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n); |
328 | |
329 | 0 | if (retval > 0) { |
330 | 0 | dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval); |
331 | 0 | if (retval == n && dp_packet_size(&s->rxbuf) > 2) { |
332 | 0 | dp_packet_pull(&s->rxbuf, 2); |
333 | 0 | netdev_dummy_queue_packet(dev, |
334 | 0 | dp_packet_clone(&s->rxbuf), NULL, 0); |
335 | 0 | dp_packet_clear(&s->rxbuf); |
336 | 0 | } |
337 | 0 | } else if (retval != -EAGAIN) { |
338 | 0 | error = (retval < 0 ? -retval |
339 | 0 | : dp_packet_size(&s->rxbuf) ? EPROTO |
340 | 0 | : EOF); |
341 | 0 | } |
342 | 0 | } |
343 | |
344 | 0 | return error; |
345 | 0 | } |
346 | | |
347 | | static void |
348 | | dummy_packet_stream_close(struct dummy_packet_stream *s) |
349 | 0 | { |
350 | 0 | stream_close(s->stream); |
351 | 0 | dp_packet_uninit(&s->rxbuf); |
352 | 0 | pkt_list_delete(&s->txq); |
353 | 0 | } |
354 | | |
355 | | static void |
356 | | dummy_packet_conn_init(struct dummy_packet_conn *conn) |
357 | 0 | { |
358 | 0 | memset(conn, 0, sizeof *conn); |
359 | 0 | conn->type = NONE; |
360 | 0 | } |
361 | | |
362 | | static void |
363 | | dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args) |
364 | 0 | { |
365 | |
366 | 0 | switch (conn->type) { |
367 | 0 | case PASSIVE: |
368 | 0 | smap_add(args, "pstream", pstream_get_name(conn->pconn.pstream)); |
369 | 0 | break; |
370 | | |
371 | 0 | case ACTIVE: |
372 | 0 | smap_add(args, "stream", stream_get_name(conn->rconn.rstream->stream)); |
373 | 0 | break; |
374 | | |
375 | 0 | case NONE: |
376 | 0 | default: |
377 | 0 | break; |
378 | 0 | } |
379 | 0 | } |
380 | | |
381 | | static void |
382 | | dummy_packet_conn_close(struct dummy_packet_conn *conn) |
383 | 0 | { |
384 | 0 | int i; |
385 | 0 | struct dummy_packet_pconn *pconn = &conn->pconn; |
386 | 0 | struct dummy_packet_rconn *rconn = &conn->rconn; |
387 | |
388 | 0 | switch (conn->type) { |
389 | 0 | case PASSIVE: |
390 | 0 | pstream_close(pconn->pstream); |
391 | 0 | for (i = 0; i < pconn->n_streams; i++) { |
392 | 0 | dummy_packet_stream_close(pconn->streams[i]); |
393 | 0 | free_cacheline(pconn->streams[i]); |
394 | 0 | } |
395 | 0 | free(pconn->streams); |
396 | 0 | pconn->pstream = NULL; |
397 | 0 | pconn->streams = NULL; |
398 | 0 | break; |
399 | | |
400 | 0 | case ACTIVE: |
401 | 0 | dummy_packet_stream_close(rconn->rstream); |
402 | 0 | free_cacheline(rconn->rstream); |
403 | 0 | rconn->rstream = NULL; |
404 | 0 | reconnect_destroy(rconn->reconnect); |
405 | 0 | rconn->reconnect = NULL; |
406 | 0 | break; |
407 | | |
408 | 0 | case NONE: |
409 | 0 | default: |
410 | 0 | break; |
411 | 0 | } |
412 | | |
413 | 0 | conn->type = NONE; |
414 | 0 | memset(conn, 0, sizeof *conn); |
415 | 0 | } |
416 | | |
417 | | static void |
418 | | dummy_packet_conn_set_config(struct dummy_packet_conn *conn, |
419 | | const struct smap *args) |
420 | 0 | { |
421 | 0 | const char *pstream = smap_get(args, "pstream"); |
422 | 0 | const char *stream = smap_get(args, "stream"); |
423 | |
424 | 0 | if (pstream && stream) { |
425 | 0 | VLOG_WARN("Open failed: both %s and %s are configured", |
426 | 0 | pstream, stream); |
427 | 0 | return; |
428 | 0 | } |
429 | | |
430 | 0 | switch (conn->type) { |
431 | 0 | case PASSIVE: |
432 | 0 | if (pstream && |
433 | 0 | !strcmp(pstream_get_name(conn->pconn.pstream), pstream)) { |
434 | 0 | return; |
435 | 0 | } |
436 | 0 | dummy_packet_conn_close(conn); |
437 | 0 | break; |
438 | 0 | case ACTIVE: |
439 | 0 | if (stream && |
440 | 0 | !strcmp(stream_get_name(conn->rconn.rstream->stream), stream)) { |
441 | 0 | return; |
442 | 0 | } |
443 | 0 | dummy_packet_conn_close(conn); |
444 | 0 | break; |
445 | 0 | case NONE: |
446 | 0 | default: |
447 | 0 | break; |
448 | 0 | } |
449 | | |
450 | 0 | if (pstream) { |
451 | 0 | int error; |
452 | |
453 | 0 | error = pstream_open(pstream, &conn->pconn.pstream, DSCP_DEFAULT); |
454 | 0 | if (error) { |
455 | 0 | VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error)); |
456 | 0 | } else { |
457 | 0 | conn->type = PASSIVE; |
458 | 0 | } |
459 | 0 | } |
460 | |
461 | 0 | if (stream) { |
462 | 0 | int error; |
463 | 0 | struct stream *active_stream; |
464 | 0 | struct reconnect *reconnect; |
465 | |
466 | 0 | reconnect = reconnect_create(time_msec()); |
467 | 0 | reconnect_set_name(reconnect, stream); |
468 | 0 | reconnect_set_passive(reconnect, false, time_msec()); |
469 | 0 | reconnect_enable(reconnect, time_msec()); |
470 | 0 | reconnect_set_backoff(reconnect, 100, INT_MAX); |
471 | 0 | reconnect_set_probe_interval(reconnect, 0); |
472 | 0 | conn->rconn.reconnect = reconnect; |
473 | 0 | conn->type = ACTIVE; |
474 | |
475 | 0 | error = stream_open(stream, &active_stream, DSCP_DEFAULT); |
476 | 0 | conn->rconn.rstream = dummy_packet_stream_create(active_stream); |
477 | |
478 | 0 | switch (error) { |
479 | 0 | case 0: |
480 | 0 | reconnect_connected(reconnect, time_msec()); |
481 | 0 | break; |
482 | | |
483 | 0 | case EAGAIN: |
484 | 0 | reconnect_connecting(reconnect, time_msec()); |
485 | 0 | break; |
486 | | |
487 | 0 | default: |
488 | 0 | reconnect_connect_failed(reconnect, time_msec(), error); |
489 | 0 | stream_close(active_stream); |
490 | 0 | conn->rconn.rstream->stream = NULL; |
491 | 0 | break; |
492 | 0 | } |
493 | 0 | } |
494 | 0 | } |
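
Editor's note: a peer for a device configured with options:stream or options:pstream speaks the same 2-byte length-prefixed framing. A hedged sketch of an active client using the OVS stream API (the target name and function are hypothetical, and partial-send handling is deliberately elided):

static int
sketch_send_one_frame(const char *target, const void *frame, uint16_t len)
{
    struct stream *s;
    ovs_be16 hdr = htons(len);
    int error = stream_open(target, &s, DSCP_DEFAULT);

    if (error && error != EAGAIN) {
        return error;             /* e.g. "unix:/tmp/p0.sock" failed early. */
    }
    while ((error = stream_connect(s)) == EAGAIN) {
        ;                         /* Crude busy-wait; a real caller would use
                                   * stream_run_wait() and the poll loop. */
    }
    if (!error) {
        stream_send(s, &hdr, sizeof hdr);   /* 2-byte length header... */
        stream_send(s, frame, len);         /* ...then the frame itself. */
    }
    stream_close(s);
    return error;
}
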
495 | | |
496 | | static void |
497 | | dummy_pconn_run(struct netdev_dummy *dev) |
498 | | OVS_REQUIRES(dev->mutex) |
499 | 0 | { |
500 | 0 | struct stream *new_stream; |
501 | 0 | struct dummy_packet_pconn *pconn = &dev->conn.pconn; |
502 | 0 | int error; |
503 | 0 | size_t i; |
504 | |
505 | 0 | error = pstream_accept(pconn->pstream, &new_stream); |
506 | 0 | if (!error) { |
507 | 0 | struct dummy_packet_stream *s; |
508 | |
509 | 0 | pconn->streams = xrealloc(pconn->streams, |
510 | 0 | ((pconn->n_streams + 1) |
511 | 0 | * sizeof s)); |
512 | 0 | s = xmalloc_cacheline(sizeof *s); |
513 | 0 | pconn->streams[pconn->n_streams++] = s; |
514 | 0 | dummy_packet_stream_init(s, new_stream); |
515 | 0 | } else if (error != EAGAIN) { |
516 | 0 | VLOG_WARN("%s: accept failed (%s)", |
517 | 0 | pstream_get_name(pconn->pstream), ovs_strerror(error)); |
518 | 0 | pstream_close(pconn->pstream); |
519 | 0 | pconn->pstream = NULL; |
520 | 0 | dev->conn.type = NONE; |
521 | 0 | } |
522 | |
523 | 0 | for (i = 0; i < pconn->n_streams; ) { |
524 | 0 | struct dummy_packet_stream *s = pconn->streams[i]; |
525 | |
526 | 0 | error = dummy_packet_stream_run(dev, s); |
527 | 0 | if (error) { |
528 | 0 | VLOG_DBG("%s: closing connection (%s)", |
529 | 0 | stream_get_name(s->stream), |
530 | 0 | ovs_retval_to_string(error)); |
531 | 0 | dummy_packet_stream_close(s); |
532 | 0 | free_cacheline(s); |
533 | 0 | pconn->streams[i] = pconn->streams[--pconn->n_streams]; |
534 | 0 | } else { |
535 | 0 | i++; |
536 | 0 | } |
537 | 0 | } |
538 | 0 | } |
539 | | |
540 | | static void |
541 | | dummy_rconn_run(struct netdev_dummy *dev) |
542 | | OVS_REQUIRES(dev->mutex) |
543 | 0 | { |
544 | 0 | struct dummy_packet_rconn *rconn = &dev->conn.rconn; |
545 | |
546 | 0 | switch (reconnect_run(rconn->reconnect, time_msec())) { |
547 | 0 | case RECONNECT_CONNECT: |
548 | 0 | { |
549 | 0 | int error; |
550 | |
551 | 0 | if (rconn->rstream->stream) { |
552 | 0 | error = stream_connect(rconn->rstream->stream); |
553 | 0 | } else { |
554 | 0 | error = stream_open(reconnect_get_name(rconn->reconnect), |
555 | 0 | &rconn->rstream->stream, DSCP_DEFAULT); |
556 | 0 | } |
557 | |
558 | 0 | switch (error) { |
559 | 0 | case 0: |
560 | 0 | reconnect_connected(rconn->reconnect, time_msec()); |
561 | 0 | break; |
562 | | |
563 | 0 | case EAGAIN: |
564 | 0 | reconnect_connecting(rconn->reconnect, time_msec()); |
565 | 0 | break; |
566 | | |
567 | 0 | default: |
568 | 0 | reconnect_connect_failed(rconn->reconnect, time_msec(), error); |
569 | 0 | stream_close(rconn->rstream->stream); |
570 | 0 | rconn->rstream->stream = NULL; |
571 | 0 | break; |
572 | 0 | } |
573 | 0 | } |
574 | 0 | break; |
575 | | |
576 | 0 | case RECONNECT_DISCONNECT: |
577 | 0 | case RECONNECT_PROBE: |
578 | 0 | default: |
579 | 0 | break; |
580 | 0 | } |
581 | | |
582 | 0 | if (reconnect_is_connected(rconn->reconnect)) { |
583 | 0 | int err; |
584 | |
585 | 0 | err = dummy_packet_stream_run(dev, rconn->rstream); |
586 | |
587 | 0 | if (err) { |
588 | 0 | reconnect_disconnected(rconn->reconnect, time_msec(), err); |
589 | 0 | stream_close(rconn->rstream->stream); |
590 | 0 | rconn->rstream->stream = NULL; |
591 | 0 | } |
592 | 0 | } |
593 | 0 | } |
594 | | |
595 | | static void |
596 | | dummy_packet_conn_run(struct netdev_dummy *dev) |
597 | | OVS_REQUIRES(dev->mutex) |
598 | 0 | { |
599 | 0 | switch (dev->conn.type) { |
600 | 0 | case PASSIVE: |
601 | 0 | dummy_pconn_run(dev); |
602 | 0 | break; |
603 | | |
604 | 0 | case ACTIVE: |
605 | 0 | dummy_rconn_run(dev); |
606 | 0 | break; |
607 | | |
608 | 0 | case NONE: |
609 | 0 | default: |
610 | 0 | break; |
611 | 0 | } |
612 | 0 | } |
613 | | |
614 | | static void |
615 | | dummy_packet_conn_wait(struct dummy_packet_conn *conn) |
616 | 0 | { |
617 | 0 | int i; |
618 | 0 | switch (conn->type) { |
619 | 0 | case PASSIVE: |
620 | 0 | pstream_wait(conn->pconn.pstream); |
621 | 0 | for (i = 0; i < conn->pconn.n_streams; i++) { |
622 | 0 | struct dummy_packet_stream *s = conn->pconn.streams[i]; |
623 | 0 | dummy_packet_stream_wait(s); |
624 | 0 | } |
625 | 0 | break; |
626 | 0 | case ACTIVE: |
627 | 0 | if (reconnect_is_connected(conn->rconn.reconnect)) { |
628 | 0 | dummy_packet_stream_wait(conn->rconn.rstream); |
629 | 0 | } |
630 | 0 | break; |
631 | | |
632 | 0 | case NONE: |
633 | 0 | default: |
634 | 0 | break; |
635 | 0 | } |
636 | 0 | } |
637 | | |
638 | | static void |
639 | | dummy_packet_conn_send(struct dummy_packet_conn *conn, |
640 | | const void *buffer, size_t size) |
641 | 0 | { |
642 | 0 | int i; |
643 | |
644 | 0 | switch (conn->type) { |
645 | 0 | case PASSIVE: |
646 | 0 | for (i = 0; i < conn->pconn.n_streams; i++) { |
647 | 0 | struct dummy_packet_stream *s = conn->pconn.streams[i]; |
648 | |
649 | 0 | dummy_packet_stream_send(s, buffer, size); |
650 | 0 | pstream_wait(conn->pconn.pstream); |
651 | 0 | } |
652 | 0 | break; |
653 | | |
654 | 0 | case ACTIVE: |
655 | 0 | if (reconnect_is_connected(conn->rconn.reconnect)) { |
656 | 0 | dummy_packet_stream_send(conn->rconn.rstream, buffer, size); |
657 | 0 | dummy_packet_stream_wait(conn->rconn.rstream); |
658 | 0 | } |
659 | 0 | break; |
660 | | |
661 | 0 | case NONE: |
662 | 0 | default: |
663 | 0 | break; |
664 | 0 | } |
665 | 0 | } |
666 | | |
667 | | static enum dummy_netdev_conn_state |
668 | | dummy_netdev_get_conn_state(struct dummy_packet_conn *conn) |
669 | 0 | { |
670 | 0 | enum dummy_netdev_conn_state state; |
671 | |
672 | 0 | if (conn->type == ACTIVE) { |
673 | 0 | if (reconnect_is_connected(conn->rconn.reconnect)) { |
674 | 0 | state = CONN_STATE_CONNECTED; |
675 | 0 | } else { |
676 | 0 | state = CONN_STATE_NOT_CONNECTED; |
677 | 0 | } |
678 | 0 | } else { |
679 | 0 | state = CONN_STATE_UNKNOWN; |
680 | 0 | } |
681 | |
682 | 0 | return state; |
683 | 0 | } |
684 | | |
685 | | static void |
686 | | netdev_dummy_run(const struct netdev_class *netdev_class) |
687 | 0 | { |
688 | 0 | struct netdev_dummy *dev; |
689 | |
690 | 0 | ovs_mutex_lock(&dummy_list_mutex); |
691 | 0 | LIST_FOR_EACH (dev, list_node, &dummy_list) { |
692 | 0 | if (netdev_get_class(&dev->up) != netdev_class) { |
693 | 0 | continue; |
694 | 0 | } |
695 | 0 | ovs_mutex_lock(&dev->mutex); |
696 | 0 | dummy_packet_conn_run(dev); |
697 | 0 | ovs_mutex_unlock(&dev->mutex); |
698 | 0 | } |
699 | 0 | ovs_mutex_unlock(&dummy_list_mutex); |
700 | 0 | } |
701 | | |
702 | | static void |
703 | | netdev_dummy_wait(const struct netdev_class *netdev_class) |
704 | 0 | { |
705 | 0 | struct netdev_dummy *dev; |
706 | |
707 | 0 | ovs_mutex_lock(&dummy_list_mutex); |
708 | 0 | LIST_FOR_EACH (dev, list_node, &dummy_list) { |
709 | 0 | if (netdev_get_class(&dev->up) != netdev_class) { |
710 | 0 | continue; |
711 | 0 | } |
712 | 0 | ovs_mutex_lock(&dev->mutex); |
713 | 0 | dummy_packet_conn_wait(&dev->conn); |
714 | 0 | ovs_mutex_unlock(&dev->mutex); |
715 | 0 | } |
716 | 0 | ovs_mutex_unlock(&dummy_list_mutex); |
717 | 0 | } |
718 | | |
719 | | static struct netdev * |
720 | | netdev_dummy_alloc(void) |
721 | 0 | { |
722 | 0 | struct netdev_dummy *netdev = xzalloc(sizeof *netdev); |
723 | 0 | return &netdev->up; |
724 | 0 | } |
725 | | |
726 | | static int |
727 | | netdev_dummy_construct(struct netdev *netdev_) |
728 | 0 | { |
729 | 0 | static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000); |
730 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
731 | 0 | unsigned int n; |
732 | |
733 | 0 | n = atomic_count_inc(&next_n); |
734 | |
735 | 0 | ovs_mutex_init(&netdev->mutex); |
736 | 0 | ovs_mutex_lock(&netdev->mutex); |
737 | 0 | netdev->hwaddr.ea[0] = 0xaa; |
738 | 0 | netdev->hwaddr.ea[1] = 0x55; |
739 | 0 | netdev->hwaddr.ea[2] = n >> 24; |
740 | 0 | netdev->hwaddr.ea[3] = n >> 16; |
741 | 0 | netdev->hwaddr.ea[4] = n >> 8; |
742 | 0 | netdev->hwaddr.ea[5] = n; |
743 | 0 | netdev->mtu = 1500; |
744 | 0 | netdev->flags = NETDEV_UP; |
745 | 0 | netdev->ifindex = -EOPNOTSUPP; |
746 | 0 | netdev->requested_n_rxq = netdev_->n_rxq; |
747 | 0 | netdev->requested_n_txq = netdev_->n_txq; |
748 | 0 | netdev->numa_id = 0; |
749 | |
750 | 0 | memset(&netdev->custom_stats, 0, sizeof(netdev->custom_stats)); |
751 | |
752 | 0 | ovs_strlcpy(netdev->custom_stats[0].name, |
753 | 0 | "rx_custom_packets_1", NETDEV_CUSTOM_STATS_NAME_SIZE); |
754 | 0 | ovs_strlcpy(netdev->custom_stats[1].name, |
755 | 0 | "rx_custom_packets_2", NETDEV_CUSTOM_STATS_NAME_SIZE); |
756 | |
757 | 0 | netdev->rxq_stats = xcalloc(netdev->up.n_rxq, sizeof *netdev->rxq_stats); |
758 | 0 | netdev->txq_stats = xcalloc(netdev->up.n_txq, sizeof *netdev->txq_stats); |
759 | |
760 | 0 | dummy_packet_conn_init(&netdev->conn); |
761 | |
762 | 0 | ovs_list_init(&netdev->rxes); |
763 | 0 | ovs_list_init(&netdev->addrs); |
764 | 0 | ovs_mutex_unlock(&netdev->mutex); |
765 | |
766 | 0 | ovs_mutex_lock(&dummy_list_mutex); |
767 | 0 | ovs_list_push_back(&dummy_list, &netdev->list_node); |
768 | 0 | ovs_mutex_unlock(&dummy_list_mutex); |
769 | |
770 | 0 | return 0; |
771 | 0 | } |
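
Editor's note: the constructor above derives each device's MAC from a shared atomic counter seeded with 0xaa550000, so the first device gets aa:55:aa:55:00:00, the second aa:55:aa:55:00:01, and so on. A worked example of the same byte extraction (the sketch_* function is illustrative only):

#include <stdint.h>
#include <stdio.h>

static void
sketch_print_dummy_mac(uint32_t n)
{
    /* The fixed aa:55 prefix is followed by the four bytes of 'n',
     * most-significant first, exactly as in netdev_dummy_construct(). */
    printf("aa:55:%02x:%02x:%02x:%02x\n",
           n >> 24, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff);
}
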
772 | | |
773 | | static void |
774 | | netdev_dummy_destruct(struct netdev *netdev_) |
775 | 0 | { |
776 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
777 | |
778 | 0 | ovs_mutex_lock(&dummy_list_mutex); |
779 | 0 | ovs_list_remove(&netdev->list_node); |
780 | 0 | ovs_mutex_unlock(&dummy_list_mutex); |
781 | |
782 | 0 | ovs_mutex_lock(&netdev->mutex); |
783 | 0 | free(netdev->rxq_stats); |
784 | 0 | free(netdev->txq_stats); |
785 | 0 | if (netdev->rxq_pcap) { |
786 | 0 | ovs_pcap_close(netdev->rxq_pcap); |
787 | 0 | } |
788 | 0 | if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) { |
789 | 0 | ovs_pcap_close(netdev->tx_pcap); |
790 | 0 | } |
791 | 0 | dummy_packet_conn_close(&netdev->conn); |
792 | 0 | netdev->conn.type = NONE; |
793 | |
794 | 0 | addr_list_delete(&netdev->addrs); |
795 | |
796 | 0 | ovs_mutex_unlock(&netdev->mutex); |
797 | 0 | ovs_mutex_destroy(&netdev->mutex); |
798 | 0 | } |
799 | | |
800 | | static void |
801 | | netdev_dummy_dealloc(struct netdev *netdev_) |
802 | 0 | { |
803 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
804 | |
805 | 0 | free(netdev); |
806 | 0 | } |
807 | | |
808 | | static int |
809 | | netdev_dummy_get_config(const struct netdev *dev, struct smap *args) |
810 | 0 | { |
811 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(dev); |
812 | |
813 | 0 | ovs_mutex_lock(&netdev->mutex); |
814 | |
815 | 0 | if (netdev->ifindex >= 0) { |
816 | 0 | smap_add_format(args, "ifindex", "%d", netdev->ifindex); |
817 | 0 | } |
818 | |
819 | 0 | dummy_packet_conn_get_config(&netdev->conn, args); |
820 | | |
821 | | /* The pcap, rxq_pcap and tx_pcap settings cannot be recovered because |
822 | | * the filenames are discarded once the files have been opened. */ |
823 | |
824 | 0 | if (netdev->ol_ip_rx_csum_set_good) { |
825 | 0 | smap_add_format(args, "ol_ip_rx_csum_set_good", "%s", "true"); |
826 | 0 | } |
827 | 0 | if (netdev->ol_ip_rx_csum_set_bad) { |
828 | 0 | smap_add_format(args, "ol_ip_rx_csum_set_bad", "%s", "true"); |
829 | 0 | } |
830 | 0 | if (netdev->ol_ip_rx_csum_set_partial) { |
831 | 0 | smap_add_format(args, "ol_ip_rx_csum_set_partial", "%s", "true"); |
832 | 0 | } |
833 | 0 | if (netdev->ol_ip_tx_csum) { |
834 | 0 | smap_add_format(args, "ol_ip_tx_csum", "%s", "true"); |
835 | 0 | if (netdev->ol_ip_tx_csum_disabled) { |
836 | 0 | smap_add_format(args, "ol_ip_tx_csum_disabled", "%s", "true"); |
837 | 0 | } |
838 | 0 | } |
839 | |
840 | 0 | if (netdev->ol_l4_rx_csum_set_good) { |
841 | 0 | smap_add_format(args, "ol_l4_rx_csum_set_good", "%s", "true"); |
842 | 0 | } |
843 | 0 | if (netdev->ol_l4_rx_csum_set_bad) { |
844 | 0 | smap_add_format(args, "ol_l4_rx_csum_set_bad", "%s", "true"); |
845 | 0 | } |
846 | 0 | if (netdev->ol_l4_rx_csum_set_partial) { |
847 | 0 | smap_add_format(args, "ol_l4_rx_csum_set_partial", "%s", "true"); |
848 | 0 | } |
849 | 0 | if (netdev->ol_l4_tx_csum) { |
850 | 0 | smap_add_format(args, "ol_l4_tx_csum", "%s", "true"); |
851 | 0 | if (netdev->ol_l4_tx_csum_disabled) { |
852 | 0 | smap_add_format(args, "ol_l4_tx_csum_disabled", "%s", "true"); |
853 | 0 | } |
854 | 0 | } |
855 | |
856 | 0 | if (netdev->ol_out_ip_tx_csum) { |
857 | 0 | smap_add_format(args, "ol_out_ip_tx_csum", "%s", "true"); |
858 | 0 | if (netdev->ol_out_ip_tx_csum_disabled) { |
859 | 0 | smap_add_format(args, "ol_out_ip_tx_csum_disabled", "%s", "true"); |
860 | 0 | } |
861 | 0 | } |
862 | |
863 | 0 | if (netdev->ol_out_udp_tx_csum) { |
864 | 0 | smap_add_format(args, "ol_out_udp_tx_csum", "%s", "true"); |
865 | 0 | if (netdev->ol_out_udp_tx_csum_disabled) { |
866 | 0 | smap_add_format(args, "ol_out_udp_tx_csum_disabled", "%s", "true"); |
867 | 0 | } |
868 | 0 | } |
869 | |
870 | 0 | if (netdev->ol_tso_segsz && userspace_tso_enabled()) { |
871 | 0 | smap_add_format(args, "ol_tso_segsz", "%d", netdev->ol_tso_segsz); |
872 | 0 | } |
873 | | |
874 | | /* 'dummy-pmd' specific config. */ |
875 | 0 | if (!netdev_is_pmd(dev)) { |
876 | 0 | goto exit; |
877 | 0 | } |
878 | | |
879 | 0 | smap_add_format(args, "n_rxq", "%d", netdev->requested_n_rxq); |
880 | 0 | smap_add_format(args, "n_txq", "%d", netdev->requested_n_txq); |
881 | 0 | smap_add_format(args, "numa_id", "%d", netdev->requested_numa_id); |
882 | |
883 | 0 | exit: |
884 | 0 | ovs_mutex_unlock(&netdev->mutex); |
885 | 0 | return 0; |
886 | 0 | } |
887 | | |
888 | | static int |
889 | | netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr, |
890 | | struct in6_addr **pmask, int *n_addr) |
891 | 0 | { |
892 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
893 | 0 | int cnt = 0, i = 0, err = 0; |
894 | 0 | struct in6_addr *addr, *mask; |
895 | 0 | struct netdev_addr_dummy *addr_dummy; |
896 | |
897 | 0 | ovs_mutex_lock(&netdev->mutex); |
898 | |
899 | 0 | cnt = ovs_list_size(&netdev->addrs); |
900 | 0 | if (!cnt) { |
901 | 0 | err = EADDRNOTAVAIL; |
902 | 0 | goto out; |
903 | 0 | } |
904 | 0 | addr = xmalloc(sizeof *addr * cnt); |
905 | 0 | mask = xmalloc(sizeof *mask * cnt); |
906 | |
907 | 0 | LIST_FOR_EACH (addr_dummy, node, &netdev->addrs) { |
908 | 0 | memcpy(&addr[i], &addr_dummy->address, sizeof *addr); |
909 | 0 | memcpy(&mask[i], &addr_dummy->netmask, sizeof *mask); |
910 | 0 | i++; |
911 | 0 | } |
912 | |
913 | 0 | if (paddr) { |
914 | 0 | *paddr = addr; |
915 | 0 | *pmask = mask; |
916 | 0 | *n_addr = cnt; |
917 | 0 | } else { |
918 | 0 | free(addr); |
919 | 0 | free(mask); |
920 | 0 | } |
921 | 0 | out: |
922 | 0 | ovs_mutex_unlock(&netdev->mutex); |
923 | |
924 | 0 | return err; |
925 | 0 | } |
926 | | |
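
Editor's note: netdev_dummy_get_addr_list() transfers ownership of both arrays to the caller. A hedged sketch of a consumer, assuming the public netdev_get_addr_list() wrapper and the ipv6_string_mapped() helper from lib/packets.h (the sketch_* function is hypothetical):

static void
sketch_dump_addresses(const struct netdev *netdev)
{
    struct in6_addr *addrs, *masks;
    int n, i;

    if (!netdev_get_addr_list(netdev, &addrs, &masks, &n)) {
        for (i = 0; i < n; i++) {
            char buf[INET6_ADDRSTRLEN];

            ipv6_string_mapped(buf, &addrs[i]);   /* Handles the v4-mapped
                                                   * form add_in4() stores. */
            printf("addr %d: %s\n", i, buf);
        }
        free(addrs);          /* Both arrays are allocated for the caller. */
        free(masks);
    }
}
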
927 | | static int |
928 | | netdev_dummy_add_in4(struct netdev *netdev_, struct in_addr address, |
929 | | struct in_addr netmask) |
930 | 0 | { |
931 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
932 | 0 | struct netdev_addr_dummy *addr_dummy = xmalloc(sizeof *addr_dummy); |
933 | |
934 | 0 | ovs_mutex_lock(&netdev->mutex); |
935 | 0 | in6_addr_set_mapped_ipv4(&addr_dummy->address, address.s_addr); |
936 | 0 | in6_addr_set_mapped_ipv4(&addr_dummy->netmask, netmask.s_addr); |
937 | 0 | ovs_list_push_back(&netdev->addrs, &addr_dummy->node); |
938 | 0 | netdev_change_seq_changed(netdev_); |
939 | 0 | ovs_mutex_unlock(&netdev->mutex); |
940 | |
941 | 0 | return 0; |
942 | 0 | } |
943 | | |
944 | | static int |
945 | | netdev_dummy_add_in6(struct netdev *netdev_, struct in6_addr *in6, |
946 | | struct in6_addr *mask) |
947 | 0 | { |
948 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
949 | 0 | struct netdev_addr_dummy *addr_dummy = xmalloc(sizeof *addr_dummy); |
950 | |
951 | 0 | ovs_mutex_lock(&netdev->mutex); |
952 | 0 | addr_dummy->address = *in6; |
953 | 0 | addr_dummy->netmask = *mask; |
954 | 0 | ovs_list_push_back(&netdev->addrs, &addr_dummy->node); |
955 | 0 | netdev_change_seq_changed(netdev_); |
956 | 0 | ovs_mutex_unlock(&netdev->mutex); |
957 | |
958 | 0 | return 0; |
959 | 0 | } |
960 | | |
961 | 0 | #define DUMMY_MAX_QUEUES_PER_PORT 1024 |
962 | | |
963 | | static int |
964 | | netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args, |
965 | | char **errp OVS_UNUSED) |
966 | 0 | { |
967 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
968 | 0 | const char *pcap; |
969 | 0 | int new_n_rxq, new_n_txq, new_numa_id; |
970 | |
971 | 0 | ovs_mutex_lock(&netdev->mutex); |
972 | 0 | netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP); |
973 | |
974 | 0 | dummy_packet_conn_set_config(&netdev->conn, args); |
975 | |
976 | 0 | if (netdev->rxq_pcap) { |
977 | 0 | ovs_pcap_close(netdev->rxq_pcap); |
978 | 0 | } |
979 | 0 | if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) { |
980 | 0 | ovs_pcap_close(netdev->tx_pcap); |
981 | 0 | } |
982 | 0 | netdev->rxq_pcap = netdev->tx_pcap = NULL; |
983 | 0 | pcap = smap_get(args, "pcap"); |
984 | 0 | if (pcap) { |
985 | 0 | netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab"); |
986 | 0 | } else { |
987 | 0 | const char *rxq_pcap = smap_get(args, "rxq_pcap"); |
988 | 0 | const char *tx_pcap = smap_get(args, "tx_pcap"); |
989 | |
990 | 0 | if (rxq_pcap) { |
991 | 0 | netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab"); |
992 | 0 | } |
993 | 0 | if (tx_pcap) { |
994 | 0 | netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab"); |
995 | 0 | } |
996 | 0 | } |
997 | |
998 | 0 | netdev->ol_ip_rx_csum_set_good = |
999 | 0 | smap_get_bool(args, "ol_ip_rx_csum_set_good", false); |
1000 | 0 | netdev->ol_ip_rx_csum_set_bad = |
1001 | 0 | smap_get_bool(args, "ol_ip_rx_csum_set_bad", false); |
1002 | 0 | netdev->ol_ip_rx_csum_set_partial = |
1003 | 0 | smap_get_bool(args, "ol_ip_rx_csum_set_partial", false); |
1004 | 0 | netdev->ol_ip_tx_csum = smap_get_bool(args, "ol_ip_tx_csum", false); |
1005 | 0 | if (netdev->ol_ip_tx_csum) { |
1006 | 0 | netdev_->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM; |
1007 | 0 | netdev->ol_ip_tx_csum_disabled = |
1008 | 0 | smap_get_bool(args, "ol_ip_tx_csum_disabled", false); |
1009 | 0 | } else { |
1010 | 0 | netdev_->ol_flags &= ~NETDEV_TX_OFFLOAD_IPV4_CKSUM; |
1011 | 0 | netdev->ol_ip_tx_csum_disabled = true; |
1012 | 0 | } |
1013 | |
1014 | 0 | netdev->ol_l4_rx_csum_set_good = |
1015 | 0 | smap_get_bool(args, "ol_l4_rx_csum_set_good", false); |
1016 | 0 | netdev->ol_l4_rx_csum_set_bad = |
1017 | 0 | smap_get_bool(args, "ol_l4_rx_csum_set_bad", false); |
1018 | 0 | netdev->ol_l4_rx_csum_set_partial = |
1019 | 0 | smap_get_bool(args, "ol_l4_rx_csum_set_partial", false); |
1020 | 0 | netdev->ol_l4_tx_csum = smap_get_bool(args, "ol_l4_tx_csum", false); |
1021 | 0 | if (netdev->ol_l4_tx_csum) { |
1022 | 0 | netdev_->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM; |
1023 | 0 | netdev_->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM; |
1024 | 0 | netdev->ol_l4_tx_csum_disabled = |
1025 | 0 | smap_get_bool(args, "ol_l4_tx_csum_disabled", false); |
1026 | 0 | } else { |
1027 | 0 | netdev_->ol_flags &= ~NETDEV_TX_OFFLOAD_TCP_CKSUM; |
1028 | 0 | netdev_->ol_flags &= ~NETDEV_TX_OFFLOAD_UDP_CKSUM; |
1029 | 0 | netdev->ol_l4_tx_csum_disabled = true; |
1030 | 0 | } |
1031 | |
1032 | 0 | netdev->ol_out_ip_tx_csum = smap_get_bool(args, "ol_out_ip_tx_csum", |
1033 | 0 | false); |
1034 | 0 | if (netdev->ol_out_ip_tx_csum) { |
1035 | 0 | netdev_->ol_flags |= NETDEV_TX_OFFLOAD_OUTER_IP_CKSUM; |
1036 | 0 | netdev->ol_out_ip_tx_csum_disabled = |
1037 | 0 | smap_get_bool(args, "ol_out_ip_tx_csum_disabled", false); |
1038 | 0 | } else { |
1039 | 0 | netdev_->ol_flags &= ~NETDEV_TX_OFFLOAD_OUTER_IP_CKSUM; |
1040 | 0 | netdev->ol_out_ip_tx_csum_disabled = true; |
1041 | 0 | } |
1042 | |
1043 | 0 | netdev->ol_out_udp_tx_csum = smap_get_bool(args, "ol_out_udp_tx_csum", |
1044 | 0 | false); |
1045 | 0 | if (netdev->ol_out_udp_tx_csum) { |
1046 | 0 | netdev_->ol_flags |= NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM; |
1047 | 0 | netdev->ol_out_udp_tx_csum_disabled = |
1048 | 0 | smap_get_bool(args, "ol_out_udp_tx_csum_disabled", false); |
1049 | 0 | } else { |
1050 | 0 | netdev_->ol_flags &= ~NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM; |
1051 | 0 | netdev->ol_out_udp_tx_csum_disabled = true; |
1052 | 0 | } |
1053 | |
1054 | 0 | if (userspace_tso_enabled()) { |
1055 | 0 | netdev->ol_tso_segsz = smap_get_int(args, "ol_tso_segsz", 0); |
1056 | 0 | if (netdev->ol_tso_segsz) { |
1057 | 0 | netdev_->ol_flags |= (NETDEV_TX_OFFLOAD_TCP_TSO |
1058 | 0 | | NETDEV_TX_OFFLOAD_TCP_CKSUM); |
1059 | 0 | } |
1060 | 0 | } |
1061 | |
1062 | 0 | netdev_change_seq_changed(netdev_); |
1063 | | |
1064 | | /* 'dummy-pmd' specific config. */ |
1065 | 0 | if (!netdev_->netdev_class->is_pmd) { |
1066 | 0 | goto exit; |
1067 | 0 | } |
1068 | | |
1069 | 0 | new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1); |
1070 | 0 | new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1); |
1071 | |
1072 | 0 | if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT || |
1073 | 0 | new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) { |
1074 | 0 | VLOG_WARN("One or both of interface %s queue counts " |
1075 | 0 | "(rxq: %d, txq: %d) exceed %d. Clamping to %d.", |
1076 | 0 | netdev_get_name(netdev_), |
1077 | 0 | new_n_rxq, |
1078 | 0 | new_n_txq, |
1079 | 0 | DUMMY_MAX_QUEUES_PER_PORT, |
1080 | 0 | DUMMY_MAX_QUEUES_PER_PORT); |
1081 | |
1082 | 0 | new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq); |
1083 | 0 | new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq); |
1084 | 0 | } |
1085 | |
1086 | 0 | new_numa_id = smap_get_int(args, "numa_id", 0); |
1087 | 0 | if (new_n_rxq != netdev->requested_n_rxq |
1088 | 0 | || new_n_txq != netdev->requested_n_txq |
1089 | 0 | || new_numa_id != netdev->requested_numa_id) { |
1090 | 0 | netdev->requested_n_rxq = new_n_rxq; |
1091 | 0 | netdev->requested_n_txq = new_n_txq; |
1092 | 0 | netdev->requested_numa_id = new_numa_id; |
1093 | 0 | netdev_request_reconfigure(netdev_); |
1094 | 0 | } |
1095 | |
1096 | 0 | exit: |
1097 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1098 | 0 | return 0; |
1099 | 0 | } |
1100 | | |
1101 | | static int |
1102 | | netdev_dummy_get_numa_id(const struct netdev *netdev_) |
1103 | 0 | { |
1104 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
1105 | |
1106 | 0 | ovs_mutex_lock(&netdev->mutex); |
1107 | 0 | int numa_id = netdev->numa_id; |
1108 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1109 | |
1110 | 0 | return numa_id; |
1111 | 0 | } |
1112 | | |
1113 | | /* Sets the number of tx queues and rx queues for the dummy PMD interface. */ |
1114 | | static int |
1115 | | netdev_dummy_reconfigure(struct netdev *netdev_) |
1116 | 0 | { |
1117 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
1118 | 0 | int old_n_txq = netdev_->n_txq; |
1119 | 0 | int old_n_rxq = netdev_->n_rxq; |
1120 | |
1121 | 0 | ovs_mutex_lock(&netdev->mutex); |
1122 | |
1123 | 0 | netdev_->n_txq = netdev->requested_n_txq; |
1124 | 0 | netdev_->n_rxq = netdev->requested_n_rxq; |
1125 | 0 | netdev->numa_id = netdev->requested_numa_id; |
1126 | |
1127 | 0 | if (netdev_->n_txq != old_n_txq || netdev_->n_rxq != old_n_rxq) { |
1128 | | /* Resize the per queue stats arrays. */ |
1129 | 0 | netdev->txq_stats = xrealloc(netdev->txq_stats, |
1130 | 0 | netdev_->n_txq * |
1131 | 0 | sizeof *netdev->txq_stats); |
1132 | 0 | netdev->rxq_stats = xrealloc(netdev->rxq_stats, |
1133 | 0 | netdev_->n_rxq * |
1134 | 0 | sizeof *netdev->rxq_stats); |
1135 | | |
1136 | | /* Reset all stats for consistency between per-queue and global |
1137 | | * counters. */ |
1138 | 0 | memset(&netdev->stats, 0, sizeof netdev->stats); |
1139 | 0 | netdev->custom_stats[0].value = 0; |
1140 | 0 | netdev->custom_stats[1].value = 0; |
1141 | 0 | memset(netdev->txq_stats, 0, |
1142 | 0 | netdev_->n_txq * sizeof *netdev->txq_stats); |
1143 | 0 | memset(netdev->rxq_stats, 0, |
1144 | 0 | netdev_->n_rxq * sizeof *netdev->rxq_stats); |
1145 | 0 | } |
1146 | |
1147 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1148 | 0 | return 0; |
1149 | 0 | } |
1150 | | |
1151 | | static struct netdev_rxq * |
1152 | | netdev_dummy_rxq_alloc(void) |
1153 | 0 | { |
1154 | 0 | struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx); |
1155 | 0 | return &rx->up; |
1156 | 0 | } |
1157 | | |
1158 | | static int |
1159 | | netdev_dummy_rxq_construct(struct netdev_rxq *rxq_) |
1160 | 0 | { |
1161 | 0 | struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_); |
1162 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev); |
1163 | |
1164 | 0 | ovs_mutex_lock(&netdev->mutex); |
1165 | 0 | ovs_list_push_back(&netdev->rxes, &rx->node); |
1166 | 0 | ovs_list_init(&rx->recv_queue); |
1167 | 0 | rx->recv_queue_len = 0; |
1168 | 0 | rx->seq = seq_create(); |
1169 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1170 | |
1171 | 0 | return 0; |
1172 | 0 | } |
1173 | | |
1174 | | static void |
1175 | | netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_) |
1176 | 0 | { |
1177 | 0 | struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_); |
1178 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev); |
1179 | |
1180 | 0 | ovs_mutex_lock(&netdev->mutex); |
1181 | 0 | ovs_list_remove(&rx->node); |
1182 | 0 | pkt_list_delete(&rx->recv_queue); |
1183 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1184 | 0 | seq_destroy(rx->seq); |
1185 | 0 | } |
1186 | | |
1187 | | static void |
1188 | | netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_) |
1189 | 0 | { |
1190 | 0 | struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_); |
1191 | |
1192 | 0 | free(rx); |
1193 | 0 | } |
1194 | | |
1195 | | static int |
1196 | | netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch, |
1197 | | int *qfill) |
1198 | 0 | { |
1199 | 0 | struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_); |
1200 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev); |
1201 | 0 | struct dp_packet *packet; |
1202 | |
1203 | 0 | ovs_mutex_lock(&netdev->mutex); |
1204 | 0 | if (!ovs_list_is_empty(&rx->recv_queue)) { |
1205 | 0 | struct pkt_list_node *pkt_node; |
1206 | |
1207 | 0 | ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node); |
1208 | 0 | packet = pkt_node->pkt; |
1209 | 0 | free(pkt_node); |
1210 | 0 | rx->recv_queue_len--; |
1211 | 0 | } else { |
1212 | 0 | packet = NULL; |
1213 | 0 | } |
1214 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1215 | |
1216 | 0 | if (!packet) { |
1217 | 0 | if (netdev_is_pmd(&netdev->up)) { |
1218 | | /* If 'netdev' is a PMD device, this is called as part of the PMD |
1219 | | * thread busy loop. We yield here (without quiescing) for two |
1220 | | * reasons: |
1221 | | * |
1222 | | * - To reduce the CPU utilization during the testsuite |
1223 | | * - To give valgrind a chance to switch threads. According |
1224 | | * to the valgrind documentation, there's a big lock that |
1225 | | * prevents multiple threads from executing at the same |
1226 | | * time. On my system, without this sleep, the pmd thread |
1227 | | * test cases fail under valgrind, because ovs-vswitchd becomes |
1228 | | * unresponsive. */ |
1229 | 0 | sched_yield(); |
1230 | 0 | } |
1231 | 0 | return EAGAIN; |
1232 | 0 | } |
1233 | 0 | ovs_mutex_lock(&netdev->mutex); |
1234 | 0 | netdev->stats.rx_packets++; |
1235 | 0 | netdev->rxq_stats[rxq_->queue_id].packets++; |
1236 | 0 | netdev->stats.rx_bytes += dp_packet_size(packet); |
1237 | 0 | netdev->rxq_stats[rxq_->queue_id].bytes += dp_packet_size(packet); |
1238 | 0 | netdev->custom_stats[0].value++; |
1239 | 0 | netdev->custom_stats[1].value++; |
1240 | |
1241 | 0 | if (netdev->ol_ip_rx_csum_set_good) { |
1242 | 0 | dp_packet_ip_checksum_set_good(packet); |
1243 | 0 | } else if (netdev->ol_ip_rx_csum_set_bad) { |
1244 | 0 | dp_packet_ip_checksum_set_bad(packet); |
1245 | 0 | } else if (netdev->ol_ip_rx_csum_set_partial) { |
1246 | 0 | dp_packet_ip_checksum_set_partial(packet); |
1247 | 0 | } else { |
1248 | 0 | dp_packet_ip_checksum_set_unknown(packet); |
1249 | 0 | } |
1250 | |
1251 | 0 | if (netdev->ol_l4_rx_csum_set_good) { |
1252 | 0 | dp_packet_l4_checksum_set_good(packet); |
1253 | 0 | } else if (netdev->ol_l4_rx_csum_set_bad) { |
1254 | 0 | dp_packet_l4_checksum_set_bad(packet); |
1255 | 0 | } else if (netdev->ol_l4_rx_csum_set_partial) { |
1256 | 0 | dp_packet_l4_checksum_set_partial(packet); |
1257 | 0 | } else { |
1258 | 0 | dp_packet_l4_checksum_set_unknown(packet); |
1259 | 0 | } |
1260 | |
1261 | 0 | if (userspace_tso_enabled() && netdev->ol_tso_segsz) { |
1262 | 0 | dp_packet_set_tso_segsz(packet, netdev->ol_tso_segsz); |
1263 | 0 | } |
1264 | |
1265 | 0 | if (VLOG_IS_DBG_ENABLED()) { |
1266 | 0 | bool ip_csum_good; |
1267 | 0 | bool l4_csum_good; |
1268 | 0 | bool ip_csum_bad; |
1269 | 0 | bool l4_csum_bad; |
1270 | |
1271 | 0 | ip_csum_good = !!(packet->offloads & DP_PACKET_OL_IP_CKSUM_GOOD); |
1272 | 0 | ip_csum_bad = !!(packet->offloads & DP_PACKET_OL_IP_CKSUM_BAD); |
1273 | 0 | l4_csum_good = !!(packet->offloads & DP_PACKET_OL_L4_CKSUM_GOOD); |
1274 | 0 | l4_csum_bad = !!(packet->offloads & DP_PACKET_OL_L4_CKSUM_BAD); |
1275 | 0 | VLOG_DBG("Rx: packet with csum IP %s, L4 %s, segsz %"PRIu16, |
1276 | 0 | ip_csum_good ? (ip_csum_bad ? "partial" : "good") |
1277 | 0 | : (ip_csum_bad ? "bad" : "unknown"), |
1278 | 0 | l4_csum_good ? (l4_csum_bad ? "partial" : "good") |
1279 | 0 | : (l4_csum_bad ? "bad" : "unknown"), |
1280 | 0 | dp_packet_get_tso_segsz(packet)); |
1281 | 0 | } |
1282 | |
1283 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1284 | |
1285 | 0 | dp_packet_batch_init_packet(batch, packet); |
1286 | |
1287 | 0 | if (qfill) { |
1288 | 0 | *qfill = -ENOTSUP; |
1289 | 0 | } |
1290 | |
1291 | 0 | return 0; |
1292 | 0 | } |
1293 | | |
1294 | | static void |
1295 | | netdev_dummy_rxq_wait(struct netdev_rxq *rxq_) |
1296 | 0 | { |
1297 | 0 | struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_); |
1298 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev); |
1299 | 0 | uint64_t seq = seq_read(rx->seq); |
1300 | |
1301 | 0 | ovs_mutex_lock(&netdev->mutex); |
1302 | 0 | if (!ovs_list_is_empty(&rx->recv_queue)) { |
1303 | 0 | poll_immediate_wake(); |
1304 | 0 | } else { |
1305 | 0 | seq_wait(rx->seq, seq); |
1306 | 0 | } |
1307 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1308 | 0 | } |
1309 | | |
1310 | | static int |
1311 | | netdev_dummy_rxq_drain(struct netdev_rxq *rxq_) |
1312 | 0 | { |
1313 | 0 | struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_); |
1314 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev); |
1315 | |
1316 | 0 | ovs_mutex_lock(&netdev->mutex); |
1317 | 0 | pkt_list_delete(&rx->recv_queue); |
1318 | 0 | rx->recv_queue_len = 0; |
1319 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1320 | |
1321 | 0 | seq_change(rx->seq); |
1322 | |
1323 | 0 | return 0; |
1324 | 0 | } |
1325 | | |
1326 | | static int |
1327 | | netdev_dummy_send(struct netdev *netdev, int qid, |
1328 | | struct dp_packet_batch *batch, |
1329 | | bool concurrent_txq OVS_UNUSED) |
1330 | 0 | { |
1331 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1332 | 0 | int error = 0; |
1333 | |
1334 | 0 | struct dp_packet *packet; |
1335 | 0 | DP_PACKET_BATCH_FOR_EACH(i, packet, batch) { |
1336 | 0 | const void *buffer = dp_packet_data(packet); |
1337 | 0 | size_t size = dp_packet_size(packet); |
1338 | 0 | uint64_t flags; |
1339 | 0 | bool is_tso; |
1340 | |
1341 | 0 | ovs_mutex_lock(&dev->mutex); |
1342 | 0 | flags = netdev->ol_flags; |
1343 | 0 | if (!dev->ol_ip_tx_csum_disabled) { |
1344 | 0 | flags &= ~NETDEV_TX_OFFLOAD_IPV4_CKSUM; |
1345 | 0 | } |
1346 | 0 | if (!dev->ol_l4_tx_csum_disabled) { |
1347 | 0 | flags &= ~NETDEV_TX_OFFLOAD_TCP_CKSUM; |
1348 | 0 | flags &= ~NETDEV_TX_OFFLOAD_UDP_CKSUM; |
1349 | 0 | } |
1350 | 0 | if (!dev->ol_out_ip_tx_csum_disabled) { |
1351 | 0 | flags &= ~NETDEV_TX_OFFLOAD_OUTER_IP_CKSUM; |
1352 | 0 | } |
1353 | 0 | if (!dev->ol_out_udp_tx_csum_disabled) { |
1354 | 0 | flags &= ~NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM; |
1355 | 0 | } |
1356 | 0 | is_tso = userspace_tso_enabled() && dev->ol_tso_segsz && |
1357 | 0 | dp_packet_get_tso_segsz(packet); |
1358 | 0 | ovs_mutex_unlock(&dev->mutex); |
1359 | |
1360 | 0 | if (!dp_packet_is_eth(packet)) { |
1361 | 0 | error = EPFNOSUPPORT; |
1362 | 0 | break; |
1363 | 0 | } |
1364 | | |
1365 | 0 | if (size < ETH_HEADER_LEN) { |
1366 | 0 | error = EMSGSIZE; |
1367 | 0 | break; |
1368 | 0 | } else { |
1369 | 0 | const struct eth_header *eth = buffer; |
1370 | 0 | int max_size; |
1371 | |
1372 | 0 | ovs_mutex_lock(&dev->mutex); |
1373 | 0 | max_size = dev->mtu + ETH_HEADER_LEN; |
1374 | 0 | ovs_mutex_unlock(&dev->mutex); |
1375 | |
1376 | 0 | if (eth->eth_type == htons(ETH_TYPE_VLAN)) { |
1377 | 0 | max_size += VLAN_HEADER_LEN; |
1378 | 0 | } |
1379 | 0 | if (size > max_size && !is_tso) { |
1380 | 0 | error = EMSGSIZE; |
1381 | 0 | break; |
1382 | 0 | } |
1383 | 0 | } |
1384 | | |
1385 | 0 | if (VLOG_IS_DBG_ENABLED()) { |
1386 | 0 | bool inner_ip_csum_good; |
1387 | 0 | bool inner_l4_csum_good; |
1388 | 0 | bool inner_ip_csum_bad; |
1389 | 0 | bool inner_l4_csum_bad; |
1390 | 0 | const char *tunnel; |
1391 | 0 | bool ip_csum_good; |
1392 | 0 | bool l4_csum_good; |
1393 | 0 | bool ip_csum_bad; |
1394 | 0 | bool l4_csum_bad; |
1395 | |
|
1396 | 0 | ip_csum_good = !!(packet->offloads & DP_PACKET_OL_IP_CKSUM_GOOD); |
1397 | 0 | ip_csum_bad = !!(packet->offloads & DP_PACKET_OL_IP_CKSUM_BAD); |
1398 | 0 | l4_csum_good = !!(packet->offloads & DP_PACKET_OL_L4_CKSUM_GOOD); |
1399 | 0 | l4_csum_bad = !!(packet->offloads & DP_PACKET_OL_L4_CKSUM_BAD); |
1400 | 0 | inner_ip_csum_good = |
1401 | 0 | !!(packet->offloads & DP_PACKET_OL_INNER_IP_CKSUM_GOOD); |
1402 | 0 | inner_ip_csum_bad = |
1403 | 0 | !!(packet->offloads & DP_PACKET_OL_INNER_IP_CKSUM_BAD); |
1404 | 0 | inner_l4_csum_good = |
1405 | 0 | !!(packet->offloads & DP_PACKET_OL_INNER_L4_CKSUM_GOOD); |
1406 | 0 | inner_l4_csum_bad = |
1407 | 0 | !!(packet->offloads & DP_PACKET_OL_INNER_L4_CKSUM_BAD); |
1408 | 0 | tunnel = !dp_packet_tunnel(packet) ? "none" |
1409 | 0 | : dp_packet_tunnel_vxlan(packet) ? "vxlan" |
1410 | 0 | : dp_packet_tunnel_geneve(packet) ? "geneve" |
1411 | 0 | : "gre"; |
1412 | 0 | VLOG_DBG("Tx: packet with csum IP %s, L4 %s, tunnel %s, " |
1413 | 0 | "inner csum IP %s, inner L4 %s, segsz %"PRIu16, |
1414 | 0 | ip_csum_good ? (ip_csum_bad ? "partial" : "good") |
1415 | 0 | : (ip_csum_bad ? "bad" : "unknown"), |
1416 | 0 | l4_csum_good ? (l4_csum_bad ? "partial" : "good") |
1417 | 0 | : (l4_csum_bad ? "bad" : "unknown"), |
1418 | 0 | tunnel, |
1419 | 0 | inner_ip_csum_good |
1420 | 0 | ? (inner_ip_csum_bad ? "partial" : "good") |
1421 | 0 | : (inner_ip_csum_bad ? "bad" : "unknown"), |
1422 | 0 | inner_l4_csum_good |
1423 | 0 | ? (inner_l4_csum_bad ? "partial" : "good") |
1424 | 0 | : (inner_l4_csum_bad ? "bad" : "unknown"), |
1425 | 0 | dp_packet_get_tso_segsz(packet)); |
1426 | 0 | } |
1427 | |
1428 | 0 | if (dp_packet_ip_checksum_partial(packet) |
1429 | 0 | || dp_packet_l4_checksum_partial(packet) |
1430 | 0 | || dp_packet_inner_ip_checksum_partial(packet) |
1431 | 0 | || dp_packet_inner_l4_checksum_partial(packet)) { |
1432 | 0 | dp_packet_ol_send_prepare(packet, flags); |
1433 | 0 | } |
1434 | |
1435 | 0 | ovs_mutex_lock(&dev->mutex); |
1436 | 0 | dev->stats.tx_packets++; |
1437 | 0 | dev->txq_stats[qid].packets++; |
1438 | 0 | dev->stats.tx_bytes += size; |
1439 | 0 | dev->txq_stats[qid].bytes += size; |
1440 | |
1441 | 0 | dummy_packet_conn_send(&dev->conn, buffer, size); |
1442 | | |
1443 | | /* Reply to ARP requests for 'dev''s assigned IP address. */ |
1444 | 0 | struct netdev_addr_dummy *addr_dummy; |
1445 | 0 | LIST_FOR_EACH (addr_dummy, node, &dev->addrs) { |
1446 | 0 | ovs_be32 address = in6_addr_get_mapped_ipv4(&addr_dummy->address); |
1447 | |
1448 | 0 | struct dp_packet dp; |
1449 | 0 | struct flow flow; |
1450 | |
1451 | 0 | dp_packet_use_const(&dp, buffer, size); |
1452 | 0 | flow_extract(&dp, &flow); |
1453 | 0 | if (flow.dl_type == htons(ETH_TYPE_ARP) |
1454 | 0 | && flow.nw_proto == ARP_OP_REQUEST |
1455 | 0 | && flow.nw_dst == address) { |
1456 | 0 | struct dp_packet *reply = dp_packet_new(0); |
1457 | 0 | compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src, |
1458 | 0 | false, flow.nw_dst, flow.nw_src); |
1459 | 0 | netdev_dummy_queue_packet(dev, reply, NULL, 0); |
1460 | 0 | break; |
1461 | 0 | } |
1462 | 0 | } |
1463 | |
1464 | 0 | if (dev->tx_pcap) { |
1465 | 0 | struct dp_packet dp; |
1466 | |
1467 | 0 | dp_packet_use_const(&dp, buffer, size); |
1468 | 0 | ovs_pcap_write(dev->tx_pcap, &dp); |
1469 | 0 | } |
1470 | |
1471 | 0 | ovs_mutex_unlock(&dev->mutex); |
1472 | 0 | } |
1473 | |
1474 | 0 | dp_packet_delete_batch(batch, true); |
1475 | |
1476 | 0 | return error; |
1477 | 0 | } |
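
Editor's note: besides mirroring packets to its pcap file and stream connections, the send path above acts as a tiny ARP responder for the device's own addresses. A hedged sketch of a request it would answer, built with the same compose_arp() helper (the sketch_* function and its parameters are illustrative, not part of this file):

static struct dp_packet *
sketch_make_arp_request(struct eth_addr src_mac, ovs_be32 src_ip,
                        ovs_be32 dev_ip)
{
    struct dp_packet *request = dp_packet_new(0);

    /* Broadcast who-has for 'dev_ip'; if 'dev_ip' matches one of the dummy
     * device's addresses, netdev_dummy_send() queues an ARP_OP_REPLY from
     * dev->hwaddr back onto the device's receive queues. */
    compose_arp(request, ARP_OP_REQUEST, src_mac, eth_addr_zero,
                true, src_ip, dev_ip);
    return request;
}
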
1478 | | |
1479 | | static int |
1480 | | netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac) |
1481 | 0 | { |
1482 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1483 | |
1484 | 0 | ovs_mutex_lock(&dev->mutex); |
1485 | 0 | if (!eth_addr_equals(dev->hwaddr, mac)) { |
1486 | 0 | dev->hwaddr = mac; |
1487 | 0 | netdev_change_seq_changed(netdev); |
1488 | 0 | } |
1489 | 0 | ovs_mutex_unlock(&dev->mutex); |
1490 | |
1491 | 0 | return 0; |
1492 | 0 | } |
1493 | | |
1494 | | static int |
1495 | | netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac) |
1496 | 0 | { |
1497 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1498 | |
1499 | 0 | ovs_mutex_lock(&dev->mutex); |
1500 | 0 | *mac = dev->hwaddr; |
1501 | 0 | ovs_mutex_unlock(&dev->mutex); |
1502 | |
1503 | 0 | return 0; |
1504 | 0 | } |
1505 | | |
1506 | | static int |
1507 | | netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup) |
1508 | 0 | { |
1509 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1510 | |
1511 | 0 | ovs_mutex_lock(&dev->mutex); |
1512 | 0 | *mtup = dev->mtu; |
1513 | 0 | ovs_mutex_unlock(&dev->mutex); |
1514 | |
1515 | 0 | return 0; |
1516 | 0 | } |
1517 | | |
1518 | 0 | #define DUMMY_MIN_MTU 68 |
1519 | 0 | #define DUMMY_MAX_MTU 65535 |
1520 | | |
1521 | | static int |
1522 | | netdev_dummy_set_mtu(struct netdev *netdev, int mtu) |
1523 | 0 | { |
1524 | 0 | if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) { |
1525 | 0 | return EINVAL; |
1526 | 0 | } |
1527 | | |
1528 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1529 | |
1530 | 0 | ovs_mutex_lock(&dev->mutex); |
1531 | 0 | if (dev->mtu != mtu) { |
1532 | 0 | dev->mtu = mtu; |
1533 | 0 | netdev_change_seq_changed(netdev); |
1534 | 0 | } |
1535 | 0 | ovs_mutex_unlock(&dev->mutex); |
1536 | |
1537 | 0 | return 0; |
1538 | 0 | } |
1539 | | |
1540 | | static int |
1541 | | netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats) |
1542 | 0 | { |
1543 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1544 | |
1545 | 0 | ovs_mutex_lock(&dev->mutex); |
1546 | | /* Pass only the collected counters. */ |
1547 | 0 | stats->tx_packets = dev->stats.tx_packets; |
1548 | 0 | stats->tx_bytes = dev->stats.tx_bytes; |
1549 | 0 | stats->rx_packets = dev->stats.rx_packets; |
1550 | 0 | stats->rx_bytes = dev->stats.rx_bytes; |
1551 | 0 | ovs_mutex_unlock(&dev->mutex); |
1552 | |
1553 | 0 | return 0; |
1554 | 0 | } |
1555 | | |
1556 | | static int |
1557 | | netdev_dummy_get_custom_stats(const struct netdev *netdev, |
1558 | | struct netdev_custom_stats *custom_stats) |
1559 | 0 | { |
1560 | 0 | int i, j; |
1561 | |
1562 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1563 | |
1564 | 0 | ovs_mutex_lock(&dev->mutex); |
1565 | |
1566 | 0 | #define DUMMY_Q_STATS \ |
1567 | 0 | DUMMY_Q_STAT(bytes) \ |
1568 | 0 | DUMMY_Q_STAT(packets) |
1569 | |
1570 | 0 | custom_stats->size = C_STATS_SIZE; |
1571 | 0 | #define DUMMY_Q_STAT(NAME) + netdev->n_rxq |
1572 | 0 | custom_stats->size += DUMMY_Q_STATS; |
1573 | 0 | #undef DUMMY_Q_STAT |
1574 | 0 | #define DUMMY_Q_STAT(NAME) + netdev->n_txq |
1575 | 0 | custom_stats->size += DUMMY_Q_STATS; |
1576 | 0 | #undef DUMMY_Q_STAT |
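 | | /* Illustrative expansion of the X-macro counting above: each |
 | |  * DUMMY_Q_STATS pass contributes one "+ n_rxq" (or "+ n_txq") per |
 | |  * queue counter, so with C_STATS_SIZE == 2, n_rxq == 2 and n_txq == 1 |
 | |  * the total is 2 + 2*2 + 2*1 = 8 counters. */ |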
1577 | |
1578 | 0 | custom_stats->counters = xcalloc(custom_stats->size, |
1579 | 0 | sizeof(struct netdev_custom_counter)); |
1580 | |
1581 | 0 | j = 0; |
1582 | 0 | for (i = 0; i < C_STATS_SIZE; i++) { |
1583 | 0 | custom_stats->counters[j].value = dev->custom_stats[i].value; |
1584 | 0 | ovs_strlcpy(custom_stats->counters[j++].name, |
1585 | 0 | dev->custom_stats[i].name, |
1586 | 0 | NETDEV_CUSTOM_STATS_NAME_SIZE); |
1587 | 0 | } |
1588 | |
1589 | 0 | for (i = 0; i < netdev->n_rxq; i++) { |
1590 | 0 | #define DUMMY_Q_STAT(NAME) \ |
1591 | 0 | snprintf(custom_stats->counters[j].name, \ |
1592 | 0 | NETDEV_CUSTOM_STATS_NAME_SIZE, "rx_q%d_"#NAME, i); \ |
1593 | 0 | custom_stats->counters[j++].value = dev->rxq_stats[i].NAME; |
1594 | 0 | DUMMY_Q_STATS |
1595 | 0 | #undef DUMMY_Q_STAT |
1596 | 0 | } |
1597 | |
1598 | 0 | for (i = 0; i < netdev->n_txq; i++) { |
1599 | 0 | #define DUMMY_Q_STAT(NAME) \ |
1600 | 0 | snprintf(custom_stats->counters[j].name, \ |
1601 | 0 | NETDEV_CUSTOM_STATS_NAME_SIZE, "tx_q%d_"#NAME, i); \ |
1602 | 0 | custom_stats->counters[j++].value = dev->txq_stats[i].NAME; |
1603 | 0 | DUMMY_Q_STATS |
1604 | 0 | #undef DUMMY_Q_STAT |
1605 | 0 | } |
1606 | |
1607 | 0 | ovs_mutex_unlock(&dev->mutex); |
1608 | |
1609 | 0 | return 0; |
1610 | 0 | } |
1611 | | |
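 | | /* QoS stubs: the dummy netdev models exactly one queue, id 0, and |
 | |  * reports its per-queue statistics as unavailable by setting the |
 | |  * counters to UINT64_MAX (and 'created' to LLONG_MIN) below. */ |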
1612 | | static int |
1613 | | netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED, |
1614 | | unsigned int queue_id, struct smap *details OVS_UNUSED) |
1615 | 0 | { |
1616 | 0 | if (queue_id == 0) { |
1617 | 0 | return 0; |
1618 | 0 | } else { |
1619 | 0 | return EINVAL; |
1620 | 0 | } |
1621 | 0 | } |
1622 | | |
1623 | | static void |
1624 | | netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats) |
1625 | 0 | { |
1626 | 0 | *stats = (struct netdev_queue_stats) { |
1627 | 0 | .tx_bytes = UINT64_MAX, |
1628 | 0 | .tx_packets = UINT64_MAX, |
1629 | 0 | .tx_errors = UINT64_MAX, |
1630 | 0 | .created = LLONG_MIN, |
1631 | 0 | }; |
1632 | 0 | } |
1633 | | |
1634 | | static int |
1635 | | netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED, |
1636 | | unsigned int queue_id, |
1637 | | struct netdev_queue_stats *stats) |
1638 | 0 | { |
1639 | 0 | if (queue_id == 0) { |
1640 | 0 | netdev_dummy_init_queue_stats(stats); |
1641 | 0 | return 0; |
1642 | 0 | } else { |
1643 | 0 | return EINVAL; |
1644 | 0 | } |
1645 | 0 | } |
1646 | | |
1647 | | struct netdev_dummy_queue_state { |
1648 | | unsigned int next_queue; |
1649 | | }; |
1650 | | |
1651 | | static int |
1652 | | netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED, |
1653 | | void **statep) |
1654 | 0 | { |
1655 | 0 | struct netdev_dummy_queue_state *state = xmalloc(sizeof *state); |
1656 | 0 | state->next_queue = 0; |
1657 | 0 | *statep = state; |
1658 | 0 | return 0; |
1659 | 0 | } |
1660 | | |
1661 | | static int |
1662 | | netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED, |
1663 | | void *state_, |
1664 | | unsigned int *queue_id, |
1665 | | struct smap *details OVS_UNUSED) |
1666 | 0 | { |
1667 | 0 | struct netdev_dummy_queue_state *state = state_; |
1668 | 0 | if (state->next_queue == 0) { |
1669 | 0 | *queue_id = 0; |
1670 | 0 | state->next_queue++; |
1671 | 0 | return 0; |
1672 | 0 | } else { |
1673 | 0 | return EOF; |
1674 | 0 | } |
1675 | 0 | } |
1676 | | |
1677 | | static int |
1678 | | netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED, |
1679 | | void *state) |
1680 | 0 | { |
1681 | 0 | free(state); |
1682 | 0 | return 0; |
1683 | 0 | } |
1684 | | |
1685 | | static int |
1686 | | netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED, |
1687 | | void (*cb)(unsigned int queue_id, |
1688 | | struct netdev_queue_stats *, |
1689 | | void *aux), |
1690 | | void *aux) |
1691 | 0 | { |
1692 | 0 | struct netdev_queue_stats stats; |
1693 | 0 | netdev_dummy_init_queue_stats(&stats); |
1694 | 0 | cb(0, &stats, aux); |
1695 | 0 | return 0; |
1696 | 0 | } |
1697 | | |
1698 | | static int |
1699 | | netdev_dummy_get_ifindex(const struct netdev *netdev) |
1700 | 0 | { |
1701 | 0 | struct netdev_dummy *dev = netdev_dummy_cast(netdev); |
1702 | 0 | int ifindex; |
1703 | |
1704 | 0 | ovs_mutex_lock(&dev->mutex); |
1705 | 0 | ifindex = dev->ifindex; |
1706 | 0 | ovs_mutex_unlock(&dev->mutex); |
1707 | |
1708 | 0 | return ifindex; |
1709 | 0 | } |
1710 | | |
1711 | | static int |
1712 | | netdev_dummy_update_flags__(struct netdev_dummy *netdev, |
1713 | | enum netdev_flags off, enum netdev_flags on, |
1714 | | enum netdev_flags *old_flagsp) |
1715 | | OVS_REQUIRES(netdev->mutex) |
1716 | 0 | { |
1717 | 0 | if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) { |
1718 | 0 | return EINVAL; |
1719 | 0 | } |
1720 | | |
1721 | 0 | *old_flagsp = netdev->flags; |
1722 | 0 | netdev->flags |= on; |
1723 | 0 | netdev->flags &= ~off; |
1724 | 0 | if (*old_flagsp != netdev->flags) { |
1725 | 0 | netdev_change_seq_changed(&netdev->up); |
1726 | 0 | } |
1727 | |
1728 | 0 | return 0; |
1729 | 0 | } |
1730 | | |
1731 | | static int |
1732 | | netdev_dummy_update_flags(struct netdev *netdev_, |
1733 | | enum netdev_flags off, enum netdev_flags on, |
1734 | | enum netdev_flags *old_flagsp) |
1735 | 0 | { |
1736 | 0 | struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); |
1737 | 0 | int error; |
1738 | |
1739 | 0 | ovs_mutex_lock(&netdev->mutex); |
1740 | 0 | error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp); |
1741 | 0 | ovs_mutex_unlock(&netdev->mutex); |
1742 | |
1743 | 0 | return error; |
1744 | 0 | } |
1745 | | |
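 | | /* Method table shared by all dummy classes; each class below adds |
 | |  * only its .type (plus .is_pmd and .reconfigure for dummy-pmd). */ |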
1746 | | #define NETDEV_DUMMY_CLASS_COMMON \ |
1747 | | .run = netdev_dummy_run, \ |
1748 | | .wait = netdev_dummy_wait, \ |
1749 | | .alloc = netdev_dummy_alloc, \ |
1750 | | .construct = netdev_dummy_construct, \ |
1751 | | .destruct = netdev_dummy_destruct, \ |
1752 | | .dealloc = netdev_dummy_dealloc, \ |
1753 | | .get_config = netdev_dummy_get_config, \ |
1754 | | .set_config = netdev_dummy_set_config, \ |
1755 | | .get_numa_id = netdev_dummy_get_numa_id, \ |
1756 | | .send = netdev_dummy_send, \ |
1757 | | .set_etheraddr = netdev_dummy_set_etheraddr, \ |
1758 | | .get_etheraddr = netdev_dummy_get_etheraddr, \ |
1759 | | .get_mtu = netdev_dummy_get_mtu, \ |
1760 | | .set_mtu = netdev_dummy_set_mtu, \ |
1761 | | .get_ifindex = netdev_dummy_get_ifindex, \ |
1762 | | .get_stats = netdev_dummy_get_stats, \ |
1763 | | .get_custom_stats = netdev_dummy_get_custom_stats, \ |
1764 | | .get_queue = netdev_dummy_get_queue, \ |
1765 | | .get_queue_stats = netdev_dummy_get_queue_stats, \ |
1766 | | .queue_dump_start = netdev_dummy_queue_dump_start, \ |
1767 | | .queue_dump_next = netdev_dummy_queue_dump_next, \ |
1768 | | .queue_dump_done = netdev_dummy_queue_dump_done, \ |
1769 | | .dump_queue_stats = netdev_dummy_dump_queue_stats, \ |
1770 | | .get_addr_list = netdev_dummy_get_addr_list, \ |
1771 | | .update_flags = netdev_dummy_update_flags, \ |
1772 | | .rxq_alloc = netdev_dummy_rxq_alloc, \ |
1773 | | .rxq_construct = netdev_dummy_rxq_construct, \ |
1774 | | .rxq_destruct = netdev_dummy_rxq_destruct, \ |
1775 | | .rxq_dealloc = netdev_dummy_rxq_dealloc, \ |
1776 | | .rxq_recv = netdev_dummy_rxq_recv, \ |
1777 | | .rxq_wait = netdev_dummy_rxq_wait, \ |
1778 | | .rxq_drain = netdev_dummy_rxq_drain |
1779 | | |
1780 | | static const struct netdev_class dummy_class = { |
1781 | | NETDEV_DUMMY_CLASS_COMMON, |
1782 | | .type = "dummy" |
1783 | | }; |
1784 | | |
1785 | | static const struct netdev_class dummy_internal_class = { |
1786 | | NETDEV_DUMMY_CLASS_COMMON, |
1787 | | .type = "dummy-internal" |
1788 | | }; |
1789 | | |
1790 | | static const struct netdev_class dummy_pmd_class = { |
1791 | | NETDEV_DUMMY_CLASS_COMMON, |
1792 | | .type = "dummy-pmd", |
1793 | | .is_pmd = true, |
1794 | | .reconfigure = netdev_dummy_reconfigure |
1795 | | }; |
1796 | | |
1797 | | |
1798 | | /* Helper functions. */ |
1799 | | |
1800 | | static void |
1801 | | pkt_list_delete(struct ovs_list *l) |
1802 | 0 | { |
1803 | 0 | struct pkt_list_node *pkt; |
1804 | |
1805 | 0 | LIST_FOR_EACH_POP(pkt, list_node, l) { |
1806 | 0 | dp_packet_delete(pkt->pkt); |
1807 | 0 | free(pkt); |
1808 | 0 | } |
1809 | 0 | } |
1810 | | |
1811 | | static void |
1812 | | addr_list_delete(struct ovs_list *l) |
1813 | 0 | { |
1814 | 0 | struct netdev_addr_dummy *addr_dummy; |
1815 | |
1816 | 0 | LIST_FOR_EACH_POP (addr_dummy, node, l) { |
1817 | 0 | free(addr_dummy); |
1818 | 0 | } |
1819 | 0 | } |
1820 | | |
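 | | /* Tries to parse 's' as a packet written in hex. Returns NULL on |
 | |  * failure (eth_from_hex() is expected to leave 'packet' NULL in |
 | |  * that case). */ |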
1821 | | static struct dp_packet * |
1822 | | eth_from_packet(const char *s) |
1823 | 0 | { |
1824 | 0 | struct dp_packet *packet; |
1825 | 0 | eth_from_hex(s, &packet); |
1826 | 0 | return packet; |
1827 | 0 | } |
1828 | | |
1829 | | static struct dp_packet * |
1830 | | eth_from_flow_str(const char *s, size_t packet_size, |
1831 | | struct flow *flow, char **errorp) |
1832 | 0 | { |
1833 | 0 | *errorp = NULL; |
1834 | |
1835 | 0 | enum odp_key_fitness fitness; |
1836 | 0 | struct dp_packet *packet; |
1837 | 0 | struct ofpbuf odp_key; |
1838 | 0 | int error; |
1839 | | |
1840 | | /* Convert string to datapath key. |
1841 | | * |
1842 | | * It would actually be nicer to parse an OpenFlow-like flow key here, but |
1843 | | * the code for that currently calls exit() on parse error. We have to |
1844 | | * settle for parsing a datapath key for now. |
1845 | | */ |
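 | | /* An illustrative input in ODP flow syntax: |
 | |  * "in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07), |
 | |  *  eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0, |
 | |  *  ttl=64,frag=no),icmp(type=8,code=0)" */ |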
1846 | 0 | ofpbuf_init(&odp_key, 0); |
1847 | 0 | error = odp_flow_from_string(s, NULL, &odp_key, NULL, errorp); |
1848 | 0 | if (error) { |
1849 | 0 | ofpbuf_uninit(&odp_key); |
1850 | 0 | return NULL; |
1851 | 0 | } |
1852 | | |
1853 | | /* Convert odp_key to flow. */ |
1854 | 0 | fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, flow, errorp); |
1855 | 0 | if (fitness == ODP_FIT_ERROR) { |
1856 | 0 | ofpbuf_uninit(&odp_key); |
1857 | 0 | return NULL; |
1858 | 0 | } |
1859 | | |
1860 | 0 | packet = dp_packet_new(0); |
1861 | 0 | if (packet_size) { |
1862 | 0 | flow_compose(packet, flow, NULL, 0, false); |
1863 | 0 | if (dp_packet_size(packet) < packet_size) { |
1864 | 0 | packet_expand(packet, flow, packet_size); |
1865 | 0 | } else if (dp_packet_size(packet) > packet_size) { |
1866 | 0 | dp_packet_delete(packet); |
1867 | 0 | packet = NULL; |
1868 | 0 | } |
1869 | 0 | } else { |
1870 | 0 | flow_compose(packet, flow, NULL, 64, false); |
1871 | 0 | } |
1872 | |
1873 | 0 | ofpbuf_uninit(&odp_key); |
1874 | 0 | return packet; |
1875 | 0 | } |
1876 | | |
1877 | | static void |
1878 | | netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet) |
1879 | 0 | { |
1880 | 0 | struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node); |
1881 | |
1882 | 0 | pkt_node->pkt = packet; |
1883 | 0 | ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node); |
1884 | 0 | rx->recv_queue_len++; |
1885 | 0 | seq_change(rx->seq); |
1886 | 0 | } |
1887 | | |
1888 | | static void |
1889 | | netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet, |
1890 | | struct flow *flow, int queue_id) |
1891 | | OVS_REQUIRES(dummy->mutex) |
1892 | 0 | { |
1893 | 0 | struct netdev_rxq_dummy *rx, *prev; |
1894 | |
1895 | 0 | if (dummy->rxq_pcap) { |
1896 | 0 | ovs_pcap_write(dummy->rxq_pcap, packet); |
1897 | 0 | } |
1898 | |
1899 | 0 | dummy_netdev_simulate_offload(&dummy->up, packet, flow); |
1900 | |
1901 | 0 | prev = NULL; |
1902 | 0 | LIST_FOR_EACH (rx, node, &dummy->rxes) { |
1903 | 0 | if (rx->up.queue_id == queue_id && |
1904 | 0 | rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) { |
1905 | 0 | if (prev) { |
1906 | 0 | netdev_dummy_queue_packet__(prev, dp_packet_clone(packet)); |
1907 | 0 | } |
1908 | 0 | prev = rx; |
1909 | 0 | } |
1910 | 0 | } |
1911 | 0 | if (prev) { |
1912 | 0 | netdev_dummy_queue_packet__(prev, packet); |
1913 | 0 | } else { |
1914 | 0 | dp_packet_delete(packet); |
1915 | 0 | } |
1916 | 0 | } |
1917 | | |
1918 | | static void |
1919 | | netdev_dummy_receive(struct unixctl_conn *conn, |
1920 | | int argc, const char *argv[], void *aux OVS_UNUSED) |
1921 | 0 | { |
1922 | 0 | struct netdev_dummy *dummy_dev; |
1923 | 0 | struct netdev *netdev; |
1924 | 0 | int i, k = 1, rx_qid = 0; |
1925 | |
1926 | 0 | netdev = netdev_from_name(argv[k++]); |
1927 | 0 | if (!netdev || !is_dummy_netdev_class(netdev->netdev_class)) { |
1928 | 0 | unixctl_command_reply_error(conn, "no such dummy netdev"); |
1929 | 0 | goto exit_netdev; |
1930 | 0 | } |
1931 | 0 | dummy_dev = netdev_dummy_cast(netdev); |
1932 | |
1933 | 0 | ovs_mutex_lock(&dummy_dev->mutex); |
1934 | |
1935 | 0 | if (argc > k + 1 && !strcmp(argv[k], "--qid")) { |
1936 | 0 | rx_qid = strtol(argv[k + 1], NULL, 10); |
1937 | 0 | if (rx_qid < 0 || rx_qid >= netdev->n_rxq) { |
1938 | 0 | unixctl_command_reply_error(conn, "bad rx queue id."); |
1939 | 0 | goto exit; |
1940 | 0 | } |
1941 | 0 | k += 2; |
1942 | 0 | } |
1943 | | |
1944 | 0 | for (i = k; i < argc; i++) { |
1945 | 0 | struct dp_packet *packet; |
1946 | 0 | struct flow flow; |
1947 | | |
1948 | | /* Try to parse 'argv[i]' as a packet in hex. */ |
1949 | 0 | packet = eth_from_packet(argv[i]); |
1950 | |
1951 | 0 | if (!packet) { |
1952 | 0 | int packet_size = 0; |
1953 | 0 | const char *flow_str = argv[i]; |
1954 | | |
1955 | | /* Parse the optional --len argument that immediately follows a 'flow'. */ |
1956 | 0 | if (argc > i + 2 && !strcmp(argv[i + 1], "--len")) { |
1957 | 0 | packet_size = strtol(argv[i + 2], NULL, 10); |
1958 | |
1959 | 0 | if (packet_size < ETH_TOTAL_MIN) { |
1960 | 0 | unixctl_command_reply_error(conn, "too small packet len"); |
1961 | 0 | goto exit; |
1962 | 0 | } |
1963 | 0 | i += 2; |
1964 | 0 | } |
1965 | | /* Otherwise, try to parse 'argv[i]' as an ODP flow. */ |
1966 | 0 | char *error_s; |
1967 | 0 | packet = eth_from_flow_str(flow_str, packet_size, &flow, &error_s); |
1968 | 0 | if (!packet) { |
1969 | 0 | unixctl_command_reply_error(conn, error_s); |
1970 | 0 | free(error_s); |
1971 | 0 | goto exit; |
1972 | 0 | } |
1973 | 0 | } else { |
1974 | 0 | flow_extract(packet, &flow); |
1975 | 0 | } |
1976 | | |
1977 | 0 | netdev_dummy_queue_packet(dummy_dev, packet, &flow, rx_qid); |
1978 | 0 | } |
1979 | | |
1980 | 0 | unixctl_command_reply(conn, NULL); |
1981 | |
1982 | 0 | exit: |
1983 | 0 | ovs_mutex_unlock(&dummy_dev->mutex); |
1984 | 0 | exit_netdev: |
1985 | 0 | netdev_close(netdev); |
1986 | 0 | } |
1987 | | |
1988 | | static void |
1989 | | netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state) |
1990 | | OVS_REQUIRES(dev->mutex) |
1991 | 0 | { |
1992 | 0 | enum netdev_flags old_flags; |
1993 | |
1994 | 0 | if (admin_state) { |
1995 | 0 | netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags); |
1996 | 0 | } else { |
1997 | 0 | netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags); |
1998 | 0 | } |
1999 | 0 | } |
2000 | | |
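 | | /* Handler for "netdev-dummy/set-admin-state". Illustrative use: |
 | |  *   ovs-appctl netdev-dummy/set-admin-state p1 down |
 | |  * Omitting the port name applies the new state to every dummy. */ |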
2001 | | static void |
2002 | | netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc, |
2003 | | const char *argv[], void *aux OVS_UNUSED) |
2004 | 0 | { |
2005 | 0 | bool up; |
2006 | |
2007 | 0 | if (!strcasecmp(argv[argc - 1], "up")) { |
2008 | 0 | up = true; |
2009 | 0 | } else if (!strcasecmp(argv[argc - 1], "down")) { |
2010 | 0 | up = false; |
2011 | 0 | } else { |
2012 | 0 | unixctl_command_reply_error(conn, "Invalid Admin State"); |
2013 | 0 | return; |
2014 | 0 | } |
2015 | | |
2016 | 0 | if (argc > 2) { |
2017 | 0 | struct netdev *netdev = netdev_from_name(argv[1]); |
2018 | 0 | if (netdev && is_dummy_netdev_class(netdev->netdev_class)) { |
2019 | 0 | struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev); |
2020 | |
2021 | 0 | ovs_mutex_lock(&dummy_dev->mutex); |
2022 | 0 | netdev_dummy_set_admin_state__(dummy_dev, up); |
2023 | 0 | ovs_mutex_unlock(&dummy_dev->mutex); |
2024 | |
2025 | 0 | netdev_close(netdev); |
2026 | 0 | } else { |
2027 | 0 | unixctl_command_reply_error(conn, "Unknown Dummy Interface"); |
2028 | 0 | netdev_close(netdev); |
2029 | 0 | return; |
2030 | 0 | } |
2031 | 0 | } else { |
2032 | 0 | struct netdev_dummy *netdev; |
2033 | |
2034 | 0 | ovs_mutex_lock(&dummy_list_mutex); |
2035 | 0 | LIST_FOR_EACH (netdev, list_node, &dummy_list) { |
2036 | 0 | ovs_mutex_lock(&netdev->mutex); |
2037 | 0 | netdev_dummy_set_admin_state__(netdev, up); |
2038 | 0 | ovs_mutex_unlock(&netdev->mutex); |
2039 | 0 | } |
2040 | 0 | ovs_mutex_unlock(&dummy_list_mutex); |
2041 | 0 | } |
2042 | 0 | unixctl_command_reply(conn, "OK"); |
2043 | 0 | } |
2044 | | |
2045 | | static void |
2046 | | display_conn_state__(struct ds *s, const char *name, |
2047 | | enum dummy_netdev_conn_state state) |
2048 | 0 | { |
2049 | 0 | ds_put_format(s, "%s: ", name); |
2050 | |
2051 | 0 | switch (state) { |
2052 | 0 | case CONN_STATE_CONNECTED: |
2053 | 0 | ds_put_cstr(s, "connected\n"); |
2054 | 0 | break; |
2055 | | |
2056 | 0 | case CONN_STATE_NOT_CONNECTED: |
2057 | 0 | ds_put_cstr(s, "disconnected\n"); |
2058 | 0 | break; |
2059 | | |
2060 | 0 | case CONN_STATE_UNKNOWN: |
2061 | 0 | default: |
2062 | 0 | ds_put_cstr(s, "unknown\n"); |
2063 | 0 | break; |
2064 | 0 | } |
2065 | 0 | } |
2066 | | |
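 | | /* Handler for "netdev-dummy/conn-state", e.g. |
 | |  *   ovs-appctl netdev-dummy/conn-state p1 |
 | |  * replies "p1: connected", "p1: disconnected", or "p1: unknown"; |
 | |  * with no argument it lists every dummy whose state is known. */ |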
2067 | | static void |
2068 | | netdev_dummy_conn_state(struct unixctl_conn *conn, int argc, |
2069 | | const char *argv[], void *aux OVS_UNUSED) |
2070 | 0 | { |
2071 | 0 | enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN; |
2072 | 0 | struct ds s; |
2073 | |
2074 | 0 | ds_init(&s); |
2075 | |
2076 | 0 | if (argc > 1) { |
2077 | 0 | const char *dev_name = argv[1]; |
2078 | 0 | struct netdev *netdev = netdev_from_name(dev_name); |
2079 | |
2080 | 0 | if (netdev && is_dummy_netdev_class(netdev->netdev_class)) { |
2081 | 0 | struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev); |
2082 | |
2083 | 0 | ovs_mutex_lock(&dummy_dev->mutex); |
2084 | 0 | state = dummy_netdev_get_conn_state(&dummy_dev->conn); |
2085 | 0 | ovs_mutex_unlock(&dummy_dev->mutex); |
2086 | |
2087 | 0 | netdev_close(netdev); |
2088 | 0 | } |
2089 | 0 | display_conn_state__(&s, dev_name, state); |
2090 | 0 | } else { |
2091 | 0 | struct netdev_dummy *netdev; |
2092 | |
2093 | 0 | ovs_mutex_lock(&dummy_list_mutex); |
2094 | 0 | LIST_FOR_EACH (netdev, list_node, &dummy_list) { |
2095 | 0 | ovs_mutex_lock(&netdev->mutex); |
2096 | 0 | state = dummy_netdev_get_conn_state(&netdev->conn); |
2097 | 0 | ovs_mutex_unlock(&netdev->mutex); |
2098 | 0 | if (state != CONN_STATE_UNKNOWN) { |
2099 | 0 | display_conn_state__(&s, netdev->up.name, state); |
2100 | 0 | } |
2101 | 0 | } |
2102 | 0 | ovs_mutex_unlock(&dummy_list_mutex); |
2103 | 0 | } |
2104 | |
2105 | 0 | unixctl_command_reply(conn, ds_cstr(&s)); |
2106 | 0 | ds_destroy(&s); |
2107 | 0 | } |
2108 | | |
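 | | /* Handler for "netdev-dummy/ip4addr". Illustrative use: |
 | |  *   ovs-appctl netdev-dummy/ip4addr p1 192.168.0.10/24 |
 | |  * assigns the address and inserts the corresponding local (host) |
 | |  * and network (prefix) routes into the ovs-router tables. */ |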
2109 | | static void |
2110 | | netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED, |
2111 | | const char *argv[], void *aux OVS_UNUSED) |
2112 | 0 | { |
2113 | 0 | struct netdev *netdev = netdev_from_name(argv[1]); |
2114 | |
2115 | 0 | if (netdev && is_dummy_netdev_class(netdev->netdev_class)) { |
2116 | 0 | struct in_addr ip, mask; |
2117 | 0 | struct in6_addr ip6; |
2118 | 0 | uint32_t plen; |
2119 | 0 | char *error; |
2120 | |
2121 | 0 | error = ip_parse_cidr(argv[2], &ip.s_addr, &plen); |
2122 | 0 | if (!error) { |
2123 | 0 | mask.s_addr = be32_prefix_mask(plen); |
2124 | 0 | netdev_dummy_add_in4(netdev, ip, mask); |
2125 | |
2126 | 0 | in6_addr_set_mapped_ipv4(&ip6, ip.s_addr); |
2127 | | /* Insert local route entry for the new address. */ |
2128 | 0 | ovs_router_force_insert(CLS_LOCAL, 0, &ip6, 32 + 96, argv[1], |
2129 | 0 | &in6addr_any, &ip6); |
2130 | | /* Insert network route entry for the new address. */ |
2131 | 0 | ovs_router_force_insert(CLS_MAIN, 0, &ip6, plen + 96, argv[1], |
2132 | 0 | &in6addr_any, &ip6); |
2133 | |
2134 | 0 | unixctl_command_reply(conn, "OK"); |
2135 | 0 | } else { |
2136 | 0 | unixctl_command_reply_error(conn, error); |
2137 | 0 | free(error); |
2138 | 0 | } |
2139 | 0 | } else { |
2140 | 0 | unixctl_command_reply_error(conn, "Unknown Dummy Interface"); |
2141 | 0 | } |
2142 | |
2143 | 0 | netdev_close(netdev); |
2144 | 0 | } |
2145 | | |
2146 | | static void |
2147 | | netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED, |
2148 | | const char *argv[], void *aux OVS_UNUSED) |
2149 | 0 | { |
2150 | 0 | struct netdev *netdev = netdev_from_name(argv[1]); |
2151 | |
2152 | 0 | if (netdev && is_dummy_netdev_class(netdev->netdev_class)) { |
2153 | 0 | struct in6_addr ip6; |
2154 | 0 | char *error; |
2155 | 0 | uint32_t plen; |
2156 | |
2157 | 0 | error = ipv6_parse_cidr(argv[2], &ip6, &plen); |
2158 | 0 | if (!error) { |
2159 | 0 | struct in6_addr mask; |
2160 | |
2161 | 0 | mask = ipv6_create_mask(plen); |
2162 | 0 | netdev_dummy_add_in6(netdev, &ip6, &mask); |
2163 | | |
2164 | | /* Insert local route entry for the new address. */ |
2165 | 0 | ovs_router_force_insert(CLS_LOCAL, 0, &ip6, 128, argv[1], |
2166 | 0 | &in6addr_any, &ip6); |
2167 | | /* Insert network route entry for the new address. */ |
2168 | 0 | ovs_router_force_insert(CLS_MAIN, 0, &ip6, plen, argv[1], |
2169 | 0 | &in6addr_any, &ip6); |
2170 | |
2171 | 0 | unixctl_command_reply(conn, "OK"); |
2172 | 0 | } else { |
2173 | 0 | unixctl_command_reply_error(conn, error); |
2174 | 0 | free(error); |
2175 | 0 | } |
2176 | 0 | } else { |
2177 | 0 | unixctl_command_reply_error(conn, "Unknown Dummy Interface"); |
2178 | 0 | } |
2179 | |
2180 | 0 | netdev_close(netdev); |
2181 | 0 | } |
2182 | | |
2183 | | |
2184 | | static void |
2185 | | netdev_dummy_override(const char *type) |
2186 | 0 | { |
2187 | 0 | if (!netdev_unregister_provider(type)) { |
2188 | 0 | struct netdev_class *class; |
2189 | 0 | int error; |
2190 | |
2191 | 0 | class = xmemdup(&dummy_class, sizeof dummy_class); |
2192 | 0 | class->type = xstrdup(type); |
2193 | 0 | error = netdev_register_provider(class); |
2194 | 0 | if (error) { |
2195 | 0 | VLOG_ERR("%s: failed to register netdev provider (%s)", |
2196 | 0 | type, ovs_strerror(error)); |
2197 | 0 | free(CONST_CAST(char *, class->type)); |
2198 | 0 | free(class); |
2199 | 0 | } |
2200 | 0 | } |
2201 | 0 | } |
2202 | | |
2203 | | void |
2204 | | netdev_dummy_register(enum dummy_level level) |
2205 | 0 | { |
2206 | 0 | unixctl_command_register("netdev-dummy/receive", |
2207 | 0 | "name [--qid queue_id] packet|flow [--len packet_len]", |
2208 | 0 | 2, INT_MAX, netdev_dummy_receive, NULL); |
2209 | 0 | unixctl_command_register("netdev-dummy/set-admin-state", |
2210 | 0 | "[netdev] up|down", 1, 2, |
2211 | 0 | netdev_dummy_set_admin_state, NULL); |
2212 | 0 | unixctl_command_register("netdev-dummy/conn-state", |
2213 | 0 | "[netdev]", 0, 1, |
2214 | 0 | netdev_dummy_conn_state, NULL); |
2215 | 0 | unixctl_command_register("netdev-dummy/ip4addr", |
2216 | 0 | "[netdev] ipaddr/mask-prefix-len", 2, 2, |
2217 | 0 | netdev_dummy_ip4addr, NULL); |
2218 | 0 | unixctl_command_register("netdev-dummy/ip6addr", |
2219 | 0 | "[netdev] ip6addr", 2, 2, |
2220 | 0 | netdev_dummy_ip6addr, NULL); |
2221 | |
2222 | 0 | if (level == DUMMY_OVERRIDE_ALL) { |
2223 | 0 | struct sset types; |
2224 | 0 | const char *type; |
2225 | |
2226 | 0 | sset_init(&types); |
2227 | 0 | netdev_enumerate_types(&types); |
2228 | 0 | SSET_FOR_EACH (type, &types) { |
2229 | 0 | if (strcmp(type, "patch")) { |
2230 | 0 | netdev_dummy_override(type); |
2231 | 0 | } |
2232 | 0 | } |
2233 | 0 | sset_destroy(&types); |
2234 | 0 | } else if (level == DUMMY_OVERRIDE_SYSTEM) { |
2235 | 0 | netdev_dummy_override("system"); |
2236 | 0 | } |
2237 | 0 | netdev_register_provider(&dummy_class); |
2238 | 0 | netdev_register_provider(&dummy_internal_class); |
2239 | 0 | netdev_register_provider(&dummy_pmd_class); |
2240 | |
2241 | 0 | netdev_vport_tunnel_register(); |
2242 | 0 | } |