/src/suricata7/src/source-napatech.c
Line | Count | Source |
1 | | /* Copyright (C) 2012-2020 Open Information Security Foundation |
2 | | * |
3 | | * You can copy, redistribute or modify this Program under the terms of |
4 | | * the GNU General Public License version 2 as published by the Free |
5 | | * Software Foundation. |
6 | | * |
7 | | * This program is distributed in the hope that it will be useful, |
8 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | | * GNU General Public License for more details. |
11 | | * |
12 | | * You should have received a copy of the GNU General Public License |
13 | | * version 2 along with this program; if not, write to the Free Software |
14 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
15 | | * 02110-1301, USA. |
16 | | */ |
17 | | |
18 | | /** |
19 | | * \file |
20 | | * |
21 |      |  * \author nPulse Technologies, LLC. |
22 |      |  * \author Matt Keeler <mk@npulsetech.com> |
23 |      |  * |
24 | | * Support for NAPATECH adapter with the 3GD Driver/API. |
25 | | * Requires libntapi from Napatech A/S. |
26 | | * |
27 | | */ |
28 | | #include "suricata-common.h" |
29 | | #include "action-globals.h" |
30 | | #include "decode.h" |
31 | | #include "packet.h" |
32 | | #include "suricata.h" |
33 | | #include "threadvars.h" |
34 | | #include "util-datalink.h" |
35 | | #include "util-optimize.h" |
36 | | #include "tm-queuehandlers.h" |
37 | | #include "tm-threads.h" |
38 | | #include "tm-modules.h" |
39 | | #include "util-privs.h" |
40 | | #include "tmqh-packetpool.h" |
41 | | #include "util-napatech.h" |
42 | | #include "source-napatech.h" |
43 | | #include "runmode-napatech.h" |
44 | | |
45 | | #ifndef HAVE_NAPATECH |
46 | | |
47 | | TmEcode NoNapatechSupportExit(ThreadVars*, const void*, void**); |
48 | | |
49 | | void TmModuleNapatechStreamRegister(void) |
50 | 33 | { |
51 | 33 | tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream"; |
52 | 33 | tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NoNapatechSupportExit; |
53 | 33 | tmm_modules[TMM_RECEIVENAPATECH].Func = NULL; |
54 | 33 | tmm_modules[TMM_RECEIVENAPATECH].ThreadExitPrintStats = NULL; |
55 | 33 | tmm_modules[TMM_RECEIVENAPATECH].ThreadDeinit = NULL; |
56 | 33 | tmm_modules[TMM_RECEIVENAPATECH].cap_flags = SC_CAP_NET_ADMIN; |
57 | 33 | } |
58 | | |
59 | | void TmModuleNapatechDecodeRegister(void) |
60 | 33 | { |
61 | 33 | tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode"; |
62 | 33 | tmm_modules[TMM_DECODENAPATECH].ThreadInit = NoNapatechSupportExit; |
63 | 33 | tmm_modules[TMM_DECODENAPATECH].Func = NULL; |
64 | 33 | tmm_modules[TMM_DECODENAPATECH].ThreadExitPrintStats = NULL; |
65 | 33 | tmm_modules[TMM_DECODENAPATECH].ThreadDeinit = NULL; |
66 | 33 | tmm_modules[TMM_DECODENAPATECH].cap_flags = 0; |
67 | 33 | tmm_modules[TMM_DECODENAPATECH].flags = TM_FLAG_DECODE_TM; |
68 | 33 | } |
69 | | |
/**
 * \brief ThreadInit stub used when Suricata is built without Napatech support.
 *
 * Logs an actionable error (rebuild with --enable-napatech) and terminates
 * the whole process, since a Napatech runmode cannot work in this build.
 *
 * \param tv       thread vars of the thread being created (name used in the log).
 * \param initdata unused.
 * \param data     unused.
 */
TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data)
{
    SCLogError("Error creating thread %s: you do not have support for Napatech adapter "
               "enabled please recompile with --enable-napatech",
            tv->name);
    /* Deliberately fatal: the configuration requested an unavailable capture method. */
    exit(EXIT_FAILURE);
}
77 | | |
78 | | #else /* Implied we do have NAPATECH support */ |
79 | | |
80 | | |
81 | | #include <numa.h> |
82 | | #include <nt.h> |
83 | | |
/* Maximum number of packets allowed in flight, configured elsewhere. */
extern uint16_t max_pending_packets;

/* Per-capture-thread state for one Napatech stream. */
typedef struct NapatechThreadVars_
{
    ThreadVars *tv;            /* back-pointer to the owning thread */
    NtNetStreamRx_t rx_stream; /* NTAPI receive stream handle */
    uint16_t stream_id;        /* Napatech stream this thread services */
    int hba;                   /* host-buffer-allowance passed to NT_NetRxOpen */
    TmSlot *slot;              /* first slot after receive, for packet handoff */
} NapatechThreadVars;

#ifdef NAPATECH_ENABLE_BYPASS
static int NapatechBypassCallback(Packet *p);
#endif

TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **);
void NapatechStreamThreadExitStats(ThreadVars *, void *);
TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot);

TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **);
TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data);
TmEcode NapatechDecode(ThreadVars *, Packet *, void *);

/* These are used as the threads are exiting to get a comprehensive count of
 * all the packets received and dropped.
 */
SC_ATOMIC_DECLARE(uint64_t, total_packets);
SC_ATOMIC_DECLARE(uint64_t, total_drops);
SC_ATOMIC_DECLARE(uint16_t, total_tallied);

/* Streams are counted as they are instantiated in order to know when all threads
 * are running*/
SC_ATOMIC_DECLARE(uint16_t, stream_count);

/* One counter per NUMA node; tracks how many streams landed on each node so a
 * host-buffer recommendation can be printed (see RecommendNUMAConfig). */
typedef struct NapatechNumaDetect_ {
    SC_ATOMIC_DECLARE(uint16_t, count);
} NapatechNumaDetect;

NapatechNumaDetect *numa_detect = NULL;

/* Bypass-callback statistics (counted even when bypass is compiled out). */
SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts);
130 | | /** |
131 | | * \brief Initialize the Napatech receiver (reader) module for globals. |
132 | | */ |
133 | | static TmEcode NapatechStreamInit(void) |
134 | | { |
135 | | int i; |
136 | | |
137 | | SC_ATOMIC_INIT(total_packets); |
138 | | SC_ATOMIC_INIT(total_drops); |
139 | | SC_ATOMIC_INIT(total_tallied); |
140 | | SC_ATOMIC_INIT(stream_count); |
141 | | |
142 | | numa_detect = SCMalloc(sizeof(*numa_detect) * (numa_max_node() + 1)); |
143 | | if (numa_detect == NULL) { |
144 | | FatalError("Failed to allocate memory for numa detection array: %s", strerror(errno)); |
145 | | } |
146 | | |
147 | | for (i = 0; i <= numa_max_node(); ++i) { |
148 | | SC_ATOMIC_INIT(numa_detect[i].count); |
149 | | } |
150 | | |
151 | | SC_ATOMIC_INIT(flow_callback_cnt); |
152 | | SC_ATOMIC_INIT(flow_callback_handled_pkts); |
153 | | SC_ATOMIC_INIT(flow_callback_udp_pkts); |
154 | | SC_ATOMIC_INIT(flow_callback_tcp_pkts); |
155 | | SC_ATOMIC_INIT(flow_callback_unhandled_pkts); |
156 | | |
157 | | return TM_ECODE_OK; |
158 | | } |
159 | | |
160 | | /** |
161 | | * \brief Deinitialize the Napatech receiver (reader) module for globals. |
162 | | */ |
163 | | static TmEcode NapatechStreamDeInit(void) |
164 | | { |
165 | | if (numa_detect != NULL) { |
166 | | SCFree(numa_detect); |
167 | | } |
168 | | |
169 | | return TM_ECODE_OK; |
170 | | } |
171 | | |
172 | | /** |
173 | | * \brief Register the Napatech receiver (reader) module. |
174 | | */ |
175 | | void TmModuleNapatechStreamRegister(void) |
176 | | { |
177 | | tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream"; |
178 | | tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NapatechStreamThreadInit; |
179 | | tmm_modules[TMM_RECEIVENAPATECH].Func = NULL; |
180 | | tmm_modules[TMM_RECEIVENAPATECH].PktAcqLoop = NapatechPacketLoop; |
181 | | tmm_modules[TMM_RECEIVENAPATECH].PktAcqBreakLoop = NULL; |
182 | | tmm_modules[TMM_RECEIVENAPATECH].ThreadExitPrintStats = NapatechStreamThreadExitStats; |
183 | | tmm_modules[TMM_RECEIVENAPATECH].ThreadDeinit = NapatechStreamThreadDeinit; |
184 | | tmm_modules[TMM_RECEIVENAPATECH].cap_flags = SC_CAP_NET_RAW; |
185 | | tmm_modules[TMM_RECEIVENAPATECH].flags = TM_FLAG_RECEIVE_TM; |
186 | | tmm_modules[TMM_RECEIVENAPATECH].Init = NapatechStreamInit; |
187 | | tmm_modules[TMM_RECEIVENAPATECH].DeInit = NapatechStreamDeInit; |
188 | | } |
189 | | |
190 | | /** |
191 | | * \brief Register the Napatech decoder module. |
192 | | */ |
193 | | void TmModuleNapatechDecodeRegister(void) |
194 | | { |
195 | | tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode"; |
196 | | tmm_modules[TMM_DECODENAPATECH].ThreadInit = NapatechDecodeThreadInit; |
197 | | tmm_modules[TMM_DECODENAPATECH].Func = NapatechDecode; |
198 | | tmm_modules[TMM_DECODENAPATECH].ThreadExitPrintStats = NULL; |
199 | | tmm_modules[TMM_DECODENAPATECH].ThreadDeinit = NapatechDecodeThreadDeinit; |
200 | | tmm_modules[TMM_DECODENAPATECH].cap_flags = 0; |
201 | | tmm_modules[TMM_DECODENAPATECH].flags = TM_FLAG_DECODE_TM; |
202 | | } |
203 | | |
204 | | #ifdef NAPATECH_ENABLE_BYPASS |
/**
 * \brief template of IPv4 header
 *
 * Packed on-wire layout; multi-byte fields are presumably in network byte
 * order as read straight from the packet buffer — TODO confirm against the
 * hardware descriptor documentation.
 */
struct ipv4_hdr
{
    uint8_t version_ihl; /**< version and header length */
    uint8_t type_of_service; /**< type of service */
    uint16_t total_length; /**< length of packet */
    uint16_t packet_id; /**< packet ID */
    uint16_t fragment_offset; /**< fragmentation offset */
    uint8_t time_to_live; /**< time to live */
    uint8_t next_proto_id; /**< protocol ID */
    uint16_t hdr_checksum; /**< header checksum */
    uint32_t src_addr; /**< source address */
    uint32_t dst_addr; /**< destination address */
} __attribute__ ((__packed__));

/**
 * \brief template of IPv6 header
 */
struct ipv6_hdr
{
    uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
    uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
    uint8_t proto; /**< Protocol, next header. */
    uint8_t hop_limits; /**< Hop limits. */
    uint8_t src_addr[16]; /**< IP address of source host. */
    uint8_t dst_addr[16]; /**< IP address of destination host(s). */
} __attribute__ ((__packed__));

/**
 * \brief template of UDP header
 */
struct udp_hdr
{
    uint16_t src_port; /**< UDP source port. */
    uint16_t dst_port; /**< UDP destination port. */
    uint16_t dgram_len; /**< UDP datagram length */
    uint16_t dgram_cksum; /**< UDP datagram checksum */
} __attribute__ ((__packed__));

/**
 * \brief template of TCP header
 */
struct tcp_hdr
{
    uint16_t src_port; /**< TCP source port. */
    uint16_t dst_port; /**< TCP destination port. */
    uint32_t sent_seq; /**< TX data sequence number. */
    uint32_t recv_ack; /**< RX data acknowledgement sequence number. */
    uint8_t data_off; /**< Data offset. */
    uint8_t tcp_flags; /**< TCP flags */
    uint16_t rx_win; /**< RX flow control window. */
    uint16_t cksum; /**< TCP checksum. */
    uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
} __attribute__ ((__packed__));
261 | | |
262 | | |
/* The hardware will assign a "color" value indicating what filters are matched
 * by a given packet. These constants indicate what bits are set in the color
 * field for different protocols
 *
 */
#define RTE_PTYPE_L2_ETHER 0x10000000
#define RTE_PTYPE_L3_IPV4 0x01000000
#define RTE_PTYPE_L3_IPV6 0x04000000
#define RTE_PTYPE_L4_TCP 0x00100000
#define RTE_PTYPE_L4_UDP 0x00200000

/* These masks are used to extract layer 3 and layer 4 protocol
 * values from the color field in the packet descriptor.
 */
#define RTE_PTYPE_L3_MASK 0x0f000000
#define RTE_PTYPE_L4_MASK 0x00f00000

#define COLOR_IS_SPAN 0x00001000

/* Nonzero when napatech.inline is enabled; set once by the last stream thread
 * to start (see NapatechPacketLoop). */
static int is_inline = 0;
/* Port-to-peer pairing for inline operation; -1 means "unpaired".
 * NOTE(review): the { -1 } initializer only sets element 0 to -1 — the rest
 * start at 0. The runtime loop in NapatechPacketLoop re-initializes every
 * entry to -1 before filters are set up, which presumably makes this benign;
 * verify no caller reads the map before that point. */
static int inline_port_map[MAX_PORTS] = { -1 };
284 | | |
285 | | /** |
286 | | * \brief Binds two ports together for inline operation. |
287 | | * |
288 | | * Get the ID of an adapter on which a given port resides. |
289 | | * |
290 | | * \param port one of the ports in a pairing. |
291 | | * \param peer the other port in a pairing. |
292 | | * \return ID of the adapter. |
293 | | * |
294 | | */ |
295 | | int NapatechSetPortmap(int port, int peer) |
296 | | { |
297 | | if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) { |
298 | | inline_port_map[port] = peer; |
299 | | inline_port_map[peer] = port; |
300 | | } else { |
301 | | SCLogError("Port pairing is already configured."); |
302 | | return 0; |
303 | | } |
304 | | return 1; |
305 | | } |
306 | | |
307 | | /** |
308 | | * \brief Returns the ID of the adapter |
309 | | * |
310 | | * Get the ID of an adapter on which a given port resides. |
311 | | * |
312 | | * \param port for which adapter ID is requested. |
313 | | * \return ID of the adapter. |
314 | | * |
315 | | */ |
316 | | int NapatechGetAdapter(uint8_t port) |
317 | | { |
318 | | static int port_adapter_map[MAX_PORTS] = { -1 }; |
319 | | int status; |
320 | | NtInfo_t h_info; /* Info handle */ |
321 | | NtInfoStream_t h_info_stream; /* Info stream handle */ |
322 | | |
323 | | if (unlikely(port_adapter_map[port] == -1)) { |
324 | | if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) { |
325 | | NAPATECH_ERROR(status); |
326 | | return -1; |
327 | | } |
328 | | /* Read the system info */ |
329 | | h_info.cmd = NT_INFO_CMD_READ_PORT_V9; |
330 | | h_info.u.port_v9.portNo = (uint8_t) port; |
331 | | if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) { |
332 | | /* Get the status code as text */ |
333 | | NAPATECH_ERROR(status); |
334 | | NT_InfoClose(h_info_stream); |
335 | | return -1; |
336 | | } |
337 | | port_adapter_map[port] = h_info.u.port_v9.data.adapterNo; |
338 | | } |
339 | | return port_adapter_map[port]; |
340 | | } |
341 | | |
/**
 * \brief IPv4 4-tuple convenience structure
 *
 * Used as the key material copied into NtFlow_t.keyData when programming
 * hardware flow-bypass entries.
 */
struct IPv4Tuple4
{
    uint32_t sa; /*!< Source address */
    uint32_t da; /*!< Destination address */
    uint16_t sp; /*!< Source port */
    uint16_t dp; /*!< Destination port */
};

/**
 * \brief IPv6 4-tuple convenience structure
 *
 * IPv6 counterpart of IPv4Tuple4; addresses are kept as raw 16-byte arrays.
 */
struct IPv6Tuple4
{
    uint8_t sa[16]; /*!< Source address */
    uint8_t da[16]; /*!< Destination address */
    uint16_t sp; /*!< Source port */
    uint16_t dp; /*!< Destination port */
};
363 | | |
/**
 * \brief Compares the byte order value of two IPv6 addresses.
 *
 * Lexicographic, byte-wise comparison of two 16-byte addresses.
 *
 * \param addr_a The first address to compare
 * \param addr_b The second address to compare
 *
 * \return -1 if addr_a < addr_b
 *          1 if addr_a > addr_b
 *          0 if addr_a == addr_b
 */
static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) {
    /* memcmp compares as unsigned bytes, which matches the uint8_t
     * element type; normalize its result to exactly -1 / 0 / 1 to keep
     * the documented contract. */
    int diff = memcmp(addr_a, addr_b, 16);
    if (diff < 0) {
        return -1;
    }
    if (diff > 0) {
        return 1;
    }
    return 0;
}
388 | | |
389 | | /** |
390 | | * \brief Initializes the FlowStreams used to program flow data. |
391 | | * |
392 | | * Opens a FlowStream on the adapter associated with the rx port. This |
393 | | * FlowStream is subsequently used to program the adapter with |
394 | | * flows to bypass. |
395 | | * |
396 | | * \return the flow stream handle, NULL if failure. |
397 | | */ |
398 | | static NtFlowStream_t InitFlowStream(int adapter, int stream_id) |
399 | | { |
400 | | int status; |
401 | | NtFlowStream_t hFlowStream; |
402 | | |
403 | | NtFlowAttr_t attr; |
404 | | char flow_name[80]; |
405 | | |
406 | | NT_FlowOpenAttrInit(&attr); |
407 | | NT_FlowOpenAttrSetAdapterNo(&attr, adapter); |
408 | | |
409 | | snprintf(flow_name, sizeof(flow_name), "Flow_stream_%d", stream_id ); |
410 | | SCLogDebug("Opening flow programming stream: %s", flow_name); |
411 | | if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) { |
412 | | SCLogWarning("Napatech bypass functionality not supported by the FPGA version on adapter " |
413 | | "%d - disabling support.", |
414 | | adapter); |
415 | | return NULL; |
416 | | } |
417 | | return hFlowStream; |
418 | | } |
419 | | |
420 | | /** |
421 | | * \brief Callback function to process Bypass events on Napatech Adapter. |
422 | | * |
423 | | * Callback function that sets up the Flow tables on the Napatech card |
424 | | * so that subsequent packets from this flow are bypassed on the hardware. |
425 | | * |
426 | | * \param p packet containing information about the flow to be bypassed |
427 | | * \param is_inline indicates if Suricata is being run in inline mode. |
428 | | * |
429 | | * \return Error code indicating success (1) or failure (0). |
430 | | * |
431 | | */ |
432 | | static int ProgramFlow(Packet *p, int inline_mode) |
433 | | { |
434 | | NtFlow_t flow_match; |
435 | | memset(&flow_match, 0, sizeof(flow_match)); |
436 | | |
437 | | NapatechPacketVars *ntpv = &(p->ntpv); |
438 | | |
439 | | /* |
440 | | * The hardware decoder will "color" the packets according to the protocols |
441 | | * in the packet and the port the packet arrived on. packet_type gets |
442 | | * these bits and we mask out layer3, layer4, and is_span to determine |
443 | | * the protocols and if the packet is coming in from a SPAN port. |
444 | | */ |
445 | | uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo; |
446 | | uint8_t *packet = (uint8_t *) ntpv->dyn3 + ntpv->dyn3->descrLength; |
447 | | |
448 | | uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK; |
449 | | uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK; |
450 | | uint32_t is_span = packet_type & COLOR_IS_SPAN; |
451 | | |
452 | | /* |
453 | | * When we're programming the flows to arrive on a span port, |
454 | | * where upstream and downstream packets arrive on the same port, |
455 | | * the hardware is configured to swap the source and dest |
456 | | * fields if the src addr > dest addr. We need to program the |
457 | | * flow tables to match. We'll compare addresses and set |
458 | | * do_swap accordingly. |
459 | | */ |
460 | | |
461 | | uint32_t do_swap = 0; |
462 | | |
463 | | SC_ATOMIC_ADD(flow_callback_cnt, 1); |
464 | | |
465 | | /* Only bypass TCP and UDP */ |
466 | | if (PKT_IS_TCP(p)) { |
467 | | SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1); |
468 | | } else if PKT_IS_UDP(p) { |
469 | | SC_ATOMIC_ADD(flow_callback_udp_pkts, 1); |
470 | | } else { |
471 | | SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1); |
472 | | } |
473 | | |
474 | | struct IPv4Tuple4 v4Tuple; |
475 | | struct IPv6Tuple4 v6Tuple; |
476 | | struct ipv4_hdr *pIPv4_hdr = NULL; |
477 | | struct ipv6_hdr *pIPv6_hdr = NULL; |
478 | | |
479 | | switch (layer3) { |
480 | | case RTE_PTYPE_L3_IPV4: |
481 | | { |
482 | | pIPv4_hdr = (struct ipv4_hdr *) (packet + ntpv->dyn3->offset0); |
483 | | if (!is_span) { |
484 | | v4Tuple.sa = pIPv4_hdr->src_addr; |
485 | | v4Tuple.da = pIPv4_hdr->dst_addr; |
486 | | } else { |
487 | | do_swap = (htonl(pIPv4_hdr->src_addr) > htonl(pIPv4_hdr->dst_addr)); |
488 | | if (!do_swap) { |
489 | | /* already in order */ |
490 | | v4Tuple.sa = pIPv4_hdr->src_addr; |
491 | | v4Tuple.da = pIPv4_hdr->dst_addr; |
492 | | } else { /* swap */ |
493 | | v4Tuple.sa = pIPv4_hdr->dst_addr; |
494 | | v4Tuple.da = pIPv4_hdr->src_addr; |
495 | | } |
496 | | } |
497 | | break; |
498 | | } |
499 | | case RTE_PTYPE_L3_IPV6: |
500 | | { |
501 | | pIPv6_hdr = (struct ipv6_hdr *) (packet + ntpv->dyn3->offset0); |
502 | | do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0); |
503 | | |
504 | | if (!is_span) { |
505 | | memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16); |
506 | | memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16); |
507 | | } else { |
508 | | /* sort src/dest address before programming */ |
509 | | if (!do_swap) { |
510 | | /* already in order */ |
511 | | memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16); |
512 | | memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16); |
513 | | } else { /* swap the addresses */ |
514 | | memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16); |
515 | | memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16); |
516 | | } |
517 | | } |
518 | | break; |
519 | | } |
520 | | default: |
521 | | { |
522 | | return 0; |
523 | | } |
524 | | } |
525 | | |
526 | | switch (layer4) { |
527 | | case RTE_PTYPE_L4_TCP: |
528 | | { |
529 | | struct tcp_hdr *tcp_hdr = (struct tcp_hdr *) (packet + ntpv->dyn3->offset1); |
530 | | if (layer3 == RTE_PTYPE_L3_IPV4) { |
531 | | if (!is_span) { |
532 | | v4Tuple.dp = tcp_hdr->dst_port; |
533 | | v4Tuple.sp = tcp_hdr->src_port; |
534 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV4; |
535 | | } else { |
536 | | if (!do_swap) { |
537 | | v4Tuple.sp = tcp_hdr->src_port; |
538 | | v4Tuple.dp = tcp_hdr->dst_port; |
539 | | } else { |
540 | | v4Tuple.sp = tcp_hdr->dst_port; |
541 | | v4Tuple.dp = tcp_hdr->src_port; |
542 | | } |
543 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN; |
544 | | } |
545 | | memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple)); |
546 | | } else { |
547 | | if (!is_span) { |
548 | | v6Tuple.dp = tcp_hdr->dst_port; |
549 | | v6Tuple.sp = tcp_hdr->src_port; |
550 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV6; |
551 | | } else { |
552 | | if (!do_swap) { |
553 | | v6Tuple.sp = tcp_hdr->src_port; |
554 | | v6Tuple.dp = tcp_hdr->dst_port; |
555 | | } else { |
556 | | v6Tuple.dp = tcp_hdr->src_port; |
557 | | v6Tuple.sp = tcp_hdr->dst_port; |
558 | | } |
559 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN; |
560 | | } |
561 | | memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple)); |
562 | | } |
563 | | flow_match.ipProtocolField = 6; |
564 | | break; |
565 | | } |
566 | | case RTE_PTYPE_L4_UDP: |
567 | | { |
568 | | struct udp_hdr *udp_hdr = (struct udp_hdr *) (packet + ntpv->dyn3->offset1); |
569 | | if (layer3 == RTE_PTYPE_L3_IPV4) { |
570 | | if (!is_span) { |
571 | | v4Tuple.dp = udp_hdr->dst_port; |
572 | | v4Tuple.sp = udp_hdr->src_port; |
573 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV4; |
574 | | } else { |
575 | | if (!do_swap) { |
576 | | v4Tuple.sp = udp_hdr->src_port; |
577 | | v4Tuple.dp = udp_hdr->dst_port; |
578 | | } else { |
579 | | v4Tuple.dp = udp_hdr->src_port; |
580 | | v4Tuple.sp = udp_hdr->dst_port; |
581 | | } |
582 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN; |
583 | | } |
584 | | memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple)); |
585 | | } else { /* layer3 is IPV6 */ |
586 | | if (!is_span) { |
587 | | v6Tuple.dp = udp_hdr->dst_port; |
588 | | v6Tuple.sp = udp_hdr->src_port; |
589 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV6; |
590 | | } else { |
591 | | if (!do_swap) { |
592 | | v6Tuple.sp = udp_hdr->src_port; |
593 | | v6Tuple.dp = udp_hdr->dst_port; |
594 | | } else { |
595 | | v6Tuple.dp = udp_hdr->src_port; |
596 | | v6Tuple.sp = udp_hdr->dst_port; |
597 | | } |
598 | | flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN; |
599 | | } |
600 | | memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple)); |
601 | | } |
602 | | flow_match.ipProtocolField = 17; |
603 | | break; |
604 | | } |
605 | | default: |
606 | | { |
607 | | return 0; |
608 | | } |
609 | | } |
610 | | |
611 | | flow_match.op = 1; /* program flow */ |
612 | | flow_match.gfi = 1; /* Generate FlowInfo records */ |
613 | | flow_match.tau = 1; /* tcp automatic unlearn */ |
614 | | |
615 | | if (PacketCheckAction(p, ACTION_DROP)) { |
616 | | flow_match.keySetId = NAPATECH_FLOWTYPE_DROP; |
617 | | } else { |
618 | | if (inline_mode) { |
619 | | flow_match.keySetId = NAPATECH_FLOWTYPE_PASS; |
620 | | } else { |
621 | | flow_match.keySetId = NAPATECH_FLOWTYPE_DROP; |
622 | | } |
623 | | } |
624 | | |
625 | | if (NT_FlowWrite(ntpv->flow_stream, &flow_match, -1) != NT_SUCCESS) { |
626 | | if (!(suricata_ctl_flags & SURICATA_STOP)) { |
627 | | SCLogError("NT_FlowWrite failed!."); |
628 | | exit(EXIT_FAILURE); |
629 | | } |
630 | | } |
631 | | |
632 | | return 1; |
633 | | } |
634 | | |
635 | | /** |
636 | | * \brief Callback from Suricata when a flow that should be bypassed |
637 | | * is identified. |
638 | | */ |
639 | | |
640 | | static int NapatechBypassCallback(Packet *p) |
641 | | { |
642 | | NapatechPacketVars *ntpv = &(p->ntpv); |
643 | | |
644 | | /* |
645 | | * Since, at this point, we don't know what action to take, |
646 | | * simply mark this packet as one that should be |
647 | | * bypassed when the packet is returned by suricata with a |
648 | | * pass/drop verdict. |
649 | | */ |
650 | | ntpv->bypass = 1; |
651 | | |
652 | | return 1; |
653 | | } |
654 | | |
655 | | #endif |
656 | | |
657 | | /** |
658 | | * \brief Initialize the Napatech receiver thread, generate a single |
659 | | * NapatechThreadVar structure for each thread, this will |
660 | | * contain a NtNetStreamRx_t stream handle which is used when the |
661 | | * thread executes to acquire the packets. |
662 | | * |
663 | | * \param tv Thread variable to ThreadVars |
664 | | * \param initdata Initial data to the adapter passed from the user, |
665 | | * this is processed by the user. |
666 | | * |
667 | | * For now, we assume that we have only a single name for the NAPATECH |
668 | | * adapter. |
669 | | * |
670 | | * \param data data pointer gets populated with |
671 | | * |
672 | | */ |
673 | | TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **data) |
674 | | { |
675 | | SCEnter(); |
676 | | struct NapatechStreamDevConf *conf = (struct NapatechStreamDevConf *) initdata; |
677 | | uint16_t stream_id = conf->stream_id; |
678 | | *data = NULL; |
679 | | |
680 | | NapatechThreadVars *ntv = SCCalloc(1, sizeof (NapatechThreadVars)); |
681 | | if (unlikely(ntv == NULL)) { |
682 | | FatalError("Failed to allocate memory for NAPATECH thread vars."); |
683 | | } |
684 | | |
685 | | memset(ntv, 0, sizeof (NapatechThreadVars)); |
686 | | ntv->stream_id = stream_id; |
687 | | ntv->tv = tv; |
688 | | ntv->hba = conf->hba; |
689 | | |
690 | | DatalinkSetGlobalType(LINKTYPE_ETHERNET); |
691 | | |
692 | | SCLogDebug("Started processing packets from NAPATECH Stream: %u", ntv->stream_id); |
693 | | |
694 | | *data = (void *) ntv; |
695 | | SCReturnInt(TM_ECODE_OK); |
696 | | } |
697 | | |
/**
 * \brief Callback to indicate that the packet buffer can be returned to the hardware.
 *
 * Called when Suricata is done processing the packet. Before the packet is released
 * this also checks the action to see if the packet should be dropped and programs the
 * flow hardware if the flow is to be bypassed and the Napatech packet buffer is released.
 *
 * \param p Packet to return to the system.
 */
static void NapatechReleasePacket(struct Packet_ *p)
{
    /*
     * If the packet is to be dropped we need to set the wirelength
     * before releasing the Napatech buffer back to NTService.
     */
#ifdef NAPATECH_ENABLE_BYPASS
    /* wireLength 0 tells the hardware not to retransmit this packet in inline mode. */
    if (is_inline && PacketCheckAction(p, ACTION_DROP)) {
        p->ntpv.dyn3->wireLength = 0;
    }

    /*
     * If this flow is to be programmed for hardware bypass we do it now. This is done
     * here because the action is not available in the packet structure at the time of the
     * bypass callback and it needs to be done before we release the packet structure.
     */
    if (p->ntpv.bypass == 1) {
        ProgramFlow(p, is_inline);
    }
#endif

    /* Return the buffer to the NTAPI stream, then the Packet to the pool. */
    NT_NetRxRelease(p->ntpv.rx_stream, p->ntpv.nt_packet_buf);
    PacketFreeOrRelease(p);
}
733 | | |
/**
 * \brief Returns the NUMA node associated with the currently running thread.
 *
 * On non-Linux platforms node auto-detection is unavailable and 0 is
 * returned after logging a warning.
 *
 * \return ID of the NUMA node.
 */
static int GetNumaNode(void)
{
    int node = 0;

#if defined(__linux__)
    /* Resolve the CPU this thread is currently scheduled on to its node. */
    int current_cpu = sched_getcpu();
    node = numa_node_of_cpu(current_cpu);
#else
    SCLogWarning("Auto configuration of NUMA node is not supported on this OS.");
#endif

    return node;
}
754 | | |
755 | | /** |
756 | | * \brief Outputs hints on the optimal host-buffer configuration to aid tuning. |
757 | | * |
758 | | * \param log_level of the currently running instance. |
759 | | * |
760 | | */ |
761 | | static void RecommendNUMAConfig(void) |
762 | | { |
763 | | char *buffer, *p; |
764 | | int set_cpu_affinity = 0; |
765 | | |
766 | | p = buffer = SCCalloc(sizeof(char), (32 * (numa_max_node() + 1) + 1)); |
767 | | if (buffer == NULL) { |
768 | | FatalError("Failed to allocate memory for temporary buffer: %s", strerror(errno)); |
769 | | } |
770 | | |
771 | | if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) { |
772 | | set_cpu_affinity = 0; |
773 | | } |
774 | | |
775 | | if (set_cpu_affinity) { |
776 | | SCLogPerf("Minimum host buffers that should be defined in ntservice.ini:"); |
777 | | for (int i = 0; i <= numa_max_node(); ++i) { |
778 | | SCLogPerf(" NUMA Node %d: %d", i, SC_ATOMIC_GET(numa_detect[i].count)); |
779 | | p += snprintf(p, 32, "%s[%d, 16, %d]", (i == 0 ? "" : ","), |
780 | | SC_ATOMIC_GET(numa_detect[i].count), i); |
781 | | } |
782 | | SCLogPerf("E.g.: HostBuffersRx=%s", buffer); |
783 | | } |
784 | | |
785 | | SCFree(buffer); |
786 | | } |
787 | | |
788 | | /** |
789 | | * \brief Main Napatechpacket processing loop |
790 | | * |
791 | | * \param tv Thread variable to ThreadVars |
792 | | * \param data Pointer to NapatechThreadVars with data specific to Napatech |
793 | | * \param slot TMSlot where this instance is running. |
794 | | * |
795 | | */ |
796 | | TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot) |
797 | | { |
798 | | int32_t status; |
799 | | char error_buffer[100]; |
800 | | uint64_t pkt_ts; |
801 | | NtNetBuf_t packet_buffer; |
802 | | NapatechThreadVars *ntv = (NapatechThreadVars *) data; |
803 | | uint64_t hba_pkt_drops = 0; |
804 | | uint64_t hba_byte_drops = 0; |
805 | | uint16_t hba_pkt = 0; |
806 | | int numa_node = -1; |
807 | | int set_cpu_affinity = 0; |
808 | | int closer = 0; |
809 | | int is_autoconfig = 0; |
810 | | |
811 | | /* This just keeps the startup output more orderly. */ |
812 | | usleep(200000 * ntv->stream_id); |
813 | | |
814 | | #ifdef NAPATECH_ENABLE_BYPASS |
815 | | NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 }; |
816 | | if (NapatechUseHWBypass()) { |
817 | | /* Get a FlowStream handle for each adapter so we can efficiently find the |
818 | | * correct handle corresponding to the port on which a packet is received. |
819 | | */ |
820 | | int adapter = 0; |
821 | | for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) { |
822 | | flow_stream[adapter] = InitFlowStream(adapter, ntv->stream_id); |
823 | | } |
824 | | } |
825 | | #endif |
826 | | |
827 | | if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) { |
828 | | is_autoconfig = 0; |
829 | | } |
830 | | |
831 | | if (is_autoconfig) { |
832 | | numa_node = GetNumaNode(); |
833 | | |
834 | | if (numa_node <= numa_max_node()) { |
835 | | SC_ATOMIC_ADD(numa_detect[numa_node].count, 1); |
836 | | } |
837 | | |
838 | | if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) { |
839 | | set_cpu_affinity = 0; |
840 | | } |
841 | | |
842 | | if (set_cpu_affinity) { |
843 | | NapatechSetupNuma(ntv->stream_id, numa_node); |
844 | | } |
845 | | |
846 | | SC_ATOMIC_ADD(stream_count, 1); |
847 | | if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) { |
848 | | /* Print the recommended NUMA configuration early because it |
849 | | * can fail with "No available hostbuffers" in NapatechSetupTraffic */ |
850 | | RecommendNUMAConfig(); |
851 | | |
852 | | #ifdef NAPATECH_ENABLE_BYPASS |
853 | | if (ConfGetBool("napatech.inline", &is_inline) == 0) { |
854 | | is_inline = 0; |
855 | | } |
856 | | |
857 | | /* Initialize the port map before we setup traffic filters */ |
858 | | for (int i = 0; i < MAX_PORTS; ++i) { |
859 | | inline_port_map[i] = -1; |
860 | | } |
861 | | #endif |
862 | | /* The last thread to run sets up and deletes the streams */ |
863 | | status = NapatechSetupTraffic(NapatechGetNumFirstStream(), |
864 | | NapatechGetNumLastStream()); |
865 | | |
866 | | closer = 1; |
867 | | |
868 | | if (status == 0x20002061) { |
869 | | FatalError("Check host buffer configuration in ntservice.ini" |
870 | | " or try running /opt/napatech3/bin/ntpl -e " |
871 | | "\"delete=all\" to clean-up stream NUMA config."); |
872 | | } else if (status == 0x20000008) { |
873 | | FatalError("Check napatech.ports in the suricata config file."); |
874 | | } |
875 | | SCLogNotice("Napatech packet input engine started."); |
876 | | } |
877 | | } // is_autoconfig |
878 | | |
879 | | SCLogInfo( |
880 | | "Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ", |
881 | | sched_getcpu(), numa_node, ntv->stream_id); |
882 | | |
883 | | if (ntv->hba > 0) { |
884 | | char *s_hbad_pkt = SCCalloc(1, 32); |
885 | | if (unlikely(s_hbad_pkt == NULL)) { |
886 | | FatalError("Failed to allocate memory for NAPATECH stream counter."); |
887 | | } |
888 | | snprintf(s_hbad_pkt, 32, "nt%d.hba_drop", ntv->stream_id); |
889 | | hba_pkt = StatsRegisterCounter(s_hbad_pkt, tv); |
890 | | StatsSetupPrivate(tv); |
891 | | StatsSetUI64(tv, hba_pkt, 0); |
892 | | } |
893 | | SCLogDebug("Opening NAPATECH Stream: %u for processing", ntv->stream_id); |
894 | | |
895 | | if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream", |
896 | | NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) { |
897 | | |
898 | | NAPATECH_ERROR(status); |
899 | | SCFree(ntv); |
900 | | SCReturnInt(TM_ECODE_FAILED); |
901 | | } |
902 | | TmSlot *s = (TmSlot *) slot; |
903 | | ntv->slot = s->slot_next; |
904 | | |
905 | | // Indicate that the thread is actually running its application level code (i.e., it can poll |
906 | | // packets) |
907 | | TmThreadsSetFlag(tv, THV_RUNNING); |
908 | | |
909 | | while (!(suricata_ctl_flags & SURICATA_STOP)) { |
910 | | /* make sure we have at least one packet in the packet pool, to prevent |
911 | | * us from alloc'ing packets at line rate */ |
912 | | PacketPoolWait(); |
913 | | |
914 | | /* Napatech returns packets 1 at a time */ |
915 | | status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000); |
916 | | if (unlikely( |
917 | | status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) { |
918 | | if (status == NT_STATUS_TIMEOUT) { |
919 | | TmThreadsCaptureHandleTimeout(tv, NULL); |
920 | | } |
921 | | continue; |
922 | | } else if (unlikely(status != NT_SUCCESS)) { |
923 | | NAPATECH_ERROR(status); |
924 | | SCLogInfo("Failed to read from Napatech Stream %d: %s", |
925 | | ntv->stream_id, error_buffer); |
926 | | break; |
927 | | } |
928 | | |
929 | | Packet *p = PacketGetFromQueueOrAlloc(); |
930 | | if (unlikely(p == NULL)) { |
931 | | NT_NetRxRelease(ntv->rx_stream, packet_buffer); |
932 | | SCReturnInt(TM_ECODE_FAILED); |
933 | | } |
934 | | |
935 | | #ifdef NAPATECH_ENABLE_BYPASS |
936 | | p->ntpv.bypass = 0; |
937 | | #endif |
938 | | p->ntpv.rx_stream = ntv->rx_stream; |
939 | | |
940 | | pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer); |
941 | | |
942 | | /* |
943 | | * Handle the different timestamp forms that the napatech cards could use |
944 | | * - NT_TIMESTAMP_TYPE_NATIVE is not supported due to having an base |
945 | | * of 0 as opposed to NATIVE_UNIX which has a base of 1/1/1970 |
946 | | */ |
947 | | switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) { |
948 | | case NT_TIMESTAMP_TYPE_NATIVE_UNIX: |
949 | | p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts / 100000000), |
950 | | ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0)); |
951 | | break; |
952 | | case NT_TIMESTAMP_TYPE_PCAP: |
953 | | p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32), pkt_ts & 0xFFFFFFFF); |
954 | | break; |
955 | | case NT_TIMESTAMP_TYPE_PCAP_NANOTIME: |
956 | | p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32), |
957 | | ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0)); |
958 | | break; |
959 | | case NT_TIMESTAMP_TYPE_NATIVE_NDIS: |
960 | | /* number of seconds between 1/1/1601 and 1/1/1970 */ |
961 | | p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS((pkt_ts / 100000000) - 11644473600), |
962 | | ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0)); |
963 | | break; |
964 | | default: |
965 | | SCLogError("Packet from Napatech Stream: %u does not have a supported timestamp " |
966 | | "format", |
967 | | ntv->stream_id); |
968 | | NT_NetRxRelease(ntv->rx_stream, packet_buffer); |
969 | | SCReturnInt(TM_ECODE_FAILED); |
970 | | } |
971 | | |
972 | | if (unlikely(ntv->hba > 0)) { |
973 | | NtNetRx_t stat_cmd; |
974 | | stat_cmd.cmd = NT_NETRX_READ_CMD_STREAM_DROP; |
975 | | /* Update drop counter */ |
976 | | if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) { |
977 | | NAPATECH_ERROR(status); |
978 | | SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u", |
979 | | ntv->stream_id); |
980 | | } else { |
981 | | hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped; |
982 | | |
983 | | StatsSetUI64(tv, hba_pkt, hba_pkt_drops); |
984 | | } |
985 | | StatsSyncCountersIfSignalled(tv); |
986 | | } |
987 | | |
988 | | #ifdef NAPATECH_ENABLE_BYPASS |
989 | | p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer); |
990 | | p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL); |
991 | | NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]); |
992 | | p->ntpv.flow_stream = flow_stream[NapatechGetAdapter(p->ntpv.dyn3->rxPort)]; |
993 | | |
994 | | #endif |
995 | | |
996 | | p->ReleasePacket = NapatechReleasePacket; |
997 | | p->ntpv.nt_packet_buf = packet_buffer; |
998 | | p->ntpv.stream_id = ntv->stream_id; |
999 | | p->datalink = LINKTYPE_ETHERNET; |
1000 | | |
1001 | | if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) { |
1002 | | TmqhOutputPacketpool(ntv->tv, p); |
1003 | | SCReturnInt(TM_ECODE_FAILED); |
1004 | | } |
1005 | | |
1006 | | if (unlikely(TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK)) { |
1007 | | SCReturnInt(TM_ECODE_FAILED); |
1008 | | } |
1009 | | |
1010 | | /* |
1011 | | * At this point the packet and the Napatech Packet Buffer have been returned |
1012 | | * to the system in the NapatechReleasePacket() Callback. |
1013 | | */ |
1014 | | |
1015 | | StatsSyncCountersIfSignalled(tv); |
1016 | | } // while |
1017 | | |
1018 | | if (closer) { |
1019 | | NapatechDeleteFilters(); |
1020 | | } |
1021 | | |
1022 | | if (unlikely(ntv->hba > 0)) { |
1023 | | SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld", hba_pkt_drops, hba_byte_drops); |
1024 | | } |
1025 | | |
1026 | | SCReturnInt(TM_ECODE_OK); |
1027 | | } |
1028 | | |
1029 | | /** |
1030 | | * \brief Print some stats to the log at program exit. |
1031 | | * |
1032 | | * \param tv Pointer to ThreadVars. |
1033 | | * \param data Pointer to data, ErfFileThreadVars. |
1034 | | */ |
1035 | | void NapatechStreamThreadExitStats(ThreadVars *tv, void *data) |
1036 | | { |
1037 | | NapatechThreadVars *ntv = (NapatechThreadVars *) data; |
1038 | | NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id); |
1039 | | |
1040 | | double percent = 0; |
1041 | | if (stat.current_drop_packets > 0) |
1042 | | percent = (((double) stat.current_drop_packets) |
1043 | | / (stat.current_packets + stat.current_drop_packets)) * 100; |
1044 | | |
1045 | | SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu", |
1046 | | (uint64_t) ntv->stream_id, stat.current_packets, |
1047 | | stat.current_drop_packets, percent, stat.current_bytes); |
1048 | | |
1049 | | SC_ATOMIC_ADD(total_packets, stat.current_packets); |
1050 | | SC_ATOMIC_ADD(total_drops, stat.current_drop_packets); |
1051 | | SC_ATOMIC_ADD(total_tallied, 1); |
1052 | | |
1053 | | if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) { |
1054 | | if (SC_ATOMIC_GET(total_drops) > 0) |
1055 | | percent = (((double) SC_ATOMIC_GET(total_drops)) / (SC_ATOMIC_GET(total_packets) |
1056 | | + SC_ATOMIC_GET(total_drops))) * 100; |
1057 | | |
1058 | | SCLogInfo(" "); |
1059 | | SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)", |
1060 | | SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent); |
1061 | | |
1062 | | #ifdef NAPATECH_ENABLE_BYPASS |
1063 | | SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld", |
1064 | | SC_ATOMIC_GET(flow_callback_cnt), |
1065 | | SC_ATOMIC_GET(flow_callback_udp_pkts), |
1066 | | SC_ATOMIC_GET(flow_callback_tcp_pkts), |
1067 | | SC_ATOMIC_GET(flow_callback_unhandled_pkts)); |
1068 | | #endif |
1069 | | } |
1070 | | } |
1071 | | |
1072 | | /** |
1073 | | * \brief Deinitializes the NAPATECH card. |
1074 | | * \param tv pointer to ThreadVars |
1075 | | * \param data pointer that gets cast into PcapThreadVars for ptv |
1076 | | */ |
1077 | | TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data) |
1078 | | { |
1079 | | SCEnter(); |
1080 | | NapatechThreadVars *ntv = (NapatechThreadVars *) data; |
1081 | | |
1082 | | SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id); |
1083 | | NT_NetRxClose(ntv->rx_stream); |
1084 | | |
1085 | | SCReturnInt(TM_ECODE_OK); |
1086 | | } |
1087 | | |
1088 | | /** |
1089 | | * \brief This function passes off to link type decoders. |
1090 | | * |
1091 | | * NapatechDecode decodes packets from Napatech and passes |
1092 | | * them off to the proper link type decoder. |
1093 | | * |
1094 | | * \param t pointer to ThreadVars |
1095 | | * \param p pointer to the current packet |
1096 | | * \param data pointer that gets cast into PcapThreadVars for ptv |
1097 | | */ |
1098 | | TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data) |
1099 | | { |
1100 | | SCEnter(); |
1101 | | |
1102 | | DecodeThreadVars *dtv = (DecodeThreadVars *) data; |
1103 | | |
1104 | | BUG_ON(PKT_IS_PSEUDOPKT(p)); |
1105 | | |
1106 | | // update counters |
1107 | | DecodeUpdatePacketCounters(tv, dtv, p); |
1108 | | |
1109 | | switch (p->datalink) { |
1110 | | case LINKTYPE_ETHERNET: |
1111 | | DecodeEthernet(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p)); |
1112 | | break; |
1113 | | default: |
1114 | | SCLogError("Datalink type %" PRId32 " not yet supported in module NapatechDecode", |
1115 | | p->datalink); |
1116 | | break; |
1117 | | } |
1118 | | |
1119 | | PacketDecodeFinalize(tv, dtv, p); |
1120 | | SCReturnInt(TM_ECODE_OK); |
1121 | | } |
1122 | | |
1123 | | /** |
1124 | | * \brief Initialization of Napatech Thread. |
1125 | | * |
1126 | | * \param t pointer to ThreadVars |
1127 | | * \param initdata - unused. |
1128 | | * \param data pointer that gets cast into DecoderThreadVars |
1129 | | */ |
1130 | | TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data) |
1131 | | { |
1132 | | SCEnter(); |
1133 | | DecodeThreadVars *dtv = NULL; |
1134 | | dtv = DecodeThreadVarsAlloc(tv); |
1135 | | if (dtv == NULL) { |
1136 | | SCReturnInt(TM_ECODE_FAILED); |
1137 | | } |
1138 | | |
1139 | | DecodeRegisterPerfCounters(dtv, tv); |
1140 | | *data = (void *) dtv; |
1141 | | SCReturnInt(TM_ECODE_OK); |
1142 | | } |
1143 | | |
1144 | | /** |
1145 | | * \brief Deinitialization of Napatech Thread. |
1146 | | * |
1147 | | * \param tv pointer to ThreadVars |
1148 | | * \param data pointer that gets cast into DecoderThreadVars |
1149 | | */ |
1150 | | TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data) |
1151 | | { |
1152 | | if (data != NULL) { |
1153 | | DecodeThreadVarsFree(tv, data); |
1154 | | } |
1155 | | SCReturnInt(TM_ECODE_OK); |
1156 | | } |
1157 | | |
1158 | | #endif /* HAVE_NAPATECH */ |