/src/suricata7/src/source-nfq.c
Line | Count | Source |
1 | | /* Copyright (C) 2007-2023 Open Information Security Foundation |
2 | | * |
3 | | * You can copy, redistribute or modify this Program under the terms of |
4 | | * the GNU General Public License version 2 as published by the Free |
5 | | * Software Foundation. |
6 | | * |
7 | | * This program is distributed in the hope that it will be useful, |
8 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | | * GNU General Public License for more details. |
11 | | * |
12 | | * You should have received a copy of the GNU General Public License |
13 | | * version 2 along with this program; if not, write to the Free Software |
14 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
15 | | * 02110-1301, USA. |
16 | | */ |
17 | | |
18 | | /** |
19 | | * \file |
20 | | * |
21 | | * \author Victor Julien <victor@inliniac.net> |
22 | | * \author Eric Leblond <eric@regit.org> |
23 | | * |
24 | | * Netfilter's netfilter_queue support for reading packets from the |
25 | | * kernel and setting verdicts back to it (inline mode). |
26 | | */ |
27 | | |
28 | | #include "suricata-common.h" |
29 | | #include "suricata.h" |
30 | | #include "packet.h" |
31 | | #include "decode.h" |
32 | | #include "packet-queue.h" |
33 | | |
34 | | #include "threads.h" |
35 | | #include "threadvars.h" |
36 | | #include "tm-threads.h" |
37 | | #include "tm-queuehandlers.h" |
38 | | #include "tmqh-packetpool.h" |
39 | | |
40 | | #include "conf.h" |
41 | | #include "conf-yaml-loader.h" |
42 | | #include "source-nfq-prototypes.h" |
43 | | #include "action-globals.h" |
44 | | |
45 | | #include "util-datalink.h" |
46 | | #include "util-debug.h" |
47 | | #include "util-error.h" |
48 | | #include "util-byte.h" |
49 | | #include "util-cpu.h" |
50 | | #include "util-privs.h" |
51 | | #include "util-device.h" |
52 | | |
53 | | #include "runmodes.h" |
54 | | |
55 | | #include "source-nfq.h" |
56 | | |
57 | | /* Handle the case where no NFQ support is compiled in. */ |
58 | | #ifndef NFQ |
59 | | static TmEcode NoNFQSupportExit(ThreadVars *, const void *, void **); |
60 | | |
61 | | void TmModuleReceiveNFQRegister (void) |
62 | 71 | { |
63 | 71 | tmm_modules[TMM_RECEIVENFQ].name = "ReceiveNFQ"; |
64 | 71 | tmm_modules[TMM_RECEIVENFQ].ThreadInit = NoNFQSupportExit; |
65 | 71 | tmm_modules[TMM_RECEIVENFQ].ThreadExitPrintStats = NULL; |
66 | 71 | tmm_modules[TMM_RECEIVENFQ].ThreadDeinit = NULL; |
67 | 71 | tmm_modules[TMM_RECEIVENFQ].cap_flags = SC_CAP_NET_ADMIN; |
68 | 71 | tmm_modules[TMM_RECEIVENFQ].flags = TM_FLAG_RECEIVE_TM; |
69 | 71 | } |
70 | | |
71 | | void TmModuleVerdictNFQRegister (void) |
72 | 71 | { |
73 | 71 | tmm_modules[TMM_VERDICTNFQ].name = "VerdictNFQ"; |
74 | 71 | tmm_modules[TMM_VERDICTNFQ].ThreadInit = NoNFQSupportExit; |
75 | 71 | tmm_modules[TMM_VERDICTNFQ].ThreadExitPrintStats = NULL; |
76 | 71 | tmm_modules[TMM_VERDICTNFQ].ThreadDeinit = NULL; |
77 | 71 | tmm_modules[TMM_VERDICTNFQ].cap_flags = SC_CAP_NET_ADMIN; |
78 | 71 | tmm_modules[TMM_VERDICTNFQ].flags = TM_FLAG_VERDICT_TM; |
79 | 71 | } |
80 | | |
81 | | void TmModuleDecodeNFQRegister (void) |
82 | 71 | { |
83 | 71 | tmm_modules[TMM_DECODENFQ].name = "DecodeNFQ"; |
84 | 71 | tmm_modules[TMM_DECODENFQ].ThreadInit = NoNFQSupportExit; |
85 | 71 | tmm_modules[TMM_DECODENFQ].ThreadExitPrintStats = NULL; |
86 | 71 | tmm_modules[TMM_DECODENFQ].ThreadDeinit = NULL; |
87 | 71 | tmm_modules[TMM_DECODENFQ].cap_flags = 0; |
88 | 71 | tmm_modules[TMM_DECODENFQ].flags = TM_FLAG_DECODE_TM; |
89 | 71 | } |
90 | | |
/**
 * \brief ThreadInit stub used when NFQ support is not compiled in.
 *
 * Always aborts the engine via FatalError, naming the thread that was
 * being created and hinting at rebuilding with --enable-nfqueue.
 *
 * \param tv thread vars of the thread being created (its name is logged)
 * \param initdata unused
 * \param data unused
 */
static TmEcode NoNFQSupportExit(ThreadVars *tv, const void *initdata, void **data)
{
    FatalError("Error creating thread %s: you do not "
               "have support for nfqueue enabled please recompile with "
               "--enable-nfqueue",
            tv->name);
}
98 | | |
99 | | #else /* we do have NFQ support */ |
100 | | |
101 | | extern uint16_t max_pending_packets; |
102 | | |
103 | | #define MAX_ALREADY_TREATED 5 |
104 | | #define NFQ_VERDICT_RETRY_COUNT 3 |
105 | | static int already_seen_warning; |
106 | | static int runmode_workers; |
107 | | |
108 | | #define NFQ_BURST_FACTOR 4 |
109 | | |
110 | | #ifndef SOL_NETLINK |
111 | | #define SOL_NETLINK 270 |
112 | | #endif |
113 | | |
114 | | typedef struct NFQThreadVars_ |
115 | | { |
116 | | uint16_t nfq_index; |
117 | | ThreadVars *tv; |
118 | | TmSlot *slot; |
119 | | |
120 | | LiveDevice *livedev; |
121 | | |
122 | | char *data; /** Per function and thread data */ |
123 | | int datalen; /** Length of per function and thread data */ |
124 | | } NFQThreadVars; |
125 | | /* shared vars for all for nfq queues and threads */ |
126 | | static NFQGlobalVars nfq_g; |
127 | | |
128 | | static NFQThreadVars *g_nfq_t; |
129 | | static NFQQueueVars *g_nfq_q; |
130 | | static uint16_t receive_queue_num = 0; |
131 | | static SCMutex nfq_init_lock; |
132 | | |
133 | | static TmEcode ReceiveNFQLoop(ThreadVars *tv, void *data, void *slot); |
134 | | static TmEcode ReceiveNFQThreadInit(ThreadVars *, const void *, void **); |
135 | | static TmEcode ReceiveNFQThreadDeinit(ThreadVars *, void *); |
136 | | static void ReceiveNFQThreadExitStats(ThreadVars *, void *); |
137 | | |
138 | | static TmEcode VerdictNFQ(ThreadVars *, Packet *, void *); |
139 | | static TmEcode VerdictNFQThreadInit(ThreadVars *, const void *, void **); |
140 | | static TmEcode VerdictNFQThreadDeinit(ThreadVars *, void *); |
141 | | |
142 | | static TmEcode DecodeNFQ(ThreadVars *, Packet *, void *); |
143 | | static TmEcode DecodeNFQThreadInit(ThreadVars *, const void *, void **); |
144 | | static TmEcode DecodeNFQThreadDeinit(ThreadVars *tv, void *data); |
145 | | |
146 | | static TmEcode NFQSetVerdict(Packet *p); |
147 | | static void NFQReleasePacket(Packet *p); |
148 | | |
149 | | typedef enum NFQMode_ { |
150 | | NFQ_ACCEPT_MODE, |
151 | | NFQ_REPEAT_MODE, |
152 | | NFQ_ROUTE_MODE, |
153 | | } NFQMode; |
154 | | |
155 | | #define NFQ_FLAG_FAIL_OPEN (1 << 0) |
156 | | |
157 | | typedef struct NFQCnf_ { |
158 | | NFQMode mode; |
159 | | uint32_t mark; |
160 | | uint32_t mask; |
161 | | uint32_t bypass_mark; |
162 | | uint32_t bypass_mask; |
163 | | uint32_t next_queue; |
164 | | uint32_t flags; |
165 | | uint8_t batchcount; |
166 | | } NFQCnf; |
167 | | |
168 | | NFQCnf nfq_config; |
169 | | |
170 | | void TmModuleReceiveNFQRegister (void) |
171 | | { |
172 | | /* XXX create a general NFQ setup function */ |
173 | | memset(&nfq_g, 0, sizeof(nfq_g)); |
174 | | SCMutexInit(&nfq_init_lock, NULL); |
175 | | |
176 | | tmm_modules[TMM_RECEIVENFQ].name = "ReceiveNFQ"; |
177 | | tmm_modules[TMM_RECEIVENFQ].ThreadInit = ReceiveNFQThreadInit; |
178 | | tmm_modules[TMM_RECEIVENFQ].PktAcqLoop = ReceiveNFQLoop; |
179 | | tmm_modules[TMM_RECEIVENFQ].PktAcqBreakLoop = NULL; |
180 | | tmm_modules[TMM_RECEIVENFQ].ThreadExitPrintStats = ReceiveNFQThreadExitStats; |
181 | | tmm_modules[TMM_RECEIVENFQ].ThreadDeinit = ReceiveNFQThreadDeinit; |
182 | | tmm_modules[TMM_RECEIVENFQ].flags = TM_FLAG_RECEIVE_TM; |
183 | | } |
184 | | |
185 | | void TmModuleVerdictNFQRegister (void) |
186 | | { |
187 | | tmm_modules[TMM_VERDICTNFQ].name = "VerdictNFQ"; |
188 | | tmm_modules[TMM_VERDICTNFQ].ThreadInit = VerdictNFQThreadInit; |
189 | | tmm_modules[TMM_VERDICTNFQ].Func = VerdictNFQ; |
190 | | tmm_modules[TMM_VERDICTNFQ].ThreadDeinit = VerdictNFQThreadDeinit; |
191 | | tmm_modules[TMM_VERDICTNFQ].flags = TM_FLAG_VERDICT_TM; |
192 | | } |
193 | | |
194 | | void TmModuleDecodeNFQRegister (void) |
195 | | { |
196 | | tmm_modules[TMM_DECODENFQ].name = "DecodeNFQ"; |
197 | | tmm_modules[TMM_DECODENFQ].ThreadInit = DecodeNFQThreadInit; |
198 | | tmm_modules[TMM_DECODENFQ].Func = DecodeNFQ; |
199 | | tmm_modules[TMM_DECODENFQ].ThreadDeinit = DecodeNFQThreadDeinit; |
200 | | tmm_modules[TMM_DECODENFQ].flags = TM_FLAG_DECODE_TM; |
201 | | } |
202 | | |
/** \brief Initialize the NFQ global configuration data from the
 *         "nfq.*" configuration tree.
 *
 * Populates the global \c nfq_config: verdict mode (accept/repeat/route),
 * repeat/bypass marks and masks, route queue, fail-open flag and the
 * verdict batch count. Aborts via FatalError on an unknown nfq.mode.
 *
 * \param quiet when true, the summary SCLogInfo lines at the end are
 *              suppressed.
 */
void NFQInitConfig(bool quiet)
{
    intmax_t value = 0;
    const char *nfq_mode = NULL;
    int boolval;

    SCLogDebug("Initializing NFQ");

    memset(&nfq_config, 0, sizeof(nfq_config));

    /* default to ACCEPT mode when nfq.mode is absent */
    if ((ConfGet("nfq.mode", &nfq_mode)) == 0) {
        nfq_config.mode = NFQ_ACCEPT_MODE;
    } else {
        if (!strcmp("accept", nfq_mode)) {
            nfq_config.mode = NFQ_ACCEPT_MODE;
        } else if (!strcmp("repeat", nfq_mode)) {
            nfq_config.mode = NFQ_REPEAT_MODE;
        } else if (!strcmp("route", nfq_mode)) {
            nfq_config.mode = NFQ_ROUTE_MODE;
        } else {
            FatalError("Unknown nfq.mode");
        }
    }

    /* fail-open needs library support (nfq_set_queue_flags) */
    (void)ConfGetBool("nfq.fail-open", (int *)&boolval);
    if (boolval) {
#ifdef HAVE_NFQ_SET_QUEUE_FLAGS
        SCLogInfo("Enabling fail-open on queue");
        nfq_config.flags |= NFQ_FLAG_FAIL_OPEN;
#else
        SCLogError("nfq.%s set but NFQ library has no support for it.", "fail-open");
#endif
    }

    if ((ConfGetInt("nfq.repeat-mark", &value)) == 1) {
        nfq_config.mark = (uint32_t)value;
    }

    if ((ConfGetInt("nfq.repeat-mask", &value)) == 1) {
        nfq_config.mask = (uint32_t)value;
    }

    if ((ConfGetInt("nfq.bypass-mark", &value)) == 1) {
        nfq_config.bypass_mark = (uint32_t)value;
    }

    if ((ConfGetInt("nfq.bypass-mask", &value)) == 1) {
        nfq_config.bypass_mask = (uint32_t)value;
    }

    /* the route queue is stored pre-shifted into the upper 16 bits,
     * matching the NF_VERDICT layout used when verdicting */
    if ((ConfGetInt("nfq.route-queue", &value)) == 1) {
        nfq_config.next_queue = ((uint32_t)value) << 16;
    }

    if ((ConfGetInt("nfq.batchcount", &value)) == 1) {
#ifdef HAVE_NFQ_SET_VERDICT_BATCH
        if (value > 255) {
            SCLogWarning("nfq.batchcount cannot exceed 255.");
            value = 255;
        }
        /* stored as count-1; a value of 1 means batching disabled */
        if (value > 1)
            nfq_config.batchcount = (uint8_t) (value - 1);
#else
        SCLogWarning("nfq.%s set but NFQ library has no support for it.", "batchcount");
#endif
    }

    if (!quiet) {
        switch (nfq_config.mode) {
            case NFQ_ACCEPT_MODE:
                SCLogInfo("NFQ running in standard ACCEPT/DROP mode");
                break;
            case NFQ_REPEAT_MODE:
                SCLogInfo("NFQ running in REPEAT mode with mark %"PRIu32"/%"PRIu32,
                        nfq_config.mark, nfq_config.mask);
                break;
            case NFQ_ROUTE_MODE:
                SCLogInfo("NFQ running in route mode with next queue %"PRIu32,
                        nfq_config.next_queue >> 16);
                break;
        }
    }

}
292 | | |
293 | | static uint8_t NFQVerdictCacheLen(NFQQueueVars *t) |
294 | | { |
295 | | #ifdef HAVE_NFQ_SET_VERDICT_BATCH |
296 | | return t->verdict_cache.len; |
297 | | #else |
298 | | return 0; |
299 | | #endif |
300 | | } |
301 | | |
/**
 * \brief Send the batched verdict accumulated in the queue's cache.
 *
 * Issues nfq_set_verdict_batch(2) for the most recently cached packet
 * id, retrying up to NFQ_VERDICT_RETRY_COUNT times on failure. On
 * success the cache length and mark state are reset; on final failure
 * only a warning is logged and the cache is left untouched.
 *
 * No-op when built without HAVE_NFQ_SET_VERDICT_BATCH.
 */
static void NFQVerdictCacheFlush(NFQQueueVars *t)
{
#ifdef HAVE_NFQ_SET_VERDICT_BATCH
    int ret;
    int iter = 0;

    do {
        /* use the mark-setting batch variant only when a mark was cached */
        if (t->verdict_cache.mark_valid)
            ret = nfq_set_verdict_batch2(t->qh,
                    t->verdict_cache.packet_id,
                    t->verdict_cache.verdict,
                    t->verdict_cache.mark);
        else
            ret = nfq_set_verdict_batch(t->qh,
                    t->verdict_cache.packet_id,
                    t->verdict_cache.verdict);
    } while ((ret < 0) && (iter++ < NFQ_VERDICT_RETRY_COUNT));

    if (ret < 0) {
        SCLogWarning("nfq_set_verdict_batch failed: %s", strerror(errno));
    } else {
        t->verdict_cache.len = 0;
        t->verdict_cache.mark_valid = 0;
    }
#endif
}
328 | | |
/**
 * \brief Try to add a packet's verdict to the batch cache.
 *
 * A verdict can only be batched if it matches the verdict already in
 * the cache, the packet was not stream-modified, the verdict is not a
 * drop, and the packet mark state matches the cached mark state. Any
 * mismatch flushes the current cache and tells the caller to send a
 * single verdict instead.
 *
 * \retval 0 verdict was cached (and possibly flushed when full)
 * \retval -1 caller must send an individual verdict for this packet
 */
static int NFQVerdictCacheAdd(NFQQueueVars *t, Packet *p, uint32_t verdict)
{
#ifdef HAVE_NFQ_SET_VERDICT_BATCH
    /* batching disabled (maxlen 0): always verdict individually */
    if (t->verdict_cache.maxlen == 0)
        return -1;

    /* modified payloads and drops are never batched */
    if (p->flags & PKT_STREAM_MODIFIED || verdict == NF_DROP)
        goto flush;

    if (p->flags & PKT_MARK_MODIFIED) {
        if (!t->verdict_cache.mark_valid) {
            /* can't start a marked batch on top of unmarked entries */
            if (t->verdict_cache.len)
                goto flush;
            t->verdict_cache.mark_valid = 1;
            t->verdict_cache.mark = p->nfq_v.mark;
        } else if (t->verdict_cache.mark != p->nfq_v.mark) {
            goto flush;
        }
    } else if (t->verdict_cache.mark_valid) {
        /* unmarked packet can't join a marked batch */
        goto flush;
    }

    if (t->verdict_cache.len == 0) {
        t->verdict_cache.verdict = verdict;
    } else if (t->verdict_cache.verdict != verdict)
        goto flush;

    /* same verdict, mark not set or identical -> can cache */
    t->verdict_cache.packet_id = p->nfq_v.id;

    if (t->verdict_cache.len >= t->verdict_cache.maxlen)
        NFQVerdictCacheFlush(t);
    else
        t->verdict_cache.len++;
    return 0;
 flush:
    /* can't cache. Flush current cache and signal caller it should send single verdict */
    if (NFQVerdictCacheLen(t) > 0)
        NFQVerdictCacheFlush(t);
#endif
    return -1;
}
371 | | |
372 | | static inline void NFQMutexInit(NFQQueueVars *nq) |
373 | | { |
374 | | char *active_runmode = RunmodeGetActive(); |
375 | | |
376 | | if (active_runmode && !strcmp("workers", active_runmode)) { |
377 | | nq->use_mutex = 0; |
378 | | runmode_workers = 1; |
379 | | SCLogDebug("NFQ running in 'workers' runmode, will not use mutex."); |
380 | | } else { |
381 | | nq->use_mutex = 1; |
382 | | runmode_workers = 0; |
383 | | SCMutexInit(&nq->mutex_qh, NULL); |
384 | | } |
385 | | } |
386 | | |
387 | | #define NFQMutexLock(nq) do { \ |
388 | | if ((nq)->use_mutex) \ |
389 | | SCMutexLock(&(nq)->mutex_qh); \ |
390 | | } while (0) |
391 | | |
392 | | #define NFQMutexUnlock(nq) do { \ |
393 | | if ((nq)->use_mutex) \ |
394 | | SCMutexUnlock(&(nq)->mutex_qh); \ |
395 | | } while (0) |
396 | | |
397 | | /** |
398 | | * \brief Read data from nfq message and setup Packet |
399 | | * |
400 | | * \note |
401 | | * In case of error, this function verdict the packet |
402 | | * to avoid skb to get stuck in kernel. |
403 | | */ |
404 | | static int NFQSetupPkt (Packet *p, struct nfq_q_handle *qh, void *data) |
405 | | { |
406 | | struct nfq_data *tb = (struct nfq_data *)data; |
407 | | int ret; |
408 | | char *pktdata; |
409 | | struct nfqnl_msg_packet_hdr *ph; |
410 | | |
411 | | // Early release function -- will be updated once repeat |
412 | | // mode handling has been done |
413 | | p->ReleasePacket = PacketFreeOrRelease; |
414 | | |
415 | | ph = nfq_get_msg_packet_hdr(tb); |
416 | | if (ph != NULL) { |
417 | | p->nfq_v.id = SCNtohl(ph->packet_id); |
418 | | p->nfq_v.hw_protocol = ph->hw_protocol; |
419 | | } |
420 | | /* coverity[missing_lock] */ |
421 | | p->nfq_v.mark = nfq_get_nfmark(tb); |
422 | | if (nfq_config.mode == NFQ_REPEAT_MODE) { |
423 | | if ((nfq_config.mark & nfq_config.mask) == |
424 | | (p->nfq_v.mark & nfq_config.mask)) { |
425 | | int iter = 0; |
426 | | if (already_seen_warning < MAX_ALREADY_TREATED) |
427 | | SCLogInfo("Packet seems already treated by suricata"); |
428 | | already_seen_warning++; |
429 | | do { |
430 | | ret = nfq_set_verdict(qh, p->nfq_v.id, NF_ACCEPT, 0, NULL); |
431 | | } while ((ret < 0) && (iter++ < NFQ_VERDICT_RETRY_COUNT)); |
432 | | if (ret < 0) { |
433 | | SCLogWarning( |
434 | | "nfq_set_verdict of %p failed %" PRId32 ": %s", p, ret, strerror(errno)); |
435 | | } |
436 | | return -1 ; |
437 | | } |
438 | | } |
439 | | |
440 | | // Switch to full featured release function |
441 | | p->ReleasePacket = NFQReleasePacket; |
442 | | p->nfq_v.ifi = nfq_get_indev(tb); |
443 | | p->nfq_v.ifo = nfq_get_outdev(tb); |
444 | | p->nfq_v.verdicted = 0; |
445 | | |
446 | | #ifdef NFQ_GET_PAYLOAD_SIGNED |
447 | | ret = nfq_get_payload(tb, &pktdata); |
448 | | #else |
449 | | ret = nfq_get_payload(tb, (unsigned char **) &pktdata); |
450 | | #endif /* NFQ_GET_PAYLOAD_SIGNED */ |
451 | | if (ret > 0) { |
452 | | /* nfq_get_payload returns a pointer to a part of memory |
453 | | * that is not preserved over the lifetime of our packet. |
454 | | * So we need to copy it. */ |
455 | | if (ret > 65536) { |
456 | | /* Will not be able to copy data ! Set length to 0 |
457 | | * to trigger an error in packet decoding. |
458 | | * This is unlikely to happen */ |
459 | | SCLogWarning("NFQ sent too big packet"); |
460 | | SET_PKT_LEN(p, 0); |
461 | | } else if (runmode_workers) { |
462 | | PacketSetData(p, (uint8_t *)pktdata, ret); |
463 | | } else { |
464 | | PacketCopyData(p, (uint8_t *)pktdata, ret); |
465 | | } |
466 | | } else if (ret == -1) { |
467 | | /* unable to get pointer to data, ensure packet length is zero. |
468 | | * This will trigger an error in packet decoding */ |
469 | | SET_PKT_LEN(p, 0); |
470 | | } |
471 | | |
472 | | struct timeval tv; |
473 | | ret = nfq_get_timestamp(tb, &tv); |
474 | | if (ret != 0 || tv.tv_sec == 0) { |
475 | | memset(&tv, 0, sizeof(tv)); |
476 | | gettimeofday(&tv, NULL); |
477 | | } |
478 | | p->ts = SCTIME_FROM_TIMEVAL(&tv); |
479 | | |
480 | | p->datalink = DLT_RAW; |
481 | | return 0; |
482 | | } |
483 | | |
/**
 * \brief Release function for NFQ packets.
 *
 * If no verdict has been issued for this packet yet (error or early
 * release paths), mark it dropped and send the verdict now so the skb
 * does not stay stuck in the kernel, then recycle the packet.
 */
static void NFQReleasePacket(Packet *p)
{
    if (unlikely(!p->nfq_v.verdicted)) {
        PacketDrop(p, ACTION_DROP, PKT_DROP_REASON_NFQ_ERROR);
        NFQSetVerdict(p);
    }
    PacketFreeOrRelease(p);
}
492 | | |
493 | | /** |
494 | | * \brief bypass callback function for NFQ |
495 | | * |
496 | | * \param p a Packet to use information from to trigger bypass |
497 | | * \return 1 if bypass is successful, 0 if not |
498 | | */ |
499 | | static int NFQBypassCallback(Packet *p) |
500 | | { |
501 | | if (IS_TUNNEL_PKT(p)) { |
502 | | /* real tunnels may have multiple flows inside them, so bypass can't |
503 | | * work for those. Rebuilt packets from IP fragments are fine. */ |
504 | | if (p->flags & PKT_REBUILT_FRAGMENT) { |
505 | | Packet *tp = p->root ? p->root : p; |
506 | | SCSpinLock(&tp->persistent.tunnel_lock); |
507 | | tp->nfq_v.mark = (nfq_config.bypass_mark & nfq_config.bypass_mask) |
508 | | | (tp->nfq_v.mark & ~nfq_config.bypass_mask); |
509 | | tp->flags |= PKT_MARK_MODIFIED; |
510 | | SCSpinUnlock(&tp->persistent.tunnel_lock); |
511 | | return 1; |
512 | | } |
513 | | return 0; |
514 | | } else { |
515 | | /* coverity[missing_lock] */ |
516 | | p->nfq_v.mark = (nfq_config.bypass_mark & nfq_config.bypass_mask) |
517 | | | (p->nfq_v.mark & ~nfq_config.bypass_mask); |
518 | | p->flags |= PKT_MARK_MODIFIED; |
519 | | } |
520 | | |
521 | | return 1; |
522 | | } |
523 | | |
/**
 * \brief libnetfilter_queue per-message callback.
 *
 * Grabs a Packet from the pool, fills it from the nfq message via
 * NFQSetupPkt, updates the queue/live-device counters and hands the
 * packet to the thread's slot pipeline.
 *
 * \param qh nfq queue handle
 * \param nfmsg unused netlink message header
 * \param nfa the nfq message data
 * \param data the NFQThreadVars registered at queue creation
 *
 * \retval 0 on success (including "already treated" packets)
 * \retval -1 on packet pool exhaustion or pipeline failure
 */
static int NFQCallBack(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
                       struct nfq_data *nfa, void *data)
{
    NFQThreadVars *ntv = (NFQThreadVars *)data;
    ThreadVars *tv = ntv->tv;
    int ret;

    /* grab a packet */
    Packet *p = PacketGetFromQueueOrAlloc();
    if (p == NULL) {
        return -1;
    }
    PKT_SET_SRC(p, PKT_SRC_WIRE);

    p->nfq_v.nfq_index = ntv->nfq_index;
    /* if bypass mask is set then we may want to bypass so set pointer */
    if (nfq_config.bypass_mask) {
        p->BypassPacketsFlow = NFQBypassCallback;
    }

    ret = NFQSetupPkt(p, qh, (void *)nfa);
    if (ret == -1) {
#ifdef COUNTERS
        /* count the packet even though it is not processed further */
        NFQQueueVars *q = NFQGetQueue(ntv->nfq_index);
        q->errs++;
        q->pkts++;
        q->bytes += GET_PKT_LEN(p);
#endif /* COUNTERS */
        (void) SC_ATOMIC_ADD(ntv->livedev->pkts, 1);

        /* NFQSetupPkt is issuing a verdict
           so we only recycle Packet and leave */
        TmqhOutputPacketpool(tv, p);
        return 0;
    }

#ifdef COUNTERS
    NFQQueueVars *q = NFQGetQueue(ntv->nfq_index);
    q->pkts++;
    q->bytes += GET_PKT_LEN(p);
#endif /* COUNTERS */
    (void) SC_ATOMIC_ADD(ntv->livedev->pkts, 1);

    if (TmThreadsSlotProcessPkt(tv, ntv->slot, p) != TM_ECODE_OK) {
        return -1;
    }

    return 0;
}
573 | | |
/**
 * \brief Open and configure the nfq queue for one receive thread.
 *
 * Opens the library handle, (once, globally) unbinds/rebinds the
 * nf_queue handlers for AF_INET/AF_INET6, creates the queue with
 * NFQCallBack, sets copy mode, queue length, netlink socket options,
 * fail-open, verdict batching and a receive timeout.
 *
 * \param t thread vars holding the queue index
 * \param queue_maxlen kernel queue length to request (0 to skip)
 *
 * \retval TM_ECODE_OK on success
 * \retval TM_ECODE_FAILED on a fatal setup error
 */
static TmEcode NFQInitThread(NFQThreadVars *t, uint32_t queue_maxlen)
{
    struct timeval tv;
    int opt;
    NFQQueueVars *q = NFQGetQueue(t->nfq_index);
    if (q == NULL) {
        SCLogError("no queue for given index");
        return TM_ECODE_FAILED;
    }
    SCLogDebug("opening library handle");
    q->h = nfq_open();
    if (q->h == NULL) {
        SCLogError("nfq_open() failed");
        return TM_ECODE_FAILED;
    }

    /* the pf (un)bind is process-global; only the first thread does it,
     * guarded by the nfq_g.unbind flag */
    if (nfq_g.unbind == 0)
    {
        /* VJ: on my Ubuntu Hardy system this fails the first time it's
         * run. Ignoring the error seems to have no bad effects. */
        SCLogDebug("unbinding existing nf_queue handler for AF_INET (if any)");
        if (nfq_unbind_pf(q->h, AF_INET) < 0) {
            FatalError("nfq_unbind_pf() for AF_INET failed: %s", strerror(errno));
        }
        if (nfq_unbind_pf(q->h, AF_INET6) < 0) {
            FatalError("nfq_unbind_pf() for AF_INET6 failed");
        }
        nfq_g.unbind = 1;

        SCLogDebug("binding nfnetlink_queue as nf_queue handler for AF_INET and AF_INET6");

        if (nfq_bind_pf(q->h, AF_INET) < 0) {
            FatalError("nfq_bind_pf() for AF_INET failed");
        }
        if (nfq_bind_pf(q->h, AF_INET6) < 0) {
            FatalError("nfq_bind_pf() for AF_INET6 failed");
        }
    }

    SCLogInfo("binding this thread %d to queue '%" PRIu32 "'", t->nfq_index, q->queue_num);

    /* pass the thread memory as a void ptr so the
     * callback function has access to it. */
    q->qh = nfq_create_queue(q->h, q->queue_num, &NFQCallBack, (void *)t);
    if (q->qh == NULL) {
        SCLogError("nfq_create_queue failed");
        return TM_ECODE_FAILED;
    }

    SCLogDebug("setting copy_packet mode");

    /* 05DC = 1500 */
    //if (nfq_set_mode(nfq_t->qh, NFQNL_COPY_PACKET, 0x05DC) < 0) {
    if (nfq_set_mode(q->qh, NFQNL_COPY_PACKET, 0xFFFF) < 0) {
        SCLogError("can't set packet_copy mode");
        return TM_ECODE_FAILED;
    }

#ifdef HAVE_NFQ_MAXLEN
    if (queue_maxlen > 0) {
        SCLogInfo("setting queue length to %" PRId32 "", queue_maxlen);

        /* non-fatal if it fails */
        if (nfq_set_queue_maxlen(q->qh, queue_maxlen) < 0) {
            SCLogWarning("can't set queue maxlen: your kernel probably "
                         "doesn't support setting the queue length");
        }
    }
#endif /* HAVE_NFQ_MAXLEN */

    /* set netlink buffer size to a decent value */
    nfnl_rcvbufsiz(nfq_nfnlh(q->h), queue_maxlen * 1500);
    SCLogInfo("setting nfnl bufsize to %" PRId32 "", queue_maxlen * 1500);

    q->nh = nfq_nfnlh(q->h);
    q->fd = nfnl_fd(q->nh);
    NFQMutexInit(q);

    /* Set some netlink specific option on the socket to increase
       performance */
    opt = 1;
#ifdef NETLINK_BROADCAST_SEND_ERROR
    if (setsockopt(q->fd, SOL_NETLINK,
                   NETLINK_BROADCAST_SEND_ERROR, &opt, sizeof(int)) == -1) {
        SCLogWarning("can't set netlink broadcast error: %s", strerror(errno));
    }
#endif
    /* Don't send error about no buffer space available but drop the
       packets instead */
#ifdef NETLINK_NO_ENOBUFS
    if (setsockopt(q->fd, SOL_NETLINK,
                   NETLINK_NO_ENOBUFS, &opt, sizeof(int)) == -1) {
        SCLogWarning("can't set netlink enobufs: %s", strerror(errno));
    }
#endif

#ifdef HAVE_NFQ_SET_QUEUE_FLAGS
    if (nfq_config.flags & NFQ_FLAG_FAIL_OPEN) {
        uint32_t flags = NFQA_CFG_F_FAIL_OPEN;
        uint32_t mask = NFQA_CFG_F_FAIL_OPEN;
        int r = nfq_set_queue_flags(q->qh, mask, flags);

        if (r == -1) {
            SCLogWarning("can't set fail-open mode: %s", strerror(errno));
        } else {
            SCLogInfo("fail-open mode should be set on queue");
        }
    }
#endif

#ifdef HAVE_NFQ_SET_VERDICT_BATCH
    /* batching only works when recv and verdict happen on one thread */
    if (runmode_workers) {
        q->verdict_cache.maxlen = nfq_config.batchcount;
    } else if (nfq_config.batchcount) {
        SCLogError("nfq.batchcount is only valid in workers runmode.");
    }
#endif

    /* set a timeout to the socket so we can check for a signal
     * in case we don't get packets for a longer period. */
    tv.tv_sec = 1;
    tv.tv_usec = 0;

    if(setsockopt(q->fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) == -1) {
        SCLogWarning("can't set socket timeout: %s", strerror(errno));
    }

    SCLogDebug("nfq_q->h %p, nfq_q->nh %p, nfq_q->qh %p, nfq_q->fd %" PRId32 "",
            q->h, q->nh, q->qh, q->fd);

    return TM_ECODE_OK;
}
706 | | |
/**
 * \brief ThreadInit for the NFQ receive module.
 *
 * Blocks all signals in this thread, binds the queue via NFQInitThread
 * and allocates the per-thread receive buffer. Serialized through
 * nfq_init_lock. Exits the process on queue setup failure.
 *
 * \param tv thread vars
 * \param initdata the NFQThreadVars assigned to this thread
 * \param data out: set to the NFQThreadVars on success
 */
TmEcode ReceiveNFQThreadInit(ThreadVars *tv, const void *initdata, void **data)
{
    SCMutexLock(&nfq_init_lock);

    /* signals are handled elsewhere; block them all in this thread */
    sigset_t sigs;
    sigfillset(&sigs);
    pthread_sigmask(SIG_BLOCK, &sigs, NULL);

    NFQThreadVars *ntv = (NFQThreadVars *) initdata;
    /* store the ThreadVars pointer in our NFQ thread context
     * as we will need it in our callback function */
    ntv->tv = tv;

    int r = NFQInitThread(ntv, (max_pending_packets * NFQ_BURST_FACTOR));
    if (r != TM_ECODE_OK) {
        SCLogError("nfq thread failed to initialize");

        SCMutexUnlock(&nfq_init_lock);
        exit(EXIT_FAILURE);
    }

    /* receive buffer for netlink messages from the kernel */
#define T_DATA_SIZE 70000
    ntv->data = SCMalloc(T_DATA_SIZE);
    if (ntv->data == NULL) {
        SCMutexUnlock(&nfq_init_lock);
        return TM_ECODE_FAILED;
    }
    ntv->datalen = T_DATA_SIZE;
#undef T_DATA_SIZE

    DatalinkSetGlobalType(DLT_RAW);

    *data = (void *)ntv;

    SCMutexUnlock(&nfq_init_lock);
    return TM_ECODE_OK;
}
744 | | |
745 | | static void NFQDestroyQueue(NFQQueueVars *nq) |
746 | | { |
747 | | if (unlikely(nq == NULL)) { |
748 | | return; |
749 | | } |
750 | | |
751 | | SCLogDebug("starting... will close queuenum %" PRIu32 "", nq->queue_num); |
752 | | NFQMutexLock(nq); |
753 | | if (nq->qh != NULL) { |
754 | | nfq_destroy_queue(nq->qh); |
755 | | nq->qh = NULL; |
756 | | nfq_close(nq->h); |
757 | | nq->h = NULL; |
758 | | } |
759 | | NFQMutexUnlock(nq); |
760 | | } |
761 | | |
762 | | TmEcode ReceiveNFQThreadDeinit(ThreadVars *t, void *data) |
763 | | { |
764 | | NFQThreadVars *ntv = (NFQThreadVars *)data; |
765 | | NFQQueueVars *nq = NFQGetQueue(ntv->nfq_index); |
766 | | |
767 | | if (ntv->data != NULL) { |
768 | | SCFree(ntv->data); |
769 | | ntv->data = NULL; |
770 | | } |
771 | | ntv->datalen = 0; |
772 | | |
773 | | NFQDestroyQueue(nq); |
774 | | |
775 | | return TM_ECODE_OK; |
776 | | } |
777 | | |
778 | | TmEcode VerdictNFQThreadInit(ThreadVars *tv, const void *initdata, void **data) |
779 | | { |
780 | | NFQThreadVars *ntv = (NFQThreadVars *)initdata; |
781 | | *data = (void *)ntv; |
782 | | return TM_ECODE_OK; |
783 | | } |
784 | | |
785 | | TmEcode VerdictNFQThreadDeinit(ThreadVars *tv, void *data) |
786 | | { |
787 | | NFQThreadVars *ntv = (NFQThreadVars *)data; |
788 | | NFQQueueVars *nq = NFQGetQueue(ntv->nfq_index); |
789 | | |
790 | | NFQDestroyQueue(nq); |
791 | | |
792 | | return TM_ECODE_OK; |
793 | | } |
794 | | |
795 | | /** |
796 | | * \brief Add a single Netfilter queue |
797 | | * |
798 | | * \param string with the queue number |
799 | | * |
800 | | * \retval 0 on success. |
801 | | * \retval -1 on failure. |
802 | | */ |
803 | | int NFQRegisterQueue(const uint16_t number) |
804 | | { |
805 | | NFQThreadVars *ntv = NULL; |
806 | | NFQQueueVars *nq = NULL; |
807 | | char queue[10] = { 0 }; |
808 | | static bool many_queues_warned = false; |
809 | | uint16_t num_cpus = UtilCpuGetNumProcessorsOnline(); |
810 | | |
811 | | if (g_nfq_t == NULL || g_nfq_q == NULL) { |
812 | | SCLogError("NFQ context is not initialized"); |
813 | | return -1; |
814 | | } |
815 | | |
816 | | SCMutexLock(&nfq_init_lock); |
817 | | if (!many_queues_warned && (receive_queue_num >= num_cpus)) { |
818 | | SCLogWarning("using more Netfilter queues than %hu available CPU core(s) " |
819 | | "may degrade performance", |
820 | | num_cpus); |
821 | | many_queues_warned = true; |
822 | | } |
823 | | if (receive_queue_num >= NFQ_MAX_QUEUE) { |
824 | | SCLogError("can not register more than %d Netfilter queues", NFQ_MAX_QUEUE); |
825 | | SCMutexUnlock(&nfq_init_lock); |
826 | | return -1; |
827 | | } |
828 | | |
829 | | ntv = &g_nfq_t[receive_queue_num]; |
830 | | ntv->nfq_index = receive_queue_num; |
831 | | |
832 | | nq = &g_nfq_q[receive_queue_num]; |
833 | | memset(nq, 0, sizeof(*nq)); |
834 | | nq->queue_num = number; |
835 | | receive_queue_num++; |
836 | | SCMutexUnlock(&nfq_init_lock); |
837 | | snprintf(queue, sizeof(queue) - 1, "NFQ#%hu", number); |
838 | | LiveRegisterDevice(queue); |
839 | | |
840 | | ntv->livedev = LiveGetDevice(queue); |
841 | | |
842 | | if (ntv->livedev == NULL) { |
843 | | SCLogError("Unable to find Live device"); |
844 | | return -1; |
845 | | } |
846 | | |
847 | | SCLogDebug("Queue %d registered.", number); |
848 | | return 0; |
849 | | } |
850 | | |
851 | | /** |
852 | | * \brief Parses and adds Netfilter queue(s). |
853 | | * |
854 | | * \param string with the queue number or range |
855 | | * |
856 | | * \retval 0 on success. |
857 | | * \retval -1 on failure. |
858 | | */ |
859 | | int NFQParseAndRegisterQueues(const char *queues) |
860 | | { |
861 | | uint16_t queue_start = 0; |
862 | | uint16_t queue_end = 0; |
863 | | uint16_t num_queues = 1; // if argument is correct, at least one queue will be created |
864 | | |
865 | | // Either "id" or "start:end" format (e.g., "12" or "0:5") |
866 | | int count = sscanf(queues, "%hu:%hu", &queue_start, &queue_end); |
867 | | |
868 | | if (count < 1) { |
869 | | SCLogError("specified queue(s) argument '%s' is not " |
870 | | "valid (allowed queue numbers are 0-65535)", |
871 | | queues); |
872 | | return -1; |
873 | | } |
874 | | |
875 | | // Do we have a range? |
876 | | if (count == 2) { |
877 | | // Sanity check |
878 | | if (queue_start > queue_end) { |
879 | | SCLogError("start queue's number %d is greater than " |
880 | | "ending number %d", |
881 | | queue_start, queue_end); |
882 | | return -1; |
883 | | } |
884 | | |
885 | | num_queues = queue_end - queue_start + 1; // +1 due to inclusive range |
886 | | } |
887 | | |
888 | | // We do realloc() to preserve previously registered queues |
889 | | void *ptmp = SCRealloc(g_nfq_t, (receive_queue_num + num_queues) * sizeof(NFQThreadVars)); |
890 | | if (ptmp == NULL) { |
891 | | SCLogError("Unable to allocate NFQThreadVars"); |
892 | | NFQContextsClean(); |
893 | | exit(EXIT_FAILURE); |
894 | | } |
895 | | |
896 | | g_nfq_t = (NFQThreadVars *)ptmp; |
897 | | |
898 | | ptmp = SCRealloc(g_nfq_q, (receive_queue_num + num_queues) * sizeof(NFQQueueVars)); |
899 | | if (ptmp == NULL) { |
900 | | SCLogError("Unable to allocate NFQQueueVars"); |
901 | | NFQContextsClean(); |
902 | | exit(EXIT_FAILURE); |
903 | | } |
904 | | |
905 | | g_nfq_q = (NFQQueueVars *)ptmp; |
906 | | |
907 | | do { |
908 | | if (NFQRegisterQueue(queue_start) != 0) { |
909 | | return -1; |
910 | | } |
911 | | } while (++queue_start <= queue_end); |
912 | | |
913 | | return 0; |
914 | | } |
915 | | |
916 | | /** |
917 | | * \brief Get a pointer to the NFQ queue at index |
918 | | * |
919 | | * \param number idx of the queue in our array |
920 | | * |
921 | | * \retval ptr pointer to the NFQThreadVars at index |
922 | | * \retval NULL on error |
923 | | */ |
924 | | void *NFQGetQueue(int number) |
925 | | { |
926 | | if (unlikely(number < 0 || number >= receive_queue_num || g_nfq_q == NULL)) |
927 | | return NULL; |
928 | | |
929 | | return (void *)&g_nfq_q[number]; |
930 | | } |
931 | | |
932 | | /** |
933 | | * \brief Get a pointer to the NFQ thread at index |
934 | | * |
935 | | * This function is temporary used as configuration parser. |
936 | | * |
937 | | * \param number idx of the queue in our array |
938 | | * |
939 | | * \retval ptr pointer to the NFQThreadVars at index |
940 | | * \retval NULL on error |
941 | | */ |
942 | | void *NFQGetThread(int number) |
943 | | { |
944 | | if (unlikely(number < 0 || number >= receive_queue_num || g_nfq_t == NULL)) |
945 | | return NULL; |
946 | | |
947 | | return (void *)&g_nfq_t[number]; |
948 | | } |
949 | | |
/**
 * \brief NFQ function to get a packet from the kernel
 *
 * Reads one datagram from the queue's netlink socket into the thread's
 * receive buffer and hands it to libnetfilter_queue for dispatch.
 *
 * \param t  queue context (socket fd, handle, verdict cache, counters)
 * \param tv thread vars owning the receive buffer
 *
 * \note separate functions for Linux and Win32 for readability.
 */
static void NFQRecvPkt(NFQQueueVars *t, NFQThreadVars *tv)
{
    int ret;
    /* if verdicts are queued in the cache, poll non-blocking so we can
     * flush them instead of sleeping inside recv() */
    int flag = NFQVerdictCacheLen(t) ? MSG_DONTWAIT : 0;

    int rv = recv(t->fd, tv->data, tv->datalen, flag);
    if (rv < 0) {
        if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) {
            /* no error on timeout */
            if (flag)
                NFQVerdictCacheFlush(t);

            /* handle timeout */
            TmThreadsCaptureHandleTimeout(tv->tv, NULL);
        } else {
#ifdef COUNTERS
            /* lock: the error counter is shared with other contexts
             * touching this queue */
            NFQMutexLock(t);
            t->errs++;
            NFQMutexUnlock(t);
#endif /* COUNTERS */
        }
    } else if(rv == 0) {
        SCLogWarning("recv got returncode 0");
    } else {
#ifdef DBG_PERF
        /* track the largest read we have seen, debug builds only */
        if (rv > t->dbg_maxreadsize)
            t->dbg_maxreadsize = rv;
#endif /* DBG_PERF */

        /* hold the lock while using t->qh: teardown may NULL it
         * concurrently (see NFQDestroyQueue) */
        NFQMutexLock(t);
        if (t->qh != NULL) {
            /* dispatch the raw netlink message; this invokes the
             * registered libnetfilter_queue callback */
            ret = nfq_handle_packet(t->h, tv->data, rv);
        } else {
            SCLogWarning("NFQ handle has been destroyed");
            ret = -1;
        }
        NFQMutexUnlock(t);
        if (ret != 0) {
            SCLogDebug("nfq_handle_packet error %"PRId32, ret);
        }
    }
}
997 | | |
998 | | /** |
999 | | * \brief Main NFQ reading Loop function |
1000 | | */ |
1001 | | TmEcode ReceiveNFQLoop(ThreadVars *tv, void *data, void *slot) |
1002 | | { |
1003 | | SCEnter(); |
1004 | | NFQThreadVars *ntv = (NFQThreadVars *)data; |
1005 | | NFQQueueVars *nq = NFQGetQueue(ntv->nfq_index); |
1006 | | |
1007 | | ntv->slot = ((TmSlot *) slot)->slot_next; |
1008 | | |
1009 | | // Indicate that the thread is actually running its application level code (i.e., it can poll |
1010 | | // packets) |
1011 | | TmThreadsSetFlag(tv, THV_RUNNING); |
1012 | | |
1013 | | while(1) { |
1014 | | if (unlikely(suricata_ctl_flags != 0)) { |
1015 | | NFQDestroyQueue(nq); |
1016 | | break; |
1017 | | } |
1018 | | NFQRecvPkt(nq, ntv); |
1019 | | |
1020 | | StatsSyncCountersIfSignalled(tv); |
1021 | | } |
1022 | | SCReturnInt(TM_ECODE_OK); |
1023 | | } |
1024 | | |
1025 | | /** |
1026 | | * \brief NFQ receive module stats printing function |
1027 | | */ |
1028 | | void ReceiveNFQThreadExitStats(ThreadVars *tv, void *data) |
1029 | | { |
1030 | | NFQThreadVars *ntv = (NFQThreadVars *)data; |
1031 | | NFQQueueVars *nq = NFQGetQueue(ntv->nfq_index); |
1032 | | #ifdef COUNTERS |
1033 | | SCLogNotice("(%s) Treated: Pkts %" PRIu32 ", Bytes %" PRIu64 ", Errors %" PRIu32 "", |
1034 | | tv->name, nq->pkts, nq->bytes, nq->errs); |
1035 | | SCLogNotice("(%s) Verdict: Accepted %"PRIu32", Dropped %"PRIu32", Replaced %"PRIu32, |
1036 | | tv->name, nq->accepted, nq->dropped, nq->replaced); |
1037 | | #endif |
1038 | | } |
1039 | | |
1040 | | static inline uint32_t GetVerdict(const Packet *p) |
1041 | | { |
1042 | | uint32_t verdict = NF_ACCEPT; |
1043 | | |
1044 | | if (PacketCheckAction(p, ACTION_DROP)) { |
1045 | | verdict = NF_DROP; |
1046 | | } else { |
1047 | | switch (nfq_config.mode) { |
1048 | | default: |
1049 | | case NFQ_ACCEPT_MODE: |
1050 | | verdict = NF_ACCEPT; |
1051 | | break; |
1052 | | case NFQ_REPEAT_MODE: |
1053 | | verdict = NF_REPEAT; |
1054 | | break; |
1055 | | case NFQ_ROUTE_MODE: |
1056 | | verdict = ((uint32_t) NF_QUEUE) | nfq_config.next_queue; |
1057 | | break; |
1058 | | } |
1059 | | } |
1060 | | return verdict; |
1061 | | } |
1062 | | |
1063 | | #ifdef COUNTERS |
1064 | | static inline void UpdateCounters(NFQQueueVars *t, const Packet *p) |
1065 | | { |
1066 | | if (PacketCheckAction(p, ACTION_DROP)) { |
1067 | | t->dropped++; |
1068 | | } else { |
1069 | | if (p->flags & PKT_STREAM_MODIFIED) { |
1070 | | t->replaced++; |
1071 | | } |
1072 | | |
1073 | | t->accepted++; |
1074 | | } |
1075 | | } |
1076 | | #endif /* COUNTERS */ |
1077 | | |
/**
 * \brief NFQ verdict function
 *
 * Issues the verdict for \p p back to the kernel, either immediately or
 * via the per-queue verdict cache (batched verdicts). Pseudo packets
 * and packets on a queue being torn down are silently accepted as OK.
 *
 * \param p packet to verdict; p->nfq_v must hold the NFQ id/queue index
 *
 * \retval TM_ECODE_OK verdict set, cached, or legitimately skipped
 * \retval TM_ECODE_FAILED the nfq_set_verdict* call failed repeatedly
 */
TmEcode NFQSetVerdict(Packet *p)
{
    int iter = 0;
    /* we could also have a direct pointer but we need to have a ref count in this case */
    NFQQueueVars *t = g_nfq_q + p->nfq_v.nfq_index;

    /* flag the packet as verdicted up front, before any early return;
     * presumably this prevents a second verdict attempt elsewhere —
     * NOTE(review): confirm against the release/cleanup path */
    p->nfq_v.verdicted = 1;

    /* can't verdict a "fake" packet */
    if (PKT_IS_PSEUDOPKT(p)) {
        return TM_ECODE_OK;
    }

    /* hold the lock for the whole verdict: t->qh may be destroyed
     * concurrently (see NFQRecvPkt / queue teardown) */
    NFQMutexLock(t);

    if (t->qh == NULL) {
        /* Somebody has started a clean-up, we leave */
        NFQMutexUnlock(t);
        return TM_ECODE_OK;
    }

    uint32_t verdict = GetVerdict(p);
#ifdef COUNTERS
    UpdateCounters(t, p);
#endif /* COUNTERS */

    /* ret == 0 means the verdict was absorbed by the cache and will be
     * sent later as part of a batch; nothing more to do now */
    int ret = NFQVerdictCacheAdd(t, p, verdict);
    if (ret == 0) {
        NFQMutexUnlock(t);
        return TM_ECODE_OK;
    }

    /* send the verdict, retrying a bounded number of times on failure */
    do {
        switch (nfq_config.mode) {
            default:
            case NFQ_ACCEPT_MODE:
            case NFQ_ROUTE_MODE:
                /* only push the mark to the kernel if we modified it */
                if (p->flags & PKT_MARK_MODIFIED) {
#ifdef HAVE_NFQ_SET_VERDICT2
                    /* stream-modified packets send the rewritten payload
                     * back along with the verdict */
                    if (p->flags & PKT_STREAM_MODIFIED) {
                        ret = nfq_set_verdict2(t->qh, p->nfq_v.id, verdict,
                                p->nfq_v.mark,
                                GET_PKT_LEN(p), GET_PKT_DATA(p));
                    } else {
                        ret = nfq_set_verdict2(t->qh, p->nfq_v.id, verdict,
                                p->nfq_v.mark,
                                0, NULL);
                    }
#else /* fall back to old function */
                    if (p->flags & PKT_STREAM_MODIFIED) {
                        ret = nfq_set_verdict_mark(t->qh, p->nfq_v.id, verdict,
                                htonl(p->nfq_v.mark),
                                GET_PKT_LEN(p), GET_PKT_DATA(p));
                    } else {
                        ret = nfq_set_verdict_mark(t->qh, p->nfq_v.id, verdict,
                                htonl(p->nfq_v.mark),
                                0, NULL);
                    }
#endif /* HAVE_NFQ_SET_VERDICT2 */
                } else {
                    if (p->flags & PKT_STREAM_MODIFIED) {
                        ret = nfq_set_verdict(t->qh, p->nfq_v.id, verdict,
                                GET_PKT_LEN(p), GET_PKT_DATA(p));
                    } else {
                        ret = nfq_set_verdict(t->qh, p->nfq_v.id, verdict, 0, NULL);
                    }

                }
                break;
            case NFQ_REPEAT_MODE:
                /* repeat mode always (re)sets the mark, combining the
                 * configured mark/mask with the packet's current mark */
#ifdef HAVE_NFQ_SET_VERDICT2
                if (p->flags & PKT_STREAM_MODIFIED) {
                    ret = nfq_set_verdict2(t->qh, p->nfq_v.id, verdict,
                            (nfq_config.mark & nfq_config.mask) | (p->nfq_v.mark & ~nfq_config.mask),
                            GET_PKT_LEN(p), GET_PKT_DATA(p));
                } else {
                    ret = nfq_set_verdict2(t->qh, p->nfq_v.id, verdict,
                            (nfq_config.mark & nfq_config.mask) | (p->nfq_v.mark & ~nfq_config.mask),
                            0, NULL);
                }
#else /* fall back to old function */
                if (p->flags & PKT_STREAM_MODIFIED) {
                    ret = nfq_set_verdict_mark(t->qh, p->nfq_v.id, verdict,
                            htonl((nfq_config.mark & nfq_config.mask) | (p->nfq_v.mark & ~nfq_config.mask)),
                            GET_PKT_LEN(p), GET_PKT_DATA(p));
                } else {
                    ret = nfq_set_verdict_mark(t->qh, p->nfq_v.id, verdict,
                            htonl((nfq_config.mark & nfq_config.mask) | (p->nfq_v.mark & ~nfq_config.mask)),
                            0, NULL);
                }
#endif /* HAVE_NFQ_SET_VERDICT2 */
                break;
        }
    } while ((ret < 0) && (iter++ < NFQ_VERDICT_RETRY_COUNT));

    NFQMutexUnlock(t);

    if (ret < 0) {
        SCLogWarning("nfq_set_verdict of %p failed %" PRId32 ": %s", p, ret, strerror(errno));
        return TM_ECODE_FAILED;
    }
    return TM_ECODE_OK;
}
1185 | | |
1186 | | /** |
1187 | | * \brief NFQ verdict module packet entry function |
1188 | | */ |
1189 | | TmEcode VerdictNFQ(ThreadVars *tv, Packet *p, void *data) |
1190 | | { |
1191 | | /* if this is a tunnel packet we check if we are ready to verdict |
1192 | | * already. */ |
1193 | | if (IS_TUNNEL_PKT(p)) { |
1194 | | SCLogDebug("tunnel pkt: %p/%p %s", p, p->root, p->root ? "upper layer" : "root"); |
1195 | | bool verdict = VerdictTunnelPacket(p); |
1196 | | /* don't verdict if we are not ready */ |
1197 | | if (verdict == true) { |
1198 | | int ret = NFQSetVerdict(p->root ? p->root : p); |
1199 | | if (ret != TM_ECODE_OK) { |
1200 | | return ret; |
1201 | | } |
1202 | | } |
1203 | | } else { |
1204 | | /* no tunnel, verdict normally */ |
1205 | | int ret = NFQSetVerdict(p); |
1206 | | if (ret != TM_ECODE_OK) { |
1207 | | return ret; |
1208 | | } |
1209 | | } |
1210 | | return TM_ECODE_OK; |
1211 | | } |
1212 | | |
1213 | | /** |
1214 | | * \brief Decode a packet coming from NFQ |
1215 | | */ |
1216 | | TmEcode DecodeNFQ(ThreadVars *tv, Packet *p, void *data) |
1217 | | { |
1218 | | |
1219 | | IPV4Hdr *ip4h = (IPV4Hdr *)GET_PKT_DATA(p); |
1220 | | IPV6Hdr *ip6h = (IPV6Hdr *)GET_PKT_DATA(p); |
1221 | | DecodeThreadVars *dtv = (DecodeThreadVars *)data; |
1222 | | |
1223 | | BUG_ON(PKT_IS_PSEUDOPKT(p)); |
1224 | | |
1225 | | DecodeUpdatePacketCounters(tv, dtv, p); |
1226 | | |
1227 | | if (IPV4_GET_RAW_VER(ip4h) == 4) { |
1228 | | if (unlikely(GET_PKT_LEN(p) > USHRT_MAX)) { |
1229 | | return TM_ECODE_FAILED; |
1230 | | } |
1231 | | SCLogDebug("IPv4 packet"); |
1232 | | DecodeIPV4(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p)); |
1233 | | } else if (IPV6_GET_RAW_VER(ip6h) == 6) { |
1234 | | if (unlikely(GET_PKT_LEN(p) > USHRT_MAX)) { |
1235 | | return TM_ECODE_FAILED; |
1236 | | } |
1237 | | SCLogDebug("IPv6 packet"); |
1238 | | DecodeIPV6(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p)); |
1239 | | } else { |
1240 | | SCLogDebug("packet unsupported by NFQ, first byte: %02x", *GET_PKT_DATA(p)); |
1241 | | } |
1242 | | |
1243 | | PacketDecodeFinalize(tv, dtv, p); |
1244 | | |
1245 | | return TM_ECODE_OK; |
1246 | | } |
1247 | | |
1248 | | /** |
1249 | | * \brief Initialize the NFQ Decode threadvars |
1250 | | */ |
1251 | | TmEcode DecodeNFQThreadInit(ThreadVars *tv, const void *initdata, void **data) |
1252 | | { |
1253 | | DecodeThreadVars *dtv = DecodeThreadVarsAlloc(tv); |
1254 | | if (dtv == NULL) |
1255 | | SCReturnInt(TM_ECODE_FAILED); |
1256 | | |
1257 | | DecodeRegisterPerfCounters(dtv, tv); |
1258 | | |
1259 | | *data = (void *)dtv; |
1260 | | return TM_ECODE_OK; |
1261 | | } |
1262 | | |
1263 | | TmEcode DecodeNFQThreadDeinit(ThreadVars *tv, void *data) |
1264 | | { |
1265 | | if (data != NULL) |
1266 | | DecodeThreadVarsFree(tv, data); |
1267 | | SCReturnInt(TM_ECODE_OK); |
1268 | | } |
1269 | | |
1270 | | /** |
1271 | | * \brief Clean global contexts. Must be called on exit. |
1272 | | */ |
1273 | | void NFQContextsClean(void) |
1274 | | { |
1275 | | if (g_nfq_q != NULL) { |
1276 | | SCFree(g_nfq_q); |
1277 | | g_nfq_q = NULL; |
1278 | | } |
1279 | | |
1280 | | if (g_nfq_t != NULL) { |
1281 | | SCFree(g_nfq_t); |
1282 | | g_nfq_t = NULL; |
1283 | | } |
1284 | | } |
1285 | | #endif /* NFQ */ |