/src/suricata7/src/tmqh-packetpool.c
Line | Count | Source |
1 | | /* Copyright (C) 2007-2022 Open Information Security Foundation |
2 | | * |
3 | | * You can copy, redistribute or modify this Program under the terms of |
4 | | * the GNU General Public License version 2 as published by the Free |
5 | | * Software Foundation. |
6 | | * |
7 | | * This program is distributed in the hope that it will be useful, |
8 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | | * GNU General Public License for more details. |
11 | | * |
12 | | * You should have received a copy of the GNU General Public License |
13 | | * version 2 along with this program; if not, write to the Free Software |
14 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
15 | | * 02110-1301, USA. |
16 | | */ |
17 | | |
18 | | /** |
19 | | * \file |
20 | | * |
21 | | * \author Victor Julien <victor@inliniac.net> |
22 | | * |
23 | | * Packetpool queue handlers. Packet pool is implemented as a stack. |
24 | | */ |
25 | | |
26 | | #include "suricata-common.h" |
27 | | #include "tmqh-packetpool.h" |
28 | | #include "tm-queuehandlers.h" |
29 | | #include "tm-threads.h" |
30 | | #include "threads.h" |
31 | | #include "decode.h" |
32 | | #include "tm-modules.h" |
33 | | #include "packet.h" |
34 | | #include "util-profiling.h" |
35 | | #include "util-validate.h" |
36 | | #include "action-globals.h" |
37 | | |
/* Max number of packets to batch per owner pool before flushing the batch
 * to that pool's locked return stack (see PacketPoolReturnPacket()). May
 * be lowered at startup by PacketPoolPostRunmodes(). */
#define MAX_PENDING_RETURN_PACKETS 32
static uint32_t max_pending_return_packets = MAX_PENDING_RETURN_PACKETS;

/* Each thread owns one packet pool, implemented as a stack. */
thread_local PktPool thread_pkt_pool;
43 | | |
44 | | static inline PktPool *GetThreadPacketPool(void) |
45 | 3.82M | { |
46 | 3.82M | return &thread_pkt_pool; |
47 | 3.82M | } |
48 | | |
49 | | /** |
50 | | * \brief TmqhPacketpoolRegister |
51 | | * \initonly |
52 | | */ |
53 | | void TmqhPacketpoolRegister (void) |
54 | 71 | { |
55 | 71 | tmqh_table[TMQH_PACKETPOOL].name = "packetpool"; |
56 | 71 | tmqh_table[TMQH_PACKETPOOL].InHandler = TmqhInputPacketpool; |
57 | 71 | tmqh_table[TMQH_PACKETPOOL].OutHandler = TmqhOutputPacketpool; |
58 | 71 | } |
59 | | |
60 | | static int PacketPoolIsEmpty(PktPool *pool) |
61 | 336k | { |
62 | | /* Check local stack first. */ |
63 | 336k | if (pool->head || pool->return_stack.head) |
64 | 336k | return 0; |
65 | | |
66 | 0 | return 1; |
67 | 336k | } |
68 | | |
/** \brief Block until the calling thread's packet pool has a packet.
 *
 *  If the pool looks empty, raise sync_now so other threads flush their
 *  pending batches back to us immediately, then sleep on the return
 *  stack's condition variable. The busy-wait loop afterwards re-checks,
 *  guarding against spurious condvar wakeups.
 */
void PacketPoolWait(void)
{
    PktPool *my_pool = GetThreadPacketPool();

    if (PacketPoolIsEmpty(my_pool)) {
        SCMutexLock(&my_pool->return_stack.mutex);
        /* ask other threads to return their pending packets asap */
        SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
        SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
        SCMutexUnlock(&my_pool->return_stack.mutex);
    }

    /* spin until a packet is actually visible on one of the stacks */
    while(PacketPoolIsEmpty(my_pool))
        cc_barrier();
}
83 | | |
84 | | /** \brief a initialized packet |
85 | | * |
86 | | * \warning Use *only* at init, not at packet runtime |
87 | | */ |
88 | | static void PacketPoolStorePacket(Packet *p) |
89 | 768 | { |
90 | 768 | p->pool = GetThreadPacketPool(); |
91 | 768 | p->ReleasePacket = PacketPoolReturnPacket; |
92 | 768 | PacketPoolReturnPacket(p); |
93 | 768 | } |
94 | | |
95 | | static void PacketPoolGetReturnedPackets(PktPool *pool) |
96 | 0 | { |
97 | 0 | SCMutexLock(&pool->return_stack.mutex); |
98 | | /* Move all the packets from the locked return stack to the local stack. */ |
99 | 0 | pool->head = pool->return_stack.head; |
100 | 0 | pool->return_stack.head = NULL; |
101 | 0 | SCMutexUnlock(&pool->return_stack.mutex); |
102 | 0 | } |
103 | | |
104 | | /** \brief Get a new packet from the packet pool |
105 | | * |
106 | | * Only allocates from the thread's local stack, or mallocs new packets. |
107 | | * If the local stack is empty, first move all the return stack packets to |
108 | | * the local stack. |
109 | | * \retval Packet pointer, or NULL on failure. |
110 | | */ |
111 | | Packet *PacketPoolGetPacket(void) |
112 | 771k | { |
113 | 771k | PktPool *pool = GetThreadPacketPool(); |
114 | 771k | #ifdef DEBUG_VALIDATION |
115 | 771k | BUG_ON(pool->initialized == 0); |
116 | 771k | BUG_ON(pool->destroyed == 1); |
117 | 771k | #endif /* DEBUG_VALIDATION */ |
118 | 771k | if (pool->head) { |
119 | | /* Stack is not empty. */ |
120 | 771k | Packet *p = pool->head; |
121 | 771k | pool->head = p->next; |
122 | 771k | p->pool = pool; |
123 | 771k | PacketReinit(p); |
124 | 771k | return p; |
125 | 771k | } |
126 | | |
127 | | /* Local Stack is empty, so check the return stack, which requires |
128 | | * locking. */ |
129 | 0 | PacketPoolGetReturnedPackets(pool); |
130 | | |
131 | | /* Try to allocate again. Need to check for not empty again, since the |
132 | | * return stack might have been empty too. |
133 | | */ |
134 | 0 | if (pool->head) { |
135 | | /* Stack is not empty. */ |
136 | 0 | Packet *p = pool->head; |
137 | 0 | pool->head = p->next; |
138 | 0 | p->pool = pool; |
139 | 0 | PacketReinit(p); |
140 | 0 | return p; |
141 | 0 | } |
142 | | |
143 | | /* Failed to allocate a packet, so return NULL. */ |
144 | | /* Optionally, could allocate a new packet here. */ |
145 | 0 | return NULL; |
146 | 0 | } |
147 | | |
/** \brief Return packet to Packet pool
 *
 *  Fast path: a packet owned by this thread's pool is pushed onto the
 *  lock-free local stack. A packet owned by another thread's pool is
 *  batched on a per-owner "pending" list and only flushed to the owner's
 *  locked return stack once the batch exceeds max_pending_return_packets
 *  or the owner requested it (sync_now), amortizing the locking cost.
 */
void PacketPoolReturnPacket(Packet *p)
{
    PktPool *my_pool = GetThreadPacketPool();
    PktPool *pool = p->pool;
    if (pool == NULL) {
        /* packet was not taken from any pool: free it outright */
        PacketFree(p);
        return;
    }

    PacketReleaseRefs(p);

#ifdef DEBUG_VALIDATION
    BUG_ON(pool->initialized == 0);
    BUG_ON(pool->destroyed == 1);
    BUG_ON(my_pool->initialized == 0);
    BUG_ON(my_pool->destroyed == 1);
#endif /* DEBUG_VALIDATION */

    if (pool == my_pool) {
        /* Push back onto this thread's own stack, so no locking. */
        p->next = my_pool->head;
        my_pool->head = p;
    } else {
        PktPool *pending_pool = my_pool->pending_pool;
        if (pending_pool == NULL || pending_pool == pool) {
            if (pending_pool == NULL) {
                /* No pending packet, so store the current packet. */
                p->next = NULL;
                my_pool->pending_pool = pool;
                my_pool->pending_head = p;
                my_pool->pending_tail = p;
                my_pool->pending_count = 1;
            } else if (pending_pool == pool) {
                /* Another packet for the pending pool list. */
                p->next = my_pool->pending_head;
                my_pool->pending_head = p;
                my_pool->pending_count++;
            }

            /* flush when the owner signalled it is waiting (sync_now) or
             * the batch grew past the configured limit */
            if (SC_ATOMIC_GET(pool->return_stack.sync_now) || my_pool->pending_count > max_pending_return_packets) {
                /* Return the entire list of pending packets. */
                SCMutexLock(&pool->return_stack.mutex);
                my_pool->pending_tail->next = pool->return_stack.head;
                pool->return_stack.head = my_pool->pending_head;
                SC_ATOMIC_RESET(pool->return_stack.sync_now);
                SCCondSignal(&pool->return_stack.cond);
                SCMutexUnlock(&pool->return_stack.mutex);
                /* Clear the list of pending packets to return. */
                my_pool->pending_pool = NULL;
                my_pool->pending_head = NULL;
                my_pool->pending_tail = NULL;
                my_pool->pending_count = 0;
            }
        } else {
            /* pending batch belongs to a different pool: push this single
             * packet straight onto its owner's return stack */
            SCMutexLock(&pool->return_stack.mutex);
            p->next = pool->return_stack.head;
            pool->return_stack.head = p;
            SC_ATOMIC_RESET(pool->return_stack.sync_now);
            SCMutexUnlock(&pool->return_stack.mutex);
            SCCondSignal(&pool->return_stack.cond);
        }
    }
}
215 | | |
/** \brief Init this thread's packet pool without preallocating packets.
 *
 *  Sets up the return stack's mutex, condvar and sync_now atomic; the
 *  local stack starts out empty (contrast PacketPoolInit()).
 */
void PacketPoolInitEmpty(void)
{
    PktPool *my_pool = GetThreadPacketPool();

#ifdef DEBUG_VALIDATION
    BUG_ON(my_pool->initialized);
    my_pool->initialized = 1;
    my_pool->destroyed = 0;
#endif /* DEBUG_VALIDATION */

    SCMutexInit(&my_pool->return_stack.mutex, NULL);
    SCCondInit(&my_pool->return_stack.cond, NULL);
    SC_ATOMIC_INIT(my_pool->return_stack.sync_now);
}
230 | | |
231 | | void PacketPoolInit(void) |
232 | 6 | { |
233 | 6 | extern uint16_t max_pending_packets; |
234 | | |
235 | 6 | PktPool *my_pool = GetThreadPacketPool(); |
236 | | |
237 | 6 | #ifdef DEBUG_VALIDATION |
238 | 6 | BUG_ON(my_pool->initialized); |
239 | 6 | my_pool->initialized = 1; |
240 | 6 | my_pool->destroyed = 0; |
241 | 6 | #endif /* DEBUG_VALIDATION */ |
242 | | |
243 | 6 | SCMutexInit(&my_pool->return_stack.mutex, NULL); |
244 | 6 | SCCondInit(&my_pool->return_stack.cond, NULL); |
245 | 6 | SC_ATOMIC_INIT(my_pool->return_stack.sync_now); |
246 | | |
247 | | /* pre allocate packets */ |
248 | 6 | SCLogDebug("preallocating packets... packet size %" PRIuMAX "", |
249 | 6 | (uintmax_t)SIZE_OF_PACKET); |
250 | 6 | int i = 0; |
251 | 774 | for (i = 0; i < max_pending_packets; i++) { |
252 | 768 | Packet *p = PacketGetFromAlloc(); |
253 | 768 | if (unlikely(p == NULL)) { |
254 | 0 | FatalError("Fatal error encountered while allocating a packet. Exiting..."); |
255 | 0 | } |
256 | 768 | PacketPoolStorePacket(p); |
257 | 768 | } |
258 | | |
259 | | //SCLogInfo("preallocated %"PRIiMAX" packets. Total memory %"PRIuMAX"", |
260 | | // max_pending_packets, (uintmax_t)(max_pending_packets*SIZE_OF_PACKET)); |
261 | 6 | } |
262 | | |
/** \brief Tear down this thread's packet pool, freeing all its packets.
 *
 *  First frees the batched "pending" packets that were waiting to be
 *  flushed to another pool, then drains everything reachable via
 *  PacketPoolGetPacket() (local stack plus the locked return stack).
 */
void PacketPoolDestroy(void)
{
    Packet *p = NULL;
    PktPool *my_pool = GetThreadPacketPool();

#ifdef DEBUG_VALIDATION
    BUG_ON(my_pool && my_pool->destroyed);
#endif /* DEBUG_VALIDATION */

    /* free the pending batch destined for another pool's return stack */
    if (my_pool && my_pool->pending_pool != NULL) {
        p = my_pool->pending_head;
        while (p) {
            Packet *next_p = p->next;
            PacketFree(p);
            p = next_p;
            my_pool->pending_count--;
        }
#ifdef DEBUG_VALIDATION
        BUG_ON(my_pool->pending_count);
#endif /* DEBUG_VALIDATION */
        my_pool->pending_pool = NULL;
        my_pool->pending_head = NULL;
        my_pool->pending_tail = NULL;
    }

    /* drain and free the pool itself */
    while ((p = PacketPoolGetPacket()) != NULL) {
        PacketFree(p);
    }

#ifdef DEBUG_VALIDATION
    my_pool->initialized = 0;
    my_pool->destroyed = 1;
#endif /* DEBUG_VALIDATION */
}
297 | | |
298 | | Packet *TmqhInputPacketpool(ThreadVars *tv) |
299 | 0 | { |
300 | 0 | return PacketPoolGetPacket(); |
301 | 0 | } |
302 | | |
/** \brief packetpool OutHandler: return a packet to its packet pool.
 *
 *  Performs tunnel bookkeeping under the root packet's tunnel spinlock:
 *  a tunnel root with children still outstanding is only marked
 *  verdicted and kept; the last returning child also releases a root
 *  that was already verdicted.
 *
 *  \param t thread vars, may be NULL (only used for capture stats)
 *  \param p packet to release
 */
void TmqhOutputPacketpool(ThreadVars *t, Packet *p)
{
    bool proot = false;

    SCEnter();
    SCLogDebug("Packet %p, p->root %p, alloced %s", p, p->root, BOOL2STR(p->pool == NULL));

    if (IS_TUNNEL_PKT(p)) {
        SCLogDebug("Packet %p is a tunnel packet: %s",
            p,p->root ? "upper layer" : "tunnel root");

        /* get a lock to access root packet fields */
        SCSpinlock *lock = p->root ? &p->root->persistent.tunnel_lock : &p->persistent.tunnel_lock;
        SCSpinLock(lock);

        if (IS_TUNNEL_ROOT_PKT(p)) {
            SCLogDebug("IS_TUNNEL_ROOT_PKT == TRUE");
            CaptureStatsUpdate(t, p);

            /* outstanding = tunnel packets created minus returned */
            const uint16_t outstanding = TUNNEL_PKT_TPR(p) - TUNNEL_PKT_RTV(p);
            SCLogDebug("root pkt: outstanding %u", outstanding);
            if (outstanding == 0) {
                SCLogDebug("no tunnel packets outstanding, no more tunnel "
                        "packet(s) depending on this root");
                /* if this packet is the root and there are no
                 * more tunnel packets to consider
                 *
                 * return it to the pool */
            } else {
                SCLogDebug("tunnel root Packet %p: outstanding > 0, so "
                        "packets are still depending on this root, setting "
                        "SET_TUNNEL_PKT_VERDICTED", p);
                /* if this is the root and there are more tunnel
                 * packets, return this to the pool. It's still referenced
                 * by the tunnel packets, and we will return it
                 * when we handle them */
                SET_TUNNEL_PKT_VERDICTED(p);

                PACKET_PROFILING_END(p);
                SCSpinUnlock(lock);
                SCReturn;
            }
        } else {
            SCLogDebug("NOT IS_TUNNEL_ROOT_PKT, so tunnel pkt");

            /* count this child packet as returned-to-verdict */
            TUNNEL_INCR_PKT_RTV_NOLOCK(p);
            const uint16_t outstanding = TUNNEL_PKT_TPR(p) - TUNNEL_PKT_RTV(p);
            SCLogDebug("tunnel pkt: outstanding %u", outstanding);
            /* all tunnel packets are processed except us. Root already
             * processed. So return tunnel pkt and root packet to the
             * pool. */
            if (outstanding == 0 &&
                    p->root && IS_TUNNEL_PKT_VERDICTED(p->root))
            {
                SCLogDebug("root verdicted == true && no outstanding");

                /* handle freeing the root as well*/
                SCLogDebug("setting proot = 1 for root pkt, p->root %p "
                        "(tunnel packet %p)", p->root, p);
                proot = true;

                /* fall through */

            } else {
                /* root not ready yet, or not the last tunnel packet,
                 * so get rid of the tunnel pkt only */

                SCLogDebug("NOT IS_TUNNEL_PKT_VERDICTED (%s) || "
                        "outstanding > 0 (%u)",
                        (p->root && IS_TUNNEL_PKT_VERDICTED(p->root)) ? "true" : "false",
                        outstanding);

                /* fall through */
            }
        }
        SCSpinUnlock(lock);

        SCLogDebug("tunnel stuff done, move on (proot %d)", proot);

    } else {
        CaptureStatsUpdate(t, p);
    }

    SCLogDebug("[packet %p][%s] %s", p,
            IS_TUNNEL_PKT(p) ? IS_TUNNEL_ROOT_PKT(p) ? "tunnel::root" : "tunnel::leaf"
                             : "no tunnel",
            (p->action & ACTION_DROP) ? "DROP" : "no drop");

    /* we're done with the tunnel root now as well */
    if (proot == true) {
        SCLogDebug("getting rid of root pkt... alloc'd %s", BOOL2STR(p->root->pool == NULL));

        PacketReleaseRefs(p->root);
        p->root->ReleasePacket(p->root);
        p->root = NULL;
    }

    PACKET_PROFILING_END(p);

    PacketReleaseRefs(p);
    p->ReleasePacket(p);

    SCReturn;
}
407 | | |
408 | | /** |
409 | | * \brief Release all the packets in the queue back to the packetpool. Mainly |
410 | | * used by threads that have failed, and wants to return the packets back |
411 | | * to the packetpool. |
412 | | * |
413 | | * \param pq Pointer to the packetqueue from which the packets have to be |
414 | | * returned back to the packetpool |
415 | | * |
416 | | * \warning this function assumes that the pq does not use locking |
417 | | */ |
418 | | void TmqhReleasePacketsToPacketPool(PacketQueue *pq) |
419 | 0 | { |
420 | 0 | Packet *p = NULL; |
421 | |
|
422 | 0 | if (pq == NULL) |
423 | 0 | return; |
424 | | |
425 | 0 | while ((p = PacketDequeue(pq)) != NULL) { |
426 | 0 | DEBUG_VALIDATE_BUG_ON(p->flow != NULL); |
427 | 0 | TmqhOutputPacketpool(NULL, p); |
428 | 0 | } |
429 | | |
430 | 0 | return; |
431 | 0 | } |
432 | | |
433 | | /** number of packets to keep reserved when calculating the pending |
434 | | * return packets count. This assumes we need at max 10 packets in one |
435 | | * PacketPoolWaitForN call. The actual number is 9 now, so this has a |
436 | | * bit of margin. */ |
437 | 0 | #define RESERVED_PACKETS 10 |
438 | | |
439 | | /** |
440 | | * \brief Set the max_pending_return_packets value |
441 | | * |
442 | | * Set it to the max pending packets value, divided by the number |
443 | | * of lister threads. Normally, in autofp these are the stream/detect/log |
444 | | * worker threads. |
445 | | * |
446 | | * The max_pending_return_packets value needs to stay below the packet |
447 | | * pool size of the 'producers' (normally pkt capture threads but also |
448 | | * flow timeout injection ) to avoid a deadlock where all the 'workers' |
449 | | * keep packets in their return pools, while the capture thread can't |
450 | | * continue because its pool is empty. |
451 | | */ |
452 | | void PacketPoolPostRunmodes(void) |
453 | 0 | { |
454 | 0 | extern uint16_t max_pending_packets; |
455 | 0 | uint16_t pending_packets = max_pending_packets; |
456 | 0 | if (pending_packets < RESERVED_PACKETS) { |
457 | 0 | FatalError("'max-pending-packets' setting " |
458 | 0 | "must be at least %d", |
459 | 0 | RESERVED_PACKETS); |
460 | 0 | } |
461 | 0 | uint32_t threads = TmThreadCountThreadsByTmmFlags(TM_FLAG_DETECT_TM); |
462 | 0 | if (threads == 0) |
463 | 0 | return; |
464 | | |
465 | 0 | uint32_t packets = (pending_packets / threads) - 1; |
466 | 0 | if (packets < max_pending_return_packets) |
467 | 0 | max_pending_return_packets = packets; |
468 | | |
469 | | /* make sure to have a margin in the return logic */ |
470 | 0 | if (max_pending_return_packets >= RESERVED_PACKETS) |
471 | 0 | max_pending_return_packets -= RESERVED_PACKETS; |
472 | |
|
473 | 0 | SCLogDebug("detect threads %u, max packets %u, max_pending_return_packets %u", |
474 | 0 | threads, packets, max_pending_return_packets); |
475 | 0 | } |