Line | Count | Source
1 | | /* |
2 | | * Process debugging functions. |
3 | | * |
4 | | * Copyright 2000-2019 Willy Tarreau <willy@haproxy.org>. |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or |
7 | | * modify it under the terms of the GNU General Public License |
8 | | * as published by the Free Software Foundation; either version |
9 | | * 2 of the License, or (at your option) any later version. |
10 | | * |
11 | | */ |
12 | | |
13 | | |
14 | | #include <errno.h> |
15 | | #include <fcntl.h> |
16 | | #include <signal.h> |
17 | | #include <time.h> |
18 | | #include <stdio.h> |
19 | | #include <stdlib.h> |
20 | | #include <syslog.h> |
21 | | #include <sys/stat.h> |
22 | | #include <sys/types.h> |
23 | | #include <sys/wait.h> |
24 | | #include <unistd.h> |
25 | | #ifdef USE_EPOLL |
26 | | #include <sys/epoll.h> |
27 | | #endif |
28 | | |
29 | | #include <haproxy/api.h> |
30 | | #include <haproxy/applet.h> |
31 | | #include <haproxy/buf.h> |
32 | | #include <haproxy/cli.h> |
33 | | #include <haproxy/clock.h> |
34 | | #include <haproxy/debug.h> |
35 | | #include <haproxy/fd.h> |
36 | | #include <haproxy/global.h> |
37 | | #include <haproxy/hlua.h> |
38 | | #include <haproxy/http_ana.h> |
39 | | #include <haproxy/log.h> |
40 | | #include <haproxy/net_helper.h> |
41 | | #include <haproxy/sc_strm.h> |
42 | | #include <haproxy/stconn.h> |
43 | | #include <haproxy/task.h> |
44 | | #include <haproxy/thread.h> |
45 | | #include <haproxy/time.h> |
46 | | #include <haproxy/tools.h> |
47 | | #include <import/ist.h> |
48 | | |
49 | | |
50 | | /* The dump state is made of: |
51 | | * - num_thread on the lowest 15 bits |
52 | | * - a SYNC flag on bit 15 (waiting for sync start) |
53 | | * - number of participating threads on bits 16-30 |
54 | | * Initiating a dump consists in setting it to SYNC and incrementing the |
55 | | * num_thread part when entering the function. The first thread periodically |
56 | | * recounts active threads and compares it to the ready ones, and clears SYNC |
57 | | * and sets the number of participants to the value found, which serves as a |
58 | | * start signal. A thread finished dumping looks up the TID of the next active |
59 | | * thread after it and writes it in the lowest part. If there's none, it sets |
60 | | * the thread counter to the number of participants and resets that part, |
61 | | * which serves as an end-of-dump signal. All threads decrement the num_thread |
62 | | * part. Then all threads wait for the value to reach zero. Only used when |
63 | | * USE_THREAD_DUMP is set. |
64 | | */ |
65 | | #define THREAD_DUMP_TMASK 0x00007FFFU |
66 | | #define THREAD_DUMP_FSYNC 0x00008000U |
67 | | #define THREAD_DUMP_PMASK 0x7FFF0000U |
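As a small illustrative sketch (not part of debug.c; the helper name and the <state> argument are invented), the three fields described in the comment above can be extracted from the state word with these masks:

static inline void thread_dump_state_decode(unsigned int state,
                                            unsigned int *num_thread,
                                            unsigned int *sync_flag,
                                            unsigned int *participants)
{
    *num_thread   = state & THREAD_DUMP_TMASK;           /* num_thread part, bits 0-14 */
    *sync_flag    = !!(state & THREAD_DUMP_FSYNC);        /* SYNC flag, bit 15 */
    *participants = (state & THREAD_DUMP_PMASK) >> 16;    /* participating threads, bits 16-30 */
}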
68 | | |
69 | | /* Points to a copy of the buffer where the dump functions should write, when |
70 | | * non-null. It's only used by debuggers for core dump analysis. |
71 | | */ |
72 | | struct buffer *thread_dump_buffer = NULL; |
73 | | unsigned int debug_commands_issued = 0; |
74 | | |
75 | | /* dumps a backtrace of the current thread that is appended to buffer <buf>. |
76 | | * Lines are prefixed with the string <prefix> which may be empty (used for |
77 | | * indenting). It is recommended to use this at a function's tail so that |
78 | | * the function does not appear in the call stack. The <dump> argument |
79 | | * indicates what dump state to start from, and should usually be zero. It |
80 | | * may be among the following values: |
81 | | * - 0: search usual callers before step 1, or directly jump to 2 |
82 | | * - 1: skip usual callers before step 2 |
83 | | * - 2: dump until polling loop, scheduler, or main() (excluded) |
84 | | * - 3: end |
85 | | * - 4-7: like 0 but stops *after* main. |
86 | | */ |
87 | | void ha_dump_backtrace(struct buffer *buf, const char *prefix, int dump) |
88 | 0 | { |
89 | 0 | struct buffer bak; |
90 | 0 | char pfx2[100]; |
91 | 0 | void *callers[100]; |
92 | 0 | int j, nptrs; |
93 | 0 | const void *addr; |
94 | |
95 | 0 | nptrs = my_backtrace(callers, sizeof(callers)/sizeof(*callers)); |
96 | 0 | if (!nptrs) |
97 | 0 | return; |
98 | | |
99 | 0 | if (snprintf(pfx2, sizeof(pfx2), "%s| ", prefix) > sizeof(pfx2)) |
100 | 0 | pfx2[0] = 0; |
101 | | |
102 | | /* The call backtrace_symbols_fd(callers, nptrs, STDOUT_FILENO) would
103 | | * produce similar output to the following: |
104 | | */ |
105 | 0 | chunk_appendf(buf, "%scall trace(%d):\n", prefix, nptrs); |
106 | 0 | for (j = 0; (j < nptrs || (dump & 3) < 2); j++) { |
107 | 0 | if (j == nptrs && !(dump & 3)) { |
108 | | /* we failed to spot the starting point of the |
109 | | * dump, let's start over dumping everything we |
110 | | * have. |
111 | | */ |
112 | 0 | dump += 2; |
113 | 0 | j = 0; |
114 | 0 | } |
115 | 0 | bak = *buf; |
116 | 0 | dump_addr_and_bytes(buf, pfx2, callers[j], 8); |
117 | 0 | addr = resolve_sym_name(buf, ": ", callers[j]); |
118 | 0 | if ((dump & 3) == 0) { |
119 | | /* dump not started, will start *after* ha_thread_dump_one(), |
120 | | * ha_panic and ha_backtrace_to_stderr |
121 | | */ |
122 | 0 | if (addr == ha_panic || |
123 | 0 | addr == ha_backtrace_to_stderr || addr == ha_thread_dump_one) |
124 | 0 | dump++; |
125 | 0 | *buf = bak; |
126 | 0 | continue; |
127 | 0 | } |
128 | | |
129 | 0 | if ((dump & 3) == 1) { |
130 | | /* starting */ |
131 | 0 | if (addr == ha_panic || |
132 | 0 | addr == ha_backtrace_to_stderr || addr == ha_thread_dump_one) { |
133 | 0 | *buf = bak; |
134 | 0 | continue; |
135 | 0 | } |
136 | 0 | dump++; |
137 | 0 | } |
138 | | |
139 | 0 | if ((dump & 3) == 2) { |
140 | | /* still dumping */ |
141 | 0 | if (dump == 6) { |
142 | | /* we only stop *after* main and we must send the LF */ |
143 | 0 | if (addr == main) { |
144 | 0 | j = nptrs; |
145 | 0 | dump++; |
146 | 0 | } |
147 | 0 | } |
148 | 0 | else if (addr == run_poll_loop || addr == main || addr == run_tasks_from_lists) { |
149 | 0 | dump++; |
150 | 0 | *buf = bak; |
151 | 0 | break; |
152 | 0 | } |
153 | 0 | } |
154 | | /* OK, line dumped */ |
155 | 0 | chunk_appendf(buf, "\n"); |
156 | 0 | } |
157 | 0 | } |
158 | | |
159 | | /* dump a backtrace of current thread's stack to stderr. */ |
160 | | void ha_backtrace_to_stderr(void) |
161 | 0 | { |
162 | 0 | char area[2048]; |
163 | 0 | struct buffer b = b_make(area, sizeof(area), 0, 0); |
164 | |
165 | 0 | ha_dump_backtrace(&b, " ", 4); |
166 | 0 | if (b.data) |
167 | 0 | DISGUISE(write(2, b.area, b.data)); |
168 | 0 | } |
169 | | |
170 | | /* Dumps to the thread's buffer some known information for the desired thread, |
171 | | * and optionally extra info when it's safe to do so (current thread or |
172 | | * isolated). The dump will be appended to the buffer, so the caller is |
173 | | * responsible for preliminary initializing it. The <from_signal> argument will |
174 | | * indicate if the function is called from the debug signal handler, indicating |
175 | | * the thread was dumped upon request from another one, otherwise if the thread |
176 | | * is the current one, a star ('*') will be displayed in front of the thread to
177 | | * indicate the requesting one. Any stuck thread is also prefixed with a '>'. |
178 | | * The caller is responsible for atomically setting up the thread's dump buffer |
179 | | * to point to a valid buffer with enough room. Output will be truncated if it |
180 | | * does not fit. When the dump is complete, the dump buffer will be switched to |
181 | | * (void*)0x1, which the caller must reset to 0x0 once the contents are collected.
182 | | */ |
183 | | void ha_thread_dump_one(int thr, int from_signal) |
184 | 0 | { |
185 | 0 | struct buffer *buf = HA_ATOMIC_LOAD(&ha_thread_ctx[thr].thread_dump_buffer); |
186 | 0 | unsigned long __maybe_unused thr_bit = ha_thread_info[thr].ltid_bit; |
187 | 0 | int __maybe_unused tgrp = ha_thread_info[thr].tgid; |
188 | 0 | unsigned long long p = ha_thread_ctx[thr].prev_cpu_time; |
189 | 0 | unsigned long long n = now_cpu_time_thread(thr); |
190 | 0 | int stuck = !!(ha_thread_ctx[thr].flags & TH_FL_STUCK); |
191 | |
192 | 0 | chunk_appendf(buf, |
193 | 0 | "%c%cThread %-2u: id=0x%llx act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n" |
194 | 0 | " %2u/%-2u stuck=%d prof=%d", |
195 | 0 | (thr == tid && !from_signal) ? '*' : ' ', stuck ? '>' : ' ', thr + 1, |
196 | 0 | ha_get_pthread_id(thr), |
197 | 0 | thread_has_tasks(), |
198 | 0 | !eb_is_empty(&ha_thread_ctx[thr].rqueue_shared), |
199 | 0 | !eb_is_empty(&ha_thread_ctx[thr].timers), |
200 | 0 | !eb_is_empty(&ha_thread_ctx[thr].rqueue), |
201 | 0 | !(LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_URGENT]) && |
202 | 0 | LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_NORMAL]) && |
203 | 0 | LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_BULK]) && |
204 | 0 | MT_LIST_ISEMPTY(&ha_thread_ctx[thr].shared_tasklet_list)), |
205 | 0 | ha_thread_ctx[thr].tasks_in_list, |
206 | 0 | ha_thread_ctx[thr].rq_total, |
207 | 0 | ha_thread_info[thr].tgid, ha_thread_info[thr].ltid + 1, |
208 | 0 | stuck, |
209 | 0 | !!(ha_thread_ctx[thr].flags & TH_FL_TASK_PROFILING)); |
210 | |
211 | | #if defined(USE_THREAD) |
212 | | chunk_appendf(buf, |
213 | | " harmless=%d isolated=%d", |
214 | | !!(_HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp-1].threads_harmless) & thr_bit), |
215 | | isolated_thread == thr); |
216 | | #endif |
217 | |
218 | 0 | chunk_appendf(buf, "\n"); |
219 | 0 | chunk_appendf(buf, " cpu_ns: poll=%llu now=%llu diff=%llu\n", p, n, n-p); |
220 | | |
221 | | /* this is the end of what we can dump from outside the current thread */ |
222 | |
223 | 0 | if (thr != tid && !thread_isolated()) |
224 | 0 | goto leave; |
225 | | |
226 | 0 | chunk_appendf(buf, " curr_task="); |
227 | 0 | ha_task_dump(buf, th_ctx->current, " "); |
228 | |
229 | 0 | if (stuck && thr == tid) { |
230 | | /* We only emit the backtrace for stuck threads in order not to |
231 | | * waste precious output buffer space with non-interesting data. |
232 | | * Please leave this as the last instruction in this function |
233 | | * so that the compiler uses tail merging and the current |
234 | | * function does not appear in the stack. |
235 | | */ |
236 | 0 | ha_dump_backtrace(buf, " ", 0); |
237 | 0 | } |
238 | 0 | leave: |
239 | | /* end of dump, setting the buffer to 0x1 will tell the caller we're done */ |
240 | 0 | HA_ATOMIC_STORE(&ha_thread_ctx[thr].thread_dump_buffer, (void*)0x1UL); |
241 | 0 | } |
242 | | |
243 | | /* Triggers a thread dump from thread <thr>, either directly if it's the |
244 | | * current thread or if thread dump signals are not implemented, or by sending |
245 | | * a signal if it's a remote one and the feature is supported. The buffer <buf> |
246 | | * will get the dump appended, and the caller is responsible for making sure |
247 | | * there is enough room otherwise some contents will be truncated. |
248 | | */ |
249 | | void ha_thread_dump(struct buffer *buf, int thr) |
250 | 0 | { |
251 | 0 | struct buffer *old = NULL; |
252 | | |
253 | | /* try to impose our dump buffer and to reserve the target thread's |
254 | | * next dump for us. |
255 | | */ |
256 | 0 | do { |
257 | 0 | if (old) |
258 | 0 | ha_thread_relax(); |
259 | 0 | old = NULL; |
260 | 0 | } while (!HA_ATOMIC_CAS(&ha_thread_ctx[thr].thread_dump_buffer, &old, buf)); |
261 | |
262 | | #ifdef USE_THREAD_DUMP |
263 | | /* asking the remote thread to dump itself allows us to get more details
264 | | * including a backtrace. |
265 | | */ |
266 | | if (thr != tid) |
267 | | ha_tkill(thr, DEBUGSIG); |
268 | | else |
269 | | #endif |
270 | 0 | ha_thread_dump_one(thr, thr != tid); |
271 | | |
272 | | /* now wait for the dump to be done, and release it */ |
273 | 0 | do { |
274 | 0 | if (old) |
275 | 0 | ha_thread_relax(); |
276 | 0 | old = (void*)0x01; |
277 | 0 | } while (!HA_ATOMIC_CAS(&ha_thread_ctx[thr].thread_dump_buffer, &old, 0)); |
278 | 0 | } |
279 | | |
280 | | /* dumps into the buffer some information related to task <task> (which may |
281 | | * either be a task or a tasklet), and prepends each line except the first one
282 | | * with <pfx>. The buffer is only appended and the first output starts by the |
283 | | * pointer itself. The caller is responsible for making sure the task is not |
284 | | * going to vanish during the dump. |
285 | | */ |
286 | | void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx) |
287 | 0 | { |
288 | 0 | const struct stream *s = NULL; |
289 | 0 | const struct appctx __maybe_unused *appctx = NULL; |
290 | 0 | struct hlua __maybe_unused *hlua = NULL; |
291 | 0 | const struct stconn *sc; |
292 | |
293 | 0 | if (!task) { |
294 | 0 | chunk_appendf(buf, "0\n"); |
295 | 0 | return; |
296 | 0 | } |
297 | | |
298 | 0 | if (TASK_IS_TASKLET(task)) |
299 | 0 | chunk_appendf(buf, |
300 | 0 | "%p (tasklet) calls=%u\n", |
301 | 0 | task, |
302 | 0 | task->calls); |
303 | 0 | else |
304 | 0 | chunk_appendf(buf, |
305 | 0 | "%p (task) calls=%u last=%llu%s\n", |
306 | 0 | task, |
307 | 0 | task->calls, |
308 | 0 | task->wake_date ? (unsigned long long)(now_mono_time() - task->wake_date) : 0, |
309 | 0 | task->wake_date ? " ns ago" : ""); |
310 | |
311 | 0 | chunk_appendf(buf, "%s fct=%p(", pfx, task->process); |
312 | 0 | resolve_sym_name(buf, NULL, task->process); |
313 | 0 | chunk_appendf(buf,") ctx=%p", task->context); |
314 | |
315 | 0 | if (task->process == task_run_applet && (appctx = task->context)) |
316 | 0 | chunk_appendf(buf, "(%s)\n", appctx->applet->name); |
317 | 0 | else |
318 | 0 | chunk_appendf(buf, "\n"); |
319 | |
320 | 0 | if (task->process == process_stream && task->context) |
321 | 0 | s = (struct stream *)task->context; |
322 | 0 | else if (task->process == task_run_applet && task->context && (sc = appctx_sc((struct appctx *)task->context))) |
323 | 0 | s = sc_strm(sc); |
324 | 0 | else if (task->process == sc_conn_io_cb && task->context) |
325 | 0 | s = sc_strm(((struct stconn *)task->context)); |
326 | |
327 | 0 | if (s) |
328 | 0 | stream_dump(buf, s, pfx, '\n'); |
329 | |
330 | | #ifdef USE_LUA |
331 | | hlua = NULL; |
332 | | if (s && (hlua = s->hlua)) { |
333 | | chunk_appendf(buf, "%sCurrent executing Lua from a stream analyser -- ", pfx); |
334 | | } |
335 | | else if (task->process == hlua_process_task && (hlua = task->context)) { |
336 | | chunk_appendf(buf, "%sCurrent executing a Lua task -- ", pfx); |
337 | | } |
338 | | else if (task->process == task_run_applet && (appctx = task->context) && |
339 | | (appctx->applet->fct == hlua_applet_tcp_fct)) { |
340 | | chunk_appendf(buf, "%sCurrent executing a Lua TCP service -- ", pfx); |
341 | | } |
342 | | else if (task->process == task_run_applet && (appctx = task->context) && |
343 | | (appctx->applet->fct == hlua_applet_http_fct)) { |
344 | | chunk_appendf(buf, "%sCurrent executing a Lua HTTP service -- ", pfx); |
345 | | } |
346 | | |
347 | | if (hlua && hlua->T) { |
348 | | chunk_appendf(buf, "stack traceback:\n "); |
349 | | append_prefixed_str(buf, hlua_traceback(hlua->T, "\n "), pfx, '\n', 0); |
350 | | } |
351 | | |
352 | | /* we may need to terminate the current line */ |
353 | | if (*b_peek(buf, b_data(buf)-1) != '\n') |
354 | | b_putchr(buf, '\n'); |
355 | | #endif |
356 | 0 | } |
357 | | |
358 | | /* This function dumps the state of all threads. It returns 0 if the output
359 | | * buffer is full and it needs to be called again, otherwise non-zero. |
360 | | */ |
361 | | static int cli_io_handler_show_threads(struct appctx *appctx) |
362 | 0 | { |
363 | 0 | struct stconn *sc = appctx_sc(appctx); |
364 | 0 | int thr; |
365 | | |
366 | | /* FIXME: Don't watch the other side !*/ |
367 | 0 | if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) |
368 | 0 | return 1; |
369 | | |
370 | 0 | if (appctx->st0) |
371 | 0 | thr = appctx->st1; |
372 | 0 | else |
373 | 0 | thr = 0; |
374 | |
375 | 0 | do { |
376 | 0 | chunk_reset(&trash); |
377 | 0 | ha_thread_dump(&trash, thr); |
378 | |
379 | 0 | if (applet_putchk(appctx, &trash) == -1) { |
380 | | /* failed, try again */ |
381 | 0 | appctx->st1 = thr; |
382 | 0 | return 0; |
383 | 0 | } |
384 | 0 | thr++; |
385 | 0 | } while (thr < global.nbthread); |
386 | | |
387 | 0 | return 1; |
388 | 0 | } |
389 | | |
390 | | #if defined(HA_HAVE_DUMP_LIBS) |
391 | | /* parse a "show libs" command. It returns 1 if it emits anything otherwise zero. */ |
392 | | static int debug_parse_cli_show_libs(char **args, char *payload, struct appctx *appctx, void *private) |
393 | | { |
394 | | if (!cli_has_level(appctx, ACCESS_LVL_OPER)) |
395 | | return 1; |
396 | | |
397 | | chunk_reset(&trash); |
398 | | if (dump_libs(&trash, 1)) |
399 | | return cli_msg(appctx, LOG_INFO, trash.area); |
400 | | else |
401 | | return 0; |
402 | | } |
403 | | #endif |
404 | | |
405 | | /* Dumps the state of all threads into the trash and on fd #2, then aborts.
406 | | * A copy will be put into a trash chunk that's assigned to thread_dump_buffer |
407 | | * so that the debugger can easily find it. This buffer might be truncated if |
408 | | * too many threads are being dumped, but at least we'll dump them all on stderr. |
409 | | * If thread_dump_buffer is set, it means that a panic has already begun. |
410 | | */ |
411 | | void ha_panic() |
412 | 0 | { |
413 | 0 | struct buffer *old; |
414 | 0 | unsigned int thr; |
415 | |
416 | 0 | old = NULL; |
417 | 0 | if (!HA_ATOMIC_CAS(&thread_dump_buffer, &old, get_trash_chunk())) { |
418 | | /* a panic dump is already in progress, let's not disturb it, |
419 | | * we'll be called via signal DEBUGSIG. By returning we may be |
420 | | * able to leave a current signal handler (e.g. WDT) so that |
421 | | * this will ensure more reliable signal delivery. |
422 | | */ |
423 | 0 | return; |
424 | 0 | } |
425 | | |
426 | 0 | chunk_reset(&trash); |
427 | 0 | chunk_appendf(&trash, "Thread %u is about to kill the process.\n", tid + 1); |
428 | |
429 | 0 | for (thr = 0; thr < global.nbthread; thr++) { |
430 | 0 | ha_thread_dump(&trash, thr); |
431 | 0 | DISGUISE(write(2, trash.area, trash.data)); |
432 | 0 | b_force_xfer(thread_dump_buffer, &trash, b_room(thread_dump_buffer)); |
433 | 0 | chunk_reset(&trash); |
434 | 0 | } |
435 | |
436 | 0 | for (;;) |
437 | 0 | abort(); |
438 | 0 | } |
439 | | |
440 | | /* Complain with message <msg> on stderr. If <counter> is not NULL, it is |
441 | | * atomically incremented, and the message is only printed when the counter |
442 | | * was zero, so that the message is only printed once. <taint> is only checked |
443 | | * on bit 1, and will taint the process either for a bug (2) or warn (0). |
444 | | */ |
445 | | void complain(int *counter, const char *msg, int taint) |
446 | 0 | { |
447 | 0 | if (counter && _HA_ATOMIC_FETCH_ADD(counter, 1)) |
448 | 0 | return; |
449 | 0 | DISGUISE(write(2, msg, strlen(msg))); |
450 | 0 | if (taint & 2) |
451 | 0 | mark_tainted(TAINTED_BUG); |
452 | 0 | else |
453 | 0 | mark_tainted(TAINTED_WARN); |
454 | 0 | } |
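A minimal usage sketch of the once-only behaviour described above (the counter and function names below are invented for illustration): the message is printed on the first call only, and the process is tainted as a warning since taint bit 1 is not set.

static int odd_cond_seen;

static void report_odd_condition(void)
{
    complain(&odd_cond_seen, "odd condition detected, continuing anyway\n", 0);
}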
455 | | |
456 | | /* parse a "debug dev exit" command. It always returns 1, though it should never return. */ |
457 | | static int debug_parse_cli_exit(char **args, char *payload, struct appctx *appctx, void *private) |
458 | 0 | { |
459 | 0 | int code = atoi(args[3]); |
460 | |
461 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
462 | 0 | return 1; |
463 | | |
464 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
465 | 0 | exit(code); |
466 | 0 | return 1; |
467 | 0 | } |
468 | | |
469 | | /* parse a "debug dev bug" command. It always returns 1, though it should never return. |
470 | | * Note: we make sure not to make the function static so that it appears in the trace. |
471 | | */ |
472 | | int debug_parse_cli_bug(char **args, char *payload, struct appctx *appctx, void *private) |
473 | 0 | { |
474 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
475 | 0 | return 1; |
476 | | |
477 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
478 | 0 | BUG_ON(one > zero); |
479 | 0 | return 1; |
480 | 0 | } |
481 | | |
482 | | /* parse a "debug dev warn" command. It always returns 1. |
483 | | * Note: we make sure not to make the function static so that it appears in the trace. |
484 | | */ |
485 | | int debug_parse_cli_warn(char **args, char *payload, struct appctx *appctx, void *private) |
486 | 0 | { |
487 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
488 | 0 | return 1; |
489 | | |
490 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
491 | 0 | WARN_ON(one > zero); |
492 | 0 | return 1; |
493 | 0 | } |
494 | | |
495 | | /* parse a "debug dev check" command. It always returns 1. |
496 | | * Note: we make sure not to make the function static so that it appears in the trace. |
497 | | */ |
498 | | int debug_parse_cli_check(char **args, char *payload, struct appctx *appctx, void *private) |
499 | 0 | { |
500 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
501 | 0 | return 1; |
502 | | |
503 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
504 | 0 | CHECK_IF(one > zero); |
505 | 0 | return 1; |
506 | 0 | } |
507 | | |
508 | | /* parse a "debug dev close" command. It always returns 1. */ |
509 | | static int debug_parse_cli_close(char **args, char *payload, struct appctx *appctx, void *private) |
510 | 0 | { |
511 | 0 | int fd; |
512 | |
513 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
514 | 0 | return 1; |
515 | | |
516 | 0 | if (!*args[3]) |
517 | 0 | return cli_err(appctx, "Missing file descriptor number.\n"); |
518 | | |
519 | 0 | fd = atoi(args[3]); |
520 | 0 | if (fd < 0 || fd >= global.maxsock) |
521 | 0 | return cli_err(appctx, "File descriptor out of range.\n"); |
522 | | |
523 | 0 | if (!fdtab[fd].owner) |
524 | 0 | return cli_msg(appctx, LOG_INFO, "File descriptor was already closed.\n"); |
525 | | |
526 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
527 | 0 | fd_delete(fd); |
528 | 0 | return 1; |
529 | 0 | } |
530 | | |
531 | | /* this is meant to cause a deadlock when more than one task is running it or when run twice */ |
532 | | static struct task *debug_run_cli_deadlock(struct task *task, void *ctx, unsigned int state) |
533 | 0 | { |
534 | 0 | static HA_SPINLOCK_T lock __maybe_unused; |
535 | |
536 | 0 | HA_SPIN_LOCK(OTHER_LOCK, &lock); |
537 | 0 | return NULL; |
538 | 0 | } |
539 | | |
540 | | /* parse a "debug dev deadlock" command. It always returns 1. */ |
541 | | static int debug_parse_cli_deadlock(char **args, char *payload, struct appctx *appctx, void *private) |
542 | 0 | { |
543 | 0 | int tasks; |
544 | |
545 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
546 | 0 | return 1; |
547 | | |
548 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
549 | 0 | for (tasks = atoi(args[3]); tasks > 0; tasks--) { |
550 | 0 | struct task *t = task_new_on(tasks % global.nbthread); |
551 | 0 | if (!t) |
552 | 0 | continue; |
553 | 0 | t->process = debug_run_cli_deadlock; |
554 | 0 | t->context = NULL; |
555 | 0 | task_wakeup(t, TASK_WOKEN_INIT); |
556 | 0 | } |
557 | |
558 | 0 | return 1; |
559 | 0 | } |
560 | | |
561 | | /* parse a "debug dev delay" command. It always returns 1. */ |
562 | | static int debug_parse_cli_delay(char **args, char *payload, struct appctx *appctx, void *private) |
563 | 0 | { |
564 | 0 | int delay = atoi(args[3]); |
565 | |
566 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
567 | 0 | return 1; |
568 | | |
569 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
570 | 0 | usleep((long)delay * 1000); |
571 | 0 | return 1; |
572 | 0 | } |
573 | | |
574 | | /* parse a "debug dev log" command. It always returns 1. */ |
575 | | static int debug_parse_cli_log(char **args, char *payload, struct appctx *appctx, void *private) |
576 | 0 | { |
577 | 0 | int arg; |
578 | |
579 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
580 | 0 | return 1; |
581 | | |
582 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
583 | 0 | chunk_reset(&trash); |
584 | 0 | for (arg = 3; *args[arg]; arg++) { |
585 | 0 | if (arg > 3) |
586 | 0 | chunk_strcat(&trash, " "); |
587 | 0 | chunk_strcat(&trash, args[arg]); |
588 | 0 | } |
589 | |
590 | 0 | send_log(NULL, LOG_INFO, "%s\n", trash.area); |
591 | 0 | return 1; |
592 | 0 | } |
593 | | |
594 | | /* parse a "debug dev loop" command. It always returns 1. */ |
595 | | static int debug_parse_cli_loop(char **args, char *payload, struct appctx *appctx, void *private) |
596 | 0 | { |
597 | 0 | struct timeval deadline, curr; |
598 | 0 | int loop = atoi(args[3]); |
599 | 0 | int isolate; |
600 | |
601 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
602 | 0 | return 1; |
603 | | |
604 | 0 | isolate = strcmp(args[4], "isolated") == 0; |
605 | |
606 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
607 | 0 | gettimeofday(&curr, NULL); |
608 | 0 | tv_ms_add(&deadline, &curr, loop); |
609 | |
610 | 0 | if (isolate) |
611 | 0 | thread_isolate(); |
612 | |
613 | 0 | while (tv_ms_cmp(&curr, &deadline) < 0) |
614 | 0 | gettimeofday(&curr, NULL); |
615 | |
616 | 0 | if (isolate) |
617 | 0 | thread_release(); |
618 | |
619 | 0 | return 1; |
620 | 0 | } |
621 | | |
622 | | /* parse a "debug dev panic" command. It always returns 1, though it should never return. */ |
623 | | static int debug_parse_cli_panic(char **args, char *payload, struct appctx *appctx, void *private) |
624 | 0 | { |
625 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
626 | 0 | return 1; |
627 | | |
628 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
629 | 0 | ha_panic(); |
630 | 0 | return 1; |
631 | 0 | } |
632 | | |
633 | | /* parse a "debug dev exec" command. It always returns 1. */ |
634 | | #if defined(DEBUG_DEV) |
635 | | static int debug_parse_cli_exec(char **args, char *payload, struct appctx *appctx, void *private) |
636 | | { |
637 | | int pipefd[2]; |
638 | | int arg; |
639 | | int pid; |
640 | | |
641 | | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
642 | | return 1; |
643 | | |
644 | | _HA_ATOMIC_INC(&debug_commands_issued); |
645 | | chunk_reset(&trash); |
646 | | for (arg = 3; *args[arg]; arg++) { |
647 | | if (arg > 3) |
648 | | chunk_strcat(&trash, " "); |
649 | | chunk_strcat(&trash, args[arg]); |
650 | | } |
651 | | |
652 | | thread_isolate(); |
653 | | if (pipe(pipefd) < 0) |
654 | | goto fail_pipe; |
655 | | |
656 | | if (fd_set_cloexec(pipefd[0]) == -1) |
657 | | goto fail_fcntl; |
658 | | |
659 | | if (fd_set_cloexec(pipefd[1]) == -1) |
660 | | goto fail_fcntl; |
661 | | |
662 | | pid = fork(); |
663 | | |
664 | | if (pid < 0) |
665 | | goto fail_fork; |
666 | | else if (pid == 0) { |
667 | | /* child */ |
668 | | char *cmd[4] = { "/bin/sh", "-c", 0, 0 }; |
669 | | |
670 | | close(0); |
671 | | dup2(pipefd[1], 1); |
672 | | dup2(pipefd[1], 2); |
673 | | |
674 | | cmd[2] = trash.area; |
675 | | execvp(cmd[0], cmd); |
676 | | printf("execvp() failed\n"); |
677 | | exit(1); |
678 | | } |
679 | | |
680 | | /* parent */ |
681 | | thread_release(); |
682 | | close(pipefd[1]); |
683 | | chunk_reset(&trash); |
684 | | while (1) { |
685 | | ssize_t ret = read(pipefd[0], trash.area + trash.data, trash.size - 20 - trash.data);
686 | | if (ret <= 0) |
687 | | break; |
688 | | trash.data += ret; |
689 | | if (trash.data + 20 == trash.size) { |
690 | | chunk_strcat(&trash, "\n[[[TRUNCATED]]]\n"); |
691 | | break; |
692 | | } |
693 | | } |
694 | | close(pipefd[0]); |
695 | | waitpid(pid, NULL, WNOHANG); |
696 | | trash.area[trash.data] = 0; |
697 | | return cli_msg(appctx, LOG_INFO, trash.area); |
698 | | |
699 | | fail_fork: |
700 | | fail_fcntl: |
701 | | close(pipefd[0]); |
702 | | close(pipefd[1]); |
703 | | fail_pipe: |
704 | | thread_release(); |
705 | | return cli_err(appctx, "Failed to execute command.\n"); |
706 | | } |
707 | | |
708 | | /* handles SIGRTMAX to inject random delays on the receiving thread in order |
709 | | * to try to increase the likelihood to reproduce inter-thread races. The |
710 | | * signal is periodically sent by a task initiated by "debug dev delay-inj". |
711 | | */ |
712 | | void debug_delay_inj_sighandler(int sig, siginfo_t *si, void *arg) |
713 | | { |
714 | | volatile int i = statistical_prng_range(10000); |
715 | | |
716 | | while (i--) |
717 | | __ha_cpu_relax(); |
718 | | } |
719 | | #endif |
720 | | |
721 | | /* parse a "debug dev hex" command. It always returns 1. */ |
722 | | static int debug_parse_cli_hex(char **args, char *payload, struct appctx *appctx, void *private) |
723 | 0 | { |
724 | 0 | unsigned long start, len; |
725 | |
726 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
727 | 0 | return 1; |
728 | | |
729 | 0 | if (!*args[3]) |
730 | 0 | return cli_err(appctx, "Missing memory address to dump from.\n"); |
731 | | |
732 | 0 | start = strtoul(args[3], NULL, 0); |
733 | 0 | if (!start) |
734 | 0 | return cli_err(appctx, "Will not dump from NULL address.\n"); |
735 | | |
736 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
737 | | |
738 | | /* by default, dump ~128 bytes, ending on the next 16-byte boundary */
739 | 0 | len = strtoul(args[4], NULL, 0); |
740 | 0 | if (!len) |
741 | 0 | len = ((start + 128) & -16) - start; |
742 | |
743 | 0 | chunk_reset(&trash); |
744 | 0 | dump_hex(&trash, " ", (const void *)start, len, 1); |
745 | 0 | trash.area[trash.data] = 0; |
746 | 0 | return cli_msg(appctx, LOG_INFO, trash.area); |
747 | 0 | } |
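To make the default-length arithmetic above concrete with a made-up address: for start = 0x1005, (0x1005 + 128) & -16 gives 0x1080, so len = 0x7b (123) bytes, i.e. roughly 128 bytes trimmed so that the dump stops on a 16-byte boundary.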
748 | | |
749 | | /* parse a "debug dev sym <addr>" command. It always returns 1. */ |
750 | | static int debug_parse_cli_sym(char **args, char *payload, struct appctx *appctx, void *private) |
751 | 0 | { |
752 | 0 | unsigned long addr; |
753 | |
754 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
755 | 0 | return 1; |
756 | | |
757 | 0 | if (!*args[3]) |
758 | 0 | return cli_err(appctx, "Missing memory address to be resolved.\n"); |
759 | | |
760 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
761 | |
762 | 0 | addr = strtoul(args[3], NULL, 0); |
763 | 0 | chunk_printf(&trash, "%#lx resolves to ", addr); |
764 | 0 | resolve_sym_name(&trash, NULL, (const void *)addr); |
765 | 0 | chunk_appendf(&trash, "\n"); |
766 | |
767 | 0 | return cli_msg(appctx, LOG_INFO, trash.area); |
768 | 0 | } |
769 | | |
770 | | /* parse a "debug dev tkill" command. It always returns 1. */ |
771 | | static int debug_parse_cli_tkill(char **args, char *payload, struct appctx *appctx, void *private) |
772 | 0 | { |
773 | 0 | int thr = 0; |
774 | 0 | int sig = SIGABRT; |
775 | |
776 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
777 | 0 | return 1; |
778 | | |
779 | 0 | if (*args[3]) |
780 | 0 | thr = atoi(args[3]); |
781 | |
782 | 0 | if (thr < 0 || thr > global.nbthread) |
783 | 0 | return cli_err(appctx, "Thread number out of range (use 0 for current).\n"); |
784 | | |
785 | 0 | if (*args[4]) |
786 | 0 | sig = atoi(args[4]); |
787 | |
788 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
789 | 0 | if (thr) |
790 | 0 | ha_tkill(thr - 1, sig); |
791 | 0 | else |
792 | 0 | raise(sig); |
793 | 0 | return 1; |
794 | 0 | } |
795 | | |
796 | | /* hashes 'word' in "debug dev hash 'word' ". */ |
797 | | static int debug_parse_cli_hash(char **args, char *payload, struct appctx *appctx, void *private) |
798 | 0 | { |
799 | 0 | char *msg = NULL; |
800 | |
801 | 0 | cli_dynmsg(appctx, LOG_INFO, memprintf(&msg, "%s\n", HA_ANON_CLI(args[3]))); |
802 | 0 | return 1; |
803 | 0 | } |
804 | | |
805 | | /* parse a "debug dev write" command. It always returns 1. */ |
806 | | static int debug_parse_cli_write(char **args, char *payload, struct appctx *appctx, void *private) |
807 | 0 | { |
808 | 0 | unsigned long len; |
809 | |
810 | 0 | if (!*args[3]) |
811 | 0 | return cli_err(appctx, "Missing output size.\n"); |
812 | | |
813 | 0 | len = strtoul(args[3], NULL, 0); |
814 | 0 | if (len >= trash.size) |
815 | 0 | return cli_err(appctx, "Output too large, must be <tune.bufsize.\n"); |
816 | | |
817 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
818 | |
819 | 0 | chunk_reset(&trash); |
820 | 0 | trash.data = len; |
821 | 0 | memset(trash.area, '.', trash.data); |
822 | 0 | trash.area[trash.data] = 0; |
823 | 0 | for (len = 64; len < trash.data; len += 64) |
824 | 0 | trash.area[len] = '\n'; |
825 | 0 | return cli_msg(appctx, LOG_INFO, trash.area); |
826 | 0 | } |
827 | | |
828 | | /* parse a "debug dev stream" command */ |
829 | | /* |
830 | | * debug dev stream [strm=<ptr>] [strm.f[{+-=}<flags>]] [txn.f[{+-=}<flags>]] \ |
831 | | * [req.f[{+-=}<flags>]] [res.f[{+-=}<flags>]] \ |
832 | | * [scf.s[=<state>]] [scb.s[=<state>]] \
833 | | * [wake]
834 | | */ |
835 | | static int debug_parse_cli_stream(char **args, char *payload, struct appctx *appctx, void *private) |
836 | 0 | { |
837 | 0 | struct stream *s = appctx_strm(appctx); |
838 | 0 | int arg; |
839 | 0 | void *ptr; |
840 | 0 | int size; |
841 | 0 | const char *word, *end; |
842 | 0 | struct ist name; |
843 | 0 | char *msg = NULL; |
844 | 0 | char *endarg; |
845 | 0 | unsigned long long old, new; |
846 | |
847 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
848 | 0 | return 1; |
849 | | |
850 | 0 | ptr = NULL; size = 0; |
851 | |
852 | 0 | if (!*args[3]) { |
853 | 0 | return cli_err(appctx, |
854 | 0 | "Usage: debug dev stream [ strm=<ptr> ] { <obj> <op> <value> | wake }*\n" |
855 | 0 | " <obj> = { strm.f | strm.x | scf.s | scb.s | txn.f | req.f | res.f }\n" |
856 | 0 | " <op> = {'' (show) | '=' (assign) | '^' (xor) | '+' (or) | '-' (andnot)}\n" |
857 | 0 | " <value> = 'now' | 64-bit dec/hex integer (0x prefix supported)\n" |
858 | 0 | " 'wake' wakes the stream assigned to 'strm' (default: current)\n"
859 | 0 | ); |
860 | 0 | } |
861 | | |
862 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
863 | 0 | for (arg = 3; *args[arg]; arg++) { |
864 | 0 | old = 0; |
865 | 0 | end = word = args[arg]; |
866 | 0 | while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-') |
867 | 0 | end++; |
868 | 0 | name = ist2(word, end - word); |
869 | 0 | if (isteq(name, ist("strm"))) { |
870 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s; size = sizeof(s); |
871 | 0 | } else if (isteq(name, ist("strm.f"))) { |
872 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s->flags; size = sizeof(s->flags); |
873 | 0 | } else if (isteq(name, ist("strm.x"))) { |
874 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s->conn_exp; size = sizeof(s->conn_exp); |
875 | 0 | } else if (isteq(name, ist("txn.f"))) { |
876 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s->txn->flags; size = sizeof(s->txn->flags); |
877 | 0 | } else if (isteq(name, ist("req.f"))) { |
878 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s->req.flags; size = sizeof(s->req.flags); |
879 | 0 | } else if (isteq(name, ist("res.f"))) { |
880 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s->res.flags; size = sizeof(s->res.flags); |
881 | 0 | } else if (isteq(name, ist("scf.s"))) { |
882 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s->scf->state; size = sizeof(s->scf->state); |
883 | 0 | } else if (isteq(name, ist("scb.s"))) { |
884 | 0 | ptr = (!s || !may_access(s)) ? NULL : &s->scb->state; size = sizeof(s->scb->state);
885 | 0 | } else if (isteq(name, ist("wake"))) { |
886 | 0 | if (s && may_access(s) && may_access((void *)s + sizeof(*s) - 1)) |
887 | 0 | task_wakeup(s->task, TASK_WOKEN_TIMER|TASK_WOKEN_IO|TASK_WOKEN_MSG); |
888 | 0 | continue; |
889 | 0 | } else |
890 | 0 | return cli_dynerr(appctx, memprintf(&msg, "Unsupported field name: '%s'.\n", word)); |
891 | | |
892 | | /* read previous value */ |
893 | 0 | if ((s || ptr == &s) && ptr && may_access(ptr) && may_access(ptr + size - 1)) { |
894 | 0 | if (size == 8) |
895 | 0 | old = read_u64(ptr); |
896 | 0 | else if (size == 4) |
897 | 0 | old = read_u32(ptr); |
898 | 0 | else if (size == 2) |
899 | 0 | old = read_u16(ptr); |
900 | 0 | else |
901 | 0 | old = *(const uint8_t *)ptr; |
902 | 0 | } else { |
903 | 0 | memprintf(&msg, |
904 | 0 | "%sSkipping inaccessible pointer %p for field '%.*s'.\n", |
905 | 0 | msg ? msg : "", ptr, (int)(end - word), word); |
906 | 0 | continue; |
907 | 0 | } |
908 | | |
909 | | /* parse the new value . */ |
910 | 0 | new = strtoll(end + 1, &endarg, 0); |
911 | 0 | if (end[1] && *endarg) { |
912 | 0 | if (strcmp(end + 1, "now") == 0) |
913 | 0 | new = now_ms; |
914 | 0 | else { |
915 | 0 | memprintf(&msg, |
916 | 0 | "%sIgnoring unparsable value '%s' for field '%.*s'.\n", |
917 | 0 | msg ? msg : "", end + 1, (int)(end - word), word); |
918 | 0 | continue; |
919 | 0 | } |
920 | 0 | } |
921 | | |
922 | 0 | switch (*end) { |
923 | 0 | case '\0': /* show */ |
924 | 0 | memprintf(&msg, "%s%.*s=%#llx ", msg ? msg : "", (int)(end - word), word, old); |
925 | 0 | new = old; // do not change the value |
926 | 0 | break; |
927 | | |
928 | 0 | case '=': /* set */ |
929 | 0 | break; |
930 | | |
931 | 0 | case '^': /* XOR */ |
932 | 0 | new = old ^ new; |
933 | 0 | break; |
934 | | |
935 | 0 | case '+': /* OR */ |
936 | 0 | new = old | new; |
937 | 0 | break; |
938 | | |
939 | 0 | case '-': /* AND NOT */ |
940 | 0 | new = old & ~new; |
941 | 0 | break; |
942 | | |
943 | 0 | default: |
944 | 0 | break; |
945 | 0 | } |
946 | | |
947 | | /* write the new value */ |
948 | 0 | if (new != old) { |
949 | 0 | if (size == 8) |
950 | 0 | write_u64(ptr, new); |
951 | 0 | else if (size == 4) |
952 | 0 | write_u32(ptr, new); |
953 | 0 | else if (size == 2) |
954 | 0 | write_u16(ptr, new); |
955 | 0 | else |
956 | 0 | *(uint8_t *)ptr = new; |
957 | 0 | } |
958 | 0 | } |
959 | | |
960 | 0 | if (msg && *msg) |
961 | 0 | return cli_dynmsg(appctx, LOG_INFO, msg); |
962 | 0 | return 1; |
963 | 0 | } |
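The read/modify/write sequence above follows a generic "peek a field of known size" pattern; here is a minimal sketch of the read side (the helper name is invented for illustration, and it relies on the same may_access() and read_u*() helpers already used in this function):

static int peek_field(const void *ptr, size_t size, unsigned long long *out)
{
    /* refuse pointers whose first or last byte is not accessible */
    if (!ptr || !may_access(ptr) || !may_access(ptr + size - 1))
        return -1;

    if (size == 8)
        *out = read_u64(ptr);
    else if (size == 4)
        *out = read_u32(ptr);
    else if (size == 2)
        *out = read_u16(ptr);
    else
        *out = *(const uint8_t *)ptr;
    return 0;
}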
964 | | |
965 | | /* parse a "debug dev task" command */
966 | | /* |
967 | | * debug dev task <ptr> [ "wake" | "expire" | "kill" ] |
968 | | * Show/change status of a task/tasklet |
969 | | */ |
970 | | static int debug_parse_cli_task(char **args, char *payload, struct appctx *appctx, void *private) |
971 | 0 | { |
972 | 0 | const struct ha_caller *caller; |
973 | 0 | struct task *t; |
974 | 0 | char *endarg; |
975 | 0 | char *msg; |
976 | 0 | void *ptr; |
977 | 0 | int ret = 1; |
978 | 0 | int task_ok; |
979 | 0 | int arg; |
980 | |
981 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
982 | 0 | return 1; |
983 | | |
984 | | /* parse the pointer value */ |
985 | 0 | ptr = (void *)strtoul(args[3], &endarg, 0); |
986 | 0 | if (!*args[3] || *endarg) |
987 | 0 | goto usage; |
988 | | |
989 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
990 | | |
991 | | /* everything below must run under thread isolation till reaching label "leave" */ |
992 | 0 | thread_isolate(); |
993 | | |
994 | | /* struct tasklet is smaller than struct task and is sufficient to check |
995 | | * the TASK_COMMON part. |
996 | | */ |
997 | 0 | if (!may_access(ptr) || !may_access(ptr + sizeof(struct tasklet) - 1) || |
998 | 0 | ((const struct tasklet *)ptr)->tid < -1 || |
999 | 0 | ((const struct tasklet *)ptr)->tid >= (int)MAX_THREADS) { |
1000 | 0 | ret = cli_err(appctx, "The designated memory area doesn't look like a valid task/tasklet\n"); |
1001 | 0 | goto leave; |
1002 | 0 | } |
1003 | | |
1004 | 0 | t = ptr; |
1005 | 0 | caller = t->caller; |
1006 | 0 | msg = NULL; |
1007 | 0 | task_ok = may_access(ptr + sizeof(*t) - 1); |
1008 | |
|
1009 | 0 | chunk_reset(&trash); |
1010 | 0 | resolve_sym_name(&trash, NULL, (const void *)t->process); |
1011 | | |
1012 | | /* we need to be careful here because we may dump a freed task that's |
1013 | | * still in the pool cache, containing garbage in pointers. |
1014 | | */ |
1015 | 0 | if (!*args[4]) { |
1016 | 0 | memprintf(&msg, "%s%p: %s state=%#x tid=%d process=%s ctx=%p calls=%d last=%s:%d intl=%d", |
1017 | 0 | msg ? msg : "", t, (t->state & TASK_F_TASKLET) ? "tasklet" : "task", |
1018 | 0 | t->state, t->tid, trash.area, t->context, t->calls, |
1019 | 0 | caller && may_access(caller) && may_access(caller->func) && isalnum((uchar)*caller->func) ? caller->func : "0", |
1020 | 0 | caller ? t->caller->line : 0, |
1021 | 0 | (t->state & TASK_F_TASKLET) ? LIST_INLIST(&((const struct tasklet *)t)->list) : 0); |
1022 | |
1023 | 0 | if (task_ok && !(t->state & TASK_F_TASKLET)) |
1024 | 0 | memprintf(&msg, "%s inrq=%d inwq=%d exp=%d nice=%d", |
1025 | 0 | msg ? msg : "", task_in_rq(t), task_in_wq(t), t->expire, t->nice); |
1026 | |
1027 | 0 | memprintf(&msg, "%s\n", msg ? msg : ""); |
1028 | 0 | } |
1029 | |
1030 | 0 | for (arg = 4; *args[arg]; arg++) { |
1031 | 0 | if (strcmp(args[arg], "expire") == 0) { |
1032 | 0 | if (t->state & TASK_F_TASKLET) { |
1033 | | /* do nothing for tasklets */ |
1034 | 0 | } |
1035 | 0 | else if (task_ok) { |
1036 | | /* unlink task and wake with timer flag */ |
1037 | 0 | __task_unlink_wq(t); |
1038 | 0 | t->expire = now_ms; |
1039 | 0 | task_wakeup(t, TASK_WOKEN_TIMER); |
1040 | 0 | } |
1041 | 0 | } else if (strcmp(args[arg], "wake") == 0) { |
1042 | | /* wake with all flags but init / timer */ |
1043 | 0 | if (t->state & TASK_F_TASKLET) |
1044 | 0 | tasklet_wakeup((struct tasklet *)t); |
1045 | 0 | else if (task_ok) |
1046 | 0 | task_wakeup(t, TASK_WOKEN_ANY & ~(TASK_WOKEN_INIT|TASK_WOKEN_TIMER)); |
1047 | 0 | } else if (strcmp(args[arg], "kill") == 0) { |
1048 | | /* Kill the task. This is not idempotent! */ |
1049 | 0 | if (!(t->state & TASK_KILLED)) { |
1050 | 0 | if (t->state & TASK_F_TASKLET) |
1051 | 0 | tasklet_kill((struct tasklet *)t); |
1052 | 0 | else if (task_ok) |
1053 | 0 | task_kill(t); |
1054 | 0 | } |
1055 | 0 | } else { |
1056 | 0 | thread_release(); |
1057 | 0 | goto usage; |
1058 | 0 | } |
1059 | 0 | } |
1060 | | |
1061 | 0 | if (msg && *msg) |
1062 | 0 | ret = cli_dynmsg(appctx, LOG_INFO, msg); |
1063 | 0 | leave: |
1064 | 0 | thread_release(); |
1065 | 0 | return ret; |
1066 | 0 | usage: |
1067 | 0 | return cli_err(appctx, |
1068 | 0 | "Usage: debug dev task <ptr> [ wake | expire | kill ]\n" |
1069 | 0 | " By default, dumps some info on task/tasklet <ptr>. 'wake' will wake it up\n" |
1070 | 0 | " with all condition flags but init/exp. 'expire' will expire the entry, and\n"
1071 | 0 | " 'kill' will kill it (warning: may crash since it is not idempotent!). All\n"
1072 | 0 | " changes may crash the process if performed on a wrong object!\n" |
1073 | 0 | ); |
1074 | 0 | } |
1075 | | |
1076 | | #if defined(DEBUG_DEV) |
1077 | | static struct task *debug_delay_inj_task(struct task *t, void *ctx, unsigned int state) |
1078 | | { |
1079 | | unsigned long *tctx = ctx; // [0] = interval, [1] = nbwakeups |
1080 | | unsigned long inter = tctx[0]; |
1081 | | unsigned long count = tctx[1]; |
1082 | | unsigned long rnd; |
1083 | | |
1084 | | if (inter) |
1085 | | t->expire = tick_add(now_ms, inter); |
1086 | | else |
1087 | | task_wakeup(t, TASK_WOKEN_MSG); |
1088 | | |
1089 | | /* wake a random thread */ |
1090 | | while (count--) { |
1091 | | rnd = statistical_prng_range(global.nbthread); |
1092 | | ha_tkill(rnd, SIGRTMAX); |
1093 | | } |
1094 | | return t; |
1095 | | } |
1096 | | |
1097 | | /* parse a "debug dev delay-inj" command |
1098 | | * debug dev delay-inj <inter> <count> |
1099 | | */ |
1100 | | static int debug_parse_delay_inj(char **args, char *payload, struct appctx *appctx, void *private) |
1101 | | { |
1102 | | unsigned long *tctx; // [0] = inter, [2] = count |
1103 | | struct task *task; |
1104 | | |
1105 | | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
1106 | | return 1; |
1107 | | |
1108 | | if (!*args[4]) |
1109 | | return cli_err(appctx, "Usage: debug dev delay-inj <inter_ms> <count>*\n"); |
1110 | | |
1111 | | _HA_ATOMIC_INC(&debug_commands_issued); |
1112 | | |
1113 | | tctx = calloc(sizeof(*tctx), 2); |
1114 | | if (!tctx) |
1115 | | goto fail; |
1116 | | |
1117 | | tctx[0] = atoi(args[3]); |
1118 | | tctx[1] = atoi(args[4]); |
1119 | | |
1120 | | task = task_new_here/*anywhere*/(); |
1121 | | if (!task) |
1122 | | goto fail; |
1123 | | |
1124 | | task->process = debug_delay_inj_task; |
1125 | | task->context = tctx; |
1126 | | task_wakeup(task, TASK_WOKEN_INIT); |
1127 | | return 1; |
1128 | | |
1129 | | fail: |
1130 | | free(tctx); |
1131 | | return cli_err(appctx, "Not enough memory"); |
1132 | | } |
1133 | | #endif // DEBUG_DEV |
1134 | | |
1135 | | static struct task *debug_task_handler(struct task *t, void *ctx, unsigned int state) |
1136 | 0 | { |
1137 | 0 | unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) } |
1138 | 0 | unsigned long inter = tctx[1]; |
1139 | 0 | unsigned long rnd; |
1140 | |
1141 | 0 | t->expire = tick_add(now_ms, inter); |
1142 | | |
1143 | | /* half of the calls will wake up another entry */ |
1144 | 0 | rnd = statistical_prng(); |
1145 | 0 | if (rnd & 1) { |
1146 | 0 | rnd >>= 1; |
1147 | 0 | rnd %= tctx[0]; |
1148 | 0 | rnd = tctx[rnd + 2]; |
1149 | |
1150 | 0 | if (rnd & 1) |
1151 | 0 | task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG); |
1152 | 0 | else |
1153 | 0 | tasklet_wakeup((struct tasklet *)rnd); |
1154 | 0 | } |
1155 | 0 | return t; |
1156 | 0 | } |
1157 | | |
1158 | | static struct task *debug_tasklet_handler(struct task *t, void *ctx, unsigned int state) |
1159 | 0 | { |
1160 | 0 | unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) } |
1161 | 0 | unsigned long rnd; |
1162 | 0 | int i; |
1163 | | |
1164 | | /* wake up two random entries */ |
1165 | 0 | for (i = 0; i < 2; i++) { |
1166 | 0 | rnd = statistical_prng() % tctx[0]; |
1167 | 0 | rnd = tctx[rnd + 2]; |
1168 | |
1169 | 0 | if (rnd & 1) |
1170 | 0 | task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG); |
1171 | 0 | else |
1172 | 0 | tasklet_wakeup((struct tasklet *)rnd); |
1173 | 0 | } |
1174 | 0 | return t; |
1175 | 0 | } |
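Both handlers above rely on the { tl | (tsk+1) } convention documented in the tctx comments: a task pointer is stored with bit 0 set and a tasklet pointer with bit 0 clear, which is safe because both objects are at least word-aligned. A minimal sketch of that tagging (the helper names are invented for illustration):

static inline unsigned long sched_tag(struct task *t, struct tasklet *tl)
{
    /* exactly one of <t> or <tl> is expected to be non-NULL */
    return t ? (unsigned long)t + 1 : (unsigned long)tl;
}

static inline void sched_wake(unsigned long ctx)
{
    if (ctx & 1)
        task_wakeup((struct task *)(ctx - 1), TASK_WOKEN_MSG);
    else
        tasklet_wakeup((struct tasklet *)ctx);
}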
1176 | | |
1177 | | /* parse a "debug dev sched" command |
1178 | | * debug dev sched {task|tasklet} [count=<count>] [tid=<tid>] [inter=<inter>]
1179 | | */ |
1180 | | static int debug_parse_cli_sched(char **args, char *payload, struct appctx *appctx, void *private) |
1181 | 0 | { |
1182 | 0 | int arg; |
1183 | 0 | void *ptr; |
1184 | 0 | int size; |
1185 | 0 | const char *word, *end; |
1186 | 0 | struct ist name; |
1187 | 0 | char *msg = NULL; |
1188 | 0 | char *endarg; |
1189 | 0 | unsigned long long new; |
1190 | 0 | unsigned long count = 0; |
1191 | 0 | unsigned long thrid = tid; |
1192 | 0 | unsigned int inter = 0; |
1193 | 0 | unsigned long i; |
1194 | 0 | int mode = 0; // 0 = tasklet; 1 = task |
1195 | 0 | unsigned long *tctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) } |
1196 | |
1197 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
1198 | 0 | return 1; |
1199 | | |
1200 | 0 | ptr = NULL; size = 0; |
1201 | |
1202 | 0 | if (strcmp(args[3], "task") != 0 && strcmp(args[3], "tasklet") != 0) { |
1203 | 0 | return cli_err(appctx, |
1204 | 0 | "Usage: debug dev sched {task|tasklet} { <obj> = <value> }*\n" |
1205 | 0 | " <obj> = {count | tid | inter }\n" |
1206 | 0 | " <value> = 64-bit dec/hex integer (0x prefix supported)\n" |
1207 | 0 | ); |
1208 | 0 | } |
1209 | | |
1210 | 0 | mode = strcmp(args[3], "task") == 0; |
1211 | |
1212 | 0 | _HA_ATOMIC_INC(&debug_commands_issued); |
1213 | 0 | for (arg = 4; *args[arg]; arg++) { |
1214 | 0 | end = word = args[arg]; |
1215 | 0 | while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-') |
1216 | 0 | end++; |
1217 | 0 | name = ist2(word, end - word); |
1218 | 0 | if (isteq(name, ist("count"))) { |
1219 | 0 | ptr = &count; size = sizeof(count); |
1220 | 0 | } else if (isteq(name, ist("tid"))) { |
1221 | 0 | ptr = &thrid; size = sizeof(thrid); |
1222 | 0 | } else if (isteq(name, ist("inter"))) { |
1223 | 0 | ptr = &inter; size = sizeof(inter); |
1224 | 0 | } else |
1225 | 0 | return cli_dynerr(appctx, memprintf(&msg, "Unsupported setting: '%s'.\n", word)); |
1226 | | |
1227 | | /* parse the new value . */ |
1228 | 0 | new = strtoll(end + 1, &endarg, 0); |
1229 | 0 | if (end[1] && *endarg) { |
1230 | 0 | memprintf(&msg, |
1231 | 0 | "%sIgnoring unparsable value '%s' for field '%.*s'.\n", |
1232 | 0 | msg ? msg : "", end + 1, (int)(end - word), word); |
1233 | 0 | continue; |
1234 | 0 | } |
1235 | | |
1236 | | /* write the new value */ |
1237 | 0 | if (size == 8) |
1238 | 0 | write_u64(ptr, new); |
1239 | 0 | else if (size == 4) |
1240 | 0 | write_u32(ptr, new); |
1241 | 0 | else if (size == 2) |
1242 | 0 | write_u16(ptr, new); |
1243 | 0 | else |
1244 | 0 | *(uint8_t *)ptr = new; |
1245 | 0 | } |
1246 | | |
1247 | 0 | tctx = calloc(sizeof(*tctx), count + 2); |
1248 | 0 | if (!tctx) |
1249 | 0 | goto fail; |
1250 | | |
1251 | 0 | tctx[0] = (unsigned long)count; |
1252 | 0 | tctx[1] = (unsigned long)inter; |
1253 | |
1254 | 0 | if (thrid >= global.nbthread) |
1255 | 0 | thrid = tid; |
1256 | |
1257 | 0 | for (i = 0; i < count; i++) { |
1258 | | /* now, if poly or mask was set, tmask corresponds to the |
1259 | | * valid thread mask to use, otherwise it remains zero. |
1260 | | */ |
1261 | | //printf("%lu: mode=%d mask=%#lx\n", i, mode, tmask); |
1262 | 0 | if (mode == 0) { |
1263 | 0 | struct tasklet *tl = tasklet_new(); |
1264 | |
1265 | 0 | if (!tl) |
1266 | 0 | goto fail; |
1267 | | |
1268 | 0 | tl->tid = thrid; |
1269 | 0 | tl->process = debug_tasklet_handler; |
1270 | 0 | tl->context = tctx; |
1271 | 0 | tctx[i + 2] = (unsigned long)tl; |
1272 | 0 | } else { |
1273 | 0 | struct task *task = task_new_on(thrid); |
1274 | |
1275 | 0 | if (!task) |
1276 | 0 | goto fail; |
1277 | | |
1278 | 0 | task->process = debug_task_handler; |
1279 | 0 | task->context = tctx; |
1280 | 0 | tctx[i + 2] = (unsigned long)task + 1; |
1281 | 0 | } |
1282 | 0 | } |
1283 | | |
1284 | | /* start the tasks and tasklets */ |
1285 | 0 | for (i = 0; i < count; i++) { |
1286 | 0 | unsigned long ctx = tctx[i + 2]; |
1287 | |
1288 | 0 | if (ctx & 1) |
1289 | 0 | task_wakeup((struct task *)(ctx - 1), TASK_WOKEN_INIT); |
1290 | 0 | else |
1291 | 0 | tasklet_wakeup((struct tasklet *)ctx); |
1292 | 0 | } |
1293 | |
1294 | 0 | if (msg && *msg) |
1295 | 0 | return cli_dynmsg(appctx, LOG_INFO, msg); |
1296 | 0 | return 1; |
1297 | | |
1298 | 0 | fail: |
1299 | | /* free partially allocated entries */ |
1300 | 0 | for (i = 0; tctx && i < count; i++) { |
1301 | 0 | unsigned long ctx = tctx[i + 2]; |
1302 | |
1303 | 0 | if (!ctx) |
1304 | 0 | break; |
1305 | | |
1306 | 0 | if (ctx & 1) |
1307 | 0 | task_destroy((struct task *)(ctx - 1)); |
1308 | 0 | else |
1309 | 0 | tasklet_free((struct tasklet *)ctx); |
1310 | 0 | } |
1311 | |
1312 | 0 | free(tctx); |
1313 | 0 | return cli_err(appctx, "Not enough memory"); |
1314 | 0 | } |
1315 | | |
1316 | | /* CLI state for "debug dev fd" */ |
1317 | | struct dev_fd_ctx { |
1318 | | int start_fd; |
1319 | | }; |
1320 | | |
1321 | | /* CLI parser for the "debug dev fd" command. The current FD to restart from is |
1322 | | * stored in a struct dev_fd_ctx pointed to by svcctx. |
1323 | | */ |
1324 | | static int debug_parse_cli_fd(char **args, char *payload, struct appctx *appctx, void *private) |
1325 | 0 | { |
1326 | 0 | struct dev_fd_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx)); |
1327 | |
1328 | 0 | if (!cli_has_level(appctx, ACCESS_LVL_OPER)) |
1329 | 0 | return 1; |
1330 | | |
1331 | | /* start at fd #0 */ |
1332 | 0 | ctx->start_fd = 0; |
1333 | 0 | return 0; |
1334 | 0 | } |
1335 | | |
1336 | | /* CLI I/O handler for the "debug dev fd" command. Dumps all FDs that are |
1337 | | * accessible from the process but not known from fdtab. The FD number to |
1338 | | * restart from is stored in a struct dev_fd_ctx pointed to by svcctx. |
1339 | | */ |
1340 | | static int debug_iohandler_fd(struct appctx *appctx) |
1341 | 0 | { |
1342 | 0 | struct dev_fd_ctx *ctx = appctx->svcctx; |
1343 | 0 | struct stconn *sc = appctx_sc(appctx); |
1344 | 0 | struct sockaddr_storage sa; |
1345 | 0 | struct stat statbuf; |
1346 | 0 | socklen_t salen, vlen; |
1347 | 0 | int ret1, ret2, port; |
1348 | 0 | char *addrstr; |
1349 | 0 | int ret = 1; |
1350 | 0 | int i, fd; |
1351 | | |
1352 | | /* FIXME: Don't watch the other side !*/ |
1353 | 0 | if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) |
1354 | 0 | goto end; |
1355 | | |
1356 | 0 | chunk_reset(&trash); |
1357 | |
1358 | 0 | thread_isolate(); |
1359 | | |
1360 | | /* scan all file descriptors known to the process and report those that
1361 | | * are not registered in fdtab (orphans).
1362 | | */ |
1363 | 0 | for (fd = ctx->start_fd; fd < global.maxsock; fd++) { |
1364 | | /* check for FD's existence */ |
1365 | 0 | ret1 = fcntl(fd, F_GETFD, 0); |
1366 | 0 | if (ret1 == -1) |
1367 | 0 | continue; // not known to the process |
1368 | 0 | if (fdtab[fd].owner) |
1369 | 0 | continue; // well-known |
1370 | | |
1371 | | /* OK we're seeing an orphan let's try to retrieve as much |
1372 | | * information as possible about it. |
1373 | | */ |
1374 | 0 | chunk_printf(&trash, "%5d", fd); |
1375 | |
1376 | 0 | if (fstat(fd, &statbuf) != -1) { |
1377 | 0 | chunk_appendf(&trash, " type=%s mod=%04o dev=%#llx siz=%#llx uid=%lld gid=%lld fs=%#llx ino=%#llx", |
1378 | 0 | isatty(fd) ? "tty.": |
1379 | 0 | S_ISREG(statbuf.st_mode) ? "file": |
1380 | 0 | S_ISDIR(statbuf.st_mode) ? "dir.": |
1381 | 0 | S_ISCHR(statbuf.st_mode) ? "chr.": |
1382 | 0 | S_ISBLK(statbuf.st_mode) ? "blk.": |
1383 | 0 | S_ISFIFO(statbuf.st_mode) ? "pipe": |
1384 | 0 | S_ISLNK(statbuf.st_mode) ? "link": |
1385 | 0 | S_ISSOCK(statbuf.st_mode) ? "sock": |
1386 | | #ifdef USE_EPOLL |
1387 | | epoll_wait(fd, NULL, 0, 0) != -1 || errno != EBADF ? "epol": |
1388 | | #endif |
1389 | 0 | "????", |
1390 | 0 | (uint)statbuf.st_mode & 07777, |
1391 | |
1392 | 0 | (ullong)statbuf.st_rdev, |
1393 | 0 | (ullong)statbuf.st_size, |
1394 | 0 | (ullong)statbuf.st_uid, |
1395 | 0 | (ullong)statbuf.st_gid, |
1396 | |
1397 | 0 | (ullong)statbuf.st_dev, |
1398 | 0 | (ullong)statbuf.st_ino); |
1399 | 0 | } |
1400 | |
1401 | 0 | chunk_appendf(&trash, " getfd=%s+%#x", |
1402 | 0 | (ret1 & FD_CLOEXEC) ? "cloex" : "", |
1403 | 0 | ret1 &~ FD_CLOEXEC); |
1404 | | |
1405 | | /* FD options */ |
1406 | 0 | ret2 = fcntl(fd, F_GETFL, 0); |
1407 | 0 | if (ret2) { |
1408 | 0 | chunk_appendf(&trash, " getfl=%s", |
1409 | 0 | (ret1 & 3) >= 2 ? "O_RDWR" : |
1410 | 0 | (ret1 & 1) ? "O_WRONLY" : "O_RDONLY"); |
1411 | |
1412 | 0 | for (i = 2; i < 32; i++) { |
1413 | 0 | if (!(ret2 & (1UL << i))) |
1414 | 0 | continue; |
1415 | 0 | switch (1UL << i) { |
1416 | 0 | case O_CREAT: chunk_appendf(&trash, ",O_CREAT"); break; |
1417 | 0 | case O_EXCL: chunk_appendf(&trash, ",O_EXCL"); break; |
1418 | 0 | case O_NOCTTY: chunk_appendf(&trash, ",O_NOCTTY"); break; |
1419 | 0 | case O_TRUNC: chunk_appendf(&trash, ",O_TRUNC"); break; |
1420 | 0 | case O_APPEND: chunk_appendf(&trash, ",O_APPEND"); break; |
1421 | 0 | #ifdef O_ASYNC |
1422 | 0 | case O_ASYNC: chunk_appendf(&trash, ",O_ASYNC"); break; |
1423 | 0 | #endif |
1424 | | #ifdef O_DIRECT |
1425 | | case O_DIRECT: chunk_appendf(&trash, ",O_DIRECT"); break; |
1426 | | #endif |
1427 | | #ifdef O_NOATIME |
1428 | | case O_NOATIME: chunk_appendf(&trash, ",O_NOATIME"); break; |
1429 | | #endif |
1430 | 0 | } |
1431 | 0 | } |
1432 | 0 | } |
1433 | | |
1434 | 0 | vlen = sizeof(ret2); |
1435 | 0 | ret1 = getsockopt(fd, SOL_SOCKET, SO_TYPE, &ret2, &vlen); |
1436 | 0 | if (ret1 != -1) |
1437 | 0 | chunk_appendf(&trash, " so_type=%d", ret2); |
1438 | |
1439 | 0 | vlen = sizeof(ret2); |
1440 | 0 | ret1 = getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &ret2, &vlen); |
1441 | 0 | if (ret1 != -1) |
1442 | 0 | chunk_appendf(&trash, " so_accept=%d", ret2); |
1443 | |
1444 | 0 | vlen = sizeof(ret2); |
1445 | 0 | ret1 = getsockopt(fd, SOL_SOCKET, SO_ERROR, &ret2, &vlen); |
1446 | 0 | if (ret1 != -1) |
1447 | 0 | chunk_appendf(&trash, " so_error=%d", ret2); |
1448 | |
1449 | 0 | salen = sizeof(sa); |
1450 | 0 | if (getsockname(fd, (struct sockaddr *)&sa, &salen) != -1) { |
1451 | 0 | if (sa.ss_family == AF_INET) |
1452 | 0 | port = ntohs(((const struct sockaddr_in *)&sa)->sin_port); |
1453 | 0 | else if (sa.ss_family == AF_INET6) |
1454 | 0 | port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port); |
1455 | 0 | else |
1456 | 0 | port = 0; |
1457 | 0 | addrstr = sa2str(&sa, port, 0); |
1458 | 0 | chunk_appendf(&trash, " laddr=%s", addrstr); |
1459 | 0 | free(addrstr); |
1460 | 0 | } |
1461 | |
1462 | 0 | salen = sizeof(sa); |
1463 | 0 | if (getpeername(fd, (struct sockaddr *)&sa, &salen) != -1) { |
1464 | 0 | if (sa.ss_family == AF_INET) |
1465 | 0 | port = ntohs(((const struct sockaddr_in *)&sa)->sin_port); |
1466 | 0 | else if (sa.ss_family == AF_INET6) |
1467 | 0 | port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port); |
1468 | 0 | else |
1469 | 0 | port = 0; |
1470 | 0 | addrstr = sa2str(&sa, port, 0); |
1471 | 0 | chunk_appendf(&trash, " raddr=%s", addrstr); |
1472 | 0 | free(addrstr); |
1473 | 0 | } |
1474 | |
1475 | 0 | chunk_appendf(&trash, "\n"); |
1476 | |
1477 | 0 | if (applet_putchk(appctx, &trash) == -1) { |
1478 | 0 | ctx->start_fd = fd; |
1479 | 0 | ret = 0; |
1480 | 0 | break; |
1481 | 0 | } |
1482 | 0 | } |
1483 | | |
1484 | 0 | thread_release(); |
1485 | 0 | end: |
1486 | 0 | return ret; |
1487 | 0 | } |
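The loop above probes each descriptor with plain fcntl() and getsockopt() calls and prints whatever the kernel reports, which is why it works on sockets and regular files alike. Below is a minimal standalone sketch of the same probing technique; probe_fd() and the output format are illustrative and not part of HAProxy:

```c
/* Minimal sketch of the descriptor probing done by the dump above:
 * report close-on-exec, the access mode and, for sockets, the type
 * and pending error. probe_fd() is an illustrative name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/socket.h>

static void probe_fd(int fd)
{
	int fdflags, flflags, val;
	socklen_t vlen;

	fdflags = fcntl(fd, F_GETFD, 0);           /* descriptor flags */
	if (fdflags != -1)
		printf("fd=%d cloexec=%d", fd, !!(fdflags & FD_CLOEXEC));

	flflags = fcntl(fd, F_GETFL, 0);           /* file status flags */
	if (flflags != -1)
		printf(" mode=%s",
		       (flflags & O_ACCMODE) == O_RDWR   ? "O_RDWR"   :
		       (flflags & O_ACCMODE) == O_WRONLY ? "O_WRONLY" : "O_RDONLY");

	vlen = sizeof(val);                        /* only succeeds on sockets */
	if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &val, &vlen) == 0)
		printf(" so_type=%d", val);

	vlen = sizeof(val);
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &val, &vlen) == 0)
		printf(" so_error=%d", val);

	putchar('\n');
}

int main(void)
{
	probe_fd(0);   /* stdin */
	return 0;
}
```

On a socket the two getsockopt() calls succeed and add the so_type/so_error fields, much as the rogue-FD dump does; on anything else they simply fail and are skipped.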
1488 | | |
1489 | | #if defined(DEBUG_MEM_STATS) |
1490 | | |
1491 | | /* CLI state for "debug dev memstats" */ |
1492 | | struct dev_mem_ctx { |
1493 | | struct mem_stats *start, *stop; /* begin/end of dump */ |
1494 | | char *match; /* non-null if a name prefix is specified */ |
1495 | | int show_all; /* show all entries if non-null */ |
1496 | | int width; /* 1st column width */ |
1497 | | long tot_size; /* sum of alloc-free */ |
1498 | | ulong tot_calls; /* sum of calls */ |
1499 | | }; |
1500 | | |
1501 | | /* CLI parser for the "debug dev memstats" command. Sets a dev_mem_ctx shown above. */ |
1502 | | static int debug_parse_cli_memstats(char **args, char *payload, struct appctx *appctx, void *private) |
1503 | | { |
1504 | | struct dev_mem_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx)); |
1505 | | int arg; |
1506 | | |
1507 | | extern __attribute__((__weak__)) struct mem_stats __start_mem_stats; |
1508 | | extern __attribute__((__weak__)) struct mem_stats __stop_mem_stats; |
1509 | | |
1510 | | if (!cli_has_level(appctx, ACCESS_LVL_OPER)) |
1511 | | return 1; |
1512 | | |
1513 | | for (arg = 3; *args[arg]; arg++) { |
1514 | | if (strcmp(args[arg], "reset") == 0) { |
1515 | | struct mem_stats *ptr; |
1516 | | |
1517 | | if (!cli_has_level(appctx, ACCESS_LVL_ADMIN)) |
1518 | | return 1; |
1519 | | |
1520 | | for (ptr = &__start_mem_stats; ptr < &__stop_mem_stats; ptr++) { |
1521 | | _HA_ATOMIC_STORE(&ptr->calls, 0); |
1522 | | _HA_ATOMIC_STORE(&ptr->size, 0); |
1523 | | } |
1524 | | return 1; |
1525 | | } |
1526 | | else if (strcmp(args[arg], "all") == 0) { |
1527 | | ctx->show_all = 1; |
1528 | | continue; |
1529 | | } |
1530 | | else if (strcmp(args[arg], "match") == 0 && *args[arg + 1]) { |
1531 | | ha_free(&ctx->match); |
1532 | | ctx->match = strdup(args[arg + 1]); |
1533 | | arg++; |
1534 | | continue; |
1535 | | } |
1536 | | else |
1537 | | return cli_err(appctx, "Expects either 'reset', 'all', or 'match <pfx>'.\n"); |
1538 | | } |
1539 | | |
1540 | | /* otherwise proceed with the dump from p0 to p1 */ |
1541 | | ctx->start = &__start_mem_stats; |
1542 | | ctx->stop = &__stop_mem_stats; |
1543 | | ctx->width = 0; |
1544 | | return 0; |
1545 | | } |
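The weak __start_mem_stats / __stop_mem_stats declarations above work because every mem_stats counter is emitted into a dedicated linker section, and GNU linkers automatically define __start_<section> / __stop_<section> symbols around any section whose name is a valid C identifier; marking them weak keeps the link working when no counter exists. A standalone sketch of that mechanism, with an illustrative section name rather than HAProxy's:

```c
/* Sketch of the __start_/__stop_ section-bounds technique relied on by
 * the mem_stats counters. "demo_stats" is an illustrative section name;
 * GNU ld and LLD define __start_demo_stats/__stop_demo_stats automatically
 * because the name is a valid C identifier.
 */
#include <stdio.h>

struct demo_stat {
	const char *name;
	unsigned long calls;
};

/* any translation unit can drop an entry into the shared section */
#define DEMO_STAT(n)						\
	static struct demo_stat stat_##n			\
	__attribute__((used, section("demo_stats"))) = { .name = #n }

DEMO_STAT(alloc);
DEMO_STAT(free);

extern struct demo_stat __start_demo_stats __attribute__((weak));
extern struct demo_stat __stop_demo_stats __attribute__((weak));

int main(void)
{
	struct demo_stat *p;

	/* walk everything the linker collected between the two symbols */
	for (p = &__start_demo_stats; p < &__stop_demo_stats; p++)
		printf("%s: %lu calls\n", p->name, p->calls);
	return 0;
}
```

The "reset" branch above walks the same kind of range to zero each counter atomically, and the I/O handler below iterates it twice, once to size the first column and once to print the entries.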
1546 | | |
1547 | | /* CLI I/O handler for the "debug dev memstats" command using a dev_mem_ctx |
1548 | | * found in appctx->svcctx. Dumps all mem_stats structs referenced by pointers |
1549 | | * located between ->start and ->stop. Dumps all entries if ->show_all != 0, |
1550 | | * otherwise only non-zero calls. |
1551 | | */ |
1552 | | static int debug_iohandler_memstats(struct appctx *appctx) |
1553 | | { |
1554 | | struct dev_mem_ctx *ctx = appctx->svcctx; |
1555 | | struct stconn *sc = appctx_sc(appctx); |
1556 | | struct mem_stats *ptr; |
1557 | | const char *pfx = ctx->match; |
1558 | | int ret = 1; |
1559 | | |
1560 | | /* FIXME: Don't watch the other side !*/ |
1561 | | if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) |
1562 | | goto end; |
1563 | | |
1564 | | if (!ctx->width) { |
1565 | | /* we don't know the first column's width, let's compute it |
1566 | | * now based on a first pass on printable entries and their |
1567 | | * expected width (approximated). |
1568 | | */ |
1569 | | for (ptr = ctx->start; ptr != ctx->stop; ptr++) { |
1570 | | const char *p, *name; |
1571 | | int w = 0; |
1572 | | char tmp; |
1573 | | |
1574 | | if (!ptr->size && !ptr->calls && !ctx->show_all) |
1575 | | continue; |
1576 | | |
1577 | | for (p = name = ptr->caller.file; *p; p++) { |
1578 | | if (*p == '/') |
1579 | | name = p + 1; |
1580 | | } |
1581 | | |
1582 | | if (ctx->show_all) |
1583 | | w = snprintf(&tmp, 0, "%s(%s:%d) ", ptr->caller.func, name, ptr->caller.line); |
1584 | | else |
1585 | | w = snprintf(&tmp, 0, "%s:%d ", name, ptr->caller.line); |
1586 | | |
1587 | | if (w > ctx->width) |
1588 | | ctx->width = w; |
1589 | | } |
1590 | | } |
1591 | | |
1592 | | /* we have two inner loops here, one for the proxy, the other one for |
1593 | | * the buffer. |
1594 | | */ |
1595 | | for (ptr = ctx->start; ptr != ctx->stop; ptr++) { |
1596 | | const char *type; |
1597 | | const char *name; |
1598 | | const char *p; |
1599 | | const char *info = NULL; |
1600 | | const char *func = NULL; |
1601 | | int direction = 0; // neither alloc nor free (e.g. realloc) |
1602 | | |
1603 | | if (!ptr->size && !ptr->calls && !ctx->show_all) |
1604 | | continue; |
1605 | | |
1606 | | /* basename only */ |
1607 | | for (p = name = ptr->caller.file; *p; p++) { |
1608 | | if (*p == '/') |
1609 | | name = p + 1; |
1610 | | } |
1611 | | |
1612 | | func = ptr->caller.func; |
1613 | | |
1614 | | switch (ptr->caller.what) { |
1615 | | case MEM_STATS_TYPE_CALLOC: type = "CALLOC"; direction = 1; break; |
1616 | | case MEM_STATS_TYPE_FREE: type = "FREE"; direction = -1; break; |
1617 | | case MEM_STATS_TYPE_MALLOC: type = "MALLOC"; direction = 1; break; |
1618 | | case MEM_STATS_TYPE_REALLOC: type = "REALLOC"; break; |
1619 | | case MEM_STATS_TYPE_STRDUP: type = "STRDUP"; direction = 1; break; |
1620 | | case MEM_STATS_TYPE_P_ALLOC: type = "P_ALLOC"; direction = 1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break; |
1621 | | case MEM_STATS_TYPE_P_FREE: type = "P_FREE"; direction = -1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break; |
1622 | | default: type = "UNSET"; break; |
1623 | | } |
1624 | | |
1625 | | //chunk_printf(&trash, |
1626 | | // "%20s:%-5d %7s size: %12lu calls: %9lu size/call: %6lu\n", |
1627 | | // name, ptr->line, type, |
1628 | | // (unsigned long)ptr->size, (unsigned long)ptr->calls, |
1629 | | // (unsigned long)(ptr->calls ? (ptr->size / ptr->calls) : 0)); |
1630 | | |
1631 | | /* only match requested prefixes */ |
1632 | | if (pfx && (!info || strncmp(info, pfx, strlen(pfx)) != 0)) |
1633 | | continue; |
1634 | | |
1635 | | chunk_reset(&trash); |
1636 | | if (ctx->show_all) |
1637 | | chunk_appendf(&trash, "%s(", func); |
1638 | | |
1639 | | chunk_appendf(&trash, "%s:%d", name, ptr->caller.line); |
1640 | | |
1641 | | if (ctx->show_all) |
1642 | | chunk_appendf(&trash, ")"); |
1643 | | |
1644 | | while (trash.data < ctx->width) |
1645 | | trash.area[trash.data++] = ' '; |
1646 | | |
1647 | | chunk_appendf(&trash, "%7s size: %12lu calls: %9lu size/call: %6lu %s\n", |
1648 | | type, |
1649 | | (unsigned long)ptr->size, (unsigned long)ptr->calls, |
1650 | | (unsigned long)(ptr->calls ? (ptr->size / ptr->calls) : 0), |
1651 | | info ? info : ""); |
1652 | | |
1653 | | if (applet_putchk(appctx, &trash) == -1) { |
1654 | | ctx->start = ptr; |
1655 | | ret = 0; |
1656 | | goto end; |
1657 | | } |
1658 | | if (direction > 0) { |
1659 | | ctx->tot_size += (ulong)ptr->size; |
1660 | | ctx->tot_calls += (ulong)ptr->calls; |
1661 | | } |
1662 | | else if (direction < 0) { |
1663 | | ctx->tot_size -= (ulong)ptr->size; |
1664 | | ctx->tot_calls += (ulong)ptr->calls; |
1665 | | } |
1666 | | } |
1667 | | |
1668 | | /* now dump a summary */ |
1669 | | chunk_reset(&trash); |
1670 | | chunk_appendf(&trash, "Total"); |
1671 | | while (trash.data < ctx->width) |
1672 | | trash.area[trash.data++] = ' '; |
1673 | | |
1674 | | chunk_appendf(&trash, "%7s size: %12ld calls: %9lu size/call: %6ld %s\n", |
1675 | | "BALANCE", |
1676 | | ctx->tot_size, ctx->tot_calls, |
1677 | | (long)(ctx->tot_calls ? (ctx->tot_size / ctx->tot_calls) : 0), |
1678 | | "(excl. realloc)"); |
1679 | | |
1680 | | if (applet_putchk(appctx, &trash) == -1) { |
1681 | | ctx->start = ptr; |
1682 | | ret = 0; |
1683 | | goto end; |
1684 | | } |
1685 | | end: |
1686 | | return ret; |
1687 | | } |
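Two details of this handler deserve a note. The width pre-pass calls snprintf() with a zero size purely to measure how long each formatted "file:line" prefix would be; C99 guarantees the return value is the full would-be length even when nothing is written, and with a zero size it makes no difference whether the destination is &tmp or NULL. The dump is also restartable: when applet_putchk() reports a full output buffer, the current entry is stored back into ctx->start and 0 is returned so the handler resumes from that point on the next call. A standalone illustration of the width-measuring idiom (file names and numbers are made up):

```c
/* C99 snprintf() with a zero size returns the length the output would
 * have had without writing anything, which is handy for sizing a column
 * before printing, as the width pre-pass above does.
 */
#include <stdio.h>

int main(void)
{
	const char *files[] = { "debug.c", "cli.c", "applet.c" };
	int lines[]         = { 1502, 87, 3310 };
	int i, w, width = 0;
	char col[64];

	/* first pass: measure "file:line" for every entry, print nothing */
	for (i = 0; i < 3; i++) {
		w = snprintf(NULL, 0, "%s:%d", files[i], lines[i]);
		if (w > width)
			width = w;
	}

	/* second pass: left-align the first column to the measured width */
	for (i = 0; i < 3; i++) {
		snprintf(col, sizeof(col), "%s:%d", files[i], lines[i]);
		printf("%-*s size: %6d\n", width, col, 100 * (i + 1));
	}
	return 0;
}
```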
1688 | | |
1689 | | /* release the "show pools" context */ |
1690 | | static void debug_release_memstats(struct appctx *appctx) |
1691 | | { |
1692 | | struct dev_mem_ctx *ctx = appctx->svcctx; |
1693 | | |
1694 | | ha_free(&ctx->match); |
1695 | | } |
1696 | | #endif |
1697 | | |
1698 | | #ifdef USE_THREAD_DUMP |
1699 | | |
1700 | | /* handles DEBUGSIG to dump the state of the thread it's working on. This is |
1701 | | * appended at the end of thread_dump_buffer which must be protected against |
1702 | | * reentrance from different threads (a thread-local buffer works fine). |
1703 | | */ |
1704 | | void debug_handler(int sig, siginfo_t *si, void *arg) |
1705 | | { |
1706 | | struct buffer *buf = HA_ATOMIC_LOAD(&th_ctx->thread_dump_buffer); |
1707 | | int harmless = is_thread_harmless(); |
1708 | | |
1709 | | /* first, let's check it's really for us and that we didn't just get |
1710 | | * a spurious DEBUGSIG. |
1711 | | */ |
1712 | | if (!buf || buf == (void*)(0x1UL)) |
1713 | | return; |
1714 | | |
1715 | | /* now dump the current state into the designated buffer, and indicate |
1716 | | * we come from a sig handler. |
1717 | | */ |
1718 | | ha_thread_dump_one(tid, 1); |
1719 | | |
1720 | | /* mark the current thread as stuck to detect it upon next invocation |
1721 | | * if it didn't move. |
1722 | | */ |
1723 | | if (!harmless && |
1724 | | !(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_SLEEPING)) |
1725 | | _HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STUCK); |
1726 | | } |
1727 | | |
1728 | | static int init_debug_per_thread() |
1729 | | { |
1730 | | sigset_t set; |
1731 | | |
1732 | | /* unblock the DEBUGSIG signal we intend to use */ |
1733 | | sigemptyset(&set); |
1734 | | sigaddset(&set, DEBUGSIG); |
1735 | | #if defined(DEBUG_DEV) |
1736 | | sigaddset(&set, SIGRTMAX); |
1737 | | #endif |
1738 | | ha_sigmask(SIG_UNBLOCK, &set, NULL); |
1739 | | return 1; |
1740 | | } |
1741 | | |
1742 | | static int init_debug() |
1743 | | { |
1744 | | struct sigaction sa; |
1745 | | void *callers[1]; |
1746 | | |
1747 | | /* calling backtrace() will access libgcc at runtime. We don't want to |
1748 | | * do it after the chroot, so let's perform a first call to have it |
1749 | | * ready in memory for later use. |
1750 | | */ |
1751 | | my_backtrace(callers, sizeof(callers)/sizeof(*callers)); |
1752 | | sa.sa_handler = NULL; |
1753 | | sa.sa_sigaction = debug_handler; |
1754 | | sigemptyset(&sa.sa_mask); |
1755 | | sa.sa_flags = SA_SIGINFO; |
1756 | | sigaction(DEBUGSIG, &sa, NULL); |
1757 | | |
1758 | | #if defined(DEBUG_DEV) |
1759 | | sa.sa_handler = NULL; |
1760 | | sa.sa_sigaction = debug_delay_inj_sighandler; |
1761 | | sigemptyset(&sa.sa_mask); |
1762 | | sa.sa_flags = SA_SIGINFO; |
1763 | | sigaction(SIGRTMAX, &sa, NULL); |
1764 | | #endif |
1765 | | return ERR_NONE; |
1766 | | } |
1767 | | |
1768 | | REGISTER_POST_CHECK(init_debug); |
1769 | | REGISTER_PER_THREAD_INIT(init_debug_per_thread); |
1770 | | |
1771 | | #endif /* USE_THREAD_DUMP */ |
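Taken together, the pieces above form the thread-dump plumbing: init_debug(), run as a post-check, registers debug_handler() as a SA_SIGINFO handler for DEBUGSIG and calls my_backtrace() once so the unwinder is resident before any chroot; init_debug_per_thread() unblocks the signal in every thread; and the signalled thread then appends its own state to the designated buffer, flagging itself TH_FL_STUCK when it was neither harmless nor sleeping so it can be spotted on the next pass. A stripped-down standalone sketch of that signal plumbing, using SIGUSR1 and illustrative names instead of DEBUGSIG and HAProxy's helpers:

```c
/* Standalone sketch of per-thread dump signalling: install one SA_SIGINFO
 * handler, unblock the signal in every thread, then pthread_kill() a target
 * so it reports from its own context. SIGUSR1 and all names are illustrative.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DUMPSIG SIGUSR1

static void dump_handler(int sig, siginfo_t *si, void *arg)
{
	/* runs on the signalled thread's stack: only async-signal-safe
	 * calls are allowed here, hence write() rather than printf().
	 */
	static const char msg[] = "thread dumping its own state\n";

	(void)sig; (void)si; (void)arg;
	write(2, msg, sizeof(msg) - 1);
}

static void *worker(void *unused)
{
	sigset_t set;

	/* per-thread init: make DUMPSIG deliverable to this thread */
	sigemptyset(&set);
	sigaddset(&set, DUMPSIG);
	pthread_sigmask(SIG_UNBLOCK, &set, NULL);

	(void)unused;
	pause();                        /* wait until asked to dump */
	return NULL;
}

int main(void)
{
	struct sigaction sa;
	pthread_t th;

	/* process-wide init: register the SA_SIGINFO handler once */
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = dump_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(DUMPSIG, &sa, NULL);

	pthread_create(&th, NULL, worker, NULL);
	sleep(1);                       /* crude: let the worker reach pause() */
	pthread_kill(th, DUMPSIG);      /* ask that specific thread to dump */
	pthread_join(th, NULL);
	return 0;
}
```

Build with -pthread. The handler runs on the target thread's stack and must stick to async-signal-safe calls, which is why the real handler above only appends into a buffer that was designated for it beforehand.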
1772 | | |
1773 | | /* register cli keywords */ |
1774 | | static struct cli_kw_list cli_kws = {{ },{ |
1775 | | {{ "debug", "dev", "bug", NULL }, "debug dev bug : call BUG_ON() and crash", debug_parse_cli_bug, NULL, NULL, NULL, ACCESS_EXPERT }, |
1776 | | {{ "debug", "dev", "check", NULL }, "debug dev check : call CHECK_IF() and possibly crash", debug_parse_cli_check, NULL, NULL, NULL, ACCESS_EXPERT }, |
1777 | | {{ "debug", "dev", "close", NULL }, "debug dev close <fd> : close this file descriptor", debug_parse_cli_close, NULL, NULL, NULL, ACCESS_EXPERT }, |
1778 | | {{ "debug", "dev", "deadlock", NULL }, "debug dev deadlock [nbtask] : deadlock between this number of tasks", debug_parse_cli_deadlock, NULL, NULL, NULL, ACCESS_EXPERT }, |
1779 | | {{ "debug", "dev", "delay", NULL }, "debug dev delay [ms] : sleep this long", debug_parse_cli_delay, NULL, NULL, NULL, ACCESS_EXPERT }, |
1780 | | #if defined(DEBUG_DEV) |
1781 | | {{ "debug", "dev", "delay-inj", NULL },"debug dev delay-inj <inter> <count> : inject random delays into threads", debug_parse_delay_inj, NULL, NULL, NULL, ACCESS_EXPERT }, |
1782 | | {{ "debug", "dev", "exec", NULL }, "debug dev exec [cmd] ... : show this command's output", debug_parse_cli_exec, NULL, NULL, NULL, ACCESS_EXPERT }, |
1783 | | #endif |
1784 | | {{ "debug", "dev", "fd", NULL }, "debug dev fd : scan for rogue/unhandled FDs", debug_parse_cli_fd, debug_iohandler_fd, NULL, NULL, ACCESS_EXPERT }, |
1785 | | {{ "debug", "dev", "exit", NULL }, "debug dev exit [code] : immediately exit the process", debug_parse_cli_exit, NULL, NULL, NULL, ACCESS_EXPERT }, |
1786 | | {{ "debug", "dev", "hash", NULL }, "debug dev hash [msg] : return msg hashed if anon is set", debug_parse_cli_hash, NULL, NULL, NULL, 0 }, |
1787 | | {{ "debug", "dev", "hex", NULL }, "debug dev hex <addr> [len] : dump a memory area", debug_parse_cli_hex, NULL, NULL, NULL, ACCESS_EXPERT }, |
1788 | | {{ "debug", "dev", "log", NULL }, "debug dev log [msg] ... : send this msg to global logs", debug_parse_cli_log, NULL, NULL, NULL, ACCESS_EXPERT }, |
1789 | | {{ "debug", "dev", "loop", NULL }, "debug dev loop <ms> [isolated] : loop this long, possibly isolated", debug_parse_cli_loop, NULL, NULL, NULL, ACCESS_EXPERT }, |
1790 | | #if defined(DEBUG_MEM_STATS) |
1791 | | {{ "debug", "dev", "memstats", NULL }, "debug dev memstats [reset|all|match ...]: dump/reset memory statistics", debug_parse_cli_memstats, debug_iohandler_memstats, debug_release_memstats, NULL, 0 }, |
1792 | | #endif |
1793 | | {{ "debug", "dev", "panic", NULL }, "debug dev panic : immediately trigger a panic", debug_parse_cli_panic, NULL, NULL, NULL, ACCESS_EXPERT }, |
1794 | | {{ "debug", "dev", "sched", NULL }, "debug dev sched {task|tasklet} [k=v]* : stress the scheduler", debug_parse_cli_sched, NULL, NULL, NULL, ACCESS_EXPERT }, |
1795 | | {{ "debug", "dev", "stream",NULL }, "debug dev stream [k=v]* : show/manipulate stream flags", debug_parse_cli_stream,NULL, NULL, NULL, ACCESS_EXPERT }, |
1796 | | {{ "debug", "dev", "sym", NULL }, "debug dev sym <addr> : resolve symbol address", debug_parse_cli_sym, NULL, NULL, NULL, ACCESS_EXPERT }, |
1797 | | {{ "debug", "dev", "task", NULL }, "debug dev task <ptr> [wake|expire|kill] : show/wake/expire/kill task/tasklet", debug_parse_cli_task, NULL, NULL, NULL, ACCESS_EXPERT }, |
1798 | | {{ "debug", "dev", "tkill", NULL }, "debug dev tkill [thr] [sig] : send signal to thread", debug_parse_cli_tkill, NULL, NULL, NULL, ACCESS_EXPERT }, |
1799 | | {{ "debug", "dev", "warn", NULL }, "debug dev warn : call WARN_ON() and possibly crash", debug_parse_cli_warn, NULL, NULL, NULL, ACCESS_EXPERT }, |
1800 | | {{ "debug", "dev", "write", NULL }, "debug dev write [size] : write that many bytes in return", debug_parse_cli_write, NULL, NULL, NULL, ACCESS_EXPERT }, |
1801 | | |
1802 | | #if defined(HA_HAVE_DUMP_LIBS) |
1803 | | {{ "show", "libs", NULL, NULL }, "show libs : show loaded object files and libraries", debug_parse_cli_show_libs, NULL, NULL }, |
1804 | | #endif |
1805 | | {{ "show", "threads", NULL, NULL }, "show threads : show some threads debugging information", NULL, cli_io_handler_show_threads, NULL }, |
1806 | | {{},} |
1807 | | }}; |
1808 | | |
1809 | | INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws); |