Coverage Report

Created: 2023-03-26 06:06

/src/haproxy/src/debug.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Process debugging functions.
3
 *
4
 * Copyright 2000-2019 Willy Tarreau <willy@haproxy.org>.
5
 *
6
 * This program is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU General Public License
8
 * as published by the Free Software Foundation; either version
9
 * 2 of the License, or (at your option) any later version.
10
 *
11
 */
12
13
14
#include <errno.h>
15
#include <fcntl.h>
16
#include <signal.h>
17
#include <time.h>
18
#include <stdio.h>
19
#include <stdlib.h>
20
#include <syslog.h>
21
#include <sys/stat.h>
22
#include <sys/types.h>
23
#include <sys/wait.h>
24
#include <unistd.h>
25
#ifdef USE_EPOLL
26
#include <sys/epoll.h>
27
#endif
28
29
#include <haproxy/api.h>
30
#include <haproxy/applet.h>
31
#include <haproxy/buf.h>
32
#include <haproxy/cli.h>
33
#include <haproxy/clock.h>
34
#include <haproxy/debug.h>
35
#include <haproxy/fd.h>
36
#include <haproxy/global.h>
37
#include <haproxy/hlua.h>
38
#include <haproxy/http_ana.h>
39
#include <haproxy/log.h>
40
#include <haproxy/net_helper.h>
41
#include <haproxy/sc_strm.h>
42
#include <haproxy/stconn.h>
43
#include <haproxy/task.h>
44
#include <haproxy/thread.h>
45
#include <haproxy/time.h>
46
#include <haproxy/tools.h>
47
#include <import/ist.h>
48
49
50
/* The dump state is made of:
51
 *   - num_thread on the lowest 15 bits
52
 *   - a SYNC flag on bit 15 (waiting for sync start)
53
 *   - number of participating threads on bits 16-30
54
 * Initiating a dump consists in setting it to SYNC and incrementing the
55
 * num_thread part when entering the function. The first thread periodically
56
 * recounts active threads and compares it to the ready ones, and clears SYNC
57
 * and sets the number of participants to the value found, which serves as a
58
 * start signal. A thread finished dumping looks up the TID of the next active
59
 * thread after it and writes it in the lowest part. If there's none, it sets
60
 * the thread counter to the number of participants and resets that part,
61
 * which serves as an end-of-dump signal. All threads decrement the num_thread
62
 * part. Then all threads wait for the value to reach zero. Only used when
63
 * USE_THREAD_DUMP is set.
64
 */
65
#define THREAD_DUMP_TMASK     0x00007FFFU
66
#define THREAD_DUMP_FSYNC     0x00008000U
67
#define THREAD_DUMP_PMASK     0x7FFF0000U
68
69
volatile unsigned int thread_dump_state = 0;
70
unsigned int panic_started = 0;
71
unsigned int debug_commands_issued = 0;
72
73
/* dumps a backtrace of the current thread that is appended to buffer <buf>.
74
 * Lines are prefixed with the string <prefix> which may be empty (used for
75
 * indenting). It is recommended to use this at a function's tail so that
76
 * the function does not appear in the call stack. The <dump> argument
77
 * indicates what dump state to start from, and should usually be zero. It
78
 * may be among the following values:
79
 *   - 0: search usual callers before step 1, or directly jump to 2
80
 *   - 1: skip usual callers before step 2
81
 *   - 2: dump until polling loop, scheduler, or main() (excluded)
82
 *   - 3: end
83
 *   - 4-7: like 0 but stops *after* main.
84
 */
85
void ha_dump_backtrace(struct buffer *buf, const char *prefix, int dump)
86
0
{
87
0
  struct buffer bak;
88
0
  char pfx2[100];
89
0
  void *callers[100];
90
0
  int j, nptrs;
91
0
  const void *addr;
92
93
0
  nptrs = my_backtrace(callers, sizeof(callers)/sizeof(*callers));
94
0
  if (!nptrs)
95
0
    return;
96
97
0
  if (snprintf(pfx2, sizeof(pfx2), "%s| ", prefix) > sizeof(pfx2))
98
0
    pfx2[0] = 0;
99
100
  /* The call backtrace_symbols_fd(callers, nptrs, STDOUT_FILENO would
101
   * produce similar output to the following:
102
   */
103
0
  chunk_appendf(buf, "%scall trace(%d):\n", prefix, nptrs);
104
0
  for (j = 0; (j < nptrs || (dump & 3) < 2); j++) {
105
0
    if (j == nptrs && !(dump & 3)) {
106
      /* we failed to spot the starting point of the
107
       * dump, let's start over dumping everything we
108
       * have.
109
       */
110
0
      dump += 2;
111
0
      j = 0;
112
0
    }
113
0
    bak = *buf;
114
0
    dump_addr_and_bytes(buf, pfx2, callers[j], 8);
115
0
    addr = resolve_sym_name(buf, ": ", callers[j]);
116
0
    if ((dump & 3) == 0) {
117
      /* dump not started, will start *after*
118
       * ha_thread_dump_all_to_trash, ha_panic and ha_backtrace_to_stderr
119
       */
120
0
      if (addr == ha_thread_dump_all_to_trash || addr == ha_panic ||
121
0
          addr == ha_backtrace_to_stderr)
122
0
        dump++;
123
0
      *buf = bak;
124
0
      continue;
125
0
    }
126
127
0
    if ((dump & 3) == 1) {
128
      /* starting */
129
0
      if (addr == ha_thread_dump_all_to_trash || addr == ha_panic ||
130
0
          addr == ha_backtrace_to_stderr) {
131
0
        *buf = bak;
132
0
        continue;
133
0
      }
134
0
      dump++;
135
0
    }
136
137
0
    if ((dump & 3) == 2) {
138
      /* still dumping */
139
0
      if (dump == 6) {
140
        /* we only stop *after* main and we must send the LF */
141
0
        if (addr == main) {
142
0
          j = nptrs;
143
0
          dump++;
144
0
        }
145
0
      }
146
0
      else if (addr == run_poll_loop || addr == main || addr == run_tasks_from_lists) {
147
0
        dump++;
148
0
        *buf = bak;
149
0
        break;
150
0
      }
151
0
    }
152
    /* OK, line dumped */
153
0
    chunk_appendf(buf, "\n");
154
0
  }
155
0
}
156
157
/* dump a backtrace of current thread's stack to stderr. */
158
void ha_backtrace_to_stderr(void)
159
0
{
160
0
  char area[2048];
161
0
  struct buffer b = b_make(area, sizeof(area), 0, 0);
162
163
0
  ha_dump_backtrace(&b, "  ", 4);
164
0
  if (b.data)
165
0
    DISGUISE(write(2, b.area, b.data));
166
0
}
167
168
/* Dumps to the buffer some known information for the desired thread, and
169
 * optionally extra info for the current thread. The dump will be appended to
170
 * the buffer, so the caller is responsible for preliminary initializing it.
171
 * The calling thread ID needs to be passed in <calling_tid> to display a star
172
 * in front of the calling thread's line (usually it's tid). Any stuck thread
173
 * is also prefixed with a '>'.
174
 * It must be called under thread isolation.
175
 */
176
void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
177
0
{
178
0
  unsigned long thr_bit = ha_thread_info[thr].ltid_bit;
179
0
  unsigned long long p = ha_thread_ctx[thr].prev_cpu_time;
180
0
  unsigned long long n = now_cpu_time_thread(thr);
181
0
  int stuck = !!(ha_thread_ctx[thr].flags & TH_FL_STUCK);
182
0
  int tgrp  = ha_thread_info[thr].tgid;
183
184
0
  chunk_appendf(buf,
185
0
                "%c%cThread %-2u: id=0x%llx act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
186
0
                "     %2u/%-2u   stuck=%d prof=%d",
187
0
                (thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
188
0
          ha_get_pthread_id(thr),
189
0
          thread_has_tasks(),
190
0
                !eb_is_empty(&ha_thread_ctx[thr].rqueue_shared),
191
0
                !eb_is_empty(&ha_thread_ctx[thr].timers),
192
0
                !eb_is_empty(&ha_thread_ctx[thr].rqueue),
193
0
                !(LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_URGENT]) &&
194
0
      LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_NORMAL]) &&
195
0
      LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_BULK]) &&
196
0
      MT_LIST_ISEMPTY(&ha_thread_ctx[thr].shared_tasklet_list)),
197
0
                ha_thread_ctx[thr].tasks_in_list,
198
0
                ha_thread_ctx[thr].rq_total,
199
0
          ha_thread_info[thr].tgid, ha_thread_info[thr].ltid + 1,
200
0
                stuck,
201
0
                !!(th_ctx->flags & TH_FL_TASK_PROFILING));
202
203
0
  chunk_appendf(buf,
204
0
                " harmless=%d wantrdv=%d",
205
0
                !!(_HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp-1].threads_harmless) & thr_bit),
206
0
                !!(th_ctx->flags & TH_FL_TASK_PROFILING));
207
208
0
  chunk_appendf(buf, "\n");
209
0
  chunk_appendf(buf, "             cpu_ns: poll=%llu now=%llu diff=%llu\n", p, n, n-p);
210
211
  /* this is the end of what we can dump from outside the current thread */
212
213
0
  if (thr != tid)
214
0
    return;
215
216
0
  chunk_appendf(buf, "             curr_task=");
217
0
  ha_task_dump(buf, th_ctx->current, "             ");
218
219
0
  if (stuck) {
220
    /* We only emit the backtrace for stuck threads in order not to
221
     * waste precious output buffer space with non-interesting data.
222
     * Please leave this as the last instruction in this function
223
     * so that the compiler uses tail merging and the current
224
     * function does not appear in the stack.
225
     */
226
0
    ha_dump_backtrace(buf, "             ", 0);
227
0
  }
228
0
}
229
230
231
/* dumps into the buffer some information related to task <task> (which may
232
 * either be a task or a tasklet, and prepend each line except the first one
233
 * with <pfx>. The buffer is only appended and the first output starts by the
234
 * pointer itself. The caller is responsible for making sure the task is not
235
 * going to vanish during the dump.
236
 */
237
void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
238
0
{
239
0
  const struct stream *s = NULL;
240
0
  const struct appctx __maybe_unused *appctx = NULL;
241
0
  struct hlua __maybe_unused *hlua = NULL;
242
0
  const struct stconn *sc;
243
244
0
  if (!task) {
245
0
    chunk_appendf(buf, "0\n");
246
0
    return;
247
0
  }
248
249
0
  if (TASK_IS_TASKLET(task))
250
0
    chunk_appendf(buf,
251
0
                  "%p (tasklet) calls=%u\n",
252
0
                  task,
253
0
                  task->calls);
254
0
  else
255
0
    chunk_appendf(buf,
256
0
                  "%p (task) calls=%u last=%llu%s\n",
257
0
                  task,
258
0
                  task->calls,
259
0
                  task->wake_date ? (unsigned long long)(now_mono_time() - task->wake_date) : 0,
260
0
                  task->wake_date ? " ns ago" : "");
261
262
0
  chunk_appendf(buf, "%s  fct=%p(", pfx, task->process);
263
0
  resolve_sym_name(buf, NULL, task->process);
264
0
  chunk_appendf(buf,") ctx=%p", task->context);
265
266
0
  if (task->process == task_run_applet && (appctx = task->context))
267
0
    chunk_appendf(buf, "(%s)\n", appctx->applet->name);
268
0
  else
269
0
    chunk_appendf(buf, "\n");
270
271
0
  if (task->process == process_stream && task->context)
272
0
    s = (struct stream *)task->context;
273
0
  else if (task->process == task_run_applet && task->context && (sc = appctx_sc((struct appctx *)task->context)))
274
0
    s = sc_strm(sc);
275
0
  else if (task->process == sc_conn_io_cb && task->context)
276
0
    s = sc_strm(((struct stconn *)task->context));
277
278
0
  if (s)
279
0
    stream_dump(buf, s, pfx, '\n');
280
281
#ifdef USE_LUA
282
  hlua = NULL;
283
  if (s && (hlua = s->hlua)) {
284
    chunk_appendf(buf, "%sCurrent executing Lua from a stream analyser -- ", pfx);
285
  }
286
  else if (task->process == hlua_process_task && (hlua = task->context)) {
287
    chunk_appendf(buf, "%sCurrent executing a Lua task -- ", pfx);
288
  }
289
  else if (task->process == task_run_applet && (appctx = task->context) &&
290
     (appctx->applet->fct == hlua_applet_tcp_fct)) {
291
    chunk_appendf(buf, "%sCurrent executing a Lua TCP service -- ", pfx);
292
  }
293
  else if (task->process == task_run_applet && (appctx = task->context) &&
294
     (appctx->applet->fct == hlua_applet_http_fct)) {
295
    chunk_appendf(buf, "%sCurrent executing a Lua HTTP service -- ", pfx);
296
  }
297
298
  if (hlua && hlua->T) {
299
    chunk_appendf(buf, "stack traceback:\n    ");
300
    append_prefixed_str(buf, hlua_traceback(hlua->T, "\n    "), pfx, '\n', 0);
301
    b_putchr(buf, '\n');
302
  }
303
  else
304
    b_putchr(buf, '\n');
305
#endif
306
0
}
307
308
/* This function dumps all profiling settings. It returns 0 if the output
309
 * buffer is full and it needs to be called again, otherwise non-zero.
310
 */
311
static int cli_io_handler_show_threads(struct appctx *appctx)
312
0
{
313
0
  struct stconn *sc = appctx_sc(appctx);
314
0
  int thr;
315
316
0
  if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
317
0
    return 1;
318
319
0
  if (appctx->st0)
320
0
    thr = appctx->st1;
321
0
  else
322
0
    thr = 0;
323
324
0
  chunk_reset(&trash);
325
0
  ha_thread_dump_all_to_trash();
326
327
0
  if (applet_putchk(appctx, &trash) == -1) {
328
    /* failed, try again */
329
0
    appctx->st1 = thr;
330
0
    return 0;
331
0
  }
332
0
  return 1;
333
0
}
334
335
#if defined(HA_HAVE_DUMP_LIBS)
336
/* parse a "show libs" command. It returns 1 if it emits anything otherwise zero. */
337
static int debug_parse_cli_show_libs(char **args, char *payload, struct appctx *appctx, void *private)
338
{
339
  if (!cli_has_level(appctx, ACCESS_LVL_OPER))
340
    return 1;
341
342
  chunk_reset(&trash);
343
  if (dump_libs(&trash, 1))
344
    return cli_msg(appctx, LOG_INFO, trash.area);
345
  else
346
    return 0;
347
}
348
#endif
349
350
/* dumps a state of all threads into the trash and on fd #2, then aborts. */
351
void ha_panic()
352
0
{
353
0
  if (HA_ATOMIC_FETCH_ADD(&panic_started, 1) != 0) {
354
    /* a panic dump is already in progress, let's not disturb it,
355
     * we'll be called via signal DEBUGSIG. By returning we may be
356
     * able to leave a current signal handler (e.g. WDT) so that
357
     * this will ensure more reliable signal delivery.
358
     */
359
0
    return;
360
0
  }
361
0
  chunk_reset(&trash);
362
0
  chunk_appendf(&trash, "Thread %u is about to kill the process.\n", tid + 1);
363
0
  ha_thread_dump_all_to_trash();
364
0
  DISGUISE(write(2, trash.area, trash.data));
365
0
  for (;;)
366
0
    abort();
367
0
}
368
369
/* Complain with message <msg> on stderr. If <counter> is not NULL, it is
370
 * atomically incremented, and the message is only printed when the counter
371
 * was zero, so that the message is only printed once. <taint> is only checked
372
 * on bit 1, and will taint the process either for a bug (2) or warn (0).
373
 */
374
void complain(int *counter, const char *msg, int taint)
375
0
{
376
0
  if (counter && _HA_ATOMIC_FETCH_ADD(counter, 1))
377
0
    return;
378
0
  DISGUISE(write(2, msg, strlen(msg)));
379
0
  if (taint & 2)
380
0
    mark_tainted(TAINTED_BUG);
381
0
  else
382
0
    mark_tainted(TAINTED_WARN);
383
0
}
384
385
/* parse a "debug dev exit" command. It always returns 1, though it should never return. */
386
static int debug_parse_cli_exit(char **args, char *payload, struct appctx *appctx, void *private)
387
0
{
388
0
  int code = atoi(args[3]);
389
390
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
391
0
    return 1;
392
393
0
  _HA_ATOMIC_INC(&debug_commands_issued);
394
0
  exit(code);
395
0
  return 1;
396
0
}
397
398
/* parse a "debug dev bug" command. It always returns 1, though it should never return.
399
 * Note: we make sure not to make the function static so that it appears in the trace.
400
 */
401
int debug_parse_cli_bug(char **args, char *payload, struct appctx *appctx, void *private)
402
0
{
403
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
404
0
    return 1;
405
406
0
  _HA_ATOMIC_INC(&debug_commands_issued);
407
0
  BUG_ON(one > zero);
408
0
  return 1;
409
0
}
410
411
/* parse a "debug dev warn" command. It always returns 1.
412
 * Note: we make sure not to make the function static so that it appears in the trace.
413
 */
414
int debug_parse_cli_warn(char **args, char *payload, struct appctx *appctx, void *private)
415
0
{
416
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
417
0
    return 1;
418
419
0
  _HA_ATOMIC_INC(&debug_commands_issued);
420
0
  WARN_ON(one > zero);
421
0
  return 1;
422
0
}
423
424
/* parse a "debug dev check" command. It always returns 1.
425
 * Note: we make sure not to make the function static so that it appears in the trace.
426
 */
427
int debug_parse_cli_check(char **args, char *payload, struct appctx *appctx, void *private)
428
0
{
429
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
430
0
    return 1;
431
432
0
  _HA_ATOMIC_INC(&debug_commands_issued);
433
0
  CHECK_IF(one > zero);
434
0
  return 1;
435
0
}
436
437
/* parse a "debug dev close" command. It always returns 1. */
438
static int debug_parse_cli_close(char **args, char *payload, struct appctx *appctx, void *private)
439
0
{
440
0
  int fd;
441
442
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
443
0
    return 1;
444
445
0
  if (!*args[3])
446
0
    return cli_err(appctx, "Missing file descriptor number.\n");
447
448
0
  fd = atoi(args[3]);
449
0
  if (fd < 0 || fd >= global.maxsock)
450
0
    return cli_err(appctx, "File descriptor out of range.\n");
451
452
0
  if (!fdtab[fd].owner)
453
0
    return cli_msg(appctx, LOG_INFO, "File descriptor was already closed.\n");
454
455
0
  _HA_ATOMIC_INC(&debug_commands_issued);
456
0
  fd_delete(fd);
457
0
  return 1;
458
0
}
459
460
/* this is meant to cause a deadlock when more than one task is running it or when run twice */
461
static struct task *debug_run_cli_deadlock(struct task *task, void *ctx, unsigned int state)
462
0
{
463
0
  static HA_SPINLOCK_T lock __maybe_unused;
464
465
0
  HA_SPIN_LOCK(OTHER_LOCK, &lock);
466
0
  return NULL;
467
0
}
468
469
/* parse a "debug dev deadlock" command. It always returns 1. */
470
static int debug_parse_cli_deadlock(char **args, char *payload, struct appctx *appctx, void *private)
471
0
{
472
0
  int tasks;
473
474
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
475
0
    return 1;
476
477
0
  _HA_ATOMIC_INC(&debug_commands_issued);
478
0
  for (tasks = atoi(args[3]); tasks > 0; tasks--) {
479
0
    struct task *t = task_new_on(tasks % global.nbthread);
480
0
    if (!t)
481
0
      continue;
482
0
    t->process = debug_run_cli_deadlock;
483
0
    t->context = NULL;
484
0
    task_wakeup(t, TASK_WOKEN_INIT);
485
0
  }
486
487
0
  return 1;
488
0
}
489
490
/* parse a "debug dev delay" command. It always returns 1. */
491
static int debug_parse_cli_delay(char **args, char *payload, struct appctx *appctx, void *private)
492
0
{
493
0
  int delay = atoi(args[3]);
494
495
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
496
0
    return 1;
497
498
0
  _HA_ATOMIC_INC(&debug_commands_issued);
499
0
  usleep((long)delay * 1000);
500
0
  return 1;
501
0
}
502
503
/* parse a "debug dev log" command. It always returns 1. */
504
static int debug_parse_cli_log(char **args, char *payload, struct appctx *appctx, void *private)
505
0
{
506
0
  int arg;
507
508
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
509
0
    return 1;
510
511
0
  _HA_ATOMIC_INC(&debug_commands_issued);
512
0
  chunk_reset(&trash);
513
0
  for (arg = 3; *args[arg]; arg++) {
514
0
    if (arg > 3)
515
0
      chunk_strcat(&trash, " ");
516
0
    chunk_strcat(&trash, args[arg]);
517
0
  }
518
519
0
  send_log(NULL, LOG_INFO, "%s\n", trash.area);
520
0
  return 1;
521
0
}
522
523
/* parse a "debug dev loop" command. It always returns 1. */
524
static int debug_parse_cli_loop(char **args, char *payload, struct appctx *appctx, void *private)
525
0
{
526
0
  struct timeval deadline, curr;
527
0
  int loop = atoi(args[3]);
528
529
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
530
0
    return 1;
531
532
0
  _HA_ATOMIC_INC(&debug_commands_issued);
533
0
  gettimeofday(&curr, NULL);
534
0
  tv_ms_add(&deadline, &curr, loop);
535
536
0
  while (tv_ms_cmp(&curr, &deadline) < 0)
537
0
    gettimeofday(&curr, NULL);
538
539
0
  return 1;
540
0
}
541
542
/* parse a "debug dev panic" command. It always returns 1, though it should never return. */
543
static int debug_parse_cli_panic(char **args, char *payload, struct appctx *appctx, void *private)
544
0
{
545
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
546
0
    return 1;
547
548
0
  _HA_ATOMIC_INC(&debug_commands_issued);
549
0
  ha_panic();
550
0
  return 1;
551
0
}
552
553
/* parse a "debug dev exec" command. It always returns 1. */
554
#if defined(DEBUG_DEV)
555
static int debug_parse_cli_exec(char **args, char *payload, struct appctx *appctx, void *private)
556
{
557
  int pipefd[2];
558
  int arg;
559
  int pid;
560
561
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
562
    return 1;
563
564
  _HA_ATOMIC_INC(&debug_commands_issued);
565
  chunk_reset(&trash);
566
  for (arg = 3; *args[arg]; arg++) {
567
    if (arg > 3)
568
      chunk_strcat(&trash, " ");
569
    chunk_strcat(&trash, args[arg]);
570
  }
571
572
  thread_isolate();
573
  if (pipe(pipefd) < 0)
574
    goto fail_pipe;
575
576
  if (fd_set_cloexec(pipefd[0]) == -1)
577
    goto fail_fcntl;
578
579
  if (fd_set_cloexec(pipefd[1]) == -1)
580
    goto fail_fcntl;
581
582
  pid = fork();
583
584
  if (pid < 0)
585
    goto fail_fork;
586
  else if (pid == 0) {
587
    /* child */
588
    char *cmd[4] = { "/bin/sh", "-c", 0, 0 };
589
590
    close(0);
591
    dup2(pipefd[1], 1);
592
    dup2(pipefd[1], 2);
593
594
    cmd[2] = trash.area;
595
    execvp(cmd[0], cmd);
596
    printf("execvp() failed\n");
597
    exit(1);
598
  }
599
600
  /* parent */
601
  thread_release();
602
  close(pipefd[1]);
603
  chunk_reset(&trash);
604
  while (1) {
605
    size_t ret = read(pipefd[0], trash.area + trash.data, trash.size - 20 - trash.data);
606
    if (ret <= 0)
607
      break;
608
    trash.data += ret;
609
    if (trash.data + 20 == trash.size) {
610
      chunk_strcat(&trash, "\n[[[TRUNCATED]]]\n");
611
      break;
612
    }
613
  }
614
  close(pipefd[0]);
615
  waitpid(pid, NULL, WNOHANG);
616
  trash.area[trash.data] = 0;
617
  return cli_msg(appctx, LOG_INFO, trash.area);
618
619
 fail_fork:
620
 fail_fcntl:
621
  close(pipefd[0]);
622
  close(pipefd[1]);
623
 fail_pipe:
624
  thread_release();
625
  return cli_err(appctx, "Failed to execute command.\n");
626
}
627
628
/* handles SIGRTMAX to inject random delays on the receiving thread in order
629
 * to try to increase the likelihood to reproduce inter-thread races. The
630
 * signal is periodically sent by a task initiated by "debug dev delay-inj".
631
 */
632
/* handles SIGRTMAX to inject random delays on the receiving thread in order
 * to try to increase the likelihood to reproduce inter-thread races. The
 * signal is periodically sent by a task initiated by "debug dev delay-inj".
 */
void debug_delay_inj_sighandler(int sig, siginfo_t *si, void *arg)
{
	volatile int spins = statistical_prng_range(10000);

	/* spin for a random short duration */
	while (spins--)
		__ha_cpu_relax();
}
639
#endif
640
641
/* parse a "debug dev hex" command. It always returns 1. */
642
static int debug_parse_cli_hex(char **args, char *payload, struct appctx *appctx, void *private)
643
0
{
644
0
  unsigned long start, len;
645
646
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
647
0
    return 1;
648
649
0
  if (!*args[3])
650
0
    return cli_err(appctx, "Missing memory address to dump from.\n");
651
652
0
  start = strtoul(args[3], NULL, 0);
653
0
  if (!start)
654
0
    return cli_err(appctx, "Will not dump from NULL address.\n");
655
656
0
  _HA_ATOMIC_INC(&debug_commands_issued);
657
658
  /* by default, dump ~128 till next block of 16 */
659
0
  len = strtoul(args[4], NULL, 0);
660
0
  if (!len)
661
0
    len = ((start + 128) & -16) - start;
662
663
0
  chunk_reset(&trash);
664
0
  dump_hex(&trash, "  ", (const void *)start, len, 1);
665
0
  trash.area[trash.data] = 0;
666
0
  return cli_msg(appctx, LOG_INFO, trash.area);
667
0
}
668
669
/* parse a "debug dev sym <addr>" command. It always returns 1. */
670
static int debug_parse_cli_sym(char **args, char *payload, struct appctx *appctx, void *private)
671
0
{
672
0
  unsigned long addr;
673
674
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
675
0
    return 1;
676
677
0
  if (!*args[3])
678
0
    return cli_err(appctx, "Missing memory address to be resolved.\n");
679
680
0
  _HA_ATOMIC_INC(&debug_commands_issued);
681
682
0
  addr = strtoul(args[3], NULL, 0);
683
0
  chunk_printf(&trash, "%#lx resolves to ", addr);
684
0
  resolve_sym_name(&trash, NULL, (const void *)addr);
685
0
  chunk_appendf(&trash, "\n");
686
687
0
  return cli_msg(appctx, LOG_INFO, trash.area);
688
0
}
689
690
/* parse a "debug dev tkill" command. It always returns 1. */
691
static int debug_parse_cli_tkill(char **args, char *payload, struct appctx *appctx, void *private)
692
0
{
693
0
  int thr = 0;
694
0
  int sig = SIGABRT;
695
696
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
697
0
    return 1;
698
699
0
  if (*args[3])
700
0
    thr = atoi(args[3]);
701
702
0
  if (thr < 0 || thr > global.nbthread)
703
0
    return cli_err(appctx, "Thread number out of range (use 0 for current).\n");
704
705
0
  if (*args[4])
706
0
    sig = atoi(args[4]);
707
708
0
  _HA_ATOMIC_INC(&debug_commands_issued);
709
0
  if (thr)
710
0
    ha_tkill(thr - 1, sig);
711
0
  else
712
0
    raise(sig);
713
0
  return 1;
714
0
}
715
716
/* hashes 'word' in "debug dev hash 'word' ". */
717
static int debug_parse_cli_hash(char **args, char *payload, struct appctx *appctx, void *private)
718
0
{
719
0
  char *msg = NULL;
720
721
0
  cli_dynmsg(appctx, LOG_INFO, memprintf(&msg, "%s\n", HA_ANON_CLI(args[3])));
722
0
  return 1;
723
0
}
724
725
/* parse a "debug dev write" command. It always returns 1. */
726
static int debug_parse_cli_write(char **args, char *payload, struct appctx *appctx, void *private)
727
0
{
728
0
  unsigned long len;
729
730
0
  if (!*args[3])
731
0
    return cli_err(appctx, "Missing output size.\n");
732
733
0
  len = strtoul(args[3], NULL, 0);
734
0
  if (len >= trash.size)
735
0
    return cli_err(appctx, "Output too large, must be <tune.bufsize.\n");
736
737
0
  _HA_ATOMIC_INC(&debug_commands_issued);
738
739
0
  chunk_reset(&trash);
740
0
  trash.data = len;
741
0
  memset(trash.area, '.', trash.data);
742
0
  trash.area[trash.data] = 0;
743
0
  for (len = 64; len < trash.data; len += 64)
744
0
    trash.area[len] = '\n';
745
0
  return cli_msg(appctx, LOG_INFO, trash.area);
746
0
}
747
748
/* parse a "debug dev stream" command */
749
/*
750
 *  debug dev stream [strm=<ptr>] [strm.f[{+-=}<flags>]] [txn.f[{+-=}<flags>]] \
751
 *                   [req.f[{+-=}<flags>]] [res.f[{+-=}<flags>]]               \
752
 *                   [sif.f[{+-=<flags>]] [sib.f[{+-=<flags>]]                 \
753
 *                   [sif.s[=<state>]] [sib.s[=<state>]]
754
 */
755
static int debug_parse_cli_stream(char **args, char *payload, struct appctx *appctx, void *private)
756
0
{
757
0
  struct stream *s = appctx_strm(appctx);
758
0
  int arg;
759
0
  void *ptr;
760
0
  int size;
761
0
  const char *word, *end;
762
0
  struct ist name;
763
0
  char *msg = NULL;
764
0
  char *endarg;
765
0
  unsigned long long old, new;
766
767
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
768
0
    return 1;
769
770
0
  ptr = NULL; size = 0;
771
772
0
  if (!*args[3]) {
773
0
    return cli_err(appctx,
774
0
             "Usage: debug dev stream { <obj> <op> <value> | wake }*\n"
775
0
             "     <obj>   = {strm | strm.f | strm.x |\n"
776
0
             "                scf.s | scb.s |\n"
777
0
             "                txn.f | req.f | res.f}\n"
778
0
             "     <op>    = {'' (show) | '=' (assign) | '^' (xor) | '+' (or) | '-' (andnot)}\n"
779
0
             "     <value> = 'now' | 64-bit dec/hex integer (0x prefix supported)\n"
780
0
             "     'wake' wakes the stream asssigned to 'strm' (default: current)\n"
781
0
             );
782
0
  }
783
784
0
  _HA_ATOMIC_INC(&debug_commands_issued);
785
0
  for (arg = 3; *args[arg]; arg++) {
786
0
    old = 0;
787
0
    end = word = args[arg];
788
0
    while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-')
789
0
      end++;
790
0
    name = ist2(word, end - word);
791
0
    if (isteq(name, ist("strm"))) {
792
0
      ptr = (!s || !may_access(s)) ? NULL : &s; size = sizeof(s);
793
0
    } else if (isteq(name, ist("strm.f"))) {
794
0
      ptr = (!s || !may_access(s)) ? NULL : &s->flags; size = sizeof(s->flags);
795
0
    } else if (isteq(name, ist("strm.x"))) {
796
0
      ptr = (!s || !may_access(s)) ? NULL : &s->conn_exp; size = sizeof(s->conn_exp);
797
0
    } else if (isteq(name, ist("txn.f"))) {
798
0
      ptr = (!s || !may_access(s)) ? NULL : &s->txn->flags; size = sizeof(s->txn->flags);
799
0
    } else if (isteq(name, ist("req.f"))) {
800
0
      ptr = (!s || !may_access(s)) ? NULL : &s->req.flags; size = sizeof(s->req.flags);
801
0
    } else if (isteq(name, ist("res.f"))) {
802
0
      ptr = (!s || !may_access(s)) ? NULL : &s->res.flags; size = sizeof(s->res.flags);
803
0
    } else if (isteq(name, ist("scf.s"))) {
804
0
      ptr = (!s || !may_access(s)) ? NULL : &s->scf->state; size = sizeof(s->scf->state);
805
0
    } else if (isteq(name, ist("scb.s"))) {
806
0
      ptr = (!s || !may_access(s)) ? NULL : &s->scf->state; size = sizeof(s->scb->state);
807
0
    } else if (isteq(name, ist("wake"))) {
808
0
      if (s && may_access(s) && may_access((void *)s + sizeof(*s) - 1))
809
0
        task_wakeup(s->task, TASK_WOKEN_TIMER|TASK_WOKEN_IO|TASK_WOKEN_MSG);
810
0
      continue;
811
0
    } else
812
0
      return cli_dynerr(appctx, memprintf(&msg, "Unsupported field name: '%s'.\n", word));
813
814
    /* read previous value */
815
0
    if ((s || ptr == &s) && ptr && may_access(ptr) && may_access(ptr + size - 1)) {
816
0
      if (size == 8)
817
0
        old = read_u64(ptr);
818
0
      else if (size == 4)
819
0
        old = read_u32(ptr);
820
0
      else if (size == 2)
821
0
        old = read_u16(ptr);
822
0
      else
823
0
        old = *(const uint8_t *)ptr;
824
0
    } else {
825
0
      memprintf(&msg,
826
0
          "%sSkipping inaccessible pointer %p for field '%.*s'.\n",
827
0
          msg ? msg : "", ptr, (int)(end - word), word);
828
0
      continue;
829
0
    }
830
831
    /* parse the new value . */
832
0
    new = strtoll(end + 1, &endarg, 0);
833
0
    if (end[1] && *endarg) {
834
0
      if (strcmp(end + 1, "now") == 0)
835
0
        new = now_ms;
836
0
      else {
837
0
        memprintf(&msg,
838
0
            "%sIgnoring unparsable value '%s' for field '%.*s'.\n",
839
0
            msg ? msg : "", end + 1, (int)(end - word), word);
840
0
        continue;
841
0
      }
842
0
    }
843
844
0
    switch (*end) {
845
0
    case '\0': /* show */
846
0
      memprintf(&msg, "%s%.*s=%#llx ", msg ? msg : "", (int)(end - word), word, old);
847
0
      new = old; // do not change the value
848
0
      break;
849
850
0
    case '=': /* set */
851
0
      break;
852
853
0
    case '^': /* XOR */
854
0
      new = old ^ new;
855
0
      break;
856
857
0
    case '+': /* OR */
858
0
      new = old | new;
859
0
      break;
860
861
0
    case '-': /* AND NOT */
862
0
      new = old & ~new;
863
0
      break;
864
865
0
    default:
866
0
      break;
867
0
    }
868
869
    /* write the new value */
870
0
    if (new != old) {
871
0
      if (size == 8)
872
0
        write_u64(ptr, new);
873
0
      else if (size == 4)
874
0
        write_u32(ptr, new);
875
0
      else if (size == 2)
876
0
        write_u16(ptr, new);
877
0
      else
878
0
        *(uint8_t *)ptr = new;
879
0
    }
880
0
  }
881
882
0
  if (msg && *msg)
883
0
    return cli_dynmsg(appctx, LOG_INFO, msg);
884
0
  return 1;
885
0
}
886
887
#if defined(DEBUG_DEV)
888
static struct task *debug_delay_inj_task(struct task *t, void *ctx, unsigned int state)
889
{
890
  unsigned long *tctx = ctx; // [0] = interval, [1] = nbwakeups
891
  unsigned long inter = tctx[0];
892
  unsigned long count = tctx[1];
893
  unsigned long rnd;
894
895
  if (inter)
896
    t->expire = tick_add(now_ms, inter);
897
  else
898
    task_wakeup(t, TASK_WOKEN_MSG);
899
900
  /* wake a random thread */
901
  while (count--) {
902
    rnd = statistical_prng_range(global.nbthread);
903
    ha_tkill(rnd, SIGRTMAX);
904
  }
905
  return t;
906
}
907
908
/* parse a "debug dev delay-inj" command
909
 * debug dev delay-inj <inter> <count>
910
 */
911
static int debug_parse_delay_inj(char **args, char *payload, struct appctx *appctx, void *private)
912
{
913
  unsigned long *tctx; // [0] = inter, [2] = count
914
  struct task *task;
915
916
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
917
    return 1;
918
919
  if (!*args[4])
920
    return cli_err(appctx,  "Usage: debug dev delay-inj <inter_ms> <count>*\n");
921
922
  _HA_ATOMIC_INC(&debug_commands_issued);
923
924
  tctx = calloc(sizeof(*tctx), 2);
925
  if (!tctx)
926
    goto fail;
927
928
  tctx[0] = atoi(args[3]);
929
  tctx[1] = atoi(args[4]);
930
931
  task = task_new_here/*anywhere*/();
932
  if (!task)
933
    goto fail;
934
935
  task->process = debug_delay_inj_task;
936
  task->context = tctx;
937
  task_wakeup(task, TASK_WOKEN_INIT);
938
  return 1;
939
940
 fail:
941
  free(tctx);
942
  return cli_err(appctx, "Not enough memory");
943
}
944
#endif // DEBUG_DEV
945
946
static struct task *debug_task_handler(struct task *t, void *ctx, unsigned int state)
947
0
{
948
0
  unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
949
0
  unsigned long inter = tctx[1];
950
0
  unsigned long rnd;
951
952
0
  t->expire = tick_add(now_ms, inter);
953
954
  /* half of the calls will wake up another entry */
955
0
  rnd = statistical_prng();
956
0
  if (rnd & 1) {
957
0
    rnd >>= 1;
958
0
    rnd %= tctx[0];
959
0
    rnd = tctx[rnd + 2];
960
961
0
    if (rnd & 1)
962
0
      task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG);
963
0
    else
964
0
      tasklet_wakeup((struct tasklet *)rnd);
965
0
  }
966
0
  return t;
967
0
}
968
969
static struct task *debug_tasklet_handler(struct task *t, void *ctx, unsigned int state)
970
0
{
971
0
  unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
972
0
  unsigned long rnd;
973
0
  int i;
974
975
  /* wake up two random entries */
976
0
  for (i = 0; i < 2; i++) {
977
0
    rnd = statistical_prng() % tctx[0];
978
0
    rnd = tctx[rnd + 2];
979
980
0
    if (rnd & 1)
981
0
      task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG);
982
0
    else
983
0
      tasklet_wakeup((struct tasklet *)rnd);
984
0
  }
985
0
  return t;
986
0
}
987
988
/* parse a "debug dev sched" command: creates <count> tasks or tasklets on
 * thread <tid> which randomly wake each other up, to stress the scheduler.
 * debug dev sched {task|tasklet} [count=<count>] [tid=<tid>] [inter=<inter>]
 */
static int debug_parse_cli_sched(char **args, char *payload, struct appctx *appctx, void *private)
{
	int arg;
	void *ptr;                   /* points to the setting currently being assigned */
	int size;                    /* size in bytes of the object <ptr> points to */
	const char *word, *end;
	struct ist name;
	char *msg = NULL;
	char *endarg;
	unsigned long long new;
	unsigned long count = 0;     /* number of entries to create */
	unsigned long thrid = tid;   /* thread the entries will be bound to */
	unsigned int inter = 0;      /* task wakeup interval in ms */
	unsigned long i;
	int mode = 0; // 0 = tasklet; 1 = task
	unsigned long *tctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	ptr = NULL; size = 0;

	if (strcmp(args[3], "task") != 0 && strcmp(args[3], "tasklet") != 0) {
		return cli_err(appctx,
			       "Usage: debug dev sched {task|tasklet} { <obj> = <value> }*\n"
			       "     <obj>   = {count | tid | inter }\n"
			       "     <value> = 64-bit dec/hex integer (0x prefix supported)\n"
			       );
	}

	mode = strcmp(args[3], "task") == 0;

	_HA_ATOMIC_INC(&debug_commands_issued);
	/* parse each "name=value" argument */
	for (arg = 4; *args[arg]; arg++) {
		end = word = args[arg];
		while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-')
			end++;
		name = ist2(word, end - word);
		if (isteq(name, ist("count"))) {
			ptr = &count; size = sizeof(count);
		} else if (isteq(name, ist("tid"))) {
			ptr = &thrid; size = sizeof(thrid);
		} else if (isteq(name, ist("inter"))) {
			ptr = &inter; size = sizeof(inter);
		} else
			return cli_dynerr(appctx, memprintf(&msg, "Unsupported setting: '%s'.\n", word));

		/* parse the new value */
		new = strtoll(end + 1, &endarg, 0);
		if (end[1] && *endarg) {
			memprintf(&msg,
			          "%sIgnoring unparsable value '%s' for field '%.*s'.\n",
			          msg ? msg : "", end + 1, (int)(end - word), word);
			continue;
		}

		/* write the new value using the width of the target variable */
		if (size == 8)
			write_u64(ptr, new);
		else if (size == 4)
			write_u32(ptr, new);
		else if (size == 2)
			write_u16(ptr, new);
		else
			*(uint8_t *)ptr = new;
	}

	/* shared context: 2 header slots plus one slot per created entry */
	tctx = calloc(sizeof(*tctx), count + 2);
	if (!tctx)
		goto fail;

	tctx[0] = (unsigned long)count;
	tctx[1] = (unsigned long)inter;

	/* fall back to the current thread if the requested tid is invalid */
	if (thrid >= global.nbthread)
		thrid = tid;

	for (i = 0; i < count; i++) {
		/* tasklets are stored as-is (even pointers); tasks are stored
		 * with bit 0 set (pointer + 1) so the handlers can tell them
		 * apart when waking a random sibling.
		 */
		if (mode == 0) {
			struct tasklet *tl = tasklet_new();

			if (!tl)
				goto fail;

			tl->tid = thrid;
			tl->process = debug_tasklet_handler;
			tl->context = tctx;
			tctx[i + 2] = (unsigned long)tl;
		} else {
			struct task *task = task_new_on(thrid);

			if (!task)
				goto fail;

			task->process = debug_task_handler;
			task->context = tctx;
			tctx[i + 2] = (unsigned long)task + 1;
		}
	}

	/* start the tasks and tasklets */
	for (i = 0; i < count; i++) {
		unsigned long ctx = tctx[i + 2];

		if (ctx & 1)
			task_wakeup((struct task *)(ctx - 1), TASK_WOKEN_INIT);
		else
			tasklet_wakeup((struct tasklet *)ctx);
	}

	if (msg && *msg)
		return cli_dynmsg(appctx, LOG_INFO, msg);
	return 1;

 fail:
	/* free partially allocated entries; calloc() guarantees unused slots
	 * are zero, so stop at the first empty one.
	 */
	for (i = 0; tctx && i < count; i++) {
		unsigned long ctx = tctx[i + 2];

		if (!ctx)
			break;

		if (ctx & 1)
			task_destroy((struct task *)(ctx - 1));
		else
			tasklet_free((struct tasklet *)ctx);
	}

	free(tctx);
	return cli_err(appctx, "Not enough memory");
}
1126
1127
/* CLI state for "debug dev fd" */
struct dev_fd_ctx {
	int start_fd; /* first FD to scan when the I/O handler resumes */
};
1131
1132
/* CLI parser for the "debug dev fd" command. The current FD to restart from is
1133
 * stored in a struct dev_fd_ctx pointed to by svcctx.
1134
 */
1135
static int debug_parse_cli_fd(char **args, char *payload, struct appctx *appctx, void *private)
1136
0
{
1137
0
  struct dev_fd_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
1138
1139
0
  if (!cli_has_level(appctx, ACCESS_LVL_OPER))
1140
0
    return 1;
1141
1142
  /* start at fd #0 */
1143
0
  ctx->start_fd = 0;
1144
0
  return 0;
1145
0
}
1146
1147
/* CLI I/O handler for the "debug dev fd" command. Dumps all FDs that are
1148
 * accessible from the process but not known from fdtab. The FD number to
1149
 * restart from is stored in a struct dev_fd_ctx pointed to by svcctx.
1150
 */
1151
static int debug_iohandler_fd(struct appctx *appctx)
1152
0
{
1153
0
  struct dev_fd_ctx *ctx = appctx->svcctx;
1154
0
  struct stconn *sc = appctx_sc(appctx);
1155
0
  struct sockaddr_storage sa;
1156
0
  struct stat statbuf;
1157
0
  socklen_t salen, vlen;
1158
0
  int ret1, ret2, port;
1159
0
  char *addrstr;
1160
0
  int ret = 1;
1161
0
  int i, fd;
1162
1163
0
  if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
1164
0
    goto end;
1165
1166
0
  chunk_reset(&trash);
1167
1168
0
  thread_isolate();
1169
1170
  /* we have two inner loops here, one for the proxy, the other one for
1171
   * the buffer.
1172
   */
1173
0
  for (fd = ctx->start_fd; fd < global.maxsock; fd++) {
1174
    /* check for FD's existence */
1175
0
    ret1 = fcntl(fd, F_GETFD, 0);
1176
0
    if (ret1 == -1)
1177
0
      continue; // not known to the process
1178
0
    if (fdtab[fd].owner)
1179
0
      continue; // well-known
1180
1181
    /* OK we're seeing an orphan let's try to retrieve as much
1182
     * information as possible about it.
1183
     */
1184
0
    chunk_printf(&trash, "%5d", fd);
1185
1186
0
    if (fstat(fd, &statbuf) != -1) {
1187
0
      chunk_appendf(&trash, " type=%s mod=%04o dev=%#llx siz=%#llx uid=%lld gid=%lld fs=%#llx ino=%#llx",
1188
0
              isatty(fd)                ? "tty.":
1189
0
              S_ISREG(statbuf.st_mode)  ? "file":
1190
0
              S_ISDIR(statbuf.st_mode)  ? "dir.":
1191
0
              S_ISCHR(statbuf.st_mode)  ? "chr.":
1192
0
              S_ISBLK(statbuf.st_mode)  ? "blk.":
1193
0
              S_ISFIFO(statbuf.st_mode) ? "pipe":
1194
0
              S_ISLNK(statbuf.st_mode)  ? "link":
1195
0
              S_ISSOCK(statbuf.st_mode) ? "sock":
1196
#ifdef USE_EPOLL
1197
              epoll_wait(fd, NULL, 0, 0) != -1 || errno != EBADF ? "epol":
1198
#endif
1199
0
              "????",
1200
0
              (uint)statbuf.st_mode & 07777,
1201
1202
0
              (ullong)statbuf.st_rdev,
1203
0
              (ullong)statbuf.st_size,
1204
0
              (ullong)statbuf.st_uid,
1205
0
              (ullong)statbuf.st_gid,
1206
1207
0
              (ullong)statbuf.st_dev,
1208
0
              (ullong)statbuf.st_ino);
1209
0
    }
1210
1211
0
    chunk_appendf(&trash, " getfd=%s+%#x",
1212
0
           (ret1 & FD_CLOEXEC) ? "cloex" : "",
1213
0
           ret1 &~ FD_CLOEXEC);
1214
1215
    /* FD options */
1216
0
    ret2 = fcntl(fd, F_GETFL, 0);
1217
0
    if (ret2) {
1218
0
      chunk_appendf(&trash, " getfl=%s",
1219
0
              (ret1 & 3) >= 2 ? "O_RDWR" :
1220
0
              (ret1 & 1) ? "O_WRONLY" : "O_RDONLY");
1221
1222
0
      for (i = 2; i < 32; i++) {
1223
0
        if (!(ret2 & (1UL << i)))
1224
0
          continue;
1225
0
        switch (1UL << i) {
1226
0
        case O_CREAT:   chunk_appendf(&trash, ",O_CREAT");   break;
1227
0
        case O_EXCL:    chunk_appendf(&trash, ",O_EXCL");    break;
1228
0
        case O_NOCTTY:  chunk_appendf(&trash, ",O_NOCTTY");  break;
1229
0
        case O_TRUNC:   chunk_appendf(&trash, ",O_TRUNC");   break;
1230
0
        case O_APPEND:  chunk_appendf(&trash, ",O_APPEND");  break;
1231
0
#ifdef O_ASYNC
1232
0
        case O_ASYNC:   chunk_appendf(&trash, ",O_ASYNC");   break;
1233
0
#endif
1234
#ifdef O_DIRECT
1235
        case O_DIRECT:  chunk_appendf(&trash, ",O_DIRECT");  break;
1236
#endif
1237
#ifdef O_NOATIME
1238
        case O_NOATIME: chunk_appendf(&trash, ",O_NOATIME"); break;
1239
#endif
1240
0
        }
1241
0
      }
1242
0
    }
1243
1244
0
    vlen = sizeof(ret2);
1245
0
    ret1 = getsockopt(fd, SOL_SOCKET, SO_TYPE, &ret2, &vlen);
1246
0
    if (ret1 != -1)
1247
0
      chunk_appendf(&trash, " so_type=%d", ret2);
1248
1249
0
    vlen = sizeof(ret2);
1250
0
    ret1 = getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &ret2, &vlen);
1251
0
    if (ret1 != -1)
1252
0
      chunk_appendf(&trash, " so_accept=%d", ret2);
1253
1254
0
    vlen = sizeof(ret2);
1255
0
    ret1 = getsockopt(fd, SOL_SOCKET, SO_ERROR, &ret2, &vlen);
1256
0
    if (ret1 != -1)
1257
0
      chunk_appendf(&trash, " so_error=%d", ret2);
1258
1259
0
    salen = sizeof(sa);
1260
0
    if (getsockname(fd, (struct sockaddr *)&sa, &salen) != -1) {
1261
0
      if (sa.ss_family == AF_INET)
1262
0
        port = ntohs(((const struct sockaddr_in *)&sa)->sin_port);
1263
0
      else if (sa.ss_family == AF_INET6)
1264
0
        port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port);
1265
0
      else
1266
0
        port = 0;
1267
0
      addrstr = sa2str(&sa, port, 0);
1268
0
      chunk_appendf(&trash, " laddr=%s", addrstr);
1269
0
      free(addrstr);
1270
0
    }
1271
1272
0
    salen = sizeof(sa);
1273
0
    if (getpeername(fd, (struct sockaddr *)&sa, &salen) != -1) {
1274
0
      if (sa.ss_family == AF_INET)
1275
0
        port = ntohs(((const struct sockaddr_in *)&sa)->sin_port);
1276
0
      else if (sa.ss_family == AF_INET6)
1277
0
        port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port);
1278
0
      else
1279
0
        port = 0;
1280
0
      addrstr = sa2str(&sa, port, 0);
1281
0
      chunk_appendf(&trash, " raddr=%s", addrstr);
1282
0
      free(addrstr);
1283
0
    }
1284
1285
0
    chunk_appendf(&trash, "\n");
1286
1287
0
    if (applet_putchk(appctx, &trash) == -1) {
1288
0
      ctx->start_fd = fd;
1289
0
      ret = 0;
1290
0
      break;
1291
0
    }
1292
0
  }
1293
1294
0
  thread_release();
1295
0
 end:
1296
0
  return ret;
1297
0
}
1298
1299
#if defined(DEBUG_MEM_STATS)
1300
1301
/* CLI state for "debug dev memstats"; kept across I/O handler invocations */
struct dev_mem_ctx {
	struct mem_stats *start, *stop; /* begin/end of dump */
	char *match;                    /* non-null if a name prefix is specified */
	int show_all;                   /* show all entries if non-null */
	int width;                      /* 1st column width */
	long tot_size;                  /* sum of alloc-free */
	ulong tot_calls;                /* sum of calls */
};
1310
1311
/* CLI parser for the "debug dev memstats" command. Sets a dev_mem_ctx shown above.
 * Supported arguments: "reset" (admin only, zeroes all counters and returns),
 * "all" (also dump zero entries), "match <pfx>" (only dump pool entries whose
 * name starts with <pfx>).
 */
static int debug_parse_cli_memstats(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct dev_mem_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	int arg;

	/* linker-provided section bounds; weak so the build works even when
	 * the mem_stats section is empty.
	 */
	extern __attribute__((__weak__)) struct mem_stats __start_mem_stats;
	extern __attribute__((__weak__)) struct mem_stats __stop_mem_stats;

	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	for (arg = 3; *args[arg]; arg++) {
		if (strcmp(args[arg], "reset") == 0) {
			struct mem_stats *ptr;

			if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
				return 1;

			for (ptr = &__start_mem_stats; ptr < &__stop_mem_stats; ptr++) {
				_HA_ATOMIC_STORE(&ptr->calls, 0);
				_HA_ATOMIC_STORE(&ptr->size, 0);
			}
			return 1;
		}
		else if (strcmp(args[arg], "all") == 0) {
			ctx->show_all = 1;
			continue;
		}
		else if (strcmp(args[arg], "match") == 0 && *args[arg + 1]) {
			ha_free(&ctx->match);
			ctx->match = strdup(args[arg + 1]);
			arg++; /* consume the prefix argument too */
			continue;
		}
		else
			return cli_err(appctx, "Expects either 'reset', 'all', or 'match <pfx>'.\n");
	}

	/* otherwise proceed with the dump from p0 to p1 */
	ctx->start = &__start_mem_stats;
	ctx->stop  = &__stop_mem_stats;
	ctx->width = 0; /* 0 = first column width not computed yet */
	return 0;
}
1356
1357
/* CLI I/O handler for the "debug dev memstats" command using a dev_mem_ctx
 * found in appctx->svcctx. Dumps all mem_stats structs referenced by pointers
 * located between ->start and ->stop. Dumps all entries if ->show_all != 0,
 * otherwise only non-zero calls. Returns 1 when done, 0 when the output
 * buffer filled up and the handler must be called again.
 */
static int debug_iohandler_memstats(struct appctx *appctx)
{
	struct dev_mem_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	struct mem_stats *ptr;
	const char *pfx = ctx->match;
	int ret = 1;

	if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
		goto end;

	if (!ctx->width) {
		/* we don't know the first column's width, let's compute it
		 * now based on a first pass on printable entries and their
		 * expected width (approximated).
		 */
		for (ptr = ctx->start; ptr != ctx->stop; ptr++) {
			const char *p, *name;
			int w = 0;
			char tmp;

			if (!ptr->size && !ptr->calls && !ctx->show_all)
				continue;

			/* keep only the basename of the source file */
			for (p = name = ptr->caller.file; *p; p++) {
				if (*p == '/')
					name = p + 1;
			}

			/* snprintf with size 0 only measures the output */
			if (ctx->show_all)
				w = snprintf(&tmp, 0, "%s(%s:%d) ", ptr->caller.func, name, ptr->caller.line);
			else
				w = snprintf(&tmp, 0, "%s:%d ", name, ptr->caller.line);

			if (w > ctx->width)
				ctx->width = w;
		}
	}

	/* dump the entries, resuming from ctx->start after an interruption */
	for (ptr = ctx->start; ptr != ctx->stop; ptr++) {
		const char *type;
		const char *name;
		const char *p;
		const char *info = NULL;
		const char *func = NULL;
		int direction = 0; // neither alloc nor free (e.g. realloc)

		if (!ptr->size && !ptr->calls && !ctx->show_all)
			continue;

		/* basename only */
		for (p = name = ptr->caller.file; *p; p++) {
			if (*p == '/')
				name = p + 1;
		}

		func = ptr->caller.func;

		switch (ptr->caller.what) {
		case MEM_STATS_TYPE_CALLOC:  type = "CALLOC";  direction =  1; break;
		case MEM_STATS_TYPE_FREE:    type = "FREE";    direction = -1; break;
		case MEM_STATS_TYPE_MALLOC:  type = "MALLOC";  direction =  1; break;
		case MEM_STATS_TYPE_REALLOC: type = "REALLOC"; break;
		case MEM_STATS_TYPE_STRDUP:  type = "STRDUP";  direction =  1; break;
		case MEM_STATS_TYPE_P_ALLOC: type = "P_ALLOC"; direction =  1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break;
		case MEM_STATS_TYPE_P_FREE:  type = "P_FREE";  direction = -1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break;
		default:                     type = "UNSET";   break;
		}

		/* only match requested prefixes (pool name) */
		if (pfx && (!info || strncmp(info, pfx, strlen(pfx)) != 0))
			continue;

		chunk_reset(&trash);
		if (ctx->show_all)
			chunk_appendf(&trash, "%s(", func);

		chunk_appendf(&trash, "%s:%d", name, ptr->caller.line);

		if (ctx->show_all)
			chunk_appendf(&trash, ")");

		/* pad the first column up to the pre-computed width */
		while (trash.data < ctx->width)
			trash.area[trash.data++] = ' ';

		chunk_appendf(&trash, "%7s  size: %12lu  calls: %9lu  size/call: %6lu %s\n",
			     type,
			     (unsigned long)ptr->size, (unsigned long)ptr->calls,
			     (unsigned long)(ptr->calls ? (ptr->size / ptr->calls) : 0),
			     info ? info : "");

		if (applet_putchk(appctx, &trash) == -1) {
			/* output full: resume from this entry next time */
			ctx->start = ptr;
			ret = 0;
			goto end;
		}
		/* account allocations positively, frees negatively; reallocs
		 * (direction == 0) are excluded from the balance.
		 */
		if (direction > 0) {
			ctx->tot_size  += (ulong)ptr->size;
			ctx->tot_calls += (ulong)ptr->calls;
		}
		else if (direction < 0) {
			ctx->tot_size  -= (ulong)ptr->size;
			ctx->tot_calls += (ulong)ptr->calls;
		}
	}

	/* now dump a summary */
	chunk_reset(&trash);
	chunk_appendf(&trash, "Total");
	while (trash.data < ctx->width)
		trash.area[trash.data++] = ' ';

	chunk_appendf(&trash, "%7s  size: %12ld  calls: %9lu  size/call: %6ld %s\n",
		      "BALANCE",
		      ctx->tot_size, ctx->tot_calls,
		      (long)(ctx->tot_calls ? (ctx->tot_size / ctx->tot_calls) : 0),
		      "(excl. realloc)");

	if (applet_putchk(appctx, &trash) == -1) {
		/* note: ptr == ctx->stop here so only the summary is retried */
		ctx->start = ptr;
		ret = 0;
		goto end;
	}
 end:
	return ret;
}
1497
1498
/* release the "show pools" context */
1499
static void debug_release_memstats(struct appctx *appctx)
1500
{
1501
  struct dev_mem_ctx *ctx = appctx->svcctx;
1502
1503
  ha_free(&ctx->match);
1504
}
1505
#endif
1506
1507
#ifndef USE_THREAD_DUMP
1508
1509
/* This function dumps all threads' state to the trash. This version is the
1510
 * most basic one, which doesn't inspect other threads.
1511
 */
1512
void ha_thread_dump_all_to_trash()
1513
0
{
1514
0
  unsigned int thr;
1515
1516
0
  for (thr = 0; thr < global.nbthread; thr++)
1517
0
    ha_thread_dump(&trash, thr, tid);
1518
0
}
1519
1520
#else /* below USE_THREAD_DUMP is set */
1521
1522
/* ID of the thread requesting the dump (set before signaling the others) */
static unsigned int thread_dump_tid;

/* points to the buffer where the dump functions should write. It must
 * have already been initialized by the requester. Nothing is done if
 * it's NULL.
 */
struct buffer *thread_dump_buffer = NULL;
1530
1531
/* initiates a thread dump: waits for any previous dump to complete, then
 * signals every thread with DEBUGSIG so each appends its own state to the
 * trash buffer (see debug_handler()).
 */
void ha_thread_dump_all_to_trash()
{
	unsigned int old;

	/* initiate a dump starting from first thread. Use a CAS so that we do
	 * not wait if we're not the first one, but we wait for a previous dump
	 * to finish.
	 */
	while (1) {
		old = 0;
		if (HA_ATOMIC_CAS(&thread_dump_state, &old, THREAD_DUMP_FSYNC))
			break;
		ha_thread_relax();
	}
	/* target buffer and requester tid must be visible before the signals */
	thread_dump_buffer = &trash;
	thread_dump_tid = tid;
	ha_tkillall(DEBUGSIG);

	/* the call above contains a raise() so we're certain to return after
	 * returning from the sighandler, hence when the dump is complete.
	 */
}
1554
1555
/* handles DEBUGSIG to dump the state of the thread it's working on. This is
 * appended at the end of thread_dump_buffer which must be protected against
 * reentrance from different threads (a thread-local buffer works fine).
 * Threads serialize themselves through thread_dump_state (see the layout
 * description at the top of the file).
 */
void debug_handler(int sig, siginfo_t *si, void *arg)
{
	int harmless = is_thread_harmless();
	int running = 0;
	uint prev;
	uint next;

	/* first, let's check it's really for us and that we didn't just get
	 * a spurious DEBUGSIG.
	 */
	if (!_HA_ATOMIC_LOAD(&thread_dump_state))
		return;

	/* There are 5 phases in the dump process:
	 *   1- wait for all threads to sync or the first one to start
	 *   2- wait for our turn, i.e. when tid appears in lower bits.
	 *   3- perform the action if our tid is there
	 *   4- pass tid to the number of the next thread to dump or
	 *      reset running counter if we're last one.
	 *   5- wait for running to be zero and decrement the count
	 */

	/* wait for all previous threads to finish first */
	if (!harmless)
		thread_harmless_now();

	if (HA_ATOMIC_FETCH_ADD(&thread_dump_state, 1) == THREAD_DUMP_FSYNC) {
		/* the first one which lands here is responsible for constantly
		 * recounting the number of active threads and switching from
		 * SYNC to DUMP.
		 */
		while (1) {
			int first = -1; // first tid to dump
			int thr;

			running = 0;
			for (thr = 0; thr < global.nbthread; thr++) {
				if (ha_thread_info[thr].tg->threads_enabled & ha_thread_info[thr].ltid_bit) {
					running++;
					if (first < 0)
						first = thr;
				}
			}

			if ((HA_ATOMIC_LOAD(&thread_dump_state) & THREAD_DUMP_TMASK) == running) {
				/* all threads are there, let's try to start */
				prev = THREAD_DUMP_FSYNC | running;
				next = (running << 16) | first;
				if (HA_ATOMIC_CAS(&thread_dump_state, &prev, next))
					break;
				/* it failed! maybe a thread appeared late (e.g. during boot), let's
				 * recount.
				 */
			}
			ha_thread_relax();
		}
	}

	/* all threads: let's wait for the SYNC flag to disappear; tid is reset at
	 * the same time to the first valid tid to dump and pmask will reflect the
	 * number of participants.
	 */
	while (HA_ATOMIC_LOAD(&thread_dump_state) & THREAD_DUMP_FSYNC)
		ha_thread_relax();

	/* wait for our turn */
	while ((HA_ATOMIC_LOAD(&thread_dump_state) & THREAD_DUMP_TMASK) != tid)
		ha_thread_relax();

	if (!harmless)
		thread_harmless_end_sig();

	/* dump if needed */
	if (thread_dump_buffer)
		ha_thread_dump(thread_dump_buffer, tid, thread_dump_tid);

	/* figure which is the next thread ID to dump among enabled ones. Note
	 * that this relies on the fact that we're not creating new threads in
	 * the middle of a dump, which is normally granted by the harmless bits
	 * anyway.
	 */
	for (next = tid + 1; next < global.nbthread; next++) {
		if (unlikely(next >= MAX_THREADS)) {
			/* just to please gcc 6.5 who guesses the ranges wrong. */
			continue;
		}

		if (ha_thread_info[next].tg &&
		    ha_thread_info[next].tg->threads_enabled & ha_thread_info[next].ltid_bit)
			break;
	}

	/* if there are threads left to dump, we atomically set the next one,
	 * otherwise we'll clear dump and set the thread part to the number of
	 * threads that need to disappear.
	 */
	if (next < global.nbthread) {
		next = (HA_ATOMIC_LOAD(&thread_dump_state) & THREAD_DUMP_PMASK) | next;
		HA_ATOMIC_STORE(&thread_dump_state, next);
	} else {
		thread_dump_buffer = NULL; // was the last one
		running = (HA_ATOMIC_LOAD(&thread_dump_state) & THREAD_DUMP_PMASK) >> 16;
		HA_ATOMIC_STORE(&thread_dump_state, running);
	}

	/* now wait for all others to finish dumping: the lowest part will turn
	 * to zero. Then all others decrement the done part. We must not change
	 * the harmless status anymore because one of the other threads might
	 * have been interrupted in thread_isolate() waiting for all others to
	 * become harmless, and changing the situation here would break that
	 * promise.
	 */

	/* wait for everyone to finish */
	while (HA_ATOMIC_LOAD(&thread_dump_state) & THREAD_DUMP_PMASK)
		ha_thread_relax();

	/* we're gone. Past this point anything can happen including another
	 * thread trying to re-trigger a dump, so thread_dump_buffer and
	 * thread_dump_tid may become invalid immediately after this call.
	 */
	HA_ATOMIC_SUB(&thread_dump_state, 1);

	/* mark the current thread as stuck to detect it upon next invocation
	 * if it didn't move.
	 */
	if (!harmless &&
	    !(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_SLEEPING))
		_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STUCK);
}
1689
1690
static int init_debug_per_thread()
1691
{
1692
  sigset_t set;
1693
1694
  /* unblock the DEBUGSIG signal we intend to use */
1695
  sigemptyset(&set);
1696
  sigaddset(&set, DEBUGSIG);
1697
#if defined(DEBUG_DEV)
1698
  sigaddset(&set, SIGRTMAX);
1699
#endif
1700
  ha_sigmask(SIG_UNBLOCK, &set, NULL);
1701
  return 1;
1702
}
1703
1704
/* Post-check initialization: preloads the backtrace machinery and installs
 * the signal handlers used by the debugging facilities. Always returns
 * ERR_NONE.
 */
static int init_debug()
{
	struct sigaction sa;
	void *callers[1];

	/* calling backtrace() will access libgcc at runtime. We don't want to
	 * do it after the chroot, so let's perform a first call to have it
	 * ready in memory for later use.
	 */
	my_backtrace(callers, sizeof(callers)/sizeof(*callers));
	sa.sa_handler = NULL; /* sa_sigaction is used since SA_SIGINFO is set */
	sa.sa_sigaction = debug_handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sigaction(DEBUGSIG, &sa, NULL);

#if defined(DEBUG_DEV)
	/* delay injection handler (defined elsewhere in this file) */
	sa.sa_handler = NULL;
	sa.sa_sigaction = debug_delay_inj_sighandler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMAX, &sa, NULL);
#endif
	return ERR_NONE;
}
1729
1730
REGISTER_POST_CHECK(init_debug);
1731
REGISTER_PER_THREAD_INIT(init_debug_per_thread);
1732
1733
#endif /* USE_THREAD_DUMP */
1734
1735
/* register cli keywords: one entry per "debug dev" sub-command plus the
 * "show libs"/"show threads" commands; each entry carries the keyword path,
 * its one-line usage string, the parser, optional I/O handler and release
 * callback, and the required access level.
 */
static struct cli_kw_list cli_kws = {{ },{
	{{ "debug", "dev", "bug", NULL },      "debug dev bug                           : call BUG_ON() and crash",                 debug_parse_cli_bug,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "check", NULL },    "debug dev check                         : call CHECK_IF() and possibly crash",      debug_parse_cli_check, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "close", NULL },    "debug dev close  <fd>                   : close this file descriptor",              debug_parse_cli_close, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "deadlock", NULL }, "debug dev deadlock [nbtask]             : deadlock between this number of tasks",   debug_parse_cli_deadlock, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "delay", NULL },    "debug dev delay  [ms]                   : sleep this long",                         debug_parse_cli_delay, NULL, NULL, NULL, ACCESS_EXPERT },
#if defined(DEBUG_DEV)
	{{ "debug", "dev", "delay-inj", NULL },"debug dev delay-inj <inter> <count>     : inject random delays into threads",       debug_parse_delay_inj, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "exec",  NULL },    "debug dev exec   [cmd] ...              : show this command's output",              debug_parse_cli_exec,  NULL, NULL, NULL, ACCESS_EXPERT },
#endif
	{{ "debug", "dev", "fd", NULL },       "debug dev fd                            : scan for rogue/unhandled FDs",            debug_parse_cli_fd,    debug_iohandler_fd, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "exit",  NULL },    "debug dev exit   [code]                 : immediately exit the process",            debug_parse_cli_exit,  NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "hash", NULL },     "debug dev hash   [msg]                  : return msg hashed if anon is set",        debug_parse_cli_hash,  NULL, NULL, NULL, 0 },
	{{ "debug", "dev", "hex",   NULL },    "debug dev hex    <addr> [len]           : dump a memory area",                      debug_parse_cli_hex,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "log",   NULL },    "debug dev log    [msg] ...              : send this msg to global logs",            debug_parse_cli_log,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "loop",  NULL },    "debug dev loop   [ms]                   : loop this long",                          debug_parse_cli_loop,  NULL, NULL, NULL, ACCESS_EXPERT },
#if defined(DEBUG_MEM_STATS)
	{{ "debug", "dev", "memstats", NULL }, "debug dev memstats [reset|all|match ...]: dump/reset memory statistics",            debug_parse_cli_memstats, debug_iohandler_memstats, debug_release_memstats, NULL, 0 },
#endif
	{{ "debug", "dev", "panic", NULL },    "debug dev panic                         : immediately trigger a panic",             debug_parse_cli_panic, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "sched", NULL },    "debug dev sched  {task|tasklet} [k=v]*  : stress the scheduler",                    debug_parse_cli_sched, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "stream",NULL },    "debug dev stream [k=v]*                 : show/manipulate stream flags",            debug_parse_cli_stream,NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "sym",   NULL },    "debug dev sym    <addr>                 : resolve symbol address",                  debug_parse_cli_sym,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "tkill", NULL },    "debug dev tkill  [thr] [sig]            : send signal to thread",                   debug_parse_cli_tkill, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "warn",  NULL },    "debug dev warn                          : call WARN_ON() and possibly crash",       debug_parse_cli_warn,  NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "write", NULL },    "debug dev write  [size]                 : write that many bytes in return",         debug_parse_cli_write, NULL, NULL, NULL, ACCESS_EXPERT },

#if defined(HA_HAVE_DUMP_LIBS)
	{{ "show", "libs", NULL, NULL },       "show libs                               : show loaded object files and libraries", debug_parse_cli_show_libs, NULL, NULL },
#endif
	{{ "show", "threads", NULL, NULL },    "show threads                            : show some threads debugging information", NULL, cli_io_handler_show_threads, NULL },
	{{},}
}};
1769
1770
INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);