Coverage Report

Created: 2025-06-20 06:16

/src/haproxy/src/debug.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Process debugging functions.
3
 *
4
 * Copyright 2000-2019 Willy Tarreau <willy@haproxy.org>.
5
 *
6
 * This program is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU General Public License
8
 * as published by the Free Software Foundation; either version
9
 * 2 of the License, or (at your option) any later version.
10
 *
11
 */
12
13
14
#include <errno.h>
15
#include <fcntl.h>
16
#include <signal.h>
17
#include <time.h>
18
#include <stdio.h>
19
#include <stdlib.h>
20
#include <syslog.h>
21
#include <sys/resource.h>
22
#include <sys/stat.h>
23
#include <sys/types.h>
24
#include <sys/utsname.h>
25
#include <sys/wait.h>
26
#include <unistd.h>
27
#ifdef USE_EPOLL
28
#include <sys/epoll.h>
29
#endif
30
31
#include <haproxy/api.h>
32
#include <haproxy/applet.h>
33
#include <haproxy/buf.h>
34
#include <haproxy/cfgparse.h>
35
#include <haproxy/cli.h>
36
#include <haproxy/clock.h>
37
#include <haproxy/debug.h>
38
#include <haproxy/fd.h>
39
#include <haproxy/global.h>
40
#include <haproxy/hlua.h>
41
#include <haproxy/http_ana.h>
42
#include <haproxy/limits.h>
43
#if defined(USE_LINUX_CAP)
44
#include <haproxy/linuxcap.h>
45
#endif
46
#include <haproxy/log.h>
47
#include <haproxy/net_helper.h>
48
#include <haproxy/sc_strm.h>
49
#include <haproxy/proxy.h>
50
#include <haproxy/stconn.h>
51
#include <haproxy/task.h>
52
#include <haproxy/thread.h>
53
#include <haproxy/time.h>
54
#include <haproxy/tools.h>
55
#include <haproxy/trace.h>
56
#include <haproxy/version.h>
57
#include <import/ist.h>
58
59
60
/* The dump state is made of:
 *   - num_thread on the lowest 15 bits
 *   - a SYNC flag on bit 15 (waiting for sync start)
 *   - number of participating threads on bits 16-30
 * Initiating a dump consists in setting it to SYNC and incrementing the
 * num_thread part when entering the function. The first thread periodically
 * recounts active threads and compares it to the ready ones, and clears SYNC
 * and sets the number of participants to the value found, which serves as a
 * start signal. A thread finished dumping looks up the TID of the next active
 * thread after it and writes it in the lowest part. If there's none, it sets
 * the thread counter to the number of participants and resets that part,
 * which serves as an end-of-dump signal. All threads decrement the num_thread
 * part. Then all threads wait for the value to reach zero. Only used when
 * USE_THREAD_DUMP is set.
 */
#define THREAD_DUMP_TMASK     0x00007FFFU  /* bits 0-14: num_thread (current thread counter/TID) */
#define THREAD_DUMP_FSYNC     0x00008000U  /* bit 15: SYNC flag, set while waiting for sync start */
#define THREAD_DUMP_PMASK     0x7FFF0000U  /* bits 16-30: number of participating threads */
78
79
/* Description of a component with name, version, path, build options etc. E.g.
 * one of them is haproxy. Others might be some clearly identified shared libs.
 * They're intentionally self-contained and to be placed into an array to make
 * it easier to find them in a core. The important fields (name and version)
 * are locally allocated, other ones are dynamic.
 */
struct post_mortem_component {
	char name[32];           // symbolic short name (locally allocated, readable from a core)
	char version[32];        // exact version (locally allocated, readable from a core)
	char *toolchain;         // compiler and version (e.g. gcc-11.4.0)
	char *toolchain_opts;    // optims, arch-specific options (e.g. CFLAGS)
	char *build_settings;    // build options (e.g. USE_*, TARGET, etc)
	char *path;              // path if known.
};
93
94
/* This is a collection of information that are centralized to help with core
 * dump analysis. It must be used with a public variable and gather elements
 * as much as possible without dereferences so that even when identified in a
 * core dump it's possible to get the most out of it even if the core file is
 * not much exploitable. It's aligned to 256 so that it's easy to spot, given
 * that being that large it will not change its size much.
 */
struct post_mortem {
	/* platform-specific information */
	char post_mortem_magic[32];     // "POST-MORTEM STARTS HERE+7654321\0"
	struct {
		struct utsname utsname; // OS name+ver+arch+hostname
		char hw_vendor[64];     // hardware/hypervisor vendor when known
		char hw_family[64];     // hardware/hypervisor product family when known
		char hw_model[64];      // hardware/hypervisor product/model when known
		char brd_vendor[64];    // mainboard vendor when known
		char brd_model[64];     // mainboard model when known
		char soc_vendor[64];    // SoC/CPU vendor from cpuinfo
		char soc_model[64];     // SoC model when known and relevant
		char cpu_model[64];     // CPU model when different from SoC
		char virt_techno[16];   // when provided by cpuid
		char cont_techno[16];   // empty, "no", "yes", "docker" or others
	} platform;

	/* process-specific information */
	struct {
		pid_t pid;              // PID of this process
		uid_t boot_uid;         // UID at startup
		gid_t boot_gid;         // GID at startup
		uid_t run_uid;          // UID at run time
		gid_t run_gid;          // GID at run time
#if defined(USE_LINUX_CAP)
		struct {
			// initial process capabilities
			struct __user_cap_data_struct boot[_LINUX_CAPABILITY_U32S_3];
			int err_boot; // errno, if capget() syscall fails at boot
			// runtime process capabilities
			struct __user_cap_data_struct run[_LINUX_CAPABILITY_U32S_3];
			int err_run; // errno, if capget() syscall fails at runtime
		} caps;
#endif
		struct rlimit boot_lim_fd;  // RLIMIT_NOFILE at startup
		struct rlimit boot_lim_ram; // RLIMIT_DATA at startup
		struct rlimit run_lim_fd;  // RLIMIT_NOFILE just before enter in polling loop
		struct rlimit run_lim_ram; // RLIMIT_DATA just before enter in polling loop
		char **argv;            // pointer to the command line arguments
		unsigned char argc;     // number of command line arguments
	} process;

#if defined(HA_HAVE_DUMP_LIBS)
	/* information about dynamic shared libraries involved */
	char *libs;                      // dump of one addr / path per line, or NULL
#endif
	struct tgroup_info *tgroup_info; // pointer to ha_tgroup_info
	struct thread_info *thread_info; // pointer to ha_thread_info
	struct tgroup_ctx  *tgroup_ctx;  // pointer to ha_tgroup_ctx
	struct thread_ctx  *thread_ctx;  // pointer to ha_thread_ctx
	struct list *pools;              // pointer to the head of the pools list
	struct proxy **proxies;          // pointer to the head of the proxies list
	struct global *global;           // pointer to the struct global
	struct fdtab **fdtab;            // pointer to the fdtab array
	struct activity *activity;       // pointer to the activity[] per-thread array

	/* info about identified distinct components (executable, shared libs, etc).
	 * These can be all listed at once in gdb using:
	 *    p *post_mortem.components@post_mortem.nb_components
	 */
	uint nb_components;              // # of components below
	struct post_mortem_component *components; // NULL or array
} post_mortem ALIGNED(256) HA_SECTION("_post_mortem") = { };
164
165
/* Counter of debug commands issued so far. Only defined and zero-initialized
 * here; incremented by command handlers elsewhere in the code.
 */
unsigned int debug_commands_issued = 0;

/* NOTE(review): presumably raised once the "thread blocked" warning has been
 * emitted so it's only reported once -- only the definition is visible here,
 * confirm against the users of this flag.
 */
unsigned int warn_blocked_issued = 0;

/* Defaults to 1 when built with DEBUG_COUNTERS >= 2, otherwise 0. */
unsigned int debug_enable_counters = (DEBUG_COUNTERS >= 2);
168
169
/* dumps a backtrace of the current thread that is appended to buffer <buf>.
170
 * Lines are prefixed with the string <prefix> which may be empty (used for
171
 * indenting). It is recommended to use this at a function's tail so that
172
 * the function does not appear in the call stack. The <dump> argument
173
 * indicates what dump state to start from, and should usually be zero. It
174
 * may be among the following values:
175
 *   - 0: search usual callers before step 1, or directly jump to 2
176
 *   - 1: skip usual callers before step 2
177
 *   - 2: dump until polling loop, scheduler, or main() (excluded)
178
 *   - 3: end
179
 *   - 4-7: like 0 but stops *after* main.
180
 */
181
void ha_dump_backtrace(struct buffer *buf, const char *prefix, int dump)
182
0
{
183
0
  sigset_t new_mask, old_mask;
184
0
  struct buffer bak;
185
0
  char pfx2[100];
186
0
  void *callers[100];
187
0
  int j, nptrs;
188
0
  const void *addr;
189
190
  /* make sure we don't re-enter from debug coming from other threads,
191
   * as some libc's backtrace() are not re-entrant. We'll block these
192
   * sensitive signals while possibly dumping a backtrace.
193
   */
194
0
  sigemptyset(&new_mask);
195
#ifdef WDTSIG
196
  sigaddset(&new_mask, WDTSIG);
197
#endif
198
#ifdef DEBUGSIG
199
  sigaddset(&new_mask, DEBUGSIG);
200
#endif
201
0
  ha_sigmask(SIG_BLOCK, &new_mask, &old_mask);
202
203
0
  nptrs = my_backtrace(callers, sizeof(callers)/sizeof(*callers));
204
0
  if (!nptrs)
205
0
    goto leave;
206
207
0
  if (snprintf(pfx2, sizeof(pfx2), "%s| ", prefix) > sizeof(pfx2))
208
0
    pfx2[0] = 0;
209
210
  /* The call backtrace_symbols_fd(callers, nptrs, STDOUT_FILENO would
211
   * produce similar output to the following:
212
   */
213
0
  chunk_appendf(buf, "%scall trace(%d):\n", prefix, nptrs);
214
0
  for (j = 0; (j < nptrs || (dump & 3) < 2); j++) {
215
0
    if (j == nptrs && !(dump & 3)) {
216
      /* we failed to spot the starting point of the
217
       * dump, let's start over dumping everything we
218
       * have.
219
       */
220
0
      dump += 2;
221
0
      j = 0;
222
0
    }
223
0
    bak = *buf;
224
0
    dump_addr_and_bytes(buf, pfx2, callers[j], -8);
225
0
    addr = resolve_sym_name(buf, ": ", callers[j]);
226
227
0
#if defined(__i386__) || defined(__x86_64__)
228
    /* Try to decode a relative call (0xe8 + 32-bit signed ofs) */
229
0
    if (may_access(callers[j] - 5) && may_access(callers[j] - 1) &&
230
0
        *((uchar*)(callers[j] - 5)) == 0xe8) {
231
0
      int ofs = *((int *)(callers[j] - 4));
232
0
      const void *addr2 = callers[j] + ofs;
233
0
      resolve_sym_name(buf, " > ", addr2);
234
0
    }
235
#elif defined(__aarch64__)
236
    /* Try to decode a relative call (0x9X + 26-bit signed ofs) */
237
    if (may_access(callers[j] - 4) && may_access(callers[j] - 1) &&
238
        (*((int*)(callers[j] - 4)) & 0xFC000000) == 0x94000000) {
239
      int ofs = (*((int *)(callers[j] - 4)) << 6) >> 4; // 26-bit signed immed*4
240
      const void *addr2 = callers[j] - 4 + ofs;
241
      resolve_sym_name(buf, " > ", addr2);
242
    }
243
#endif
244
0
    if ((dump & 3) == 0) {
245
      /* dump not started, will start *after* ha_thread_dump_one(),
246
       * ha_panic and ha_backtrace_to_stderr
247
       */
248
0
      if (addr == ha_panic ||
249
0
          addr == ha_backtrace_to_stderr || addr == ha_thread_dump_one)
250
0
        dump++;
251
0
      *buf = bak;
252
0
      continue;
253
0
    }
254
255
0
    if ((dump & 3) == 1) {
256
      /* starting */
257
0
      if (addr == ha_panic ||
258
0
          addr == ha_backtrace_to_stderr || addr == ha_thread_dump_one) {
259
0
        *buf = bak;
260
0
        continue;
261
0
      }
262
0
      dump++;
263
0
    }
264
265
0
    if ((dump & 3) == 2) {
266
      /* still dumping */
267
0
      if (dump == 6) {
268
        /* we only stop *after* main and we must send the LF */
269
0
        if (addr == main) {
270
0
          j = nptrs;
271
0
          dump++;
272
0
        }
273
0
      }
274
0
      else if (addr == run_poll_loop || addr == main || addr == run_tasks_from_lists) {
275
0
        dump++;
276
0
        *buf = bak;
277
0
        break;
278
0
      }
279
0
    }
280
    /* OK, line dumped */
281
0
    chunk_appendf(buf, "\n");
282
0
  }
283
0
 leave:
284
  /* unblock temporarily blocked signals */
285
0
  ha_sigmask(SIG_SETMASK, &old_mask, NULL);
286
0
}
287
288
/* dump a backtrace of current thread's stack to stderr. */
289
void ha_backtrace_to_stderr(void)
290
0
{
291
0
  char area[8192];
292
0
  struct buffer b = b_make(area, sizeof(area), 0, 0);
293
294
0
  ha_dump_backtrace(&b, "  ", 4);
295
0
  if (b.data)
296
0
    DISGUISE(write(2, b.area, b.data));
297
0
}
298
299
/* Dumps some known information about the current thread into its dump buffer,
 * and optionally extra info when it's considered safe to do so. The dump will
 * be appended to the buffer, so the caller is responsible for preliminary
 * initializing it. The <is_caller> argument will indicate if the thread is the
 * one requesting the dump (e.g. watchdog, panic etc), in order to display a
 * star ('*') in front of the thread to indicate the requesting one. Any stuck
 * thread is also prefixed with a '>'. The caller is responsible for atomically
 * setting up the thread's dump buffer to point to a valid buffer with enough
 * room. Output will be truncated if it does not fit. When the dump is complete
 * the dump buffer will have bit 0 set to 1 to tell the caller it's done, and
 * the caller will then change that value to indicate it's done once the
 * contents are collected.
 */
void ha_thread_dump_one(struct buffer *buf, int is_caller)
{
	/* CPU time consumed at the last polling round vs now, to show how long
	 * this thread has been running without going back to the poller.
	 */
	unsigned long long p = th_ctx->prev_cpu_time;
	unsigned long long n = now_cpu_time();
	int stuck = !!(th_ctx->flags & TH_FL_STUCK);

	/* keep a copy of the dump pointer for post-mortem analysis */
	HA_ATOMIC_STORE(&th_ctx->last_dump_buffer, buf);

	/* first line: '*' marks the requesting thread, '>' marks a stuck one */
	chunk_appendf(buf,
	              "%c%cThread %-2u: id=0x%llx act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
	              "     %2u/%-2u   stuck=%d prof=%d",
	              (is_caller) ? '*' : ' ', stuck ? '>' : ' ', tid + 1,
	              ha_get_pthread_id(tid),
	              thread_has_tasks(),
	              !eb_is_empty(&th_ctx->rqueue_shared),
	              !eb_is_empty(&th_ctx->timers),
	              !eb_is_empty(&th_ctx->rqueue),
	              !(LIST_ISEMPTY(&th_ctx->tasklets[TL_URGENT]) &&
	                LIST_ISEMPTY(&th_ctx->tasklets[TL_NORMAL]) &&
	                LIST_ISEMPTY(&th_ctx->tasklets[TL_BULK]) &&
	                MT_LIST_ISEMPTY(&th_ctx->shared_tasklet_list)),
	              th_ctx->tasks_in_list,
	              th_ctx->rq_total,
	              ti->tgid, ti->ltid + 1,
	              stuck,
	              !!(th_ctx->flags & TH_FL_TASK_PROFILING));

#if defined(USE_THREAD)
	chunk_appendf(buf,
	              " harmless=%d isolated=%d",
	              !!(_HA_ATOMIC_LOAD(&tg_ctx->threads_harmless) & ti->ltid_bit),
	              isolated_thread == tid);
#endif

	chunk_appendf(buf, "\n");
	chunk_appendf(buf, "             cpu_ns: poll=%llu now=%llu diff=%llu\n", p, n, n-p);

	/* this is the end of what we can dump from outside the current thread */

	chunk_appendf(buf, "             curr_task=");
	ha_task_dump(buf, th_ctx->current, "             ");

#if defined(USE_THREAD) && ((DEBUG_THREAD > 0) || defined(DEBUG_FULL))
	/* List the lock history: 8 one-byte entries packed in a 64-bit word,
	 * each made of a lock label (bits 2+) and an operation (bits 0-1,
	 * indexed into "URSW": Unlock/Read/Seek/Write).
	 */
	if (th_ctx->lock_history) {
		int lkh, lkl, lbl;
		int done;

		chunk_appendf(buf, "             lock_hist:");
		for (lkl = 7; lkl >= 0; lkl--) {
			lkh = (th_ctx->lock_history >> (lkl * 8)) & 0xff;
			if (!lkh)
				continue;
			chunk_appendf(buf, " %c:%s",
			              "URSW"[lkh & 3], lock_label((lkh >> 2) - 1));
		}

		/* now rescan the list to only show those that remain */
		done = 0;
		for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
			/* find the latest occurrence of each label */
			for (lkl = 0; lkl < 8; lkl++) {
				lkh = (th_ctx->lock_history >> (lkl * 8)) & 0xff;
				if (!lkh)
					continue;
				if ((lkh >> 2) == lbl)
					break;
			}
			if (lkl == 8) // not found
				continue;
			if ((lkh & 3) == _LK_UN)
				continue;
			if (!done)
				chunk_appendf(buf, " locked:");
			chunk_appendf(buf, " %s(%c)",
			              lock_label((lkh >> 2) - 1),
			              "URSW"[lkh & 3]);
			done++;
		}
		chunk_appendf(buf, "\n");
	}
#endif

	if (!(HA_ATOMIC_LOAD(&tg_ctx->threads_idle) & ti->ltid_bit)) {
		/* only dump the stack of active threads */
#ifdef USE_LUA
		/* if the thread is currently running a stream with a busy Lua
		 * context, mark the process tainted so the panic report can
		 * point at Lua as a likely culprit.
		 */
		if (th_ctx->current &&
		    th_ctx->current->process == process_stream && th_ctx->current->context) {
			const struct stream *s = (const struct stream *)th_ctx->current->context;
			struct hlua *hlua = NULL;

			if (s) {
				if (s->hlua[0] && HLUA_IS_BUSY(s->hlua[0]))
					hlua = s->hlua[0];
				else if (s->hlua[1] && HLUA_IS_BUSY(s->hlua[1]))
					hlua = s->hlua[1];
			}
			if (hlua) {
				mark_tainted(TAINTED_LUA_STUCK);
				if (hlua->state_id == 0)
					mark_tainted(TAINTED_LUA_STUCK_SHARED);
			}
		}
#endif

		if (HA_ATOMIC_LOAD(&pool_trim_in_progress))
			mark_tainted(TAINTED_MEM_TRIMMING_STUCK);

		ha_dump_backtrace(buf, "             ", 0);
	}
	/* NOTE(review): no goto targeting this label is visible in this view of
	 * the function; it may be used from conditionally-compiled code or be a
	 * leftover -- confirm before removing.
	 */
 leave:
	return;
}
426
427
/* Triggers a thread dump from thread <thr>, either directly if it's the
 * current thread or if thread dump signals are not implemented, or by sending
 * a signal if it's a remote one and the feature is supported. The buffer <buf>
 * will get the dump appended, and the caller is responsible for making sure
 * there is enough room otherwise some contents will be truncated. The function
 * waits for the called thread to fill the buffer before returning (or cancelling
 * by reporting NULL). It does not release the called thread yet, unless it's the
 * current one, which in this case is always available. It returns a pointer to
 * the buffer used if the dump was done, otherwise NULL. When the dump starts, it
 * marks the current thread as dumping, which will only be released via a failure
 * (returns NULL) or via a call to ha_dump_thread_done().
 *
 * Protocol on thread_dump_buffer (as used below): bit 0 of the pointer set
 * means "dump completed"; a NULL value means the dump was cancelled/aborted.
 */
struct buffer *ha_thread_dump_fill(struct buffer *buf, int thr)
{
#ifdef USE_THREAD_DUMP
	/* silence bogus warning in gcc 11 without threads */
	ASSUME(0 <= thr && thr < MAX_THREADS);

	if (thr != tid) {
		struct buffer *old = NULL;

		/* try to impose our dump buffer and to reserve the target thread's
		 * next dump for us: CAS from NULL to <buf>, relaxing between
		 * attempts while another dump is in progress.
		 */
		do {
			if (old)
				ha_thread_relax();
			old = NULL;
		} while (!HA_ATOMIC_CAS(&ha_thread_ctx[thr].thread_dump_buffer, &old, buf));

		/* asking the remote thread to dump itself allows to get more details
		 * including a backtrace.
		 */
		ha_tkill(thr, DEBUGSIG);

		/* now wait for the dump to be done (or cancelled) */
		while (1) {
			buf = HA_ATOMIC_LOAD(&ha_thread_ctx[thr].thread_dump_buffer);
			if ((ulong)buf & 0x1)
				break;          /* bit 0 set: remote thread finished */
			if (!buf)
				return buf;     /* cancelled by the remote thread */
			ha_thread_relax();
		}
	}
	else
		ha_thread_dump_one(buf, 1);

#else /* !USE_THREAD_DUMP below, we're on the target thread */
	/* when thread-dump is not supported, we can only dump our own thread */
	if (thr != tid)
		return NULL;

	/* the buffer might not be valid in case of a panic, since we
	 * have to allocate it ourselves in this case.
	 */
	if ((ulong)buf == 0x2UL)
		buf = get_trash_chunk();
	HA_ATOMIC_STORE(&th_ctx->thread_dump_buffer, buf);
	ha_thread_dump_one(buf, 1);
#endif
	/* strip the "completed" tag bit before handing the pointer back */
	return (struct buffer *)((ulong)buf & ~0x1UL);
}
490
491
/* Indicates to the called thread that the dumped data are collected by
 * clearing the thread_dump_buffer pointer. It waits for the dump to be
 * completed if it was not the case, and can also leave if the pointer
 * is already NULL (e.g. if a thread has aborted).
 */
void ha_thread_dump_done(int thr)
{
	struct buffer *old;

	/* silence bogus warning in gcc 11 without threads */
	ASSUME(0 <= thr && thr < MAX_THREADS);

	/* now wait for the dump to be done or cancelled, and release it.
	 * Bit 0 of the pointer set means the dump is complete; we then CAS
	 * it back to NULL to release the target thread for the next dump.
	 */
	do {
		if (thr == tid)
			break;  /* own thread: nothing to wait for nor release */
		old = HA_ATOMIC_LOAD(&ha_thread_ctx[thr].thread_dump_buffer);
		if (!((ulong)old & 0x1)) {
			if (!old)
				break;  /* already cleared (thread aborted) */
			/* dump still in progress: wait and re-check */
			ha_thread_relax();
			continue;
		}
	} while (!HA_ATOMIC_CAS(&ha_thread_ctx[thr].thread_dump_buffer, &old, NULL));
}
516
517
/* dumps into the buffer some information related to task <task> (which may
 * either be a task or a tasklet, and prepend each line except the first one
 * with <pfx>. The buffer is only appended and the first output starts by the
 * pointer itself. The caller is responsible for making sure the task is not
 * going to vanish during the dump.
 */
void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
{
	const struct stream *s = NULL;
	const struct appctx __maybe_unused *appctx = NULL;
	struct hlua __maybe_unused *hlua = NULL;
	const struct stconn *sc;

	/* no current task: just print a null pointer and stop there */
	if (!task) {
		chunk_appendf(buf, "0\n");
		return;
	}

	/* first line: pointer, type and call count (plus age for real tasks) */
	if (TASK_IS_TASKLET(task))
		chunk_appendf(buf,
		              "%p (tasklet) calls=%u\n",
		              task,
		              task->calls);
	else
		chunk_appendf(buf,
		              "%p (task) calls=%u last=%llu%s\n",
		              task,
		              task->calls,
		              task->wake_date ? (unsigned long long)(now_mono_time() - task->wake_date) : 0,
		              task->wake_date ? " ns ago" : "");

	/* second line: handler function (resolved to a symbol) and context */
	chunk_appendf(buf, "%s  fct=%p(", pfx, task->process);
	resolve_sym_name(buf, NULL, task->process);
	chunk_appendf(buf,") ctx=%p", task->context);

	if (task->process == task_run_applet && (appctx = task->context))
		chunk_appendf(buf, "(%s)\n", appctx->applet->name);
	else
		chunk_appendf(buf, "\n");

	/* try to retrieve the stream attached to this task, depending on the
	 * type of handler the task uses.
	 */
	if (task->process == process_stream && task->context)
		s = (struct stream *)task->context;
	else if (task->process == task_run_applet && task->context && (sc = appctx_sc((struct appctx *)task->context)))
		s = sc_strm(sc);
	else if (task->process == sc_conn_io_cb && task->context)
		s = sc_strm(((struct stconn *)task->context));

	if (s) {
		chunk_appendf(buf, "%sstream=", pfx);
		strm_dump_to_buffer(buf, s, pfx, HA_ATOMIC_LOAD(&global.anon_key));
	}

#ifdef USE_LUA
	/* detect a busy Lua context (stream analyser, Lua task or Lua
	 * TCP/HTTP service) and, when found, append its stack traceback.
	 */
	hlua = NULL;
	if (s && ((s->hlua[0] && HLUA_IS_BUSY(s->hlua[0])) ||
	          (s->hlua[1] && HLUA_IS_BUSY(s->hlua[1])))) {
		hlua = (s->hlua[0] && HLUA_IS_BUSY(s->hlua[0])) ? s->hlua[0] : s->hlua[1];
		chunk_appendf(buf, "%sCurrent executing Lua from a stream analyser -- ", pfx);
	}
	else if (task->process == hlua_process_task && (hlua = task->context)) {
		chunk_appendf(buf, "%sCurrent executing a Lua task -- ", pfx);
	}
	else if (task->process == task_run_applet && (appctx = task->context) &&
	         (appctx->applet->fct == hlua_applet_tcp_fct)) {
		chunk_appendf(buf, "%sCurrent executing a Lua TCP service -- ", pfx);
	}
	else if (task->process == task_run_applet && (appctx = task->context) &&
	         (appctx->applet->fct == hlua_applet_http_fct)) {
		chunk_appendf(buf, "%sCurrent executing a Lua HTTP service -- ", pfx);
	}

	if (hlua && hlua->T) {
		chunk_appendf(buf, "stack traceback:\n    ");
		append_prefixed_str(buf, hlua_traceback(hlua->T, "\n    "), pfx, '\n', 0);
	}

	/* we may need to terminate the current line */
	if (*b_peek(buf, b_data(buf)-1) != '\n')
		b_putchr(buf, '\n');
#endif
}
598
599
/* This function dumps all profiling settings. It returns 0 if the output
600
 * buffer is full and it needs to be called again, otherwise non-zero.
601
 * Note: to not statify this one, it's hard to spot in backtraces!
602
 */
603
int cli_io_handler_show_threads(struct appctx *appctx)
604
0
{
605
0
  int *thr = appctx->svcctx;
606
607
0
  if (!thr)
608
0
    thr = applet_reserve_svcctx(appctx, sizeof(*thr));
609
610
0
  do {
611
0
    chunk_reset(&trash);
612
0
    if (ha_thread_dump_fill(&trash, *thr)) {
613
0
      ha_thread_dump_done(*thr);
614
0
      if (applet_putchk(appctx, &trash) == -1) {
615
        /* failed, try again */
616
0
        return 0;
617
0
      }
618
0
    }
619
0
    (*thr)++;
620
0
  } while (*thr < global.nbthread);
621
622
0
  return 1;
623
0
}
624
625
#if defined(HA_HAVE_DUMP_LIBS)
626
/* parse a "show libs" command. It returns 1 if it emits anything otherwise zero. */
627
static int debug_parse_cli_show_libs(char **args, char *payload, struct appctx *appctx, void *private)
628
{
629
  if (!cli_has_level(appctx, ACCESS_LVL_OPER))
630
    return 1;
631
632
  chunk_reset(&trash);
633
  if (dump_libs(&trash, 1))
634
    return cli_msg(appctx, LOG_INFO, trash.area);
635
  else
636
    return 0;
637
}
638
#endif
639
640
/* parse a "show dev" command. It returns 1 if it emits anything otherwise zero.
 * Dumps, into the trash buffer, the build information plus the platform and
 * process information collected in the public post_mortem structure (empty
 * string fields are skipped).
 */
static int debug_parse_cli_show_dev(char **args, char *payload, struct appctx *appctx, void *private)
{
	const char **build_opt;
	char *err = NULL;     /* optionally allocated by errname(), freed below */
	int i;

	if (*args[2])
		return cli_err(appctx, "This command takes no argument.\n");

	chunk_reset(&trash);

	chunk_appendf(&trash, "HAProxy version %s\n", haproxy_version);
	chunk_appendf(&trash, "Features\n  %s\n", build_features);

	chunk_appendf(&trash, "Build options\n");
	for (build_opt = NULL; (build_opt = hap_get_next_build_opt(build_opt)); )
		if (append_prefixed_str(&trash, *build_opt, "  ", '\n', 0) == 0)
			chunk_strcat(&trash, "\n");

	/* platform section: each field is only printed when non-empty */
	chunk_appendf(&trash, "Platform info\n");
	if (*post_mortem.platform.hw_vendor)
		chunk_appendf(&trash, "  machine vendor: %s\n", post_mortem.platform.hw_vendor);
	if (*post_mortem.platform.hw_family)
		chunk_appendf(&trash, "  machine family: %s\n", post_mortem.platform.hw_family);
	if (*post_mortem.platform.hw_model)
		chunk_appendf(&trash, "  machine model: %s\n", post_mortem.platform.hw_model);
	if (*post_mortem.platform.brd_vendor)
		chunk_appendf(&trash, "  board vendor: %s\n", post_mortem.platform.brd_vendor);
	if (*post_mortem.platform.brd_model)
		chunk_appendf(&trash, "  board model: %s\n", post_mortem.platform.brd_model);
	if (*post_mortem.platform.soc_vendor)
		chunk_appendf(&trash, "  soc vendor: %s\n", post_mortem.platform.soc_vendor);
	if (*post_mortem.platform.soc_model)
		chunk_appendf(&trash, "  soc model: %s\n", post_mortem.platform.soc_model);
	if (*post_mortem.platform.cpu_model)
		chunk_appendf(&trash, "  cpu model: %s\n", post_mortem.platform.cpu_model);
	if (*post_mortem.platform.virt_techno)
		chunk_appendf(&trash, "  virtual machine: %s\n", post_mortem.platform.virt_techno);
	if (*post_mortem.platform.cont_techno)
		chunk_appendf(&trash, "  container: %s\n", post_mortem.platform.cont_techno);
	if (*post_mortem.platform.utsname.sysname)
		chunk_appendf(&trash, "  OS name: %s\n", post_mortem.platform.utsname.sysname);
	if (*post_mortem.platform.utsname.release)
		chunk_appendf(&trash, "  OS release: %s\n", post_mortem.platform.utsname.release);
	if (*post_mortem.platform.utsname.version)
		chunk_appendf(&trash, "  OS version: %s\n", post_mortem.platform.utsname.version);
	if (*post_mortem.platform.utsname.machine)
		chunk_appendf(&trash, "  OS architecture: %s\n", post_mortem.platform.utsname.machine);
	if (*post_mortem.platform.utsname.nodename)
		chunk_appendf(&trash, "  node name: %s\n", HA_ANON_CLI(post_mortem.platform.utsname.nodename));

	chunk_appendf(&trash, "Process info\n");
	chunk_appendf(&trash, "  pid: %d\n", post_mortem.process.pid);
	chunk_appendf(&trash, "  cmdline: ");
	for (i = 0; i < post_mortem.process.argc; i++)
		chunk_appendf(&trash, "%s ", post_mortem.process.argv[i]);
	chunk_appendf(&trash, "\n");
#if defined(USE_LINUX_CAP)
	/* let's dump saved in feed_post_mortem() initial capabilities sets */
	if(!post_mortem.process.caps.err_boot) {
		chunk_appendf(&trash, "  boot capabilities:\n");
		chunk_appendf(&trash, "  \tCapEff: 0x%016llx\n",
		              CAPS_TO_ULLONG(post_mortem.process.caps.boot[0].effective,
		                             post_mortem.process.caps.boot[1].effective));
		chunk_appendf(&trash, "  \tCapPrm: 0x%016llx\n",
		              CAPS_TO_ULLONG(post_mortem.process.caps.boot[0].permitted,
		                             post_mortem.process.caps.boot[1].permitted));
		chunk_appendf(&trash, "  \tCapInh: 0x%016llx\n",
		              CAPS_TO_ULLONG(post_mortem.process.caps.boot[0].inheritable,
		                             post_mortem.process.caps.boot[1].inheritable));
	} else
		chunk_appendf(&trash, "  capget() failed at boot with: %s.\n",
		              errname(post_mortem.process.caps.err_boot, &err));

	/* let's print actual capabilities sets, could be useful in order to compare */
	if (!post_mortem.process.caps.err_run) {
		chunk_appendf(&trash, "  runtime capabilities:\n");
		chunk_appendf(&trash, "  \tCapEff: 0x%016llx\n",
		              CAPS_TO_ULLONG(post_mortem.process.caps.run[0].effective,
		                             post_mortem.process.caps.run[1].effective));
		chunk_appendf(&trash, "  \tCapPrm: 0x%016llx\n",
		              CAPS_TO_ULLONG(post_mortem.process.caps.run[0].permitted,
		                             post_mortem.process.caps.run[1].permitted));
		chunk_appendf(&trash, "  \tCapInh: 0x%016llx\n",
		              CAPS_TO_ULLONG(post_mortem.process.caps.run[0].inheritable,
		                             post_mortem.process.caps.run[1].inheritable));
	} else
		chunk_appendf(&trash, "  capget() failed at runtime with: %s.\n",
		              errname(post_mortem.process.caps.err_run, &err));
#endif

	/* identity and limits: boot-time vs runtime columns */
	chunk_appendf(&trash, "  %-22s  %-11s  %-11s \n", "identity:", "-boot-", "-runtime-");
	chunk_appendf(&trash, "  %-22s  %-11d  %-11d \n", "    uid:", post_mortem.process.boot_uid,
	                                                              post_mortem.process.run_uid);
	chunk_appendf(&trash, "  %-22s  %-11d  %-11d \n", "    gid:", post_mortem.process.boot_gid,
	                                                              post_mortem.process.run_gid);
	chunk_appendf(&trash, "  %-22s  %-11s  %-11s \n", "limits:", "-boot-", "-runtime-");
	chunk_appendf(&trash, "  %-22s  %-11s  %-11s \n", "    fd limit (soft):",
		LIM2A(normalize_rlim((ulong)post_mortem.process.boot_lim_fd.rlim_cur), "unlimited"),
		LIM2A(normalize_rlim((ulong)post_mortem.process.run_lim_fd.rlim_cur), "unlimited"));
	chunk_appendf(&trash, "  %-22s  %-11s  %-11s \n", "    fd limit (hard):",
		LIM2A(normalize_rlim((ulong)post_mortem.process.boot_lim_fd.rlim_max), "unlimited"),
		LIM2A(normalize_rlim((ulong)post_mortem.process.run_lim_fd.rlim_max), "unlimited"));
	chunk_appendf(&trash, "  %-22s  %-11s  %-11s \n", "    ram limit (soft):",
		LIM2A(normalize_rlim((ulong)post_mortem.process.boot_lim_ram.rlim_cur), "unlimited"),
		LIM2A(normalize_rlim((ulong)post_mortem.process.run_lim_ram.rlim_cur), "unlimited"));
	chunk_appendf(&trash, "  %-22s  %-11s  %-11s \n", "    ram limit (hard):",
		LIM2A(normalize_rlim((ulong)post_mortem.process.boot_lim_ram.rlim_max), "unlimited"),
		LIM2A(normalize_rlim((ulong)post_mortem.process.run_lim_ram.rlim_max), "unlimited"));

	ha_free(&err);

	return cli_msg(appctx, LOG_INFO, trash.area);
}
755
756
/* Dumps a state of all threads into the trash and on fd #2, then aborts.
 * The current thread dumps itself into a trash chunk while every other
 * thread is asked (via ha_thread_dump_fill()) to fill a buffer it allocates
 * itself; each dump is written to stderr as soon as it is available. The
 * function never returns: it ends in an abort() loop so that even if a
 * signal handler returns, the process still dies.
 */
void ha_panic()
{
	struct buffer *buf;
	unsigned int thr;

	if (mark_tainted(TAINTED_PANIC) & TAINTED_PANIC) {
		/* a panic dump is already in progress, let's not disturb it,
		 * we'll be called via signal DEBUGSIG. By returning we may be
		 * able to leave a current signal handler (e.g. WDT) so that
		 * this will ensure more reliable signal delivery.
		 */
		return;
	}

	chunk_printf(&trash, "Thread %u is about to kill the process.\n", tid + 1);
	DISGUISE(write(2, trash.area, trash.data));

	/* collect and emit one dump per thread */
	for (thr = 0; thr < global.nbthread; thr++) {
		if (thr == tid)
			buf = get_trash_chunk();
		else
			buf = (void *)0x2UL; // let the target thread allocate it

		buf = ha_thread_dump_fill(buf, thr);
		if (!buf)
			continue;

		DISGUISE(write(2, buf->area, buf->data));
		/* restore the thread's dump pointer for easier post-mortem analysis */
		ha_thread_dump_done(thr);
	}

#ifdef USE_LUA
	/* append a hint when the taint flags indicate Lua was involved */
	if (get_tainted() & TAINTED_LUA_STUCK_SHARED && global.nbthread > 1) {
		chunk_printf(&trash,
		       "### Note: at least one thread was stuck in a Lua context loaded using the\n"
		       "          'lua-load' directive, which is known for causing heavy contention\n"
		       "          when used with threads. Please consider using 'lua-load-per-thread'\n"
		       "          instead if your code is safe to run in parallel on multiple threads.\n");
		DISGUISE(write(2, trash.area, trash.data));
	}
	else if (get_tainted() & TAINTED_LUA_STUCK) {
		chunk_printf(&trash,
		       "### Note: at least one thread was stuck in a Lua context in a way that suggests\n"
		       "          heavy processing inside a dependency or a long loop that can't yield.\n"
		       "          Please make sure any external code you may rely on is safe for use in\n"
		       "          an event-driven engine.\n");
		DISGUISE(write(2, trash.area, trash.data));
	}
#endif
	/* append a hint when a thread was caught inside malloc_trim() */
	if (get_tainted() & TAINTED_MEM_TRIMMING_STUCK) {
		chunk_printf(&trash,
		       "### Note: one thread was found stuck under malloc_trim(), which can run for a\n"
		       "          very long time on large memory systems. You way want to disable this\n"
		       "          memory reclaiming feature by setting 'no-memory-trimming' in the\n"
		       "          'global' section of your configuration to avoid this in the future.\n");
		DISGUISE(write(2, trash.area, trash.data));
	}

	chunk_printf(&trash,
	             "\n"
	             "Hint: when reporting this bug to developers, please check if a core file was\n"
	             "      produced, open it with 'gdb', issue 't a a bt full', check that the\n"
	             "      output does not contain sensitive data, then join it with the bug report.\n"
	             "      For more info, please see https://github.com/haproxy/haproxy/issues/2374\n");

	DISGUISE(write(2, trash.area, trash.data));

	/* loop forever in case abort() is caught and a handler returns */
	for (;;)
		abort();
}
828
829
/* Dumps a state of the current thread on fd #2 and returns. It takes a great
 * care about not using any global state variable so as to gracefully recover.
 * It is designed to be called exclusively from the watchdog signal handler,
 * and takes care of not touching thread_dump_buffer so as not to interfere
 * with any other parallel dump that could have been started.
 */
void ha_stuck_warning(void)
{
	/* local buffer: global trash must not be used from the WDT handler */
	char msg_buf[8192];
	struct buffer buf;
	ullong n, p;

	if (mark_tainted(TAINTED_WARN_BLOCKED_TRAFFIC) & TAINTED_PANIC) {
		/* a panic dump is already in progress, let's not disturb it,
		 * we'll be called via signal DEBUGSIG. By returning we may be
		 * able to leave a current signal handler (e.g. WDT) so that
		 * this will ensure more reliable signal delivery.
		 */
		return;
	}

	HA_ATOMIC_INC(&warn_blocked_issued);

	buf = b_make(msg_buf, sizeof(msg_buf), 0, 0);

	/* measure how long this thread has been without CPU progress */
	p = HA_ATOMIC_LOAD(&th_ctx->prev_cpu_time);
	n = now_cpu_time();

	chunk_appendf(&buf,
	       "\nWARNING! thread %u has stopped processing traffic for %llu milliseconds\n"
	       "    with %d streams currently blocked, prevented from making any progress.\n"
	       "    While this may occasionally happen with inefficient configurations\n"
	       "    involving excess of regular expressions, map_reg, or heavy Lua processing,\n"
	       "    this must remain exceptional because the system's stability is now at risk.\n"
	       "    Timers in logs may be reported incorrectly, spurious timeouts may happen,\n"
	       "    some incoming connections may silently be dropped, health checks may\n"
	       "    randomly fail, and accesses to the CLI may block the whole process. The\n"
	       "    blocking delay before emitting this warning may be adjusted via the global\n"
	       "    'warn-blocked-traffic-after' directive. Please check the trace below for\n"
	       "    any clues about configuration elements that need to be corrected:\n\n",
	       tid + 1, (n - p) / 1000000ULL,
	       HA_ATOMIC_LOAD(&ha_thread_ctx[tid].stream_cnt));

	/* dump only the current thread's state into the local buffer */
	ha_thread_dump_one(&buf, 1);

#ifdef USE_LUA
	/* append a hint when the taint flags indicate Lua was involved */
	if (get_tainted() & TAINTED_LUA_STUCK_SHARED && global.nbthread > 1) {
		chunk_appendf(&buf,
		       "### Note: at least one thread was stuck in a Lua context loaded using the\n"
		       "          'lua-load' directive, which is known for causing heavy contention\n"
		       "          when used with threads. Please consider using 'lua-load-per-thread'\n"
		       "          instead if your code is safe to run in parallel on multiple threads.\n");
	}
	else if (get_tainted() & TAINTED_LUA_STUCK) {
		chunk_appendf(&buf,
		       "### Note: at least one thread was stuck in a Lua context in a way that suggests\n"
		       "          heavy processing inside a dependency or a long loop that can't yield.\n"
		       "          Please make sure any external code you may rely on is safe for use in\n"
		       "          an event-driven engine.\n");
	}
#endif
	/* append a hint when a thread was caught inside malloc_trim() */
	if (get_tainted() & TAINTED_MEM_TRIMMING_STUCK) {
		chunk_appendf(&buf,
		       "### Note: one thread was found stuck under malloc_trim(), which can run for a\n"
		       "          very long time on large memory systems. You way want to disable this\n"
		       "          memory reclaiming feature by setting 'no-memory-trimming' in the\n"
		       "          'global' section of your configuration to avoid this in the future.\n");
	}

	chunk_appendf(&buf, " => Trying to gracefully recover now.\n");

	/* Note: it's important to dump the whole buffer at once to avoid
	 * interleaved outputs from multiple threads dumping in parallel.
	 */
	DISGUISE(write(2, buf.area, buf.data));
}
905
906
/* Complain with message <msg> on stderr. If <counter> is not NULL, it is
907
 * atomically incremented, and the message is only printed when the counter
908
 * was zero, so that the message is only printed once. <taint> is only checked
909
 * on bit 1, and will taint the process either for a bug (2) or warn (0).
910
 */
911
void complain(int *counter, const char *msg, int taint)
912
0
{
913
0
  if (counter && _HA_ATOMIC_FETCH_ADD(counter, 1))
914
0
    return;
915
0
  DISGUISE(write(2, msg, strlen(msg)));
916
0
  if (taint & 2)
917
0
    mark_tainted(TAINTED_BUG);
918
0
  else
919
0
    mark_tainted(TAINTED_WARN);
920
0
}
921
922
/* parse a "debug dev exit" command. It always returns 1, though it should never return. */
923
static int debug_parse_cli_exit(char **args, char *payload, struct appctx *appctx, void *private)
924
0
{
925
0
  int code = atoi(args[3]);
926
927
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
928
0
    return 1;
929
930
0
  _HA_ATOMIC_INC(&debug_commands_issued);
931
0
  exit(code);
932
0
  return 1;
933
0
}
934
935
/* parse a "debug dev bug" command. It always returns 1, though it should never return.
936
 * Note: we make sure not to make the function static so that it appears in the trace.
937
 */
938
int debug_parse_cli_bug(char **args, char *payload, struct appctx *appctx, void *private)
939
0
{
940
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
941
0
    return 1;
942
943
0
  _HA_ATOMIC_INC(&debug_commands_issued);
944
0
  BUG_ON(one > zero, "This was triggered on purpose from the CLI 'debug dev bug' command.");
945
0
  return 1;
946
0
}
947
948
/* parse a "debug dev warn" command. It always returns 1.
949
 * Note: we make sure not to make the function static so that it appears in the trace.
950
 */
951
int debug_parse_cli_warn(char **args, char *payload, struct appctx *appctx, void *private)
952
0
{
953
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
954
0
    return 1;
955
956
0
  _HA_ATOMIC_INC(&debug_commands_issued);
957
0
  WARN_ON(one > zero, "This was triggered on purpose from the CLI 'debug dev warn' command.");
958
0
  return 1;
959
0
}
960
961
/* parse a "debug dev check" command. It always returns 1.
962
 * Note: we make sure not to make the function static so that it appears in the trace.
963
 */
964
int debug_parse_cli_check(char **args, char *payload, struct appctx *appctx, void *private)
965
0
{
966
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
967
0
    return 1;
968
969
0
  _HA_ATOMIC_INC(&debug_commands_issued);
970
0
  CHECK_IF(one > zero, "This was triggered on purpose from the CLI 'debug dev check' command.");
971
0
  return 1;
972
0
}
973
974
/* parse a "debug dev close" command. It always returns 1. */
975
static int debug_parse_cli_close(char **args, char *payload, struct appctx *appctx, void *private)
976
0
{
977
0
  int fd;
978
979
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
980
0
    return 1;
981
982
0
  if (!*args[3])
983
0
    return cli_err(appctx, "Missing file descriptor number (optionally followed by 'hard').\n");
984
985
0
  fd = atoi(args[3]);
986
0
  if (fd < 0 || fd >= global.maxsock)
987
0
    return cli_err(appctx, "File descriptor out of range.\n");
988
989
0
  if (strcmp(args[4], "hard") == 0) {
990
    /* hard silent close, even for unknown FDs */
991
0
    close(fd);
992
0
    goto done;
993
0
  }
994
0
  if (!fdtab[fd].owner)
995
0
    return cli_msg(appctx, LOG_INFO, "File descriptor was already closed.\n");
996
997
0
  fd_delete(fd);
998
0
 done:
999
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1000
0
  return 1;
1001
0
}
1002
1003
/* this is meant to cause a deadlock when more than one task is running it or when run twice */
1004
struct task *debug_run_cli_deadlock(struct task *task, void *ctx, unsigned int state)
1005
0
{
1006
0
  static HA_SPINLOCK_T lock __maybe_unused;
1007
1008
0
  HA_SPIN_LOCK(OTHER_LOCK, &lock);
1009
0
  return NULL;
1010
0
}
1011
1012
/* parse a "debug dev deadlock" command. It always returns 1. */
1013
static int debug_parse_cli_deadlock(char **args, char *payload, struct appctx *appctx, void *private)
1014
0
{
1015
0
  int tasks;
1016
1017
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1018
0
    return 1;
1019
1020
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1021
0
  for (tasks = atoi(args[3]); tasks > 0; tasks--) {
1022
0
    struct task *t = task_new_on(tasks % global.nbthread);
1023
0
    if (!t)
1024
0
      continue;
1025
0
    t->process = debug_run_cli_deadlock;
1026
0
    t->context = NULL;
1027
0
    task_wakeup(t, TASK_WOKEN_INIT);
1028
0
  }
1029
1030
0
  return 1;
1031
0
}
1032
1033
/* parse a "debug dev delay" command. It always returns 1. */
1034
static int debug_parse_cli_delay(char **args, char *payload, struct appctx *appctx, void *private)
1035
0
{
1036
0
  int delay = atoi(args[3]);
1037
1038
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1039
0
    return 1;
1040
1041
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1042
0
  usleep((long)delay * 1000);
1043
0
  return 1;
1044
0
}
1045
1046
/* parse a "debug dev log" command. It always returns 1. */
1047
static int debug_parse_cli_log(char **args, char *payload, struct appctx *appctx, void *private)
1048
0
{
1049
0
  int arg;
1050
1051
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1052
0
    return 1;
1053
1054
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1055
0
  chunk_reset(&trash);
1056
0
  for (arg = 3; *args[arg]; arg++) {
1057
0
    if (arg > 3)
1058
0
      chunk_strcat(&trash, " ");
1059
0
    chunk_strcat(&trash, args[arg]);
1060
0
  }
1061
1062
0
  send_log(NULL, LOG_INFO, "%s\n", trash.area);
1063
0
  return 1;
1064
0
}
1065
1066
/* parse a "debug dev loop" command. It always returns 1. */
1067
int debug_parse_cli_loop(char **args, char *payload, struct appctx *appctx, void *private)
1068
0
{
1069
0
  struct timeval deadline, curr;
1070
0
  int loop = atoi(args[3]);
1071
0
  int isolate;
1072
0
  int warn;
1073
1074
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1075
0
    return 1;
1076
1077
0
  isolate = strcmp(args[4], "isolated") == 0;
1078
0
  warn    = strcmp(args[4], "warn") == 0;
1079
1080
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1081
0
  gettimeofday(&curr, NULL);
1082
0
  tv_ms_add(&deadline, &curr, loop);
1083
1084
0
  if (isolate)
1085
0
    thread_isolate();
1086
1087
0
  while (tv_ms_cmp(&curr, &deadline) < 0) {
1088
0
    if (warn)
1089
0
      _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK);
1090
0
    gettimeofday(&curr, NULL);
1091
0
  }
1092
1093
0
  if (isolate)
1094
0
    thread_release();
1095
1096
0
  return 1;
1097
0
}
1098
1099
/* parse a "debug dev panic" command. It always returns 1, though it should never return. */
1100
int debug_parse_cli_panic(char **args, char *payload, struct appctx *appctx, void *private)
1101
0
{
1102
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1103
0
    return 1;
1104
1105
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1106
0
  ha_panic();
1107
0
  return 1;
1108
0
}
1109
1110
/* parse a "debug dev exec" command. It always returns 1. */
1111
#if defined(DEBUG_DEV)
1112
static int debug_parse_cli_exec(char **args, char *payload, struct appctx *appctx, void *private)
1113
{
1114
  int pipefd[2];
1115
  int arg;
1116
  int pid;
1117
1118
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1119
    return 1;
1120
1121
  _HA_ATOMIC_INC(&debug_commands_issued);
1122
  chunk_reset(&trash);
1123
  for (arg = 3; *args[arg]; arg++) {
1124
    if (arg > 3)
1125
      chunk_strcat(&trash, " ");
1126
    chunk_strcat(&trash, args[arg]);
1127
  }
1128
1129
  thread_isolate();
1130
  if (pipe(pipefd) < 0)
1131
    goto fail_pipe;
1132
1133
  if (fd_set_cloexec(pipefd[0]) == -1)
1134
    goto fail_fcntl;
1135
1136
  if (fd_set_cloexec(pipefd[1]) == -1)
1137
    goto fail_fcntl;
1138
1139
  pid = fork();
1140
1141
  if (pid < 0)
1142
    goto fail_fork;
1143
  else if (pid == 0) {
1144
    /* child */
1145
    char *cmd[4] = { "/bin/sh", "-c", 0, 0 };
1146
1147
    close(0);
1148
    dup2(pipefd[1], 1);
1149
    dup2(pipefd[1], 2);
1150
1151
    cmd[2] = trash.area;
1152
    execvp(cmd[0], cmd);
1153
    printf("execvp() failed\n");
1154
    exit(1);
1155
  }
1156
1157
  /* parent */
1158
  thread_release();
1159
  close(pipefd[1]);
1160
  chunk_reset(&trash);
1161
  while (1) {
1162
    size_t ret = read(pipefd[0], trash.area + trash.data, trash.size - 20 - trash.data);
1163
    if (ret <= 0)
1164
      break;
1165
    trash.data += ret;
1166
    if (trash.data + 20 == trash.size) {
1167
      chunk_strcat(&trash, "\n[[[TRUNCATED]]]\n");
1168
      break;
1169
    }
1170
  }
1171
  close(pipefd[0]);
1172
  waitpid(pid, NULL, WNOHANG);
1173
  trash.area[trash.data] = 0;
1174
  return cli_msg(appctx, LOG_INFO, trash.area);
1175
1176
 fail_fork:
1177
 fail_fcntl:
1178
  close(pipefd[0]);
1179
  close(pipefd[1]);
1180
 fail_pipe:
1181
  thread_release();
1182
  return cli_err(appctx, "Failed to execute command.\n");
1183
}
1184
1185
/* handles SIGRTMAX to inject random delays on the receiving thread in order
1186
 * to try to increase the likelihood to reproduce inter-thread races. The
1187
 * signal is periodically sent by a task initiated by "debug dev delay-inj".
1188
 */
1189
void debug_delay_inj_sighandler(int sig, siginfo_t *si, void *arg)
{
	/* volatile so the busy loop is not optimized away */
	volatile int spins = statistical_prng_range(10000);

	/* burn a random number of CPU cycles on the receiving thread */
	for (; spins > 0; spins--)
		__ha_cpu_relax();
}
1196
#endif
1197
1198
/* parse a "debug dev hex" command. It always returns 1. */
1199
static int debug_parse_cli_hex(char **args, char *payload, struct appctx *appctx, void *private)
1200
0
{
1201
0
  unsigned long start, len;
1202
1203
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1204
0
    return 1;
1205
1206
0
  if (!*args[3])
1207
0
    return cli_err(appctx, "Missing memory address to dump from.\n");
1208
1209
0
  start = strtoul(args[3], NULL, 0);
1210
0
  if (!start)
1211
0
    return cli_err(appctx, "Will not dump from NULL address.\n");
1212
1213
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1214
1215
  /* by default, dump ~128 till next block of 16 */
1216
0
  len = strtoul(args[4], NULL, 0);
1217
0
  if (!len)
1218
0
    len = ((start + 128) & -16) - start;
1219
1220
0
  chunk_reset(&trash);
1221
0
  dump_hex(&trash, "  ", (const void *)start, len, 1);
1222
0
  trash.area[trash.data] = 0;
1223
0
  return cli_msg(appctx, LOG_INFO, trash.area);
1224
0
}
1225
1226
/* parse a "debug dev sym <addr>" command. It always returns 1. */
1227
static int debug_parse_cli_sym(char **args, char *payload, struct appctx *appctx, void *private)
1228
0
{
1229
0
  unsigned long addr;
1230
1231
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1232
0
    return 1;
1233
1234
0
  if (!*args[3])
1235
0
    return cli_err(appctx, "Missing memory address to be resolved.\n");
1236
1237
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1238
1239
0
  addr = strtoul(args[3], NULL, 0);
1240
0
  chunk_printf(&trash, "%#lx resolves to ", addr);
1241
0
  resolve_sym_name(&trash, NULL, (const void *)addr);
1242
0
  chunk_appendf(&trash, "\n");
1243
1244
0
  return cli_msg(appctx, LOG_INFO, trash.area);
1245
0
}
1246
1247
/* parse a "debug dev tkill" command. It always returns 1. */
1248
static int debug_parse_cli_tkill(char **args, char *payload, struct appctx *appctx, void *private)
1249
0
{
1250
0
  int thr = 0;
1251
0
  int sig = SIGABRT;
1252
1253
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1254
0
    return 1;
1255
1256
0
  if (*args[3])
1257
0
    thr = atoi(args[3]);
1258
1259
0
  if (thr < 0 || thr > global.nbthread)
1260
0
    return cli_err(appctx, "Thread number out of range (use 0 for current).\n");
1261
1262
0
  if (*args[4])
1263
0
    sig = atoi(args[4]);
1264
1265
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1266
0
  if (thr)
1267
0
    ha_tkill(thr - 1, sig);
1268
0
  else
1269
0
    raise(sig);
1270
0
  return 1;
1271
0
}
1272
1273
/* hashes 'word' in "debug dev hash 'word' ". */
1274
static int debug_parse_cli_hash(char **args, char *payload, struct appctx *appctx, void *private)
1275
0
{
1276
0
  char *msg = NULL;
1277
1278
0
  cli_dynmsg(appctx, LOG_INFO, memprintf(&msg, "%s\n", HA_ANON_CLI(args[3])));
1279
0
  return 1;
1280
0
}
1281
1282
/* parse a "debug dev write" command. It always returns 1. */
1283
static int debug_parse_cli_write(char **args, char *payload, struct appctx *appctx, void *private)
1284
0
{
1285
0
  unsigned long len;
1286
1287
0
  if (!*args[3])
1288
0
    return cli_err(appctx, "Missing output size.\n");
1289
1290
0
  len = strtoul(args[3], NULL, 0);
1291
0
  if (len >= trash.size)
1292
0
    return cli_err(appctx, "Output too large, must be <tune.bufsize.\n");
1293
1294
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1295
1296
0
  chunk_reset(&trash);
1297
0
  trash.data = len;
1298
0
  memset(trash.area, '.', trash.data);
1299
0
  trash.area[trash.data] = 0;
1300
0
  for (len = 64; len < trash.data; len += 64)
1301
0
    trash.area[len] = '\n';
1302
0
  return cli_msg(appctx, LOG_INFO, trash.area);
1303
0
}
1304
1305
/* parse a "debug dev stream" command */
1306
/*
1307
 *  debug dev stream [strm=<ptr>] [strm.f[{+-=}<flags>]] [txn.f[{+-=}<flags>]] \
1308
 *                   [req.f[{+-=}<flags>]] [res.f[{+-=}<flags>]]               \
1309
 *                   [sif.f[{+-=<flags>]] [sib.f[{+-=<flags>]]                 \
1310
 *                   [sif.s[=<state>]] [sib.s[=<state>]]
1311
 */
1312
static int debug_parse_cli_stream(char **args, char *payload, struct appctx *appctx, void *private)
1313
0
{
1314
0
  struct stream *s = appctx_strm(appctx);
1315
0
  int arg;
1316
0
  void *ptr;
1317
0
  int size;
1318
0
  const char *word, *end;
1319
0
  struct ist name;
1320
0
  char *msg = NULL;
1321
0
  char *endarg;
1322
0
  unsigned long long old, new;
1323
1324
0
  if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1325
0
    return 1;
1326
1327
0
  ptr = NULL; size = 0;
1328
1329
0
  if (!*args[3]) {
1330
0
    return cli_err(appctx,
1331
0
             "Usage: debug dev stream [ strm=<ptr> ] { <obj> <op> <value> | wake }*\n"
1332
0
             "     <obj>   = { strm.f | strm.x | scf.s | scb.s | txn.f | req.f | res.f }\n"
1333
0
             "     <op>    = {'' (show) | '=' (assign) | '^' (xor) | '+' (or) | '-' (andnot)}\n"
1334
0
             "     <value> = 'now' | 64-bit dec/hex integer (0x prefix supported)\n"
1335
0
             "     'wake' wakes the stream assigned to 'strm' (default: current)\n"
1336
0
             );
1337
0
  }
1338
1339
0
  _HA_ATOMIC_INC(&debug_commands_issued);
1340
0
  for (arg = 3; *args[arg]; arg++) {
1341
0
    old = 0;
1342
0
    end = word = args[arg];
1343
0
    while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-')
1344
0
      end++;
1345
0
    name = ist2(word, end - word);
1346
0
    if (isteq(name, ist("strm"))) {
1347
0
      ptr = (!s || !may_access(s)) ? NULL : &s; size = sizeof(s);
1348
0
    } else if (isteq(name, ist("strm.f"))) {
1349
0
      ptr = (!s || !may_access(s)) ? NULL : &s->flags; size = sizeof(s->flags);
1350
0
    } else if (isteq(name, ist("strm.x"))) {
1351
0
      ptr = (!s || !may_access(s)) ? NULL : &s->conn_exp; size = sizeof(s->conn_exp);
1352
0
    } else if (isteq(name, ist("txn.f"))) {
1353
0
      ptr = (!s || !may_access(s)) ? NULL : &s->txn->flags; size = sizeof(s->txn->flags);
1354
0
    } else if (isteq(name, ist("req.f"))) {
1355
0
      ptr = (!s || !may_access(s)) ? NULL : &s->req.flags; size = sizeof(s->req.flags);
1356
0
    } else if (isteq(name, ist("res.f"))) {
1357
0
      ptr = (!s || !may_access(s)) ? NULL : &s->res.flags; size = sizeof(s->res.flags);
1358
0
    } else if (isteq(name, ist("scf.s"))) {
1359
0
      ptr = (!s || !may_access(s)) ? NULL : &s->scf->state; size = sizeof(s->scf->state);
1360
0
    } else if (isteq(name, ist("scb.s"))) {
1361
0
      ptr = (!s || !may_access(s)) ? NULL : &s->scf->state; size = sizeof(s->scb->state);
1362
0
    } else if (isteq(name, ist("wake"))) {
1363
0
      if (s && may_access(s) && may_access((void *)s + sizeof(*s) - 1))
1364
0
        task_wakeup(s->task, TASK_WOKEN_TIMER|TASK_WOKEN_IO|TASK_WOKEN_MSG);
1365
0
      continue;
1366
0
    } else
1367
0
      return cli_dynerr(appctx, memprintf(&msg, "Unsupported field name: '%s'.\n", word));
1368
1369
    /* read previous value */
1370
0
    if ((s || ptr == &s) && ptr && may_access(ptr) && may_access(ptr + size - 1)) {
1371
0
      if (size == 8)
1372
0
        old = read_u64(ptr);
1373
0
      else if (size == 4)
1374
0
        old = read_u32(ptr);
1375
0
      else if (size == 2)
1376
0
        old = read_u16(ptr);
1377
0
      else
1378
0
        old = *(const uint8_t *)ptr;
1379
0
    } else {
1380
0
      memprintf(&msg,
1381
0
          "%sSkipping inaccessible pointer %p for field '%.*s'.\n",
1382
0
          msg ? msg : "", ptr, (int)(end - word), word);
1383
0
      continue;
1384
0
    }
1385
1386
    /* parse the new value . */
1387
0
    new = strtoll(end + 1, &endarg, 0);
1388
0
    if (end[1] && *endarg) {
1389
0
      if (strcmp(end + 1, "now") == 0)
1390
0
        new = now_ms;
1391
0
      else {
1392
0
        memprintf(&msg,
1393
0
            "%sIgnoring unparsable value '%s' for field '%.*s'.\n",
1394
0
            msg ? msg : "", end + 1, (int)(end - word), word);
1395
0
        continue;
1396
0
      }
1397
0
    }
1398
1399
0
    switch (*end) {
1400
0
    case '\0': /* show */
1401
0
      memprintf(&msg, "%s%.*s=%#llx ", msg ? msg : "", (int)(end - word), word, old);
1402
0
      new = old; // do not change the value
1403
0
      break;
1404
1405
0
    case '=': /* set */
1406
0
      break;
1407
1408
0
    case '^': /* XOR */
1409
0
      new = old ^ new;
1410
0
      break;
1411
1412
0
    case '+': /* OR */
1413
0
      new = old | new;
1414
0
      break;
1415
1416
0
    case '-': /* AND NOT */
1417
0
      new = old & ~new;
1418
0
      break;
1419
1420
0
    default:
1421
0
      break;
1422
0
    }
1423
1424
    /* write the new value */
1425
0
    if (new != old) {
1426
0
      if (size == 8)
1427
0
        write_u64(ptr, new);
1428
0
      else if (size == 4)
1429
0
        write_u32(ptr, new);
1430
0
      else if (size == 2)
1431
0
        write_u16(ptr, new);
1432
0
      else
1433
0
        *(uint8_t *)ptr = new;
1434
0
    }
1435
0
  }
1436
1437
0
  if (msg && *msg)
1438
0
    return cli_dynmsg(appctx, LOG_INFO, msg);
1439
0
  return 1;
1440
0
}
1441
1442
/* parse a "debug dev task" command */
/*
 *  debug dev task <ptr> [ "wake" | "expire" | "kill" ]
 *  Show/change status of a task/tasklet
 */
static int debug_parse_cli_task(char **args, char *payload, struct appctx *appctx, void *private)
{
	const struct ha_caller *caller;
	struct task *t;
	char *endarg;
	char *msg;
	void *ptr;
	int ret = 1;
	int task_ok;    /* non-zero when the whole struct task is readable */
	int arg;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	/* parse the pointer value */
	ptr = (void *)strtoul(args[3], &endarg, 0);
	if (!*args[3] || *endarg)
		goto usage;

	_HA_ATOMIC_INC(&debug_commands_issued);

	/* everything below must run under thread isolation till reaching label "leave" */
	thread_isolate();

	/* struct tasklet is smaller than struct task and is sufficient to check
	 * the TASK_COMMON part.
	 */
	if (!may_access(ptr) || !may_access(ptr + sizeof(struct tasklet) - 1) ||
	    ((const struct tasklet *)ptr)->tid  < -1 ||
	    ((const struct tasklet *)ptr)->tid  >= (int)MAX_THREADS) {
		ret = cli_err(appctx, "The designated memory area doesn't look like a valid task/tasklet\n");
		goto leave;
	}

	t = ptr;
	caller = t->caller;
	msg = NULL;
	task_ok = may_access(ptr + sizeof(*t) - 1);

	/* resolve the handler's symbol name into the trash for the dump below */
	chunk_reset(&trash);
	resolve_sym_name(&trash, NULL, (const void *)t->process);

	/* we need to be careful here because we may dump a freed task that's
	 * still in the pool cache, containing garbage in pointers.
	 */
	if (!*args[4]) {
		/* no action requested: dump the task/tasklet's state */
		memprintf(&msg, "%s%p: %s state=%#x tid=%d process=%s ctx=%p calls=%d last=%s:%d intl=%d",
			  msg ? msg : "", t, (t->state & TASK_F_TASKLET) ? "tasklet" : "task",
			  t->state, t->tid, trash.area, t->context, t->calls,
			  caller && may_access(caller) && may_access(caller->func) && isalnum((uchar)*caller->func) ? caller->func : "0",
			  caller ? t->caller->line : 0,
			  (t->state & TASK_F_TASKLET) ? LIST_INLIST(&((const struct tasklet *)t)->list) : 0);

		/* task-only fields, valid only when the full struct is readable */
		if (task_ok && !(t->state & TASK_F_TASKLET))
			memprintf(&msg, "%s inrq=%d inwq=%d exp=%d nice=%d",
				  msg ? msg : "", task_in_rq(t), task_in_wq(t), t->expire, t->nice);

		memprintf(&msg, "%s\n", msg ? msg : "");
	}

	/* apply each requested action in sequence */
	for (arg = 4; *args[arg]; arg++) {
		if (strcmp(args[arg], "expire") == 0) {
			if (t->state & TASK_F_TASKLET) {
				/* do nothing for tasklets */
			}
			else if (task_ok) {
				/* unlink task and wake with timer flag */
				__task_unlink_wq(t);
				t->expire = tick_add(now_ms, 0);
				task_wakeup(t, TASK_WOKEN_TIMER);
			}
		} else if (strcmp(args[arg], "wake") == 0) {
			/* wake with all flags but init / timer */
			if (t->state & TASK_F_TASKLET)
				tasklet_wakeup((struct tasklet *)t);
			else if (task_ok)
				task_wakeup(t, TASK_WOKEN_ANY & ~(TASK_WOKEN_INIT|TASK_WOKEN_TIMER));
		} else if (strcmp(args[arg], "kill") == 0) {
			/* Kill the task. This is not idempotent! */
			if (!(t->state & TASK_KILLED)) {
				if (t->state & TASK_F_TASKLET)
					tasklet_kill((struct tasklet *)t);
				else if (task_ok)
					task_kill(t);
			}
		} else {
			/* unknown action: leave isolation before printing usage */
			thread_release();
			goto usage;
		}
	}

	if (msg && *msg)
		ret = cli_dynmsg(appctx, LOG_INFO, msg);
 leave:
	thread_release();
	return ret;
 usage:
	return cli_err(appctx,
		       "Usage: debug dev task <ptr> [ wake | expire | kill ]\n"
		       "  By default, dumps some info on task/tasklet <ptr>. 'wake' will wake it up\n"
		       "  with all conditions flags but init/exp. 'expire' will expire the entry, and\n"
		       "  'kill' will kill it (warning: may crash since later not idempotent!). All\n"
		       "  changes may crash the process if performed on a wrong object!\n"
		       );
}
1552
1553
#if defined(DEBUG_DEV)
1554
static struct task *debug_delay_inj_task(struct task *t, void *ctx, unsigned int state)
1555
{
1556
  unsigned long *tctx = ctx; // [0] = interval, [1] = nbwakeups
1557
  unsigned long inter = tctx[0];
1558
  unsigned long count = tctx[1];
1559
  unsigned long rnd;
1560
1561
  if (inter)
1562
    t->expire = tick_add(now_ms, inter);
1563
  else
1564
    task_wakeup(t, TASK_WOKEN_MSG);
1565
1566
  /* wake a random thread */
1567
  while (count--) {
1568
    rnd = statistical_prng_range(global.nbthread);
1569
    ha_tkill(rnd, SIGRTMAX);
1570
  }
1571
  return t;
1572
}
1573
1574
/* parse a "debug dev delay-inj" command
 * debug dev delay-inj <inter> <count>
 * Starts a periodic task that, every <inter> ms (0 = continuously), sends
 * the delay-injection signal to <count> random threads. The context array
 * is owned by the task and freed only on allocation failure here.
 */
static int debug_parse_delay_inj(char **args, char *payload, struct appctx *appctx, void *private)
{
	unsigned long *tctx; // [0] = inter, [1] = count
	struct task *task;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	/* both <inter> and <count> are mandatory */
	if (!*args[4])
		return cli_err(appctx,  "Usage: debug dev delay-inj <inter_ms> <count>*\n");

	_HA_ATOMIC_INC(&debug_commands_issued);

	tctx = calloc(2, sizeof(*tctx));
	if (!tctx)
		goto fail;

	tctx[0] = atoi(args[3]);
	tctx[1] = atoi(args[4]);

	task = task_new_here/*anywhere*/();
	if (!task)
		goto fail;

	task->process = debug_delay_inj_task;
	task->context = tctx;
	task_wakeup(task, TASK_WOKEN_INIT);
	return 1;

 fail:
	/* free(NULL) is a no-op so this is safe for both failure points */
	free(tctx);
	return cli_err(appctx, "Not enough memory");
}
1610
#endif // DEBUG_DEV
1611
1612
static struct task *debug_task_handler(struct task *t, void *ctx, unsigned int state)
1613
0
{
1614
0
  unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
1615
0
  unsigned long inter = tctx[1];
1616
0
  unsigned long rnd;
1617
1618
0
  if (stopping)
1619
0
    return NULL;
1620
1621
0
  t->expire = tick_add(now_ms, inter);
1622
1623
  /* half of the calls will wake up another entry */
1624
0
  rnd = statistical_prng();
1625
0
  if (rnd & 1) {
1626
0
    rnd >>= 1;
1627
0
    rnd %= tctx[0];
1628
0
    rnd = tctx[rnd + 2];
1629
1630
0
    if (rnd & 1)
1631
0
      task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG);
1632
0
    else
1633
0
      tasklet_wakeup((struct tasklet *)rnd);
1634
0
  }
1635
0
  return t;
1636
0
}
1637
1638
static struct task *debug_tasklet_handler(struct task *t, void *ctx, unsigned int state)
1639
0
{
1640
0
  unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
1641
0
  unsigned long rnd;
1642
0
  int i;
1643
1644
0
  if (stopping)
1645
0
    return NULL;
1646
1647
  /* wake up two random entries */
1648
0
  for (i = 0; i < 2; i++) {
1649
0
    rnd = statistical_prng() % tctx[0];
1650
0
    rnd = tctx[rnd + 2];
1651
1652
0
    if (rnd & 1)
1653
0
      task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG);
1654
0
    else
1655
0
      tasklet_wakeup((struct tasklet *)rnd);
1656
0
  }
1657
0
  return t;
1658
0
}
1659
1660
/* parse a "debug dev sched" command
 * debug dev sched {task|tasklet} [count=<count>] [mask=<mask>] [single=<single>] [inter=<inter>]
 * Creates <count> tasks or tasklets bound to thread <tid>, all sharing one
 * context table, and wakes them all up so that they keep waking each other
 * at random, in order to stress the scheduler.
 */
static int debug_parse_cli_sched(char **args, char *payload, struct appctx *appctx, void *private)
{
	int arg;
	void *ptr;
	int size;
	const char *word, *end;
	struct ist name;
	char *msg = NULL;
	char *endarg;
	unsigned long long new;
	unsigned long count = 0;
	unsigned long thrid = tid;
	unsigned int inter = 0;
	unsigned long i;
	int mode = 0; // 0 = tasklet; 1 = task
	unsigned long *tctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	ptr = NULL; size = 0;

	if (strcmp(args[3], "task") != 0 && strcmp(args[3], "tasklet") != 0) {
		return cli_err(appctx,
			       "Usage: debug dev sched {task|tasklet} { <obj> = <value> }*\n"
			       "     <obj>   = {count | tid | inter }\n"
			       "     <value> = 64-bit dec/hex integer (0x prefix supported)\n"
			       );
	}

	mode = strcmp(args[3], "task") == 0;

	_HA_ATOMIC_INC(&debug_commands_issued);
	/* parse each "<obj>=<value>" setting */
	for (arg = 4; *args[arg]; arg++) {
		end = word = args[arg];
		while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-')
			end++;
		name = ist2(word, end - word);
		if (isteq(name, ist("count"))) {
			ptr = &count; size = sizeof(count);
		} else if (isteq(name, ist("tid"))) {
			ptr = &thrid; size = sizeof(thrid);
		} else if (isteq(name, ist("inter"))) {
			ptr = &inter; size = sizeof(inter);
		} else
			return cli_dynerr(appctx, memprintf(&msg, "Unsupported setting: '%s'.\n", word));

		/* parse the new value .
		 * NOTE(review): when no '=' is present, <end> points to the
		 * trailing NUL so end+1 / end[1] read one byte past it --
		 * looks benign on CLI argument buffers but worth confirming.
		 */
		new = strtoll(end + 1, &endarg, 0);
		if (end[1] && *endarg) {
			memprintf(&msg,
			          "%sIgnoring unparsable value '%s' for field '%.*s'.\n",
			          msg ? msg : "", end + 1, (int)(end - word), word);
			continue;
		}

		/* write the new value using the field's real storage width */
		if (size == 8)
			write_u64(ptr, new);
		else if (size == 4)
			write_u32(ptr, new);
		else if (size == 2)
			write_u16(ptr, new);
		else
			*(uint8_t *)ptr = new;
	}

	/* shared context: 2-slot header plus one tagged pointer per entry */
	tctx = calloc(count + 2, sizeof(*tctx));
	if (!tctx)
		goto fail;

	tctx[0] = (unsigned long)count;
	tctx[1] = (unsigned long)inter;

	/* fall back to the current thread when tid is out of range */
	if ((int)thrid >= global.nbthread)
		thrid = tid;

	for (i = 0; i < count; i++) {
		/* now, if poly or mask was set, tmask corresponds to the
		 * valid thread mask to use, otherwise it remains zero.
		 */
		//printf("%lu: mode=%d mask=%#lx\n", i, mode, tmask);
		if (mode == 0) {
			struct tasklet *tl = tasklet_new();

			if (!tl)
				goto fail;

			tl->tid = thrid;
			tl->process = debug_tasklet_handler;
			tl->context = tctx;
			tctx[i + 2] = (unsigned long)tl; // even value = tasklet
		} else {
			struct task *task = task_new_on(thrid);

			if (!task)
				goto fail;

			task->process = debug_task_handler;
			task->context = tctx;
			tctx[i + 2] = (unsigned long)task + 1; // odd value = task+1
		}
	}

	/* start the tasks and tasklets */
	for (i = 0; i < count; i++) {
		unsigned long ctx = tctx[i + 2];

		if (ctx & 1)
			task_wakeup((struct task *)(ctx - 1), TASK_WOKEN_INIT);
		else
			tasklet_wakeup((struct tasklet *)ctx);
	}

	if (msg && *msg)
		return cli_dynmsg(appctx, LOG_INFO, msg);
	return 1;

 fail:
	/* free partially allocated entries; a zero slot marks the point
	 * where allocation stopped (tctx was calloc'd).
	 */
	for (i = 0; tctx && i < count; i++) {
		unsigned long ctx = tctx[i + 2];

		if (!ctx)
			break;

		if (ctx & 1)
			task_destroy((struct task *)(ctx - 1));
		else
			tasklet_free((struct tasklet *)ctx);
	}

	free(tctx);
	return cli_err(appctx, "Not enough memory");
}
1798
1799
#if defined(DEBUG_DEV)
1800
/* All of this is for "trace dbg" */
1801
1802
/* trace source for the internal "dbg" debugging traces (usable via
 * "trace dbg" on the CLI); reports all events by default.
 */
static struct trace_source trace_dbg __read_mostly = {
	.name = IST("dbg"),
	.desc = "trace debugger",
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_dbg
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
1810
1811
/* This is the task handler used to send traces in loops. Note that the task's
 * context contains the number of remaining calls to be done. The task sends 20
 * messages per wakeup, with payload sizes growing from 20 to 270 bytes so as
 * to exercise various trace record lengths.
 */
static struct task *debug_trace_task(struct task *t, void *ctx, unsigned int state)
{
	ulong count;

	/* send 2 traces enter/leave +18 devel = 20 traces total */
	TRACE_ENTER(1);
	TRACE_DEVEL("msg01 has 20 bytes .", 1);
	TRACE_DEVEL("msg02 has 20 bytes .", 1);
	TRACE_DEVEL("msg03 has 20 bytes .", 1);
	TRACE_DEVEL("msg04 has 70 bytes payload: 0123456789 0123456789 0123456789 012345678", 1);
	TRACE_DEVEL("msg05 has 70 bytes payload: 0123456789 0123456789 0123456789 012345678", 1);
	TRACE_DEVEL("msg06 has 70 bytes payload: 0123456789 0123456789 0123456789 012345678", 1);
	TRACE_DEVEL("msg07 has 120 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 012", 1);
	TRACE_DEVEL("msg08 has 120 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 012", 1);
	TRACE_DEVEL("msg09 has 120 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 012", 1);
	TRACE_DEVEL("msg10 has 170 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 012345678", 1);
	TRACE_DEVEL("msg11 has 170 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 012345678", 1);
	TRACE_DEVEL("msg12 has 170 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 012345678", 1);
	TRACE_DEVEL("msg13 has 220 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123", 1);
	TRACE_DEVEL("msg14 has 220 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123", 1);
	TRACE_DEVEL("msg15 has 220 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123", 1);
	TRACE_DEVEL("msg16 has 270 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789", 1);
	TRACE_DEVEL("msg17 has 270 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789", 1);
	TRACE_DEVEL("msg18 has 270 bytes payload: 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 0123456789", 1);
	TRACE_LEAVE(1);

	/* the remaining wakeup count is stored directly in the context
	 * pointer (GNU void* arithmetic on the decrement).
	 */
	count = (ulong)t->context;
	t->context = (void*)count - 1;

	if (count)
		task_wakeup(t, TASK_WOKEN_MSG);
	else {
		/* all wakeups consumed: self-destruct */
		task_destroy(t);
		t = NULL;
	}
	return t;
}
1852
1853
/* parse a "debug dev trace" command
 * debug dev trace <nbthr>.
 * It will create as many tasks (one per thread), starting from lowest threads.
 * The traces will stop after 1M wakeups or 20M messages ~= 4GB of data.
 */
static int debug_parse_cli_trace(char **args, char *payload, struct appctx *appctx, void *private)
{
	unsigned long count = 1;
	unsigned long i;
	char *msg = NULL;
	char *endarg;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	_HA_ATOMIC_INC(&debug_commands_issued);

	if (!args[3][0]) {
		memprintf(&msg, "Need a thread count. Note that 20M msg will be sent per thread.\n");
		goto fail;
	}

	/* parse the new value . */
	count = strtoll(args[3], &endarg, 0);
	if (args[3][1] && *endarg) {
		memprintf(&msg, "Ignoring unparsable thread number '%s'.\n", args[3]);
		goto fail;
	}

	/* clamp to the number of configured threads */
	if (count >= global.nbthread)
		count = global.nbthread;

	for (i = 0; i < count; i++) {
		struct task *task = task_new_on(i);

		if (!task)
			goto fail; /* NOTE(review): reaches fail with msg==NULL; confirm cli_dynmsg tolerates a NULL message */

		task->process = debug_trace_task;
		task->context = (void*)(ulong)1000000; // 1M wakeups = 20M messages
		task_wakeup(task, TASK_WOKEN_INIT);
	}

	if (msg && *msg)
		return cli_dynmsg(appctx, LOG_INFO, msg);
	return 1;

 fail:
	return cli_dynmsg(appctx, LOG_ERR, msg);
}
1903
#endif /* DEBUG_DEV */
1904
1905
/* CLI state for "debug dev fd" */
struct dev_fd_ctx {
	int start_fd;  /* first FD to (re)start the dump from after a yield */
};
1909
1910
/* CLI parser for the "debug dev fd" command. The current FD to restart from is
 * stored in a struct dev_fd_ctx pointed to by svcctx. Returns 0 so that the
 * I/O handler below runs, or 1 when the permission check fails.
 */
static int debug_parse_cli_fd(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct dev_fd_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	/* start at fd #0 */
	ctx->start_fd = 0;
	return 0;
}
1924
1925
/* CLI I/O handler for the "debug dev fd" command. Dumps all FDs that are
 * accessible from the process but not known from fdtab. The FD number to
 * restart from is stored in a struct dev_fd_ctx pointed to by svcctx.
 * Returns 1 when done, 0 when yielding on a full output buffer.
 */
static int debug_iohandler_fd(struct appctx *appctx)
{
	struct dev_fd_ctx *ctx = appctx->svcctx;
	struct sockaddr_storage sa;
	struct stat statbuf;
	socklen_t salen, vlen;
	int ret1, ret2, port;
	char *addrstr;
	int ret = 1;
	int i, fd;

	chunk_reset(&trash);

	/* the whole scan runs isolated so that fdtab cannot change under us */
	thread_isolate();

	/* scan every possible FD up to maxsock and report the orphans */
	for (fd = ctx->start_fd; fd < global.maxsock; fd++) {
		/* check for FD's existence */
		ret1 = fcntl(fd, F_GETFD, 0);
		if (ret1 == -1)
			continue; // not known to the process
		if (fdtab[fd].owner)
			continue; // well-known

		/* OK we're seeing an orphan let's try to retrieve as much
		 * information as possible about it.
		 */
		chunk_printf(&trash, "%5d", fd);

		if (fstat(fd, &statbuf) != -1) {
			chunk_appendf(&trash, " type=%s mod=%04o dev=%#llx siz=%#llx uid=%lld gid=%lld fs=%#llx ino=%#llx",
			        isatty(fd)                ? "tty.":
			        S_ISREG(statbuf.st_mode)  ? "file":
			        S_ISDIR(statbuf.st_mode)  ? "dir.":
			        S_ISCHR(statbuf.st_mode)  ? "chr.":
			        S_ISBLK(statbuf.st_mode)  ? "blk.":
			        S_ISFIFO(statbuf.st_mode) ? "pipe":
			        S_ISLNK(statbuf.st_mode)  ? "link":
			        S_ISSOCK(statbuf.st_mode) ? "sock":
#ifdef USE_EPOLL
			        /* trick: epoll_ctl() will return -ENOENT when trying
			         * to remove from a valid epoll FD an FD that was not
			         * registered against it. But we don't want to risk
			         * disabling a random FD. Instead we'll create a new
			         * one by duplicating 0 (it should be valid since
			         * pointing to a terminal or /dev/null), and try to
			         * remove it.
			         */
			        ({
			          int fd2 = dup(0);
			          int ret = fd2;
			          if (ret >= 0) {
			            ret = epoll_ctl(fd, EPOLL_CTL_DEL, fd2, NULL);
			            if (ret == -1 && errno == ENOENT)
			              ret = 0; // that's a real epoll
			            else
			              ret = -1; // it's something else
			            close(fd2);
			          }
			          ret;
			        }) == 0 ? "epol" :
#endif
			        "????",
			        (uint)statbuf.st_mode & 07777,

			        (ullong)statbuf.st_rdev,
			        (ullong)statbuf.st_size,
			        (ullong)statbuf.st_uid,
			        (ullong)statbuf.st_gid,

			        (ullong)statbuf.st_dev,
			        (ullong)statbuf.st_ino);
		}

		chunk_appendf(&trash, " getfd=%s+%#x",
		       (ret1 & FD_CLOEXEC) ? "cloex" : "",
		       ret1 &~ FD_CLOEXEC);

		/* FD options.
		 * NOTE(review): a failing F_GETFL returns -1 (non-zero) and
		 * thus enters this block, while a successful 0 (O_RDONLY, no
		 * flags) skips it; also the access mode below is computed from
		 * ret1 (the F_GETFD result) rather than ret2 (F_GETFL) -- both
		 * look suspicious, to be confirmed against upstream intent.
		 */
		ret2 = fcntl(fd, F_GETFL, 0);
		if (ret2) {
			chunk_appendf(&trash, " getfl=%s",
			        (ret1 & 3) >= 2 ? "O_RDWR" :
			        (ret1 & 1) ? "O_WRONLY" : "O_RDONLY");

			for (i = 2; i < 32; i++) {
				if (!(ret2 & (1UL << i)))
					continue;
				switch (1UL << i) {
				case O_CREAT:   chunk_appendf(&trash, ",O_CREAT");   break;
				case O_EXCL:    chunk_appendf(&trash, ",O_EXCL");    break;
				case O_NOCTTY:  chunk_appendf(&trash, ",O_NOCTTY");  break;
				case O_TRUNC:   chunk_appendf(&trash, ",O_TRUNC");   break;
				case O_APPEND:  chunk_appendf(&trash, ",O_APPEND");  break;
#ifdef O_ASYNC
				case O_ASYNC:   chunk_appendf(&trash, ",O_ASYNC");   break;
#endif
#ifdef O_DIRECT
				case O_DIRECT:  chunk_appendf(&trash, ",O_DIRECT");  break;
#endif
#ifdef O_NOATIME
				case O_NOATIME: chunk_appendf(&trash, ",O_NOATIME"); break;
#endif
				}
			}
		}

		/* socket-level information; getsockopt simply fails on non-sockets */
		vlen = sizeof(ret2);
		ret1 = getsockopt(fd, SOL_SOCKET, SO_TYPE, &ret2, &vlen);
		if (ret1 != -1)
			chunk_appendf(&trash, " so_type=%d", ret2);

		vlen = sizeof(ret2);
		ret1 = getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &ret2, &vlen);
		if (ret1 != -1)
			chunk_appendf(&trash, " so_accept=%d", ret2);

		vlen = sizeof(ret2);
		ret1 = getsockopt(fd, SOL_SOCKET, SO_ERROR, &ret2, &vlen);
		if (ret1 != -1)
			chunk_appendf(&trash, " so_error=%d", ret2);

		salen = sizeof(sa);
		if (getsockname(fd, (struct sockaddr *)&sa, &salen) != -1) {
			int i;

			if (sa.ss_family == AF_INET)
				port = ntohs(((const struct sockaddr_in *)&sa)->sin_port);
			else if (sa.ss_family == AF_INET6)
				port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port);
			else
				port = 0;
			addrstr = sa2str(&sa, port, 0);
			/* cleanup the output: mask non-printable characters */
			for  (i = 0; i < strlen(addrstr); i++) {
				if (iscntrl((unsigned char)addrstr[i]) || !isprint((unsigned char)addrstr[i]))
					addrstr[i] = '.';
			}

			chunk_appendf(&trash, " laddr=%s", addrstr);
			free(addrstr);
		}

		salen = sizeof(sa);
		if (getpeername(fd, (struct sockaddr *)&sa, &salen) != -1) {
			if (sa.ss_family == AF_INET)
				port = ntohs(((const struct sockaddr_in *)&sa)->sin_port);
			else if (sa.ss_family == AF_INET6)
				port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port);
			else
				port = 0;
			addrstr = sa2str(&sa, port, 0);
			/* cleanup the output: mask non-printable characters */
			for  (i = 0; i < strlen(addrstr); i++) {
				if ((iscntrl((unsigned char)addrstr[i])) || !isprint((unsigned char)addrstr[i]))
					addrstr[i] = '.';
			}
			chunk_appendf(&trash, " raddr=%s", addrstr);
			free(addrstr);
		}

		chunk_appendf(&trash, "\n");

		/* yield when the output buffer is full; resume from this fd */
		if (applet_putchk(appctx, &trash) == -1) {
			ctx->start_fd = fd;
			ret = 0;
			break;
		}
	}

	thread_release();
	return ret;
}
2104
2105
#if defined(DEBUG_MEM_STATS)
2106
2107
/* CLI state for "debug dev memstats" */
struct dev_mem_ctx {
	struct mem_stats *start, *stop; /* begin/end of dump */
	char *match;                    /* non-null if a name prefix is specified */
	int show_all;                   /* show all entries if non-null */
	int width;                      /* 1st column width */
	long tot_size;                  /* sum of alloc-free */
	ulong tot_calls;                /* sum of calls */
};
2116
2117
/* CLI parser for the "debug dev memstats" command. Sets a dev_mem_ctx shown
 * above. Supports "reset" (admin only, zeroes all counters), "all" (dump
 * zero entries too), and "match <pfx>" (restrict the dump to a pool-name
 * prefix). Returns 0 to let the I/O handler run, 1 otherwise.
 */
static int debug_parse_cli_memstats(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct dev_mem_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	int arg;

	/* linker-provided bounds of the mem_stats section (weak symbols,
	 * hence may be absent when the section is empty).
	 */
	extern __attribute__((__weak__)) struct mem_stats __start_mem_stats;
	extern __attribute__((__weak__)) struct mem_stats __stop_mem_stats;

	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	for (arg = 3; *args[arg]; arg++) {
		if (strcmp(args[arg], "reset") == 0) {
			struct mem_stats *ptr;

			/* resetting counters requires admin level */
			if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
				return 1;

			for (ptr = &__start_mem_stats; ptr < &__stop_mem_stats; ptr++) {
				_HA_ATOMIC_STORE(&ptr->calls, 0);
				_HA_ATOMIC_STORE(&ptr->size, 0);
			}
			return 1;
		}
		else if (strcmp(args[arg], "all") == 0) {
			ctx->show_all = 1;
			continue;
		}
		else if (strcmp(args[arg], "match") == 0 && *args[arg + 1]) {
			/* replace any previous prefix */
			ha_free(&ctx->match);
			ctx->match = strdup(args[arg + 1]);
			if (!ctx->match)
				return cli_err(appctx, "Out of memory.\n");
			arg++; /* consume the prefix argument too */
			continue;
		}
		else
			return cli_err(appctx, "Expects either 'reset', 'all', or 'match <pfx>'.\n");
	}

	/* otherwise proceed with the dump from p0 to p1 */
	ctx->start = &__start_mem_stats;
	ctx->stop  = &__stop_mem_stats;
	ctx->width = 0; /* column width computed on first iohandler pass */
	return 0;
}
2164
2165
/* CLI I/O handler for the "debug dev memstats" command using a dev_mem_ctx
 * found in appctx->svcctx. Dumps all mem_stats structs referenced by pointers
 * located between ->start and ->stop. Dumps all entries if ->show_all != 0,
 * otherwise only non-zero calls. Returns 1 when done, 0 when yielding on a
 * full output buffer (resuming from ->start).
 */
static int debug_iohandler_memstats(struct appctx *appctx)
{
	struct dev_mem_ctx *ctx = appctx->svcctx;
	struct mem_stats *ptr;
	const char *pfx = ctx->match;
	int ret = 1;

	if (!ctx->width) {
		/* we don't know the first column's width, let's compute it
		 * now based on a first pass on printable entries and their
		 * expected width (approximated).
		 */
		for (ptr = ctx->start; ptr != ctx->stop; ptr++) {
			const char *p, *name;
			int w = 0;
			char tmp;

			if (!ptr->size && !ptr->calls && !ctx->show_all)
				continue;

			/* keep only the basename of the source file */
			for (p = name = ptr->caller.file; *p; p++) {
				if (*p == '/')
					name = p + 1;
			}

			/* snprintf with size 0 only measures the length */
			if (ctx->show_all)
				w = snprintf(&tmp, 0, "%s(%s:%d) ", ptr->caller.func, name, ptr->caller.line);
			else
				w = snprintf(&tmp, 0, "%s:%d ", name, ptr->caller.line);

			if (w > ctx->width)
				ctx->width = w;
		}
	}

	/* dump all entries, resuming from ctx->start after a yield */
	for (ptr = ctx->start; ptr != ctx->stop; ptr++) {
		const char *type;
		const char *name;
		const char *p;
		const char *info = NULL;
		const char *func = NULL;
		int direction = 0; // neither alloc nor free (e.g. realloc)

		if (!ptr->size && !ptr->calls && !ctx->show_all)
			continue;

		/* basename only */
		for (p = name = ptr->caller.file; *p; p++) {
			if (*p == '/')
				name = p + 1;
		}

		func = ptr->caller.func;

		switch (ptr->caller.what) {
		case MEM_STATS_TYPE_CALLOC:  type = "CALLOC";  direction =  1; break;
		case MEM_STATS_TYPE_FREE:    type = "FREE";    direction = -1; break;
		case MEM_STATS_TYPE_MALLOC:  type = "MALLOC";  direction =  1; break;
		case MEM_STATS_TYPE_REALLOC: type = "REALLOC"; break;
		case MEM_STATS_TYPE_STRDUP:  type = "STRDUP";  direction =  1; break;
		case MEM_STATS_TYPE_P_ALLOC: type = "P_ALLOC"; direction =  1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break;
		case MEM_STATS_TYPE_P_FREE:  type = "P_FREE";  direction = -1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break;
		default:                     type = "UNSET";   break;
		}

		//chunk_printf(&trash,
		//       "%20s:%-5d %7s size: %12lu calls: %9lu size/call: %6lu\n",
		//       name, ptr->line, type,
		//       (unsigned long)ptr->size, (unsigned long)ptr->calls,
		//       (unsigned long)(ptr->calls ? (ptr->size / ptr->calls) : 0));

		/* only match requested prefixes */
		if (pfx && (!info || strncmp(info, pfx, strlen(pfx)) != 0))
			continue;

		chunk_reset(&trash);
		if (ctx->show_all)
			chunk_appendf(&trash, "%s(", func);

		chunk_appendf(&trash, "%s:%d", name, ptr->caller.line);

		if (ctx->show_all)
			chunk_appendf(&trash, ")");

		/* pad the first column to the pre-computed width */
		while (trash.data < ctx->width)
			trash.area[trash.data++] = ' ';

		chunk_appendf(&trash, "%7s  size: %12lu  calls: %9lu  size/call: %6lu %s\n",
		       type,
		       (unsigned long)ptr->size, (unsigned long)ptr->calls,
		             (unsigned long)(ptr->calls ? (ptr->size / ptr->calls) : 0),
		       info ? info : "");

		if (applet_putchk(appctx, &trash) == -1) {
			/* buffer full: yield and resume from this entry */
			ctx->start = ptr;
			ret = 0;
			goto end;
		}
		/* accumulate totals (realloc, direction 0, is excluded from size) */
		if (direction > 0) {
			ctx->tot_size  += (ulong)ptr->size;
			ctx->tot_calls += (ulong)ptr->calls;
		}
		else if (direction < 0) {
			ctx->tot_size  -= (ulong)ptr->size;
			ctx->tot_calls += (ulong)ptr->calls;
		}
	}

	/* now dump a summary */
	chunk_reset(&trash);
	chunk_appendf(&trash, "Total");
	while (trash.data < ctx->width)
		trash.area[trash.data++] = ' ';

	chunk_appendf(&trash, "%7s  size: %12ld  calls: %9lu  size/call: %6ld %s\n",
	        "BALANCE",
	        ctx->tot_size, ctx->tot_calls,
	        (long)(ctx->tot_calls ? (ctx->tot_size / ctx->tot_calls) : 0),
	        "(excl. realloc)");

	if (applet_putchk(appctx, &trash) == -1) {
		ctx->start = ptr;
		ret = 0;
		goto end;
	}
 end:
	return ret;
}
2301
2302
/* release the "debug dev memstats" context: frees the optional match prefix */
static void debug_release_memstats(struct appctx *appctx)
{
	struct dev_mem_ctx *ctx = appctx->svcctx;

	ha_free(&ctx->match);
}
2309
#endif
2310
2311
#if !defined(USE_OBSOLETE_LINKER)
2312
2313
/* CLI state for "debug counters" */
struct deb_cnt_ctx {
	struct debug_count *start, *stop; /* begin/end of dump */
	int types;                        /* OR mask of 1<<type; 0 = all types */
	int show_all;                     /* show all entries if non-null */
};
2319
2320
/* CLI parser for the "debug counters" command. Sets a deb_cnt_ctx shown above.
 * Supported words: an action ("show" (default), "reset", "on", "off"), type
 * filters ("bug", "chk", "cnt", "glt") that OR into ctx->types, and "all" to
 * also dump null counters. Returns 0 to let the I/O handler dump, 1 otherwise.
 */
static int debug_parse_cli_counters(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct deb_cnt_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	int action;
	int arg;

	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	action = 0; // 0=show, 1=reset
	for (arg = 2; *args[arg]; arg++) {
		if (strcmp(args[arg], "reset") == 0) {
			action = 1;
			continue;
		}
		else if (strcmp(args[arg], "off") == 0) {
			action = 2;
			continue;
		}
		else if (strcmp(args[arg], "on") == 0) {
			action = 3;
			continue;
		}
		else if (strcmp(args[arg], "all") == 0) {
			ctx->show_all = 1;
			continue;
		}
		else if (strcmp(args[arg], "show") == 0) {
			action = 0;
			continue;
		}
		else if (strcmp(args[arg], "bug") == 0) {
			ctx->types |= 1 << DBG_BUG;
			continue;
		}
		else if (strcmp(args[arg], "chk") == 0) {
			ctx->types |= 1 << DBG_BUG_ONCE;
			continue;
		}
		else if (strcmp(args[arg], "cnt") == 0) {
			ctx->types |= 1 << DBG_COUNT_IF;
			continue;
		}
		else if (strcmp(args[arg], "glt") == 0) {
			ctx->types |= 1 << DBG_GLITCH;
			continue;
		}
		else
			return cli_err(appctx, "Expects an optional action ('reset','show','on','off'), optional types ('bug','chk','cnt','glt') and optionally 'all' to even dump null counters.\n");
	}

	/* section bounds only exist when counters are compiled in; otherwise
	 * start/stop stay NULL (svcctx is zeroed) and the loops do nothing.
	 */
#if (DEBUG_STRICT > 0) || (DEBUG_COUNTERS > 0)
	ctx->start = &__start_dbg_cnt;
	ctx->stop  = &__stop_dbg_cnt;
#endif
	if (action == 1) { // reset
		struct debug_count *ptr;

		if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
			return 1;

		for (ptr = ctx->start; ptr < ctx->stop; ptr++) {
			/* a type filter, if any, limits what gets reset */
			if (ctx->types && !(ctx->types & (1 << ptr->type)))
				continue;
			_HA_ATOMIC_STORE(&ptr->count, 0);
		}
		return 1;
	}
	else if (action == 2 || action == 3) { // off/on
		if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
			return 1;
		HA_ATOMIC_STORE(&debug_enable_counters, action == 3);
		return 0;
	}

	/* OK it's a show, let's dump relevant counters */
	return 0;
}
2399
2400
/* CLI I/O handler for the "debug counters" command using a deb_cnt_ctx
 * found in appctx->svcctx. Dumps all counters located between ->start and
 * ->stop that match the optional ->types filter. Dumps all entries if
 * ->show_all != 0, otherwise only non-zero ones. Returns 1 when done,
 * 0 when yielding on a full output buffer (resuming from ->start).
 */
static int debug_iohandler_counters(struct appctx *appctx)
{
	const char *bug_type[DBG_COUNTER_TYPES] = {
		[DBG_BUG]      = "BUG",
		[DBG_BUG_ONCE] = "CHK",
		[DBG_COUNT_IF] = "CNT",
		[DBG_GLITCH]   = "GLT",
	};
	struct deb_cnt_ctx *ctx = appctx->svcctx;
	struct debug_count *ptr;
	int ret = 1;

	/* dump all selected counters, one line each, after a header line */
	chunk_printf(&trash, "Count     Type Location function(): \"condition\" [comment]\n");
	for (ptr = ctx->start; ptr != ctx->stop; ptr++) {
		const char *p, *name;

		if (ctx->types && !(ctx->types & (1 << ptr->type)))
			continue;

		if (!ptr->count && !ctx->show_all)
			continue;

		/* keep only the basename of the source file */
		for (p = name = ptr->file; *p; p++) {
			if (*p == '/')
				name = p + 1;
		}

		if (ptr->type < DBG_COUNTER_TYPES)
			chunk_appendf(&trash, "%-10u %3s %s:%d %s()%s%s%s\n",
			        ptr->count, bug_type[ptr->type],
			        name, ptr->line, ptr->func,
			        *ptr->desc ? ": " : "", ptr->desc,
			        (ptr->type == DBG_COUNT_IF && !debug_enable_counters) ? " (stopped)" : "");

		if (applet_putchk(appctx, &trash) == -1) {
			/* buffer full: yield and resume from this entry */
			ctx->start = ptr;
			ret = 0;
			goto end;
		}
	}

	/* we could even dump a summary here if needed, returning ret=0 */
 end:
	return ret;
}
2453
#endif /* USE_OBSOLETE_LINKER */
2454
2455
#ifdef USE_THREAD_DUMP
2456
2457
/* handles DEBUGSIG to dump the state of the thread it's working on. This is
 * appended at the end of thread_dump_buffer which must be protected against
 * reentrance from different threads (a thread-local buffer works fine). If
 * the buffer pointer is equal to 0x2, then it's a panic. The thread allocates
 * the buffer from its own trash chunks so that the contents remain visible in
 * the core, and it never returns. The low pointer bits are used as flags:
 * 0x1 = dump complete, 0x2 = panic/self-allocate.
 */
void debug_handler(int sig, siginfo_t *si, void *arg)
{
	struct buffer *buf = HA_ATOMIC_LOAD(&th_ctx->thread_dump_buffer);
	int no_return = 0;

	/* first, let's check it's really for us and that we didn't just get
	 * a spurious DEBUGSIG: a NULL buffer or one already marked done (0x1)
	 * means nobody is waiting for our dump.
	 */
	if (!buf || (ulong)buf & 0x1UL)
		return;

	/* inform callees to be careful, we're in a signal handler! */
	_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_IN_DBG_HANDLER);

	/* Special value 0x2 is used during panics and requires that the thread
	 * allocates its own dump buffer among its own trash buffers. The goal
	 * is that all threads keep a copy of their own dump.
	 */
	if ((ulong)buf == 0x2UL) {
		no_return = 1;
		buf = get_trash_chunk();
		HA_ATOMIC_STORE(&th_ctx->thread_dump_buffer, buf);
	}

	/* Extra work might have been queued in the mean time via 0x2 */
	if (((ulong)buf & 0x2UL)) {
		buf = (void *)((ulong)buf & ~0x2); /* strip the flag to get the real pointer */
	}

	/* now dump the current state into the designated buffer, and indicate
	 * we come from a sig handler.
	 */
	ha_thread_dump_one(buf, 0);

	/* end of dump, setting the buffer to 0x1 will tell the caller we're done */
	HA_ATOMIC_OR((ulong*)DISGUISE(&th_ctx->thread_dump_buffer), 0x1UL);

	/* in case of panic, no return is planned so that we don't destroy
	 * the buffer's contents and we make sure not to trigger in loops.
	 */
	while (no_return)
		wait(NULL);

	_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_IN_DBG_HANDLER);
}
2509
2510
static int init_debug_per_thread()
2511
{
2512
  sigset_t set;
2513
2514
  /* unblock the DEBUGSIG signal we intend to use */
2515
  sigemptyset(&set);
2516
  sigaddset(&set, DEBUGSIG);
2517
#if defined(DEBUG_DEV)
2518
  sigaddset(&set, SIGRTMAX);
2519
#endif
2520
  ha_sigmask(SIG_UNBLOCK, &set, NULL);
2521
  return 1;
2522
}
2523
2524
/* One-time (post-check) debug initialization: preloads the backtrace support,
 * installs the DEBUGSIG handler used for thread dumps (and, in DEBUG_DEV
 * builds, the SIGRTMAX delay-injection handler), and sanity-checks the
 * descriptions of all registered debug counters. Returns ERR_NONE on success
 * or ERR_WARN if an invalid counter description was found.
 */
static int init_debug()
{
	struct sigaction sa;
	void *callers[1];
	int ret = ERR_NONE;

	/* calling backtrace() will access libgcc at runtime. We don't want to
	 * do it after the chroot, so let's perform a first call to have it
	 * ready in memory for later use.
	 */
	my_backtrace(callers, sizeof(callers)/sizeof(*callers));
	sa.sa_handler = NULL;
	sa.sa_sigaction = debug_handler;
	sigemptyset(&sa.sa_mask);
	/* block the other debug-related signals while the dump handler runs
	 * so they cannot interleave with it.
	 */
#ifdef WDTSIG
	sigaddset(&sa.sa_mask, WDTSIG);
#endif
	sigaddset(&sa.sa_mask, DEBUGSIG);
#if defined(DEBUG_DEV)
	sigaddset(&sa.sa_mask, SIGRTMAX);
#endif
	sa.sa_flags = SA_SIGINFO;
	sigaction(DEBUGSIG, &sa, NULL);

#if defined(DEBUG_DEV)
	/* reuse <sa> (mask reset) to install the random delay injector */
	sa.sa_handler = NULL;
	sa.sa_sigaction = debug_delay_inj_sighandler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMAX, &sa, NULL);
#endif

#if !defined(USE_OBSOLETE_LINKER) && ((DEBUG_STRICT > 0) || (DEBUG_COUNTERS > 0))
	/* verify that every registered debug counter description only contains
	 * printable ASCII, since these strings are dumped verbatim on the CLI.
	 */
	if (&__start_dbg_cnt) {
		const struct debug_count *ptr;
		const char *p;

		for (ptr = &__start_dbg_cnt; ptr < &__stop_dbg_cnt; ptr++) {
			for (p = ptr->desc; *p; p++) {
				if (*p < 0x20 || *p >= 0x7f) {
					ha_warning("Invalid character 0x%02x at position %d in description string at %s:%d %s()\n",
						   (uchar)*p, (int)(p - ptr->desc), ptr->file, ptr->line, ptr->func);
					ret = ERR_WARN;
					break;
				}
			}
		}
	}
#endif
	return ret;
}
2575
2576
REGISTER_POST_CHECK(init_debug);
2577
REGISTER_PER_THREAD_INIT(init_debug_per_thread);
2578
2579
#endif /* USE_THREAD_DUMP */
2580
2581
2582
/* Collects Linux-specific platform information into the global post_mortem
 * structure: hardware/board identity from DMI sysfs or the device-tree,
 * container detection from /proc/2/status, and CPU/virtualization details
 * from /proc/cpuinfo. Compiles to a no-op on non-Linux systems. Best-effort:
 * all failures are silently ignored.
 */
static void feed_post_mortem_linux()
{
#if defined(__linux__)
	struct stat statbuf;
	FILE *file;

	/* DMI reports either HW or hypervisor, this allows to detect most VMs.
	 * On ARM the device-tree is often more precise for the model. Since many
	 * boards present "to be filled by OEM" or so in many fields, we dedup
	 * them as much as possible.
	 */
	if (read_line_to_trash("/sys/class/dmi/id/sys_vendor") > 0)
		strlcpy2(post_mortem.platform.hw_vendor, trash.area, sizeof(post_mortem.platform.hw_vendor));

	if (read_line_to_trash("/sys/class/dmi/id/product_family") > 0 &&
	    strcmp(trash.area, post_mortem.platform.hw_vendor) != 0)
		strlcpy2(post_mortem.platform.hw_family, trash.area, sizeof(post_mortem.platform.hw_family));

	if ((read_line_to_trash("/sys/class/dmi/id/product_name") > 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_family) != 0))
		strlcpy2(post_mortem.platform.hw_model, trash.area, sizeof(post_mortem.platform.hw_model));

	if ((read_line_to_trash("/sys/class/dmi/id/board_vendor") > 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0))
		strlcpy2(post_mortem.platform.brd_vendor, trash.area, sizeof(post_mortem.platform.brd_vendor));

	/* board model: prefer the device-tree model (ARM), fall back to the DMI
	 * board name; in both cases only keep it if it differs from everything
	 * already recorded above.
	 */
	if ((read_line_to_trash("/sys/firmware/devicetree/base/model") > 0 &&
	     strcmp(trash.area, post_mortem.platform.brd_vendor) != 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_family) != 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_model) != 0) ||
	    (read_line_to_trash("/sys/class/dmi/id/board_name") > 0 &&
	     strcmp(trash.area, post_mortem.platform.brd_vendor) != 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_family) != 0 &&
	     strcmp(trash.area, post_mortem.platform.hw_model) != 0))
		strlcpy2(post_mortem.platform.brd_model, trash.area, sizeof(post_mortem.platform.brd_model));

	/* Check for containers. In a container on linux we don't see keventd (2.4) kthreadd (2.6+) on pid 2 */
	if (read_line_to_trash("/proc/2/status") <= 0 ||
	    (strcmp(trash.area, "Name:\tkthreadd") != 0 &&
	     strcmp(trash.area, "Name:\tkeventd") != 0)) {
		/* OK we're in a container. Docker often has /.dockerenv */
		const char *tech = "yes";

		if (stat("/.dockerenv", &statbuf) == 0)
			tech = "docker";
		strlcpy2(post_mortem.platform.cont_techno, tech, sizeof(post_mortem.platform.cont_techno));
	}
	else {
		strlcpy2(post_mortem.platform.cont_techno, "no", sizeof(post_mortem.platform.cont_techno));
	}

	file = fopen("/proc/cpuinfo", "r");
	if (file) {
		uint cpu_implem = 0, cpu_arch = 0, cpu_variant = 0, cpu_part = 0, cpu_rev = 0; // arm
		uint cpu_family = 0, model = 0, stepping = 0;                                  // x86
		char vendor_id[64] = "", model_name[64] = "";                                  // x86
		char machine[64] = "", system_type[64] = "", cpu_model[64] = "";               // mips
		const char *virt = "no";
		char *p, *e, *v, *lf;

		/* let's figure what CPU we're working with: parse "name : value"
		 * lines from the first CPU block only (parsing stops at the first
		 * empty line).
		 */
		while ((p = fgets(trash.area, trash.size, file)) != NULL) {
			lf = strchr(p, '\n');
			if (lf)
				*lf = 0;

			/* stop at first line break */
			if (!*p)
				break;

			/* skip colon and spaces and trim spaces after name */
			v = e = strchr(p, ':');
			if (!e)
				continue;

			do { *e-- = 0; } while (e >= p && (*e == ' ' || *e == '\t'));

			/* locate value after colon */
			do { v++; } while (*v == ' ' || *v == '\t');

			/* ARM */
			if (strcmp(p, "CPU implementer") == 0)
				cpu_implem = strtoul(v, NULL, 0);
			else if (strcmp(p, "CPU architecture") == 0)
				cpu_arch = strtoul(v, NULL, 0);
			else if (strcmp(p, "CPU variant") == 0)
				cpu_variant = strtoul(v, NULL, 0);
			else if (strcmp(p, "CPU part") == 0)
				cpu_part = strtoul(v, NULL, 0);
			else if (strcmp(p, "CPU revision") == 0)
				cpu_rev = strtoul(v, NULL, 0);

			/* x86 */
			else if (strcmp(p, "cpu family") == 0)
				cpu_family = strtoul(v, NULL, 0);
			else if (strcmp(p, "model") == 0)
				model = strtoul(v, NULL, 0);
			else if (strcmp(p, "stepping") == 0)
				stepping = strtoul(v, NULL, 0);
			else if (strcmp(p, "vendor_id") == 0)
				strlcpy2(vendor_id, v, sizeof(vendor_id));
			else if (strcmp(p, "model name") == 0)
				strlcpy2(model_name, v, sizeof(model_name));
			else if (strcmp(p, "flags") == 0) {
				/* the "hypervisor" CPU flag indicates we run in a VM;
				 * try to identify it from the DMI vendor seen above.
				 */
				if (strstr(v, "hypervisor")) {
					if (strncmp(post_mortem.platform.hw_vendor, "QEMU", 4) == 0)
						virt = "qemu";
					else if (strncmp(post_mortem.platform.hw_vendor, "VMware", 6) == 0)
						virt = "vmware";
					else
						virt = "yes";
				}
			}

			/* MIPS */
			else if (strcmp(p, "system type") == 0)
				strlcpy2(system_type, v, sizeof(system_type));
			else if (strcmp(p, "machine") == 0)
				strlcpy2(machine, v, sizeof(machine));
			else if (strcmp(p, "cpu model") == 0)
				strlcpy2(cpu_model, v, sizeof(cpu_model));
		}
		fclose(file);

		/* Machine may replace hw_product on MIPS */
		if (!*post_mortem.platform.hw_model)
			strlcpy2(post_mortem.platform.hw_model, machine, sizeof(post_mortem.platform.hw_model));

		/* SoC vendor */
		strlcpy2(post_mortem.platform.soc_vendor, vendor_id, sizeof(post_mortem.platform.soc_vendor));

		/* SoC model */
		if (*system_type) {
			/* MIPS */
			strlcpy2(post_mortem.platform.soc_model, system_type, sizeof(post_mortem.platform.soc_model));
			*system_type = 0;
		} else if (*model_name) {
			/* x86 */
			strlcpy2(post_mortem.platform.soc_model, model_name, sizeof(post_mortem.platform.soc_model));
			*model_name = 0;
		}

		/* Create a CPU model name based on available IDs */
		if (cpu_implem) // arm
			snprintf(cpu_model + strlen(cpu_model),
				 sizeof(cpu_model) - strlen(cpu_model),
				 "%sImpl %#02x", *cpu_model ? " " : "", cpu_implem);

		if (cpu_family) // x86
			snprintf(cpu_model + strlen(cpu_model),
				 sizeof(cpu_model) - strlen(cpu_model),
				 "%sFam %u", *cpu_model ? " " : "", cpu_family);

		if (model) // x86
			snprintf(cpu_model + strlen(cpu_model),
				 sizeof(cpu_model) - strlen(cpu_model),
				 "%sModel %u", *cpu_model ? " " : "", model);

		if (stepping) // x86
			snprintf(cpu_model + strlen(cpu_model),
				 sizeof(cpu_model) - strlen(cpu_model),
				 "%sStep %u", *cpu_model ? " " : "", stepping);

		if (cpu_arch) // arm
			snprintf(cpu_model + strlen(cpu_model),
				 sizeof(cpu_model) - strlen(cpu_model),
				 "%sArch %u", *cpu_model ? " " : "", cpu_arch);

		if (cpu_part) // arm
			snprintf(cpu_model + strlen(cpu_model),
				 sizeof(cpu_model) - strlen(cpu_model),
				 "%sPart %#03x", *cpu_model ? " " : "", cpu_part);

		if (cpu_variant || cpu_rev) // arm
			snprintf(cpu_model + strlen(cpu_model),
				 sizeof(cpu_model) - strlen(cpu_model),
				 "%sr%up%u", *cpu_model ? " " : "", cpu_variant, cpu_rev);

		strlcpy2(post_mortem.platform.cpu_model, cpu_model, sizeof(post_mortem.platform.cpu_model));

		/* NOTE(review): virt defaults to "no" so this condition always
		 * holds; kept as-is to preserve behavior.
		 */
		if (*virt)
			strlcpy2(post_mortem.platform.virt_techno, virt, sizeof(post_mortem.platform.virt_techno));
	}
#endif // __linux__
}
2770
2771
/* Post-check initializer filling the global post_mortem structure with
 * boot-time information (identification magic, kernel info, process ids,
 * limits, capabilities, loaded libs) and pointers to the main global state
 * so that everything remains reachable from a core dump. Always returns
 * ERR_NONE.
 */
static int feed_post_mortem()
{
	/* write an easily identifiable magic at the beginning of the struct */
	strncpy(post_mortem.post_mortem_magic,
		"POST-MORTEM STARTS HERE+7654321\0",
		sizeof(post_mortem.post_mortem_magic));
	/* kernel type, version and arch */
	uname(&post_mortem.platform.utsname);

	/* some boot-time info related to the process */
	post_mortem.process.pid = getpid();
	post_mortem.process.boot_uid = geteuid();
	post_mortem.process.boot_gid = getegid();
	post_mortem.process.argc = global.argc;
	post_mortem.process.argv = global.argv;

#if defined(USE_LINUX_CAP)
	/* snapshot the capabilities held at boot; remember errno on failure */
	if (capget(&cap_hdr_haproxy, post_mortem.process.caps.boot) == -1)
		post_mortem.process.caps.err_boot = errno;
#endif
	post_mortem.process.boot_lim_fd.rlim_cur = rlim_fd_cur_at_boot;
	post_mortem.process.boot_lim_fd.rlim_max = rlim_fd_max_at_boot;
	getrlimit(RLIMIT_DATA, &post_mortem.process.boot_lim_ram);

	/* platform-specific details (DMI, container, cpuinfo) on Linux only */
	if (strcmp(post_mortem.platform.utsname.sysname, "Linux") == 0)
		feed_post_mortem_linux();

#if defined(HA_HAVE_DUMP_LIBS)
	/* keep a copy of the loaded object list (freed in deinit_post_mortem) */
	chunk_reset(&trash);
	if (dump_libs(&trash, 1))
		post_mortem.libs = strdup(trash.area);
#endif

	/* anchor the main global structures so a debugger can find them */
	post_mortem.tgroup_info = ha_tgroup_info;
	post_mortem.thread_info = ha_thread_info;
	post_mortem.tgroup_ctx  = ha_tgroup_ctx;
	post_mortem.thread_ctx  = ha_thread_ctx;
	post_mortem.pools = &pools;
	post_mortem.proxies = &proxies_list;
	post_mortem.global = &global;
	post_mortem.fdtab = &fdtab;
	post_mortem.activity = activity;

	return ERR_NONE;
}
2816
2817
REGISTER_POST_CHECK(feed_post_mortem);
2818
2819
static void deinit_post_mortem(void)
2820
0
{
2821
0
  int comp;
2822
2823
#if defined(HA_HAVE_DUMP_LIBS)
2824
  ha_free(&post_mortem.libs);
2825
#endif
2826
0
  for (comp = 0; comp < post_mortem.nb_components; comp++) {
2827
0
    free(post_mortem.components[comp].toolchain);
2828
0
    free(post_mortem.components[comp].toolchain_opts);
2829
0
    free(post_mortem.components[comp].build_settings);
2830
0
    free(post_mortem.components[comp].path);
2831
0
  }
2832
0
  ha_free(&post_mortem.components);
2833
0
}
2834
2835
REGISTER_POST_DEINIT(deinit_post_mortem);
2836
2837
/* Appends a component to the list of post_portem info. May silently fail
2838
 * on allocation errors but we don't care since the goal is to provide info
2839
 * we have in case it helps.
2840
 */
2841
void post_mortem_add_component(const char *name, const char *version,
2842
             const char *toolchain, const char *toolchain_opts,
2843
             const char *build_settings, const char *path)
2844
0
{
2845
0
  struct post_mortem_component *comp;
2846
0
  int nbcomp = post_mortem.nb_components;
2847
2848
0
  comp = realloc(post_mortem.components, (nbcomp + 1) * sizeof(*comp));
2849
0
  if (!comp)
2850
0
    return;
2851
2852
0
  memset(&comp[nbcomp], 0, sizeof(*comp));
2853
0
  strlcpy2(comp[nbcomp].name, name, sizeof(comp[nbcomp].name));
2854
0
  strlcpy2(comp[nbcomp].version, version, sizeof(comp[nbcomp].version));
2855
0
  comp[nbcomp].toolchain      = strdup(toolchain);
2856
0
  comp[nbcomp].toolchain_opts = strdup(toolchain_opts);
2857
0
  comp[nbcomp].build_settings = strdup(build_settings);
2858
0
  comp[nbcomp].path = strdup(path);
2859
2860
0
  post_mortem.nb_components++;
2861
0
  post_mortem.components = comp;
2862
0
}
2863
2864
#ifdef USE_THREAD
2865
/* init code is called one at a time so let's collect all per-thread info on
 * the last starting thread. These info are not critical anyway and there's no
 * problem if we get them slightly late.
 */
static int feed_post_mortem_late()
{
	/* atomically counted across threads; only the last one proceeds */
	static int per_thread_info_collected;

	if (HA_ATOMIC_ADD_FETCH(&per_thread_info_collected, 1) != global.nbthread)
		return 1;

	/* also set runtime process settings. At this stage we are sure, that all
	 * config options and limits adjustments are successfully applied.
	 */
	post_mortem.process.run_uid = geteuid();
	post_mortem.process.run_gid = getegid();
#if defined(USE_LINUX_CAP)
	/* snapshot the runtime capabilities; remember errno on failure */
	if (capget(&cap_hdr_haproxy, post_mortem.process.caps.run) == -1) {
		post_mortem.process.caps.err_run = errno;
	}
#endif
	getrlimit(RLIMIT_NOFILE, &post_mortem.process.run_lim_fd);
	getrlimit(RLIMIT_DATA, &post_mortem.process.run_lim_ram);

	return 1;
}
2891
2892
REGISTER_PER_THREAD_INIT(feed_post_mortem_late);
2893
#endif
2894
2895
#ifdef DEBUG_UNIT
2896
2897
extern struct list unittest_list;
2898
2899
void list_unittests()
2900
{
2901
  struct unittest_fct *unit;
2902
  int found = 0;
2903
2904
  fprintf(stdout, "Unit tests list :");
2905
2906
  list_for_each_entry(unit, &unittest_list, list) {
2907
    fprintf(stdout, " %s", unit->name);
2908
    found = 1;
2909
  }
2910
2911
  if (!found)
2912
    fprintf(stdout, " none");
2913
2914
  fprintf(stdout, "\n");
2915
}
2916
2917
#endif
2918
2919
#if DEBUG_STRICT > 1
/* config parser for global "debug.counters", accepts "on" or "off" */
static int cfg_parse_debug_counters(char **args, int section_type, struct proxy *curpx,
                                    const struct proxy *defpx, const char *file, int line,
                                    char **err)
{
	int enable;

	if (too_many_args(1, args, err, NULL))
		return -1;

	/* decode the argument first, then perform a single atomic store */
	if (strcmp(args[1], "on") == 0)
		enable = 1;
	else if (strcmp(args[1], "off") == 0)
		enable = 0;
	else {
		memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
		return -1;
	}

	HA_ATOMIC_STORE(&debug_enable_counters, enable);
	return 0;
}
#endif
2940
2941
/* config keyword parsers: registers the global "debug.counters" keyword,
 * which only exists when DEBUG_STRICT > 1.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
#if DEBUG_STRICT > 1
	{ CFG_GLOBAL, "debug.counters",         cfg_parse_debug_counters      },
#endif
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
2949
2950
/* register cli keywords: "debug counters", the "debug dev ..." family of
 * expert-mode tools (some only in DEBUG_DEV / DEBUG_MEM_STATS builds), and
 * the "show dev"/"show libs"/"show threads" reporting commands.
 */
static struct cli_kw_list cli_kws = {{ },{
#if !defined(USE_OBSOLETE_LINKER)
	{{ "debug", "counters", NULL },        "debug counters [?|all|bug|cnt|chk|glt]* : dump/reset rare event counters",          debug_parse_cli_counters, debug_iohandler_counters, NULL, NULL, 0 },
#endif
	{{ "debug", "dev", "bug", NULL },      "debug dev bug                           : call BUG_ON() and crash",                 debug_parse_cli_bug,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "check", NULL },    "debug dev check                         : call CHECK_IF() and possibly crash",      debug_parse_cli_check, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "close", NULL },    "debug dev close  <fd> [hard]            : close this file descriptor",              debug_parse_cli_close, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "deadlock", NULL }, "debug dev deadlock [nbtask]             : deadlock between this number of tasks",   debug_parse_cli_deadlock, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "delay", NULL },    "debug dev delay  [ms]                   : sleep this long",                         debug_parse_cli_delay, NULL, NULL, NULL, ACCESS_EXPERT },
#if defined(DEBUG_DEV)
	{{ "debug", "dev", "delay-inj", NULL },"debug dev delay-inj <inter> <count>     : inject random delays into threads",       debug_parse_delay_inj, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "exec",  NULL },    "debug dev exec   [cmd] ...              : show this command's output",              debug_parse_cli_exec,  NULL, NULL, NULL, ACCESS_EXPERT },
#endif
	{{ "debug", "dev", "fd", NULL },       "debug dev fd                            : scan for rogue/unhandled FDs",            debug_parse_cli_fd,    debug_iohandler_fd, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "exit",  NULL },    "debug dev exit   [code]                 : immediately exit the process",            debug_parse_cli_exit,  NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "hash", NULL },     "debug dev hash   [msg]                  : return msg hashed if anon is set",        debug_parse_cli_hash,  NULL, NULL, NULL, 0 },
	{{ "debug", "dev", "hex",   NULL },    "debug dev hex    <addr> [len]           : dump a memory area",                      debug_parse_cli_hex,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "log",   NULL },    "debug dev log    [msg] ...              : send this msg to global logs",            debug_parse_cli_log,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "loop",  NULL },    "debug dev loop   <ms> [isolated|warn]   : loop this long, possibly isolated",       debug_parse_cli_loop,  NULL, NULL, NULL, ACCESS_EXPERT },
#if defined(DEBUG_MEM_STATS)
	{{ "debug", "dev", "memstats", NULL }, "debug dev memstats [reset|all|match ...]: dump/reset memory statistics",            debug_parse_cli_memstats, debug_iohandler_memstats, debug_release_memstats, NULL, 0 },
#endif
	{{ "debug", "dev", "panic", NULL },    "debug dev panic                         : immediately trigger a panic",             debug_parse_cli_panic, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "sched", NULL },    "debug dev sched  {task|tasklet} [k=v]*  : stress the scheduler",                    debug_parse_cli_sched, NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "stream",NULL },    "debug dev stream [k=v]*                 : show/manipulate stream flags",            debug_parse_cli_stream,NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "sym",   NULL },    "debug dev sym    <addr>                 : resolve symbol address",                  debug_parse_cli_sym,   NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "task",  NULL },    "debug dev task <ptr> [wake|expire|kill] : show/wake/expire/kill task/tasklet",      debug_parse_cli_task,  NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "tkill", NULL },    "debug dev tkill  [thr] [sig]            : send signal to thread",                   debug_parse_cli_tkill, NULL, NULL, NULL, ACCESS_EXPERT },
#if defined(DEBUG_DEV)
	{{ "debug", "dev", "trace", NULL },    "debug dev trace [nbthr]                 : flood traces from that many threads",     debug_parse_cli_trace,  NULL, NULL, NULL, ACCESS_EXPERT },
#endif
	{{ "debug", "dev", "warn",  NULL },    "debug dev warn                          : call WARN_ON() and possibly crash",       debug_parse_cli_warn,  NULL, NULL, NULL, ACCESS_EXPERT },
	{{ "debug", "dev", "write", NULL },    "debug dev write  [size]                 : write that many bytes in return",         debug_parse_cli_write, NULL, NULL, NULL, ACCESS_EXPERT },

	{{ "show", "dev", NULL, NULL },        "show dev                                : show debug info for developers",          debug_parse_cli_show_dev, NULL, NULL },
#if defined(HA_HAVE_DUMP_LIBS)
	{{ "show", "libs", NULL, NULL },       "show libs                               : show loaded object files and libraries", debug_parse_cli_show_libs, NULL, NULL },
#endif
	{{ "show", "threads", NULL, NULL },    "show threads                            : show some threads debugging information", NULL, cli_io_handler_show_threads, NULL },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);