Coverage Report

Created: 2025-10-27 06:17

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/mhd2/src/mhd2/events_process.c
Line
Count
Source
1
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
2
/*
3
  This file is part of GNU libmicrohttpd.
4
  Copyright (C) 2024 Evgeny Grin (Karlson2k)
5
6
  GNU libmicrohttpd is free software; you can redistribute it and/or
7
  modify it under the terms of the GNU Lesser General Public
8
  License as published by the Free Software Foundation; either
9
  version 2.1 of the License, or (at your option) any later version.
10
11
  GNU libmicrohttpd is distributed in the hope that it will be useful,
12
  but WITHOUT ANY WARRANTY; without even the implied warranty of
13
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
  Lesser General Public License for more details.
15
16
  Alternatively, you can redistribute GNU libmicrohttpd and/or
17
  modify it under the terms of the GNU General Public License as
18
  published by the Free Software Foundation; either version 2 of
19
  the License, or (at your option) any later version, together
20
  with the eCos exception, as follows:
21
22
    As a special exception, if other files instantiate templates or
23
    use macros or inline functions from this file, or you compile this
24
    file and link it with other works to produce a work based on this
25
    file, this file does not by itself cause the resulting work to be
26
    covered by the GNU General Public License. However the source code
27
    for this file must still be made available in accordance with
28
    section (3) of the GNU General Public License v2.
29
30
    This exception does not invalidate any other reasons why a work
31
    based on this file might be covered by the GNU General Public
32
    License.
33
34
  You should have received copies of the GNU Lesser General Public
35
  License and the GNU General Public License along with this library;
36
  if not, see <https://www.gnu.org/licenses/>.
37
*/
38
39
/**
40
 * @file src/mhd2/events_process.c
41
 * @brief  The implementation of events processing functions
42
 * @author Karlson2k (Evgeny Grin)
43
 */
44
45
#include "mhd_sys_options.h"
46
#include "events_process.h"
47
48
#include "mhd_assert.h"
49
#include "mhd_unreachable.h"
50
51
#if defined(mhd_DEBUG_SUSPEND_RESUME) || defined(mhd_DEBUG_POLLING_FDS)
52
#  include <stdio.h>
53
#endif /* mhd_DEBUG_SUSPEND_RESUME */
54
55
#include "mhd_locks.h"
56
57
#include "mhd_socket_type.h"
58
#include "sys_poll.h"
59
#include "sys_select.h"
60
#ifdef MHD_SUPPORT_EPOLL
61
#  include <sys/epoll.h>
62
#endif
63
#ifdef MHD_SOCKETS_KIND_POSIX
64
#  include "sys_errno.h"
65
#endif
66
67
#include "mhd_itc.h"
68
69
#include "mhd_panic.h"
70
#include "mhd_dbg_print.h"
71
72
#include "mhd_sockets_macros.h"
73
74
#include "mhd_daemon.h"
75
#include "mhd_connection.h"
76
77
#include "conn_mark_ready.h"
78
#include "daemon_logger.h"
79
#include "daemon_add_conn.h"
80
#include "daemon_funcs.h"
81
#include "conn_data_process.h"
82
#include "stream_funcs.h"
83
#include "extr_events_funcs.h"
84
85
#ifdef MHD_SUPPORT_UPGRADE
86
#  include "upgrade_proc.h"
87
#endif /* MHD_SUPPORT_UPGRADE */
88
89
#include "mhd_public_api.h"
90
91
#ifdef mhd_DEBUG_POLLING_FDS
92
/**
93
 * Debug-printf request of FD polling/monitoring
94
 * @param fd_name the name of FD ("ITC", "lstn" or "conn")
95
 * @param fd the FD value
96
 * @param r_ready the request for read (or receive) readiness
97
 * @param w_ready the request for write (or send) readiness
98
 * @param e_ready the request for exception (or error) readiness
99
 */
100
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void
101
mhd_dbg_print_fd_mon_req (const char *fd_name,
102
                          MHD_Socket fd,
103
                          bool r_ready,
104
                          bool w_ready,
105
                          bool e_ready)
106
{
107
  char state_str[] = "x:x:x";
108
  state_str[0] = r_ready ? 'R' : '-';
109
  state_str[2] = w_ready ? 'W' : '-';
110
  state_str[4] = e_ready ? 'E' : '-';
111
112
  fprintf (stderr,
113
           "### Set FD watching: %4s [%2llu] for %s\n",
114
           fd_name,
115
           (unsigned long long) fd,
116
           state_str);
117
}
118
119
120
/**
121
 * Debug-printf reported (by polling) status of FD
122
 * @param fd_name the name of FD ("ITC", "lstn" or "conn")
123
 * @param fd the FD value
124
 * @param r_ready the read (or receive) readiness
125
 * @param w_ready the write (or send) readiness
126
 * @param e_ready the exception (or error) readiness
127
 */
128
static MHD_FN_PAR_NONNULL_ALL_ void
129
dbg_print_fd_state_update (const char *fd_name,
130
                           MHD_Socket fd,
131
                           bool r_ready,
132
                           bool w_ready,
133
                           bool e_ready)
134
{
135
  char state_str[] = "x:x:x";
136
  state_str[0] = r_ready ? 'R' : '-';
137
  state_str[2] = w_ready ? 'W' : '-';
138
  state_str[4] = e_ready ? 'E' : '-';
139
140
  fprintf (stderr,
141
           "### FD state update: %4s [%2llu]  -> %s\n",
142
           fd_name,
143
           (unsigned long long) fd,
144
           state_str);
145
}
146
147
148
#else  /* ! mhd_DEBUG_POLLING_FDS */
149
#  define dbg_print_fd_state_update(fd_n,fd,r_ready,w_ready,e_ready) \
150
0
        ((void) 0)
151
#endif /* ! mhd_DEBUG_POLLING_FDS */
152
153
#ifdef MHD_SUPPORT_THREADS
154
/**
155
 * Log error message about broken ITC
156
 * @param d the daemon to use
157
 */
158
static MHD_FN_PAR_NONNULL_ALL_ void
159
log_itc_broken (struct MHD_Daemon *restrict d)
160
0
{
161
0
  mhd_LOG_MSG (d, \
162
0
               MHD_SC_ITC_STATUS_ERROR, \
163
0
               "System reported that ITC has an error status or broken.");
164
0
}
165
166
167
#endif /* MHD_SUPPORT_THREADS */
168
169
/**
170
 * Log error message about broken listen socket
171
 * @param d the daemon to use
172
 */
173
static MHD_FN_PAR_NONNULL_ALL_ void
174
log_listen_broken (struct MHD_Daemon *restrict d)
175
0
{
176
0
  mhd_LOG_MSG (d, MHD_SC_LISTEN_STATUS_ERROR, \
177
0
               "System reported that the listening socket has an error " \
178
0
               "status or broken. The daemon will not listen any more.");
179
0
}
180
181
182
/**
 * Determine the maximum time the daemon may block waiting for events.
 * Returns zero when there is work pending that must be processed without
 * waiting (pending accept, pending connection resume, or connections already
 * marked ready); otherwise waits indefinitely.
 * Note: the checks are ordered; the first matching condition determines the
 * debug message printed (debug builds only).
 * @param d the daemon to use (must not be a workers-pool master)
 * @return zero if the event poll must not block,
 *         #MHD_WAIT_INDEFINITELY otherwise
 */
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ uint_fast64_t
mhd_daemon_get_wait_max (struct MHD_Daemon *restrict d)
{

  mhd_assert (! mhd_D_HAS_WORKERS (d));

  /* A connection is waiting to be accept()'ed and new connections
     are currently allowed: do not block. */
  if (d->events.accept_pending && ! d->conns.block_new)
  {
#ifdef mhd_DEBUG_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(accept new conn pending)\n");
#endif
    return 0;
  }
  /* At least one suspended connection requested resuming: do not block. */
  if (d->events.act_req.resume)
  {
#ifdef mhd_DEBUG_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(resume connection pending)\n");
#endif
    return 0;
  }
  /* Some connection is already on the "process ready" list: do not block. */
  if (NULL != mhd_DLINKEDL_GET_FIRST (&(d->events), proc_ready))
  {
#ifdef mhd_DEBUG_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(connection(s) is already ready)\n");
#endif
    return 0;
  }

#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### mhd_daemon_get_wait_max(daemon) -> MHD_WAIT_INDEFINITELY\n");
#endif
  return MHD_WAIT_INDEFINITELY; // TODO: calculate correct timeout value
}
222
223
224
static MHD_FN_PAR_NONNULL_ALL_ void
225
start_resuming_connection (struct MHD_Connection *restrict c,
226
                           struct MHD_Daemon *restrict d)
227
0
{
228
0
  mhd_assert (c->suspended);
229
#ifdef mhd_DEBUG_SUSPEND_RESUME
230
  fprintf (stderr,
231
           "%%%%%%   Resuming connection, FD: %2llu\n",
232
           (unsigned long long) c->sk.fd);
233
#endif /* mhd_DEBUG_SUSPEND_RESUME */
234
0
  c->suspended = false;
235
0
  mhd_stream_resumed_activity_mark (c);
236
0
  mhd_conn_mark_ready (c, d); /* Force processing connection in this round */
237
0
}
238
239
240
/**
241
 * Check whether any resuming connections are pending and resume them
242
 * @param d the daemon to use
243
 */
244
static MHD_FN_PAR_NONNULL_ALL_ void
245
daemon_resume_conns_if_needed (struct MHD_Daemon *restrict d)
246
0
{
247
0
  struct MHD_Connection *c;
248
249
0
  if (! d->events.act_req.resume)
250
0
    return;
251
252
0
  d->events.act_req.resume = false; /* Reset flag before processing data */
253
254
0
  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
255
0
       NULL != c;
256
0
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
257
0
  {
258
0
    if (c->resuming)
259
0
      start_resuming_connection (c, d);
260
0
  }
261
0
}
262
263
264
mhd_DATA_TRUNCATION_RUNTIME_CHECK_DISABLE
265
266
/**
 * Get the daemon's maximum wait time clamped into 'int' range
 * (for poll-style APIs taking an 'int' timeout).
 * @param d the daemon to use
 * @return the wait value, or INT_MAX when the 64-bit value does not fit
 */
static MHD_FN_PAR_NONNULL_ALL_ int
get_max_wait (struct MHD_Daemon *restrict d)
{
  const uint_fast64_t wait64 = mhd_daemon_get_wait_max (d);
  const int wait_int = (int) wait64;

  /* Clamp when the value does not round-trip through 'int' */
  if ((0 > wait_int) ||
      ((uint_fast64_t) wait_int != wait64))
    return INT_MAX;

  return wait_int;
}
278
279
280
mhd_DATA_TRUNCATION_RUNTIME_CHECK_RESTORE
281
/* End of warning-less data truncation */
282
283
284
/**
 * Record the network readiness reported by the poll backend on the connection
 * and (unless the connection is suspended) update its "process ready" status.
 * @param d the daemon the connection belongs to
 * @param c the connection to update
 * @param recv_ready the read (or receive) readiness
 * @param send_ready the write (or send) readiness
 * @param err_state the exception (or error) readiness
 */
MHD_FN_PAR_NONNULL_ (1) static void
update_conn_net_status (struct MHD_Daemon *restrict d,
                        struct MHD_Connection *restrict c,
                        bool recv_ready,
                        bool send_ready,
                        bool err_state)
{
  unsigned int state_bits;

  mhd_assert (d == c->daemon);
  /* "resuming" must be not processed yet */
  mhd_assert (! c->resuming || c->suspended);

  dbg_print_fd_state_update ("conn", c->sk.fd,
                             recv_ready, send_ready, err_state);

  /* Accumulate readiness flags in plain unsigned to avoid repeated
     enum-cast churn, then store as the enum type. */
  state_bits = (unsigned int) mhd_SOCKET_NET_STATE_NOTHING;
  if (recv_ready)
    state_bits |= (unsigned int) mhd_SOCKET_NET_STATE_RECV_READY;
  if (send_ready)
    state_bits |= (unsigned int) mhd_SOCKET_NET_STATE_SEND_READY;
  if (err_state)
    state_bits |= (unsigned int) mhd_SOCKET_NET_STATE_ERROR_READY;
  c->sk.ready = (enum mhd_SocketNetState) state_bits;

  if (c->suspended)
    mhd_assert (! c->in_proc_ready); /* suspended conns never sit on "ready" list */
  else
    mhd_conn_mark_ready_update3 (c, err_state, d);
}
320
321
322
/**
323
 * Accept new connections on the daemon
324
 * @param d the daemon to use
325
 * @return true if all incoming connections has been accepted,
326
 *         false if some connection may still wait to be accepted
327
 */
328
MHD_FN_PAR_NONNULL_ (1) static bool
daemon_accept_new_conns (struct MHD_Daemon *restrict d)
{
  unsigned int num_to_accept;
  mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
  mhd_assert (! d->net.listen.is_broken);
  mhd_assert (! d->conns.block_new);
  mhd_assert (d->conns.count < d->conns.cfg.count_limit);
  mhd_assert (! mhd_D_HAS_WORKERS (d));

  if (! d->net.listen.non_block)
    num_to_accept = 1; /* listen socket is blocking, only one connection can be processed */
  else
  {
    const unsigned int slots_left = d->conns.cfg.count_limit - d->conns.count;
    if (! mhd_D_HAS_MASTER (d))
    {
      /* Fill up to one quarter of allowed limit in one turn */
      num_to_accept = d->conns.cfg.count_limit / 4;
      /* Limit to a reasonable number */
      if (((sizeof(void *) > 4) ? 4096 : 1024) < num_to_accept)
        num_to_accept = ((sizeof(void *) > 4) ? 4096 : 1024);
      if (slots_left < num_to_accept)
        num_to_accept = slots_left;
    }
#ifdef MHD_SUPPORT_THREADS
    else
    {
      /* Has workers thread pool. Care must be taken to evenly distribute
         new connections in the workers pool.
         At the same time, the burst of new connections should be handled as
         quick as possible. */
      const unsigned int num_conn = d->conns.count;
      const unsigned int limit = d->conns.cfg.count_limit;
      const unsigned int num_workers =
        d->threading.hier.master->threading.hier.pool.num;
      if (num_conn < limit / 16)
      {
        num_to_accept = num_conn / num_workers;
        if (8 > num_to_accept)
        {
          if (8 > slots_left / 16)
            num_to_accept = slots_left / 16;
          else
            num_to_accept = 8;
        }
        if (64 < num_to_accept)
          num_to_accept = 64;
      }
      else if (num_conn < limit / 8)
      {
        num_to_accept = num_conn * 2 / num_workers;
        if (8 > num_to_accept)
        {
          if (8 > slots_left / 8)
            num_to_accept = slots_left / 8;
          else
            num_to_accept = 8;
        }
        if (128 < num_to_accept)
          num_to_accept = 128;
      }
      else if (num_conn < limit / 4)
      {
        num_to_accept = num_conn * 4 / num_workers;
        if (8 > num_to_accept)
          num_to_accept = 8;
        if (slots_left / 4 < num_to_accept)
          num_to_accept = slots_left / 4;
        if (256 < num_to_accept)
          num_to_accept = 256;
      }
      else if (num_conn < limit / 2)
      {
        num_to_accept = num_conn * 8 / num_workers;
        if (16 > num_to_accept)
          num_to_accept = 16;
        if (slots_left / 4 < num_to_accept)
          num_to_accept = slots_left / 4;
        if (256 < num_to_accept)
          num_to_accept = 256;
      }
      else if (slots_left > limit / 4)
      {
        num_to_accept = slots_left * 4 / num_workers;
        if (slots_left / 8 < num_to_accept)
          num_to_accept = slots_left / 8;
        if (128 < num_to_accept)
          num_to_accept = 128;
      }
      else if (slots_left > limit / 8)
      {
        num_to_accept = slots_left * 2 / num_workers;
        if (slots_left / 16 < num_to_accept)
          num_to_accept = slots_left / 16;
        if (64 < num_to_accept)
          num_to_accept = 64;
      }
      else /* (slots_left <= limit / 8) */
        num_to_accept = slots_left / 16;

      if (0 == num_to_accept)
        num_to_accept = 1;
      else if (slots_left > num_to_accept)
        /* NOTE(review): this RAISES the value to 'slots_left', defeating the
           per-tier limits computed above. A clamp-down
           ('if (slots_left < num_to_accept) num_to_accept = slots_left;')
           looks like the intent — confirm against design before changing. */
        num_to_accept = slots_left;
    }
#endif /* MHD_SUPPORT_THREADS */
  }

  /* FIXED: was 'while (0 != --num_to_accept)', which pre-decrements and thus
     attempts one accept fewer than requested; with a blocking listen socket
     (num_to_accept == 1) no accept was attempted at all. */
  while (0 != num_to_accept--)
  {
    enum mhd_DaemonAcceptResult res;
    res = mhd_daemon_accept_connection (d);
    if (mhd_DAEMON_ACCEPT_NO_MORE_PENDING == res)
      return true;
    if (mhd_DAEMON_ACCEPT_FAILED == res)
      return false; /* This is probably "no system resources" error.
                       Do not try to accept more connections now. */
  }
  return false; /* More connections may need to be accepted */
}
449
450
451
/**
452
 * Check whether particular connection should be excluded from standard HTTP
453
 * communication.
454
 * @param c the connection the check
455
 * @return 'true' if connection should not be used for HTTP communication
456
 *         'false' if connection should be processed as HTTP
457
 */
458
MHD_static_inline_ MHD_FN_PAR_NONNULL_ALL_ bool
459
is_conn_excluded_from_http_comm (struct MHD_Connection *restrict c)
460
0
{
461
0
#ifdef MHD_SUPPORT_UPGRADE
462
0
  if (NULL != c->upgr.c)
463
0
  {
464
0
    mhd_assert ((mhd_HTTP_STAGE_UPGRADED == c->stage) || \
465
0
                (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage));
466
0
    return true;
467
0
  }
468
0
#endif /* MHD_SUPPORT_UPGRADE */
469
470
0
  return c->suspended;
471
0
}
472
473
474
static bool
475
daemon_process_all_active_conns (struct MHD_Daemon *restrict d)
476
0
{
477
0
  struct MHD_Connection *c;
478
0
  mhd_assert (! mhd_D_HAS_WORKERS (d));
479
480
0
  c = mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready);
481
0
  while (NULL != c)
482
0
  {
483
0
    struct MHD_Connection *next;
484
    /* The current connection can be closed or removed from
485
       "ready" list */
486
0
    next = mhd_DLINKEDL_GET_NEXT (c, proc_ready);
487
0
    if (! mhd_conn_process_recv_send_data (c))
488
0
    {
489
0
      mhd_conn_pre_clean (c);
490
0
      mhd_conn_remove_from_daemon (c);
491
0
      mhd_conn_close_final (c);
492
0
    }
493
0
    else
494
0
    {
495
0
      mhd_assert (! c->resuming || c->suspended);
496
0
    }
497
498
0
    c = next;
499
0
  }
500
0
  return true;
501
0
}
502
503
504
#ifdef MHD_SUPPORT_UPGRADE
505
/**
506
 * Clean-up all HTTP-Upgraded connections scheduled for clean-up
507
 * @param d the daemon to process
508
 */
509
static MHD_FN_PAR_NONNULL_ALL_ void
daemon_cleanup_upgraded_conns (struct MHD_Daemon *d)
{
  /* Volatile-qualified view of the daemon: forces a fresh read of the
     clean-up list head for the lock-free "anything queued?" peek below. */
  volatile struct MHD_Daemon *voltl_d = d;
  mhd_assert (! mhd_D_HAS_WORKERS (d));

  /* Cheap unlocked early exit: if the peek sees an empty list, nothing is
     scheduled for clean-up.  (Entries added concurrently after this check
     are picked up on a later call.) */
  if (NULL == mhd_DLINKEDL_GET_FIRST (&(voltl_d->conns.upgr), upgr_cleanup))
    return;

  while (true)
  {
    struct MHD_Connection *c;

    /* Pop one connection from the clean-up list under the lock; the actual
       clean-up work is done after the lock is released. */
    mhd_mutex_lock_chk (&(d->conns.upgr.ucu_lock));
    c = mhd_DLINKEDL_GET_FIRST (&(d->conns.upgr), upgr_cleanup);
    if (NULL != c)
      mhd_DLINKEDL_DEL (&(d->conns.upgr), c, upgr_cleanup);
    mhd_mutex_unlock_chk (&(d->conns.upgr.ucu_lock));

    if (NULL == c)
      break; /* list drained */

    /* Fully dispose of the popped HTTP-Upgraded connection */
    mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage);
    mhd_upgraded_deinit (c);
    mhd_conn_pre_clean (c);
    mhd_conn_remove_from_daemon (c);
    mhd_conn_close_final (c);
  }
}
538
539
540
#else  /* ! MHD_SUPPORT_UPGRADE */
541
#define daemon_cleanup_upgraded_conns(d) ((void) d)
542
#endif /* ! MHD_SUPPORT_UPGRADE */
543
544
/**
 * Close and dispose of every connection still attached to the daemon
 * (used during daemon destruction).  HTTP-Upgraded connections that the
 * application never closed are de-initialised and a warning is logged.
 * @param d the daemon to use
 */
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void
mhd_daemon_close_all_conns (struct MHD_Daemon *d)
{
  struct MHD_Connection *c;
  bool has_upgraded_unclosed;

  has_upgraded_unclosed = false;
  if (! mhd_D_HAS_THR_PER_CONN (d))
  {
    /* Repeatedly take the list tail: each iteration removes the
       connection from the daemon, so GET_LAST yields a new one. */
    for (c = mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn);
         NULL != c;
         c = mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn))
    {
#ifdef MHD_SUPPORT_UPGRADE
      mhd_assert (mhd_HTTP_STAGE_UPGRADING != c->stage);
      mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING != c->stage);
      if (NULL != c->upgr.c)
      {
        /* Still in application's hands: de-init the upgrade state and
           remember to warn once after the loop. */
        mhd_assert (c == c->upgr.c);
        has_upgraded_unclosed = true;
        mhd_upgraded_deinit (c);
      }
      else /* Combined with the next 'if' */
#endif
      /* 'if (1)' carries the '#ifdef'-ed 'else' above: with upgrade support
         compiled in, closing is skipped for upgraded connections; without
         it, closing happens unconditionally.  Do not "simplify". */
      if (1)
        mhd_conn_start_closing_d_shutdown (c);
      mhd_conn_pre_clean (c);
      mhd_conn_remove_from_daemon (c);
      mhd_conn_close_final (c);
    }
  }
  else
    mhd_assert (0 && "Not implemented yet");

  if (has_upgraded_unclosed)
    mhd_LOG_MSG (d, MHD_SC_DAEMON_DESTROYED_WITH_UNCLOSED_UPGRADED, \
                 "The daemon is being destroyed, but at least one " \
                 "HTTP-Upgraded connection is unclosed. Any use (including " \
                 "closing) of such connections is undefined behaviour.");
}
584
585
586
/**
587
 * Process all external events updated of existing connections, information
588
 * about new connections pending to be accept()'ed, presence of the events on
589
 * the daemon's ITC; resume connections.
590
 * @return 'true' if processed successfully,
591
 *         'false' is unrecoverable error occurs and the daemon must be
592
 *         closed
593
 */
594
static MHD_FN_PAR_NONNULL_ (1) bool
ext_events_process_net_updates_and_resume_conn (struct MHD_Daemon *restrict d)
{
  struct MHD_Connection *restrict conn;

  mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
  mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type);

  /* Reset the resume request flag before processing data */
  d->events.act_req.resume = false;

#ifdef MHD_SUPPORT_THREADS
  if (d->events.data.extr.itc_data.is_active)
  {
    d->events.data.extr.itc_data.is_active = false;
    /* Clear ITC here, before other data processing.
     * Any external events will activate ITC again if additional data to
     * process is added externally. Clearing ITC early ensures that new data
     * (with additional ITC activation) will not be missed. */
    mhd_itc_clear (d->threading.itc);
  }
#endif /* MHD_SUPPORT_THREADS */

  for (conn = mhd_DLINKEDL_GET_FIRST (&(d->conns), all_conn);
       NULL != conn;
       conn = mhd_DLINKEDL_GET_NEXT (conn, all_conn))
  {
    if (conn->resuming)
    {
      start_resuming_connection (conn, d);
      continue;
    }
    if (is_conn_excluded_from_http_comm (conn))
    {
      /* Suspended / HTTP-Upgraded: must not be on the "ready" list */
      mhd_assert (! conn->in_proc_ready);
      continue;
    }
    /* Refresh the "process ready" status from the externally reported
       socket state; the error bit forces processing. */
    mhd_conn_mark_ready_update3 (
      conn,
      0 != (((unsigned int) conn->sk.ready)
            & mhd_SOCKET_NET_STATE_ERROR_READY),
      d);
  }

  return true;
}
643
644
645
/**
646
 * Update all registrations of FDs for external monitoring.
647
 * @return #MHD_SC_OK on success,
648
 *         error code otherwise
649
 */
650
static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
ext_events_update_registrations (struct MHD_Daemon *restrict d)
{
  const bool rereg_all = d->events.data.extr.reg_all;
  const bool edge_trigg = (mhd_WM_INT_EXTERNAL_EVENTS_EDGE == d->wmode_int);
  bool daemon_fds_succeed;
  struct MHD_Connection *c;
  struct MHD_Connection *c_next;

  mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
  mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type);

  /* (Re-)register daemon's FDs */

#ifdef MHD_SUPPORT_THREADS
  if (rereg_all ||
      (NULL == d->events.data.extr.itc_data.app_cntx))
  {
    /* (Re-)register ITC FD */
    d->events.data.extr.itc_data.app_cntx =
      mhd_daemon_extr_event_reg (d,
                                 mhd_itc_r_fd (d->threading.itc),
                                 MHD_FD_STATE_RECV_EXCEPT,
                                 d->events.data.extr.itc_data.app_cntx,
                                 (struct MHD_EventUpdateContext *)
                                 mhd_SOCKET_REL_MARKER_ITC);
  }
  daemon_fds_succeed = (NULL != d->events.data.extr.itc_data.app_cntx);
#else  /* ! MHD_SUPPORT_THREADS */
  daemon_fds_succeed = true;
#endif /* ! MHD_SUPPORT_THREADS */

  if (daemon_fds_succeed)
  {
    if ((MHD_INVALID_SOCKET == d->net.listen.fd) &&
        (NULL != d->events.data.extr.listen_data.app_cntx))
    {
      /* De-register the listen FD */
      d->events.data.extr.listen_data.app_cntx =
        mhd_daemon_extr_event_reg (d,
                                   d->net.listen.fd,
                                   MHD_FD_STATE_NONE,
                                   d->events.data.extr.listen_data.app_cntx,
                                   (struct MHD_EventUpdateContext *)
                                   mhd_SOCKET_REL_MARKER_LISTEN);
      if (NULL != d->events.data.extr.listen_data.app_cntx)
        mhd_log_extr_event_dereg_failed (d);
    }
    else if ((MHD_INVALID_SOCKET != d->net.listen.fd) &&
             (rereg_all || (NULL == d->events.data.extr.listen_data.app_cntx)))
    {
      /* (Re-)register listen FD */
      d->events.data.extr.listen_data.app_cntx =
        mhd_daemon_extr_event_reg (d,
                                   d->net.listen.fd,
                                   MHD_FD_STATE_RECV_EXCEPT,
                                   d->events.data.extr.listen_data.app_cntx,
                                   (struct MHD_EventUpdateContext *)
                                   mhd_SOCKET_REL_MARKER_LISTEN);

      daemon_fds_succeed = (NULL != d->events.data.extr.listen_data.app_cntx);
    }
  }

  if (! daemon_fds_succeed)
  {
    mhd_LOG_MSG (d, MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE, \
                 "Failed to register daemon FDs in the application "
                 "(external events) monitoring.");
    return MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE;
  }

  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
       NULL != c;
       c = c_next)
  {
    enum MHD_FdState watch_for;

    /* Get the next connection now, as the current connection could be removed
       from the daemon. */
    c_next = mhd_DLINKEDL_GET_NEXT (c,all_conn);

    mhd_assert (! c->resuming || c->suspended);

    if (is_conn_excluded_from_http_comm (c))
    {
      if (NULL != c->extr_event.app_cntx)
      {
        /* De-register the connection socket FD */
        c->extr_event.app_cntx =
          mhd_daemon_extr_event_reg (d,
                                     c->sk.fd,
                                     MHD_FD_STATE_NONE,
                                     c->extr_event.app_cntx,
                                     (struct MHD_EventUpdateContext *) c);
        if (NULL != c->extr_event.app_cntx)
          mhd_log_extr_event_dereg_failed (d);
      }
      continue;
    }

    /* Edge-triggered mode always watches all events; level-triggered mode
       watches exceptions plus whatever the stream currently needs. */
    watch_for =
      edge_trigg ?
      MHD_FD_STATE_RECV_SEND_EXCEPT :
      (enum MHD_FdState) (MHD_FD_STATE_EXCEPT
                          | (((unsigned int) c->event_loop_info)
                             & (MHD_EVENT_LOOP_INFO_RECV
                                | MHD_EVENT_LOOP_INFO_SEND)));

    mhd_assert ((! edge_trigg) || \
                (MHD_FD_STATE_RECV_SEND_EXCEPT == c->extr_event.reg_for) || \
                (NULL == c->extr_event.app_cntx));

    if ((NULL == c->extr_event.app_cntx) ||
        rereg_all ||
        (! edge_trigg && (watch_for != c->extr_event.reg_for)))
    {
      /* (Re-)register the connection socket FD */
      c->extr_event.app_cntx =
        mhd_daemon_extr_event_reg (d,
                                   c->sk.fd,
                                   watch_for,
                                   c->extr_event.app_cntx,
                                   (struct MHD_EventUpdateContext *) c);
      if (NULL == c->extr_event.app_cntx)
      {
        /* Registration failed: the connection cannot be monitored,
           tear it down completely. */
        mhd_conn_start_closing_ext_event_failed (c);
        mhd_conn_pre_clean (c);
        mhd_conn_remove_from_daemon (c);
        mhd_conn_close_final (c);
        /* FIXED: previously fell through to write 'c->extr_event.reg_for'
           on the just-destroyed connection (use-after-free).  Skip to the
           next connection instead. */
        continue;
      }
      c->extr_event.reg_for = watch_for;
    }
  }

  return MHD_SC_OK;
}
787
788
789
#ifdef MHD_SUPPORT_SELECT
790
791
/**
792
 * Add socket to the fd_set
793
 * @param fd the socket to add
794
 * @param fs the pointer to fd_set
795
 * @param max the pointer to variable to be updated with maximum FD value (or
796
 *            set to non-zero in case of WinSock)
797
 * @param d the daemon object
798
 */
799
MHD_static_inline_ MHD_FN_PAR_NONNULL_ALL_
MHD_FN_PAR_INOUT_ (2)
MHD_FN_PAR_INOUT_ (3) void
fd_set_wrap (MHD_Socket fd,
             fd_set *restrict fs,
             int *restrict max,
             struct MHD_Daemon *restrict d)
{
  mhd_assert (mhd_FD_FITS_DAEMON (d, fd)); /* Must be checked for every FD before
                                              it is added */
  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  (void) d; /* Unused with non-debug builds */
#if defined(MHD_SOCKETS_KIND_POSIX)
  /* POSIX: add to the set and track the numerically largest FD
     (the 'nfds' argument required by select()) */
  FD_SET (fd, fs);
  if (*max < fd)
    *max = fd;
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
  /* Use custom set function to take advantage of known uniqueness of
   * used sockets (to skip useless (for this function) check for duplicated
   * sockets implemented in system's macro).
   * WinSock ignores select()'s first argument, so '*max' is just a
   * "set is non-empty" flag here. */
  mhd_assert (fs->fd_count < FD_SETSIZE - 1); /* Daemon limits set to always fit FD_SETSIZE */
  mhd_assert (! FD_ISSET (fd, fs)); /* All sockets must be unique */
  fs->fd_array[fs->fd_count++] = fd;
  *max = 1;
#else
#error Unknown sockets type
#endif
}
827
828
829
/**
830
 * Set daemon's FD_SETs to monitor all daemon's sockets
831
 * @param d the daemon to use
832
 * @param listen_only set to 'true' if connections's sockets should NOT
833
 *                    be monitored
834
 * @return with POSIX sockets: the maximum number of the socket used in
835
 *                             the FD_SETs;
836
 *         with winsock: non-zero if at least one socket has been added to
837
 *                       the FD_SETs,
838
 *                       zero if no sockets in the FD_SETs
839
 */
840
static MHD_FN_PAR_NONNULL_ (1) int
select_update_fdsets (struct MHD_Daemon *restrict d,
                      bool listen_only)
{
  fd_set *const restrict r_set = d->events.data.select.rfds;
  fd_set *const restrict w_set = d->events.data.select.wfds;
  fd_set *const restrict e_set = d->events.data.select.efds;
  struct MHD_Connection *conn;
  int max_fd;

  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  mhd_assert (NULL != r_set);
  mhd_assert (NULL != w_set);
  mhd_assert (NULL != e_set);

  /* Start from empty sets every round */
  FD_ZERO (r_set);
  FD_ZERO (w_set);
  FD_ZERO (e_set);

  max_fd = 0;
#ifdef MHD_SUPPORT_THREADS
  /* The ITC is always watched for reads and exceptions */
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  fd_set_wrap (mhd_itc_r_fd (d->threading.itc), r_set, &max_fd, d);
  fd_set_wrap (mhd_itc_r_fd (d->threading.itc), e_set, &max_fd, d);
  mhd_dbg_print_fd_mon_req ("ITC", \
                            mhd_itc_r_fd (d->threading.itc), \
                            true, \
                            false, \
                            true);
#endif
  /* Watch the listen socket only while new connections are allowed */
  if ((MHD_INVALID_SOCKET != d->net.listen.fd)
      && (! d->conns.block_new))
  {
    mhd_assert (! d->net.listen.is_broken);

    fd_set_wrap (d->net.listen.fd, r_set, &max_fd, d);
    fd_set_wrap (d->net.listen.fd, e_set, &max_fd, d);
    mhd_dbg_print_fd_mon_req ("lstn", \
                              d->net.listen.fd, \
                              true, \
                              false, \
                              true);
  }
  if (listen_only)
    return max_fd;

  for (conn = mhd_DLINKEDL_GET_FIRST (&(d->conns), all_conn);
       NULL != conn;
       conn = mhd_DLINKEDL_GET_NEXT (conn, all_conn))
  {
    mhd_assert (mhd_HTTP_STAGE_CLOSED != conn->stage);
    if (is_conn_excluded_from_http_comm (conn))
      continue; /* suspended or HTTP-Upgraded: not monitored here */

    /* Read/write interest follows the stream's current needs;
       exceptions are always monitored. */
    if (0 != (conn->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
      fd_set_wrap (conn->sk.fd, r_set, &max_fd, d);
    if (0 != (conn->event_loop_info & MHD_EVENT_LOOP_INFO_SEND))
      fd_set_wrap (conn->sk.fd, w_set, &max_fd, d);
    fd_set_wrap (conn->sk.fd, e_set, &max_fd, d);
    mhd_dbg_print_fd_mon_req ("conn", \
                              conn->sk.fd, \
                              FD_ISSET (conn->sk.fd, r_set), \
                              FD_ISSET (conn->sk.fd, w_set), \
                              true);
  }

  return max_fd;
}
927
928
929
/**
 * Update daemon's and connections' statuses from the select() results
 * and start resuming connections marked for resuming.
 * @param d the daemon to use
 * @param num_events the number of ready FDs as reported by select()
 * @return 'true' if processed successfully,
 *         'false' if unrecoverable error occurs and the daemon must be
 *         closed
 */
static MHD_FN_PAR_NONNULL_ (1) bool
select_update_statuses_from_fdsets_and_resume_conn (struct MHD_Daemon *d,
                                                    int num_events)
{
  struct MHD_Connection *c;
  fd_set *const restrict rfds = d->events.data.select.rfds;
  fd_set *const restrict wfds = d->events.data.select.wfds;
  fd_set *const restrict efds = d->events.data.select.efds;
  bool resuming_conn;

  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  mhd_assert (0 <= num_events);
  mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements);

  resuming_conn = d->events.act_req.resume;
  if (resuming_conn)
  {
    mhd_assert (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type));
    mhd_assert (! mhd_D_HAS_THR_PER_CONN (d));
    num_events = (int) -1; /* Force process all connections */
    d->events.act_req.resume = false;
  }

#ifndef MHD_FAVOR_SMALL_CODE
  /* Early exit when nothing is ready (skipped when optimising for size) */
  if (0 == num_events)
    return true;
#endif /* ! MHD_FAVOR_SMALL_CODE */

#ifdef MHD_SUPPORT_THREADS
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  dbg_print_fd_state_update ("ITC", \
                             mhd_itc_r_fd (d->threading.itc), \
                             FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds), \
                             FD_ISSET (mhd_itc_r_fd (d->threading.itc), wfds), \
                             FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds));
  if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds))
  {
    log_itc_broken (d);
    /* ITC is broken, need to stop the daemon thread now as otherwise
       application will not be able to stop the thread. */
    return false;
  }
  if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds))
  {
    --num_events;
    /* Clear ITC here, before other data processing.
     * Any external events will activate ITC again if additional data to
     * process is added externally. Clearing ITC early ensures that new data
     * (with additional ITC activation) will not be missed. */
    mhd_itc_clear (d->threading.itc);
  }

#ifndef MHD_FAVOR_SMALL_CODE
  if (0 == num_events)
    return true;
#endif /* ! MHD_FAVOR_SMALL_CODE */
#endif /* MHD_SUPPORT_THREADS */

  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    mhd_assert (! d->net.listen.is_broken);
    dbg_print_fd_state_update ("lstn", \
                               d->net.listen.fd, \
                               FD_ISSET (d->net.listen.fd, rfds), \
                               FD_ISSET (d->net.listen.fd, wfds), \
                               FD_ISSET (d->net.listen.fd, efds));
    if (FD_ISSET (d->net.listen.fd, efds))
    {
      --num_events;
      log_listen_broken (d);
      /* Close the listening socket unless the master daemon should close it */
      if (! mhd_D_HAS_MASTER (d))
        mhd_socket_close (d->net.listen.fd);

      d->events.accept_pending = false;
      d->net.listen.is_broken = true;
      /* Stop monitoring socket to avoid spinning with busy-waiting */
      d->net.listen.fd = MHD_INVALID_SOCKET;
    }
    else
    {
      d->events.accept_pending = FD_ISSET (d->net.listen.fd, rfds);
      if (d->events.accept_pending)
        --num_events;
    }
  }

  mhd_assert ((0 == num_events) || \
              (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)));

#ifdef MHD_FAVOR_SMALL_CODE
  (void) num_events;
  num_events = 1; /* Use static value to minimise the binary size of the next loop */
#endif /* MHD_FAVOR_SMALL_CODE */

  /* When resuming, num_events is -1 so the whole list is walked */
  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns), all_conn);
       (NULL != c) && (0 != num_events);
       c = mhd_DLINKEDL_GET_NEXT (c, all_conn))
  {
    if (c->resuming)
      start_resuming_connection (c, d);
    else
    {
      MHD_Socket sk;
      bool recv_ready;
      bool send_ready;
      bool err_state;

      if (is_conn_excluded_from_http_comm (c))
        continue;

      sk = c->sk.fd;
      recv_ready = FD_ISSET (sk, rfds);
      send_ready = FD_ISSET (sk, wfds);
      err_state = FD_ISSET (sk, efds);

      update_conn_net_status (d,
                              c,
                              recv_ready,
                              send_ready,
                              err_state);
#ifndef MHD_FAVOR_SMALL_CODE
      if (recv_ready || send_ready || err_state)
        --num_events;
#endif /* ! MHD_FAVOR_SMALL_CODE */
    }
  }

#ifndef MHD_FAVOR_SMALL_CODE
  mhd_assert ((0 == num_events) || resuming_conn);
#endif /* ! MHD_FAVOR_SMALL_CODE */
  return true;
}
1062
1063
1064
/**
1065
 * Update states of all connections, check for connection pending
1066
 * to be accept()'ed, check for the events on ITC; resume connections
1067
 * @param listen_only set to 'true' if connections's sockets should NOT
1068
 *                    be monitored
1069
 * @return 'true' if processed successfully,
1070
 *         'false' is unrecoverable error occurs and the daemon must be
1071
 *         closed
1072
 */
1073
static MHD_FN_PAR_NONNULL_ (1) bool
1074
get_all_net_updates_by_select_and_resume_conn (struct MHD_Daemon *restrict d,
1075
                                               bool listen_only)
1076
0
{
1077
0
  int max_socket;
1078
0
  int max_wait;
1079
0
  struct timeval tmvl;
1080
0
  int num_events;
1081
0
  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
1082
1083
0
  max_socket = select_update_fdsets (d,
1084
0
                                     listen_only);
1085
1086
0
  max_wait = get_max_wait (d); // TODO: use correct timeout value
1087
1088
#ifdef MHD_SOCKETS_KIND_WINSOCK
1089
  if (0 == max_socket)
1090
  {
1091
    Sleep ((unsigned int) max_wait);
1092
    return true;
1093
  }
1094
#endif /* MHD_SOCKETS_KIND_WINSOCK */
1095
1096
0
  tmvl.tv_sec = max_wait / 1000;
1097
0
#ifndef MHD_SOCKETS_KIND_WINSOCK
1098
0
  tmvl.tv_usec = (uint_least16_t) ((max_wait % 1000) * 1000);
1099
#else
1100
  tmvl.tv_usec = (int) ((max_wait % 1000) * 1000);
1101
#endif
1102
1103
#ifdef mhd_DEBUG_POLLING_FDS
1104
  fprintf (stderr,
1105
           "### (Starting) select(%d, rfds, wfds, efds, [%llu, %llu])...\n",
1106
           max_socket + 1,
1107
           (unsigned long long) tmvl.tv_sec,
1108
           (unsigned long long) tmvl.tv_usec);
1109
#endif /* mhd_DEBUG_POLLING_FDS */
1110
0
  num_events = select (max_socket + 1,
1111
0
                       d->events.data.select.rfds,
1112
0
                       d->events.data.select.wfds,
1113
0
                       d->events.data.select.efds,
1114
0
                       &tmvl);
1115
#ifdef mhd_DEBUG_POLLING_FDS
1116
  fprintf (stderr,
1117
           "### (Finished) select(%d, rfds, wfds, efds, ->[%llu, %llu]) -> "
1118
           "%d\n",
1119
           max_socket + 1,
1120
           (unsigned long long) tmvl.tv_sec,
1121
           (unsigned long long) tmvl.tv_usec,
1122
           num_events);
1123
#endif /* mhd_DEBUG_POLLING_FDS */
1124
1125
0
  if (0 > num_events)
1126
0
  {
1127
0
    int err;
1128
0
    bool is_hard_error;
1129
0
    bool is_ignored_error;
1130
0
    is_hard_error = false;
1131
0
    is_ignored_error = false;
1132
0
#if defined(MHD_SOCKETS_KIND_POSIX)
1133
0
    err = errno;
1134
0
    if (0 != err)
1135
0
    {
1136
0
      is_hard_error =
1137
0
        ((mhd_EBADF_OR_ZERO == err) || (mhd_EINVAL_OR_ZERO == err));
1138
0
      is_ignored_error = (mhd_EINTR_OR_ZERO == err);
1139
0
    }
1140
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
1141
    err = WSAGetLastError ();
1142
    is_hard_error =
1143
      ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err) ||
1144
       (WSANOTINITIALISED == err));
1145
#endif
1146
0
    if (! is_ignored_error)
1147
0
    {
1148
0
      if (is_hard_error)
1149
0
      {
1150
0
        mhd_LOG_MSG (d, MHD_SC_SELECT_HARD_ERROR, \
1151
0
                     "The select() encountered unrecoverable error.");
1152
0
        return false;
1153
0
      }
1154
0
      mhd_LOG_MSG (d, MHD_SC_SELECT_SOFT_ERROR, \
1155
0
                   "The select() encountered error.");
1156
0
      return true;
1157
0
    }
1158
0
  }
1159
1160
0
  return select_update_statuses_from_fdsets_and_resume_conn (d, num_events);
1161
0
}
1162
1163
1164
#endif /* MHD_SUPPORT_SELECT */
1165
1166
1167
#ifdef MHD_SUPPORT_POLL
1168
1169
/**
 * Fill daemon's pollfd array ('fds') and the parallel relation array
 * ('rel') with all daemon's sockets to be monitored.
 * The fixed slots (ITC, listening socket) must have been pre-filled;
 * only their 'events' members are (re-)set here.
 * @param d the daemon to use
 * @param listen_only set to 'true' if connections' sockets should NOT
 *                    be monitored
 * @return the total number of used elements in the 'fds' array
 */
static MHD_FN_PAR_NONNULL_ (1) unsigned int
poll_update_fds (struct MHD_Daemon *restrict d,
                 bool listen_only)
{
  unsigned int i_s;
  unsigned int i_c;
  struct MHD_Connection *restrict c;
#ifndef NDEBUG
  unsigned int num_skipped = 0;
#endif /* ! NDEBUG */

  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);

  i_s = 0; /* Index of the next "special" (non-connection) slot */
#ifdef MHD_SUPPORT_THREADS
  /* Slot 0: the ITC read FD, always monitored for input */
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  mhd_assert (d->events.data.poll.fds[i_s].fd == \
              mhd_itc_r_fd (d->threading.itc));
  mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \
              d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
  mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events);
#else  /* HAVE_POLL_CLOBBERS_EVENTS */
  d->events.data.poll.fds[i_s].events = POLLIN;
#endif /* HAVE_POLL_CLOBBERS_EVENTS */
  mhd_dbg_print_fd_mon_req ("ITC", \
                            mhd_itc_r_fd (d->threading.itc), \
                            true, \
                            false, \
                            false);
  ++i_s;
#endif
  /* Next slot: the listening socket; monitoring for input is disabled
     while new connections are blocked */
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    mhd_assert (! d->net.listen.is_broken);
    mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
                d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
    mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
                (0 == d->events.data.poll.fds[i_s].events));
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
    d->events.data.poll.fds[i_s].events = d->conns.block_new ? 0 : POLLIN;
    mhd_dbg_print_fd_mon_req ("lstn", \
                              d->net.listen.fd, \
                              POLLIN == d->events.data.poll.fds[i_s].events, \
                              false, \
                              false);
    ++i_s;
  }
  if (listen_only)
    return i_s;

  /* Append one slot per active connection, with the events the
     connection's state machine currently needs */
  i_c = i_s;
  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); NULL != c;
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
  {
    unsigned short events; /* 'unsigned' for correct bits manipulations */

    if (is_conn_excluded_from_http_comm (c))
    {
#ifndef NDEBUG
      ++num_skipped;
#endif /* ! NDEBUG */
      continue;
    }

    mhd_assert ((i_c - i_s) < d->conns.cfg.count_limit);
    mhd_assert (i_c < d->dbg.num_events_elements);
    mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);

    d->events.data.poll.fds[i_c].fd = c->sk.fd;
    d->events.data.poll.rel[i_c].connection = c;
    events = 0;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
      events |= MHD_POLL_IN;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND))
      events |= MHD_POLL_OUT;

    d->events.data.poll.fds[i_c].events = (short) events;
    mhd_dbg_print_fd_mon_req ("conn", \
                              c->sk.fd, \
                              MHD_POLL_IN == (MHD_POLL_IN & events), \
                              MHD_POLL_OUT == (MHD_POLL_OUT & events), \
                              false);
    ++i_c;
  }
  mhd_assert ((d->conns.count - num_skipped) == (i_c - i_s));
  mhd_assert (i_c <= d->dbg.num_events_elements);
  return i_c;
}
1260
1261
1262
/**
 * Update daemon's and connections' statuses from the poll() results.
 * @param d the daemon to use
 * @param num_events the number of FDs with events as reported by poll()
 * @return 'true' if processed successfully,
 *         'false' if unrecoverable error occurs and the daemon must be
 *         closed
 */
static MHD_FN_PAR_NONNULL_ (1) bool
poll_update_statuses_from_fds (struct MHD_Daemon *restrict d,
                               int num_events)
{
  unsigned int i_s;
  unsigned int i_c;
  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);
  mhd_assert (0 <= num_events);
  mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements);

  if (0 == num_events)
    return true;

  i_s = 0; /* Index of the next "special" (non-connection) slot */
#ifdef MHD_SUPPORT_THREADS
  /* Slot 0: the ITC read FD */
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  mhd_assert (d->events.data.poll.fds[i_s].fd == \
              mhd_itc_r_fd (d->threading.itc));
  mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \
              d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
  mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events);
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
  dbg_print_fd_state_update ( \
    "ITC", \
    d->events.data.poll.fds[i_s].fd, \
    0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)), \
    0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_OUT | POLLOUT)), \
    0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)));

  if (0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)))
  {
    log_itc_broken (d);
    /* ITC is broken, need to stop the daemon thread now as otherwise
       application will not be able to stop the thread. */
    return false;
  }
  if (0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)))
  {
    --num_events;
    /* Clear ITC here, before other data processing.
     * Any external events will activate ITC again if additional data to
     * process is added externally. Clearing ITC early ensures that new data
     * (with additional ITC activation) will not be missed. */
    mhd_itc_clear (d->threading.itc);
  }
  ++i_s;

  if (0 == num_events)
    return true;
#endif /* MHD_SUPPORT_THREADS */

  /* Next slot: the listening socket */
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    const short revents = d->events.data.poll.fds[i_s].revents;

    mhd_assert (! d->net.listen.is_broken);
    mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
                d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
    mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
                (0 == d->events.data.poll.fds[i_s].events));
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
    dbg_print_fd_state_update ("lstn", \
                               d->events.data.poll.fds[i_s].fd, \
                               0 != (revents & (MHD_POLL_IN | POLLIN)), \
                               0 != (revents & (MHD_POLL_OUT | POLLOUT)), \
                               0 != (revents & (POLLERR | POLLNVAL | POLLHUP)));
    if (0 != (revents & (POLLERR | POLLNVAL | POLLHUP)))
    {
      --num_events;
      log_listen_broken (d);
      /* Close the listening socket unless the master daemon should close it */
      if (! mhd_D_HAS_MASTER (d))
        mhd_socket_close (d->net.listen.fd);

      d->events.accept_pending = false;
      d->net.listen.is_broken = true;
      /* Stop monitoring socket to avoid spinning with busy-waiting */
      d->net.listen.fd = MHD_INVALID_SOCKET;
    }
    else
    {
      const bool has_new_conns = (0 != (revents & (MHD_POLL_IN | POLLIN)));
      if (has_new_conns)
      {
        --num_events;
        d->events.accept_pending = true;
      }
      else
      {
        /* Check whether the listen socket was monitored for incoming
           connections */
        if (0 != (d->events.data.poll.fds[i_s].events & POLLIN))
          d->events.accept_pending = false;
      }
    }
    ++i_s;
  }

  mhd_assert ((0 == num_events) || \
              (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)));

  /* Remaining slots: one per monitored connection */
  for (i_c = i_s; (i_c < i_s + d->conns.count) && (0 < num_events); ++i_c)
  {
    struct MHD_Connection *restrict c;
    bool recv_ready;
    bool send_ready;
    bool err_state;
    short revents;
    mhd_assert (i_c < d->dbg.num_events_elements);
    mhd_assert (mhd_SOCKET_REL_MARKER_EMPTY != \
                d->events.data.poll.rel[i_c].fd_id);
    mhd_assert (mhd_SOCKET_REL_MARKER_ITC != \
                d->events.data.poll.rel[i_c].fd_id);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN != \
                d->events.data.poll.rel[i_c].fd_id);

    c = d->events.data.poll.rel[i_c].connection;
    mhd_assert (! is_conn_excluded_from_http_comm (c));
    mhd_assert (c->sk.fd == d->events.data.poll.fds[i_c].fd);
    revents = d->events.data.poll.fds[i_c].revents;
    recv_ready = (0 != (revents & (MHD_POLL_IN | POLLIN)));
    send_ready = (0 != (revents & (MHD_POLL_OUT | POLLOUT)));
#ifndef MHD_POLLHUP_ON_REM_SHUT_WR
    err_state = (0 != (revents & (POLLHUP | POLLERR | POLLNVAL)));
#else
    err_state = (0 != (revents & (POLLERR | POLLNVAL)));
    if (0 != (revents & POLLHUP))
    { /* This can be a disconnect OR remote side set SHUT_WR */
      recv_ready = true; /* Check the socket by reading */
      if (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
        err_state = true; /* The socket will not be checked by reading, the only way to avoid spinning */
    }
#endif
    if (0 != (revents & (MHD_POLLPRI | MHD_POLLRDBAND)))
    { /* Statuses were not requested, but returned */
      if (! recv_ready ||
          (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV)))
        err_state = true; /* The socket will not be read, the only way to avoid spinning */
    }
    if (0 != (revents & MHD_POLLWRBAND))
    { /* Status was not requested, but returned */
      if (! send_ready ||
          (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND)))
        err_state = true; /* The socket will not be written, the only way to avoid spinning */
    }

    update_conn_net_status (d, c, recv_ready, send_ready, err_state);
  }
  mhd_assert (d->conns.count >= (i_c - i_s));
  mhd_assert (i_c <= d->dbg.num_events_elements);
  return true;
}
1417
1418
1419
/**
 * Monitor all daemon's sockets by poll() and update states of all
 * connections, the listening socket and the ITC.
 * @param d the daemon to use
 * @param listen_only set to 'true' if connections' sockets should NOT
 *                    be monitored
 * @return 'true' if processed successfully,
 *         'false' if unrecoverable error occurs and the daemon must be
 *         closed
 */
static MHD_FN_PAR_NONNULL_ (1) bool
get_all_net_updates_by_poll (struct MHD_Daemon *restrict d,
                             bool listen_only)
{
#ifdef mhd_DEBUG_POLLING_FDS
#  ifdef MHD_SOCKETS_KIND_POSIX
  static const char poll_fn_name[] = "poll";
#  else  /* MHD_SOCKETS_KIND_WINSOCK */
  static const char poll_fn_name[] = "WSAPoll";
#  endif /* MHD_SOCKETS_KIND_WINSOCK */
#endif /* mhd_DEBUG_POLLING_FDS */
  unsigned int num_fds;
  int max_wait;
  int num_events;

  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);

  num_fds = poll_update_fds (d, listen_only);

  // TODO: handle empty list situation
  max_wait = get_max_wait (d); // TODO: use correct timeout value

#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Starting) %s(fds, %u, %d)...\n",
           poll_fn_name,
           num_fds,
           max_wait);
#endif /* mhd_DEBUG_POLLING_FDS */
  num_events = mhd_poll (d->events.data.poll.fds,
                         num_fds,
                         max_wait); // TODO: use correct timeout value
#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Finished) %s(fds, %u, %d) -> %d\n",
           poll_fn_name,
           num_fds,
           max_wait,
           num_events);
#endif /* mhd_DEBUG_POLLING_FDS */
  if (0 > num_events)
  {
    int err;
    bool is_hard_error;
    bool is_ignored_error;
    is_hard_error = false;
    is_ignored_error = false;
#if defined(MHD_SOCKETS_KIND_POSIX)
    err = errno;
    if (0 != err)
    {
      /* EFAULT/EINVAL mean a broken daemon state; EINTR is harmless */
      is_hard_error =
        ((mhd_EFAULT_OR_ZERO == err) || (mhd_EINVAL_OR_ZERO == err));
      is_ignored_error = (mhd_EINTR_OR_ZERO == err);
    }
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
    err = WSAGetLastError ();
    is_hard_error =
      ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err));
#endif
    if (! is_ignored_error)
    {
      if (is_hard_error)
      {
        mhd_LOG_MSG (d, MHD_SC_POLL_HARD_ERROR, \
                     "The poll() encountered unrecoverable error.");
        return false;
      }
      mhd_LOG_MSG (d, MHD_SC_POLL_SOFT_ERROR, \
                   "The poll() encountered error.");
    }
    return true;
  }

  return poll_update_statuses_from_fds (d, num_events);
}
1495
1496
1497
#endif /* MHD_SUPPORT_POLL */
1498
1499
#ifdef MHD_SUPPORT_EPOLL
1500
1501
/**
 * Map events provided by epoll to connection states, ITC and
 * listen socket states
 * @param d the daemon to use
 * @param num_events the number of events reported by epoll_wait()
 * @return 'true' if processed successfully,
 *         'false' if unrecoverable error occurs and the daemon must be
 *         closed
 */
static MHD_FN_PAR_NONNULL_ (1) bool
update_statuses_from_eevents (struct MHD_Daemon *restrict d,
                              unsigned int num_events)
{
  unsigned int i;
  struct epoll_event *const restrict events =
    d->events.data.epoll.events;
  for (i = 0; num_events > i; ++i)
  {
    struct epoll_event *const e = events + i;
    /* The 'data' member set at EPOLL_CTL_ADD identifies the event source:
       a special marker (ITC / listen) or a connection pointer */
#ifdef MHD_SUPPORT_THREADS
    if (((uint64_t) mhd_SOCKET_REL_MARKER_ITC) == e->data.u64) /* uint64_t is in the system header */
    {
      mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
      dbg_print_fd_state_update ( \
        "ITC", \
        mhd_itc_r_fd (d->threading.itc), \
        0 != (e->events & EPOLLIN), \
        0 != (e->events & EPOLLOUT), \
        0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));

      if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
      {
        log_itc_broken (d);
        /* ITC is broken, need to stop the daemon thread now as otherwise
           application will not be able to stop the thread. */
        return false;
      }
      if (0 != (e->events & EPOLLIN))
      {
        /* Clear ITC here, before other data processing.
         * Any external events will activate ITC again if additional data to
         * process is added externally. Clearing ITC early ensures that new data
         * (with additional ITC activation) will not be missed. */
        mhd_itc_clear (d->threading.itc);
      }
    }
    else
#endif /* MHD_SUPPORT_THREADS */
    if (((uint64_t) mhd_SOCKET_REL_MARKER_LISTEN) == e->data.u64) /* uint64_t is in the system header */
    {
      mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
      dbg_print_fd_state_update ( \
        "lstn", \
        d->net.listen.fd, \
        0 != (e->events & EPOLLIN), \
        0 != (e->events & EPOLLOUT), \
        0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));
      if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
      {
        log_listen_broken (d);

        /* Close the listening socket unless the master daemon should close it */
        if (! mhd_D_HAS_MASTER (d))
          mhd_socket_close (d->net.listen.fd);
        else
        {
          /* Ignore possible error as the socket could be already removed
             from the epoll monitoring by closing the socket */
          (void) epoll_ctl (d->events.data.epoll.e_fd,
                            EPOLL_CTL_DEL,
                            d->net.listen.fd,
                            NULL);
        }

        d->events.accept_pending = false;
        d->net.listen.is_broken = true;
        d->net.listen.fd = MHD_INVALID_SOCKET;
      }
      else
        d->events.accept_pending = (0 != (e->events & EPOLLIN));
    }
    else
    { /* The event belongs to a connection's socket */
      bool recv_ready;
      bool send_ready;
      bool err_state;
      struct MHD_Connection *const restrict c =
        (struct MHD_Connection *) e->data.ptr;
      mhd_assert (! is_conn_excluded_from_http_comm (c));
      /* Error/hang-up also marks the socket as recv- and send-ready */
      recv_ready = (0 != (e->events & (EPOLLIN | EPOLLERR | EPOLLHUP)));
      send_ready = (0 != (e->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)));
      err_state = (0 != (e->events & (EPOLLERR | EPOLLHUP)));

      update_conn_net_status (d, c, recv_ready, send_ready, err_state);
    }
  }
  return true;
}
1594
1595
1596
/**
1597
 * Update states of all connections, check for connection pending
1598
 * to be accept()'ed, check for the events on ITC.
1599
 */
1600
static MHD_FN_PAR_NONNULL_ (1) bool
1601
get_all_net_updates_by_epoll (struct MHD_Daemon *restrict d)
1602
0
{
1603
0
  int max_events;
1604
0
  int num_events;
1605
0
  unsigned int events_processed;
1606
0
  int max_wait;
1607
0
  mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
1608
0
  mhd_assert (0 < ((int) d->events.data.epoll.num_elements));
1609
0
  mhd_assert (d->events.data.epoll.num_elements == \
1610
0
              (size_t) ((int) d->events.data.epoll.num_elements));
1611
0
  mhd_assert (0 != d->events.data.epoll.num_elements);
1612
0
  mhd_assert (0 != d->conns.cfg.count_limit);
1613
0
  mhd_assert (d->events.data.epoll.num_elements == d->dbg.num_events_elements);
1614
1615
  // TODO: add listen socket enable/disable
1616
1617
  /* Minimise amount of data passed from userspace to kernel and back */
1618
0
  max_events = (int) d->conns.cfg.count_limit;
1619
0
#ifdef MHD_SUPPORT_THREADS
1620
0
  ++max_events;
1621
0
#endif /* MHD_SUPPORT_THREADS */
1622
0
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
1623
0
    ++max_events;
1624
  /* Make sure that one extra slot used to clearly detect that all events
1625
   * were gotten. */
1626
0
  ++max_events;
1627
0
  if ((0 > max_events) ||
1628
0
      (max_events > (int) d->events.data.epoll.num_elements))
1629
0
    max_events = (int) d->events.data.epoll.num_elements;
1630
1631
0
  events_processed = 0;
1632
0
  max_wait = get_max_wait (d); // TODO: use correct timeout value
1633
0
  do
1634
0
  {
1635
#ifdef mhd_DEBUG_POLLING_FDS
1636
    fprintf (stderr,
1637
             "### (Starting) epoll_wait(%d, events, %d, %d)...\n",
1638
             d->events.data.epoll.e_fd,
1639
             (int) d->events.data.epoll.num_elements,
1640
             max_wait);
1641
#endif /* mhd_DEBUG_POLLING_FDS */
1642
0
    num_events = epoll_wait (d->events.data.epoll.e_fd,
1643
0
                             d->events.data.epoll.events,
1644
0
                             max_events,
1645
0
                             max_wait);
1646
#ifdef mhd_DEBUG_POLLING_FDS
1647
    fprintf (stderr,
1648
             "### (Finished) epoll_wait(%d, events, %d, %d) -> %d\n",
1649
             d->events.data.epoll.e_fd,
1650
             max_events,
1651
             max_wait,
1652
             num_events);
1653
#endif /* mhd_DEBUG_POLLING_FDS */
1654
0
    max_wait = 0;
1655
0
    if (0 > num_events)
1656
0
    {
1657
0
      const int err = errno;
1658
0
      if (EINTR != err)
1659
0
      {
1660
0
        mhd_LOG_MSG (d, MHD_SC_EPOLL_HARD_ERROR, \
1661
0
                     "The epoll_wait() encountered unrecoverable error.");
1662
0
        return false;
1663
0
      }
1664
0
      return true; /* EINTR, try next time */
1665
0
    }
1666
0
    if (! update_statuses_from_eevents (d, (unsigned int) num_events))
1667
0
      return false;
1668
0
    if (max_events > num_events)
1669
0
      return true; /* All events have been read */
1670
1671
    /* Use all buffer for the next getting events round(s) */
1672
0
    max_events = (int) d->events.data.epoll.num_elements;
1673
0
    mhd_assert (0 < max_events);
1674
0
    mhd_assert (d->events.data.epoll.num_elements == (size_t) max_events);
1675
0
    max_wait = 0; /* Do not block on the next getting events rounds */
1676
1677
0
    events_processed += (unsigned int) num_events; /* Avoid reading too many events */
1678
0
  } while ((events_processed < d->conns.cfg.count_limit)
1679
0
           || (events_processed < d->conns.cfg.count_limit + 2));
1680
1681
0
  return true;
1682
0
}
1683
1684
1685
#endif /* MHD_SUPPORT_EPOLL */
1686
1687
1688
/**
 * Perform one round of daemon connection and data processing.
 *
 * This function does the following:
 * + poll all connections and daemon FDs (if internal polling is used);
 * + resume connections pending to be resumed;
 * + update connection statuses based on socket states (recv/send ready or
 *   disconnect detection);
 * + receive, send and/or parse connections data as needed, including call of
 *   callbacks for processing requests and response generation;
 * + close broken connections;
 * + accept new connection (if needed);
 * + cleanup closed "upgraded" connections.
 * @param d the daemon to use
 * @return 'true' on success,
 *         'false' if daemon is broken
 */
static MHD_FN_PAR_NONNULL_ (1) bool
process_all_events_and_data (struct MHD_Daemon *restrict d)
{
  /* Dispatch on the polling backend configured for this daemon.  Each
     branch collects network events; backends whose helper does not resume
     connections internally call daemon_resume_conns_if_needed() after. */
  switch (d->events.poll_type)
  {
  case mhd_POLL_TYPE_EXT:
    mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    if (! ext_events_process_net_updates_and_resume_conn (d))
      return false;
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    if (! get_all_net_updates_by_select_and_resume_conn (d, false))
      return false;
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    if (! get_all_net_updates_by_poll (d, false))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    if (! get_all_net_updates_by_epoll (d))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_EPOLL */
  /* Keep the 'switch' exhaustive over the enum even when some backends are
     compiled out: route the disabled enum values into the "impossible"
     branch below so the compiler can still check case coverage. */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
    break;
  }

  /* Retry accepting connections deferred earlier, unless accepting new
     connections is currently blocked (e.g. the connections limit is hit);
     keep the "pending" flag set if the accept round did not succeed. */
  if (d->events.accept_pending && ! d->conns.block_new)
    d->events.accept_pending = ! daemon_accept_new_conns (d);

  daemon_process_all_active_conns (d);
  daemon_cleanup_upgraded_conns (d);
  /* Report the daemon as "broken" if a stop has been requested meanwhile */
  return ! mhd_D_HAS_STOP_REQ (d);
}
1755
1756
1757
static
1758
MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
1759
process_reg_events_int (struct MHD_Daemon *MHD_RESTRICT daemon,
1760
                        uint_fast64_t *MHD_RESTRICT next_max_wait)
1761
0
{
1762
0
  enum MHD_StatusCode res;
1763
1764
0
  if (mhd_DAEMON_STATE_STARTED > daemon->state)
1765
0
    return MHD_SC_TOO_EARLY;
1766
0
  if (! mhd_WM_INT_HAS_EXT_EVENTS (daemon->wmode_int))
1767
0
    return MHD_SC_EXTERNAL_EVENT_ONLY;
1768
0
  if (mhd_DAEMON_STATE_STARTED < daemon->state)
1769
0
    return MHD_SC_TOO_LATE;
1770
1771
0
#ifdef MHD_SUPPORT_THREADS
1772
0
  if (daemon->events.data.extr.itc_data.is_broken)
1773
0
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
1774
0
#endif /* MHD_SUPPORT_THREADS */
1775
1776
0
  if (daemon->net.listen.is_broken)
1777
0
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
1778
1779
  /* Ignore returned value */
1780
0
  (void) process_all_events_and_data (daemon);
1781
1782
0
  if (NULL != next_max_wait)
1783
0
    *next_max_wait = MHD_WAIT_INDEFINITELY;
1784
1785
0
  res = ext_events_update_registrations (daemon);
1786
0
  if (MHD_SC_OK != res)
1787
0
    return res;
1788
1789
0
#ifdef MHD_SUPPORT_THREADS
1790
0
  if (daemon->events.data.extr.itc_data.is_broken)
1791
0
  {
1792
0
    log_itc_broken (daemon);
1793
0
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
1794
0
  }
1795
0
#endif /* MHD_SUPPORT_THREADS */
1796
1797
0
  if (daemon->net.listen.is_broken)
1798
0
  {
1799
0
    log_listen_broken (daemon);
1800
0
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
1801
0
  }
1802
1803
0
  if (NULL != next_max_wait)
1804
0
    *next_max_wait = mhd_daemon_get_wait_max (daemon);
1805
1806
0
  return MHD_SC_OK;
1807
0
}
1808
1809
1810
MHD_EXTERN_
1811
MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
1812
MHD_daemon_process_reg_events (struct MHD_Daemon *MHD_RESTRICT daemon,
1813
                               uint_fast64_t *MHD_RESTRICT next_max_wait)
1814
0
{
1815
0
  enum MHD_StatusCode res;
1816
#ifdef mhd_DEBUG_POLLING_FDS
1817
  fprintf (stderr,
1818
           "### (Starting) MHD_daemon_process_reg_events(daemon, [%s])...\n",
1819
           (NULL != next_max_wait) ? "non-NULL" : "NULL");
1820
#endif
1821
0
  res = process_reg_events_int (daemon,
1822
0
                                next_max_wait);
1823
#ifdef mhd_DEBUG_POLLING_FDS
1824
  if (NULL == next_max_wait)
1825
    fprintf (stderr,
1826
             "### (Finished) MHD_daemon_process_reg_events(daemon, [NULL]) ->"
1827
             "%u\n",
1828
             (unsigned int) res);
1829
  else if (MHD_WAIT_INDEFINITELY == *next_max_wait)
1830
    fprintf (stderr,
1831
             "### (Finished) MHD_daemon_process_reg_events(daemon, "
1832
             "->MHD_WAIT_INDEFINITELY) ->%u\n",
1833
             (unsigned int) res);
1834
  else
1835
    fprintf (stderr,
1836
             "### (Finished) MHD_daemon_process_reg_events(daemon, ->%llu) "
1837
             "->%u\n",
1838
             (unsigned long long) *next_max_wait,
1839
             (unsigned int) res);
1840
#endif
1841
0
  return res;
1842
0
}
1843
1844
1845
#ifdef MHD_SUPPORT_THREADS
1846
1847
/**
1848
 * The entry point for the daemon worker thread
1849
 * @param cls the closure
1850
 */
1851
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
1852
mhd_worker_all_events (void *cls)
1853
0
{
1854
0
  struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
1855
0
  mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));
1856
0
  mhd_assert (d->dbg.net_inited);
1857
0
  mhd_assert (! d->dbg.net_deinited);
1858
0
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
1859
0
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
1860
0
  mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY != d->threading.d_type);
1861
0
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
1862
0
  mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION != d->wmode_int);
1863
0
  mhd_assert (d->dbg.events_fully_inited);
1864
0
  mhd_assert (d->dbg.connections_inited);
1865
1866
0
#ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
1867
  // TODO: store and use the result
1868
0
  (void) mhd_thread_block_sigpipe ();
1869
0
#endif
1870
1871
0
  while (! d->threading.stop_requested)
1872
0
  {
1873
0
    if (! process_all_events_and_data (d))
1874
0
      break;
1875
0
  }
1876
0
  if (! d->threading.stop_requested)
1877
0
  {
1878
0
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
1879
0
                 "The daemon thread is stopping, but termination has not " \
1880
0
                 "been requested for the daemon.");
1881
0
  }
1882
0
  mhd_daemon_close_all_conns (d);
1883
1884
0
  return (mhd_THRD_RTRN_TYPE) 0;
1885
0
}
1886
1887
1888
/**
 * Perform one polling round for a listening-only daemon thread.
 *
 * Only the listening socket and (when threads are supported) the
 * inter-thread communication channel are monitored; connection sockets
 * are handled by their own per-connection threads.
 * @param d the daemon to use
 * @return 'true' to continue the polling loop,
 *         'false' to stop the thread
 */
static MHD_FN_PAR_NONNULL_ (1) bool
process_listening_and_itc_only (struct MHD_Daemon *restrict d)
{
  /* The dummy 'if (false)' first branch lets every backend below be a
     plain 'else if' inside its own #ifdef, regardless of which backends
     are compiled in. */
  if (false)
    (void) 0;
#ifdef MHD_SUPPORT_SELECT
  else if (mhd_POLL_TYPE_SELECT == d->events.poll_type)
  {
    return false; // TODO: implement
  }
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  else if (mhd_POLL_TYPE_POLL == d->events.poll_type)
  {
    /* 'true': poll only the listening FD and the ITC, not connections */
    if (! get_all_net_updates_by_poll (d, true))
      return false;
  }
#endif /* MHD_SUPPORT_POLL */
  else
  {
    (void) d; /* Mute compiler warning */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
  }
  // TODO: Accept connections
  /* NOTE(review): always returns 'false' for now — accepting connections
     is not implemented yet, so the calling loop exits after one round. */
  return false;
}
1916
1917
1918
/**
1919
 * The entry point for the daemon listening thread
1920
 * @param cls the closure
1921
 */
1922
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
1923
mhd_worker_listening_only (void *cls)
1924
0
{
1925
0
  struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
1926
0
  mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));
1927
1928
0
  mhd_assert (d->dbg.net_inited);
1929
0
  mhd_assert (! d->dbg.net_deinited);
1930
0
  mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY == d->threading.d_type);
1931
0
  mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION == d->wmode_int);
1932
0
  mhd_assert (d->dbg.events_fully_inited);
1933
0
  mhd_assert (d->dbg.connections_inited);
1934
1935
0
#ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
1936
  // TODO: store and use the result
1937
0
  (void) mhd_thread_block_sigpipe ();
1938
0
#endif
1939
1940
0
  while (! d->threading.stop_requested)
1941
0
  {
1942
0
    if (! process_listening_and_itc_only (d))
1943
0
      break;
1944
0
  }
1945
0
  if (! d->threading.stop_requested)
1946
0
  {
1947
0
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
1948
0
                 "The daemon thread is stopping, but termination has " \
1949
0
                 "not been requested by the daemon.");
1950
0
  }
1951
0
  return (mhd_THRD_RTRN_TYPE) 0;
1952
0
}
1953
1954
1955
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
1956
mhd_worker_connection (void *cls)
1957
0
{
1958
0
  if (cls) // TODO: Implement
1959
0
    MHD_PANIC ("Not yet implemented");
1960
0
  return (mhd_THRD_RTRN_TYPE) 0;
1961
0
}
1962
1963
1964
#endif /* MHD_SUPPORT_THREADS */