Coverage Report

Created: 2025-11-24 06:35

/src/libwebsockets/lib/plat/unix/unix-service.c
(every executable line below reports Count 0: nothing in this file was executed during the run)

Line | Count | Source
   1 |       | /*
   2 |       |  * libwebsockets - small server side websockets and web server implementation
   3 |       |  *
   4 |       |  * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
   5 |       |  *
   6 |       |  * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 |       |  * of this software and associated documentation files (the "Software"), to
   8 |       |  * deal in the Software without restriction, including without limitation the
   9 |       |  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  10 |       |  * sell copies of the Software, and to permit persons to whom the Software is
  11 |       |  * furnished to do so, subject to the following conditions:
  12 |       |  *
  13 |       |  * The above copyright notice and this permission notice shall be included in
  14 |       |  * all copies or substantial portions of the Software.
  15 |       |  *
  16 |       |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 |       |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 |       |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  19 |       |  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 |       |  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  21 |       |  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  22 |       |  * IN THE SOFTWARE.
  23 |       |  */
  24 |       |
  25 |       | #if !defined(_GNU_SOURCE)
  26 |       | #define _GNU_SOURCE
  27 |       | #endif
  28 |       | #include "private-lib-core.h"
  29 |       |
  30 |       | int
  31 |       | lws_poll_listen_fd(struct lws_pollfd *fd)
  32 |     0 | {
  33 |     0 |   return poll(fd, 1, 0);
  34 |     0 | }
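
lws_poll_listen_fd() above is a thin wrapper: a zero-timeout poll() of a single pollfd, so it samples readiness without ever blocking. A minimal standalone sketch of the same pattern (listen_fd_has_pending() is a hypothetical name, not an lws API):

#include <poll.h>

/* returns >0 if the listen socket has a connection queued (readable),
 * 0 if nothing is pending, <0 on error; never blocks, because the
 * poll() timeout is 0 ms, exactly as in lws_poll_listen_fd() above */
static int listen_fd_has_pending(int listen_fd)
{
        struct pollfd pfd;

        pfd.fd = listen_fd;
        pfd.events = POLLIN;
        pfd.revents = 0;

        return poll(&pfd, 1, 0);
}
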
  35 |       |
  36 |       | int
  37 |       | _lws_plat_service_forced_tsi(struct lws_context *context, int tsi)
  38 |     0 | {
  39 |     0 |   struct lws_context_per_thread *pt = &context->pt[tsi];
  40 |     0 |   int m, n, r;
  41 |       |
  42 |     0 |   r = lws_service_flag_pending(context, tsi);
  43 |       |
  44 |       |   /* any socket with events to service? */
  45 |     0 |   for (n = 0; n < (int)pt->fds_count; n++) {
  46 |     0 |     lws_sockfd_type fd = pt->fds[n].fd;
  47 |       |
  48 |     0 |     if (!pt->fds[n].revents)
  49 |     0 |       continue;
  50 |       |
  51 |     0 |     m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
  52 |     0 |     if (m < 0) {
  53 |     0 |       lwsl_err("%s: lws_service_fd_tsi returned %d\n",
  54 |     0 |          __func__, m);
  55 |     0 |       return -1;
  56 |     0 |     }
  57 |       |
  58 |       |     /* if something closed, retry this slot since may have been
  59 |       |      * swapped with end fd */
  60 |     0 |     if (m && pt->fds[n].fd != fd)
  61 |     0 |       n--;
  62 |     0 |   }
  63 |       |
  64 |     0 |   lws_service_do_ripe_rxflow(pt);
  65 |       |
  66 |     0 |   return r;
  67 |     0 | }
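
The n-- retry above exists because lws removes a closing connection from pt->fds by moving the last entry into the vacated slot and shrinking fds_count, so slot n then holds a not-yet-visited fd (this is what the "swapped with end fd" comment refers to). A sketch of that removal idiom, where pollfds_remove() is an illustrative stand-in, not the actual lws helper:

#include <poll.h>

/* delete entry n in O(1) by moving the final entry into its slot;
 * an iterator walking the array must then re-examine index n, which
 * is what "if (m && pt->fds[n].fd != fd) n--;" above achieves */
static void pollfds_remove(struct pollfd *fds, int *count, int n)
{
        fds[n] = fds[*count - 1];
        (*count)--;
}
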
  68 |       |
  69 |     0 | #define LWS_POLL_WAIT_LIMIT 2000000000
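
LWS_POLL_WAIT_LIMIT is 2,000,000,000 ms, roughly 23.1 days (2,000,000,000 / 86,400,000 ms per day), which is what the "23 days" comment further down refers to. It is also deliberately below INT_MAX (2,147,483,647), since poll() takes its timeout as a signed int of milliseconds and a negative value means wait forever. A compile-time check along those lines might look like this (illustrative, not present in the source):

#include <limits.h>

/* poll()'s timeout argument is a signed int of ms, so the wait limit
 * must stay below 2^31 - 1 to avoid wrapping negative (= block forever) */
_Static_assert(2000000000 < INT_MAX,
               "LWS_POLL_WAIT_LIMIT must fit poll()'s int ms timeout");
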
  70 |       |
  71 |       | int
  72 |       | _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
  73 |     0 | {
  74 |     0 |   volatile struct lws_foreign_thread_pollfd *ftp, *next;
  75 |     0 |   volatile struct lws_context_per_thread *vpt;
  76 |     0 |   struct lws_context_per_thread *pt;
  77 |     0 |   lws_usec_t timeout_us, us;
  78 |       | #if defined(LWS_WITH_WAKE_LOGGING)
  79 |       |   unsigned int u;
  80 |       |   char hu[25];
  81 |       |   lws_usec_t a1;
  82 |       | #endif
  83 |       | #if defined(LWS_WITH_SYS_METRICS) || defined(LWS_WITH_WAKE_LOGGING)
  84 |       |   lws_usec_t a, b = 0;
  85 |       | #endif
  86 |     0 |   int n;
  87 |     0 | #if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
  88 |     0 |   int m;
  89 |     0 | #endif
  90 |       |
  91 |       |   /* stay dead once we are dead */
  92 |       |
  93 |     0 |   if (!context)
  94 |     0 |     return 1;
  95 |       |
  96 |       | #if defined(LWS_WITH_SYS_METRICS)
  97 |       |   b =
  98 |       | #endif
  99 |     0 |       us = lws_now_usecs();
 100 |       |
 101 |     0 |   pt = &context->pt[tsi];
 102 |     0 |   vpt = (volatile struct lws_context_per_thread *)pt;
 103 |       |
 104 |     0 |   if (timeout_ms < 0)
 105 |     0 |     timeout_ms = 0;
 106 |     0 |   else
 107 |       |     /* force a default timeout of 23 days */
 108 |     0 |     timeout_ms = LWS_POLL_WAIT_LIMIT;
 109 |     0 |   timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
 110 |       |
 111 |     0 |   if (context->event_loop_ops->run_pt)
 112 |     0 |     context->event_loop_ops->run_pt(context, tsi);
 113 |       |
 114 |     0 |   if (!pt->service_tid_detected && context->vhost_list) {
 115 |     0 |     lws_fakewsi_def_plwsa(pt);
 116 |       |
 117 |     0 |     lws_fakewsi_prep_plwsa_ctx(context);
 118 |       |
 119 |     0 |     pt->service_tid = context->vhost_list->protocols[0].callback(
 120 |     0 |           (struct lws *)plwsa,
 121 |     0 |           LWS_CALLBACK_GET_THREAD_ID,
 122 |     0 |           context->vhost_list->protocols[0].user,
 123 |     0 |           NULL, 0);
 124 |     0 |     pt->service_tid_detected = 1;
 125 |     0 |   }
 126 |       |
 127 |     0 |   lws_pt_lock(pt, __func__);
 128 |       |   /*
 129 |       |    * service ripe scheduled events, and limit wait to next expected one
 130 |       |    */
 131 |     0 |   us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS, us);
 132 |     0 |   if (us && us < timeout_us)
 133 |       |     /*
 134 |       |      * If something wants zero wait, that's OK, but if the next sul
 135 |       |      * coming ripe is an interval less than our wait resolution,
 136 |       |      * bump it to be the wait resolution.
 137 |       |      */
 138 |     0 |     timeout_us = us < context->us_wait_resolution ?
 139 |     0 |           context->us_wait_resolution : us;
 140 |       |
 141 |     0 |   lws_pt_unlock(pt);
 142 |       |
 143 |       |   /*
 144 |       |    * is there anybody with pending stuff that needs service forcing?
 145 |       |    */
 146 |     0 |   if (!lws_service_adjust_timeout(context, 1, tsi))
 147 |     0 |     timeout_us = 0;
 148 |       |
 149 |       |   /* ensure we don't wrap at 2^31 with poll()'s signed int ms */
 150 |       |
 151 |     0 |   timeout_us /= LWS_US_PER_MS; /* ms now */
 152 |       |
 153 |       | #if defined(LWS_WITH_SYS_METRICS) || defined(LWS_WITH_WAKE_LOGGING)
 154 |       |   a = lws_now_usecs() - b;
 155 |       | #endif
 156 |       | #if defined(LWS_WITH_WAKE_LOGGING)
 157 |       |   a1 = lws_now_usecs();
 158 |       |   lws_humanize(hu, sizeof(hu), (uint64_t)(timeout_us * LWS_US_PER_MS), humanize_schema_us);
 159 |       |   lwsl_cx_notice(context, "event loop: entering sleep... scheduled wake after %s", hu);
 160 |       | #endif
 161 |     0 |   vpt->inside_poll = 1;
 162 |     0 |   lws_memory_barrier();
 163 |     0 |   n = poll(pt->fds, pt->fds_count, (int)timeout_us /* ms now */ );
 164 |     0 |   vpt->inside_poll = 0;
 165 |     0 |   lws_memory_barrier();
 166 |       |
 167 |       | #if defined(LWS_WITH_SYS_METRICS) || defined(LWS_WITH_WAKE_LOGGING)
 168 |       |   b = lws_now_usecs();
 169 |       | #endif
 170 |       | #if defined(LWS_WITH_WAKE_LOGGING)
 171 |       |   lws_humanize(hu, sizeof(hu), (uint64_t)(b - a1), humanize_schema_us);
 172 |       |   lwsl_cx_notice(context, "event loop: WOKE after %s, %d fds ready", hu, n);
 173 |       |   for (u = 0; u < pt->fds_count; u++) {
 174 |       |     struct lws *wsi;
 175 |       |     struct lws_pollfd *pfd = &vpt->fds[u];
 176 |       |
 177 |       |     if (lws_socket_is_valid(pfd->fd) &&
 178 |       |         (pfd->revents & (POLLIN | POLLOUT | POLLERR))) {
 179 |       |       wsi = wsi_from_fd(context, pfd->fd);
 180 |       | #if defined(LWS_WITH_SECURE_STREAMS)
 181 |       |       if (wsi->for_ss && wsi->a.opaque_user_data) {
 182 |       |         lws_ss_handle_t *fih = (lws_ss_handle_t *)wsi->a.opaque_user_data;
 183 |       |
 184 |       |         lwsl_ss_notice(fih, "    ready fd %d, %s %s %s, SS policy %s", pfd->fd,
 185 |       |           pfd->revents & POLLIN ? "POLLIN" : "",
 186 |       |           pfd->revents & POLLOUT ? "POLLOUT" : "",
 187 |       |           pfd->revents & POLLERR ? "POLLERR": "",
 188 |       |           fih->policy ? fih->policy->streamtype : "(null)");
 189 |       |       } else
 190 |       | #endif
 191 |       |       lwsl_wsi_notice(wsi, "    ready fd %d, %s %s %s, protocol %s", pfd->fd,
 192 |       |           pfd->revents & POLLIN ? "POLLIN" : "",
 193 |       |           pfd->revents & POLLOUT ? "POLLOUT" : "",
 194 |       |           pfd->revents & POLLERR ? "POLLERR": "",
 195 |       |           wsi->a.protocol ? wsi->a.protocol->name : "(null)");
 196 |       |     }
 197 |       |   }
 198 |       | #endif
 199 |       |
 200 |       |   /* Collision will be rare and brief.  Spin until it completes */
 201 |     0 |   while (vpt->foreign_spinlock)
 202 |     0 |     ;
 203 |       |
 204 |       |   /*
 205 |       |    * At this point we are not inside a foreign thread pollfd
 206 |       |    * change, and we have marked ourselves as outside the poll()
 207 |       |    * wait.  So we are the only guys that can modify the
 208 |       |    * lws_foreign_thread_pollfd list on the pt.  Drain the list
 209 |       |    * and apply the changes to the affected pollfds in the correct
 210 |       |    * order.
 211 |       |    */
 212 |       |
 213 |     0 |   lws_pt_lock(pt, __func__);
 214 |       |
 215 |     0 |   ftp = vpt->foreign_pfd_list;
 216 |       |   //lwsl_notice("cleared list %p\n", ftp);
 217 |     0 |   while (ftp) {
 218 |     0 |     struct lws *wsi;
 219 |     0 |     struct lws_pollfd *pfd;
 220 |       |
 221 |     0 |     next = ftp->next;
 222 |     0 |     pfd = &vpt->fds[ftp->fd_index];
 223 |     0 |     if (lws_socket_is_valid(pfd->fd)) {
 224 |     0 |       wsi = wsi_from_fd(context, pfd->fd);
 225 |     0 |       if (wsi)
 226 |     0 |         __lws_change_pollfd(wsi, ftp->_and,
 227 |     0 |                 ftp->_or);
 228 |     0 |     }
 229 |       | #if defined(LWS_WITH_WAKE_LOGGING)
 230 |       |     else
 231 |       |       lwsl_cx_notice(context, "*** WOKE on Invalid fd in foreign pfd list");
 232 |       | #endif
 233 |     0 |     lws_free((void *)ftp);
 234 |     0 |     ftp = next;
 235 |     0 |   }
 236 |     0 |   vpt->foreign_pfd_list = NULL;
 237 |     0 |   lws_memory_barrier();
 238 |       |
 239 |     0 |   lws_pt_unlock(pt);
 240 |       |
 241 |     0 | #if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
 242 |     0 |   m = 0;
 243 |     0 | #endif
 244 |       | #if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
 245 |       |   m |= !!pt->ws.rx_draining_ext_list;
 246 |       | #endif
 247 |       |
 248 |     0 | #if defined(LWS_WITH_TLS)
 249 |     0 |   if (pt->context->tls_ops &&
 250 |     0 |       pt->context->tls_ops->fake_POLLIN_for_buffered)
 251 |     0 |     m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
 252 |     0 | #endif
 253 |       |
 254 |     0 |   if (
 255 |     0 | #if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
 256 |     0 |     !m &&
 257 |     0 | #endif
 258 |     0 |     !n) /* nothing to do */
 259 |     0 |     lws_service_do_ripe_rxflow(pt);
 260 |     0 |   else
 261 |     0 |     if (_lws_plat_service_forced_tsi(context, tsi) < 0)
 262 |     0 |       return -1;
 263 |       |
 264 |       | #if defined(LWS_WITH_SYS_METRICS)
 265 |       |   lws_metric_event(context->mt_service, METRES_GO,
 266 |       |        (u_mt_t) (a + (lws_now_usecs() - b)));
 267 |       | #endif
 268 |       |
 269 |     0 |   if (pt->destroy_self) {
 270 |     0 |     lws_context_destroy(pt->context);
 271 |     0 |     return -1;
 272 |     0 |   }
 273 |       |
 274 |     0 |   return 0;
 275 |     0 | }
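
The foreign_pfd_list drain above is the service thread's half of cross-thread pollfd changes; the other half is a foreign thread queueing a change and then interrupting the poll() wait. lws's documented way to do that from another thread is lws_cancel_service(). A rough sketch follows, where worker() and its threading harness are illustrative, not part of this file:

#include <libwebsockets.h>

/* runs on a non-service thread: after producing work for a connection,
 * wake the service thread so it leaves poll() and applies any queued
 * foreign pollfd changes */
static void *worker(void *arg)
{
        struct lws_context *context = (struct lws_context *)arg;

        /* ... hand work to the connection via your own queue ... */

        lws_cancel_service(context);

        return NULL;
}
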
 276 |       |
 277 |       | int
 278 |       | lws_plat_service(struct lws_context *context, int timeout_ms)
 279 |     0 | {
 280 |     0 |   return _lws_plat_service_tsi(context, timeout_ms, 0);
 281 |     0 | }
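
For reference, the usual way an application reaches this file is the default poll()-based event loop: lws_service() dispatches to lws_plat_service(), which calls _lws_plat_service_tsi() with thread-service-index 0. A minimal sketch, with protocol setup and error handling trimmed:

#include <string.h>
#include <libwebsockets.h>

int main(void)
{
        struct lws_context_creation_info info;
        struct lws_context *context;
        int n = 0;

        memset(&info, 0, sizeof info);
        info.port = CONTEXT_PORT_NO_LISTEN; /* client-only context */

        context = lws_create_context(&info);
        if (!context)
                return 1;

        /* each pass blocks inside the poll() in _lws_plat_service_tsi() */
        while (n >= 0)
                n = lws_service(context, 0);

        lws_context_destroy(context);

        return 0;
}

A run that used an external event library (libuv, libevent, etc.), or that never serviced a context at all, would leave every line here at Count 0, consistent with the table above.
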