Coverage Report

Created: 2025-09-05 06:26

/src/libwebsockets/lib/core-net/pollfd.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * libwebsockets - small server side websockets and web server implementation
3
 *
4
 * Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to
8
 * deal in the Software without restriction, including without limitation the
9
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10
 * sell copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22
 * IN THE SOFTWARE.
23
 */
24
25
#include "private-lib-core.h"
26
27
/*
 * Apply (events & ~_and) | _or to the pollfd belonging to wsi, reporting the
 * before/after event masks through *pa.
 *
 * Returns 0 on success (including the no-op cases), -1 on failure.
 *
 * Thread-safety: may be called from a foreign (non-service) thread.  On the
 * default poll() loop, changes requested while the service thread is inside
 * poll() are queued on vpt->foreign_pfd_list instead of applied directly,
 * because the kernel may rewrite pfd->events when poll() returns.
 */
int
_lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa)
{
#if !defined(LWS_WITH_EVENT_LIBS) && !defined(LWS_PLAT_FREERTOS) && \
    !defined(WIN32) && !defined(_WIN32)
	volatile struct lws_context_per_thread *vpt;
#endif
	struct lws_context_per_thread *pt;
	struct lws_context *context;
	int ret = 0, pa_events;
	struct lws_pollfd *pfd;
	int sampled_tid, tid;

	if (!wsi)
		return 0;

	assert(wsi->position_in_fds_table == LWS_NO_FDS_POS ||
	       wsi->position_in_fds_table >= 0);

	/* not in the fds table at all -> nothing to change */
	if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return 0;

	if (((volatile struct lws *)wsi)->handling_pollout &&
	    !_and && _or == LWS_POLLOUT) {
		/*
		 * Happening alongside service thread handling POLLOUT.
		 * The danger is when he is finished, he will disable POLLOUT,
		 * countermanding what we changed here.
		 *
		 * Instead of changing the fds, inform the service thread
		 * what happened, and ask it to leave POLLOUT active on exit
		 */
		((volatile struct lws *)wsi)->leave_pollout_active = 1;
		/*
		 * by definition service thread is not in poll wait, so no need
		 * to cancel service
		 */

		lwsl_wsi_debug(wsi, "using leave_pollout_active");

		return 0;
	}

	context = wsi->a.context;
	pt = &context->pt[(int)wsi->tsi];

#if !defined(LWS_WITH_EVENT_LIBS) && !defined(LWS_PLAT_FREERTOS) && \
    !defined(WIN32) && !defined(_WIN32)
	/*
	 * This only applies when we use the default poll() event loop.
	 *
	 * BSD can revert pa->events at any time, when the kernel decides to
	 * exit from poll().  We can't protect against it using locking.
	 *
	 * Therefore we must check first if the service thread is in poll()
	 * wait; if so, we know we must be being called from a foreign thread,
	 * and we must keep a strictly ordered list of changes we made instead
	 * of trying to apply them, since when poll() exits, which may happen
	 * at any time it would revert our changes.
	 *
	 * The plat code will apply them when it leaves the poll() wait
	 * before doing anything else.
	 */

	vpt = (volatile struct lws_context_per_thread *)pt;

	/* flag + barrier fence against the service thread re-entering poll()
	 * while we inspect inside_poll */
	vpt->foreign_spinlock = 1;
	lws_memory_barrier();

	if (vpt->inside_poll) {
		struct lws_foreign_thread_pollfd *ftp, **ftp1;
		/*
		 * We are certainly a foreign thread trying to change events
		 * while the service thread is in the poll() wait.
		 *
		 * Create a list of changes to be applied after poll() exit,
		 * instead of trying to apply them now.
		 */
		ftp = lws_malloc(sizeof(*ftp), "ftp");
		if (!ftp) {
			/* must release the spinlock before bailing */
			vpt->foreign_spinlock = 0;
			lws_memory_barrier();
			ret = -1;
			goto bail;
		}

		ftp->_and = _and;
		ftp->_or = _or;
		ftp->next = NULL;

		lws_pt_lock(pt, __func__);
		assert(wsi->position_in_fds_table < (int)pt->fds_count);
		ftp->fd_index = wsi->position_in_fds_table;

		/* place at END of list to maintain order */
		ftp1 = (struct lws_foreign_thread_pollfd **)
				&vpt->foreign_pfd_list;
		while (*ftp1)
			ftp1 = &((*ftp1)->next);

		*ftp1 = ftp;
		vpt->foreign_spinlock = 0;
		lws_memory_barrier();

		lws_pt_unlock(pt);

		/* wake the service thread so it applies the queued change */
		lws_cancel_service_pt(wsi);

		return 0;
	}

	vpt->foreign_spinlock = 0;
	lws_memory_barrier();
#endif

#if !defined(__linux__) && !defined(WIN32)
	/* OSX couldn't see close on stdin pipe side otherwise; WSAPOLL
	 * blows up if we give it POLLHUP
	 */
	_or |= LWS_POLLHUP;
#endif

	/* apply the mask change under the pt lock */
	lws_pt_lock(pt, __func__);
	assert(wsi->position_in_fds_table < (int)pt->fds_count);
	pfd = &pt->fds[wsi->position_in_fds_table];
	pa->prev_events = pfd->events;
	pa->events = pfd->events = (short)((pfd->events & ~_and) | _or);
	lws_pt_unlock(pt);

	pa->fd = wsi->desc.sockfd;
	lwsl_wsi_debug(wsi, "fd %d events %d -> %d", pa->fd, pa->prev_events,
		pa->events);

	/* mux substreams have no pollfd of their own; parent handles it */
	if (wsi->mux_substream)
		return 0;

#if defined(LWS_WITH_EXTERNAL_POLL)

	if (wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi,
		    			      LWS_CALLBACK_CHANGE_MODE_POLL_FD,
					wsi->user_space, (void *)pa, 0)) {
		ret = -1;
		goto bail;
	}
#endif

	/* mirror the change into the event library, if one is in use */
	if (context->event_loop_ops->io) {
		if (_and & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_READ);

		if (_or & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_READ);

		if (_and & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_WRITE);

		if (_or & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_WRITE);
	}

	/*
	 * if we changed something in this pollfd...
	 *   ... and we're running in a different thread context
	 *     than the service thread...
	 *       ... and the service thread is waiting ...
	 *         then cancel it to force a restart with our changed events
	 */
	pa_events = pa->prev_events != pa->events;
	pfd->events = (short)pa->events;

	if (pa_events) {
		if (lws_plat_change_pollfd(context, wsi, pfd)) {
			lwsl_wsi_info(wsi, "failed");
			ret = -1;
			goto bail;
		}
		sampled_tid = pt->service_tid;
		if (sampled_tid && wsi->a.vhost) {
			tid = wsi->a.vhost->protocols[0].callback(wsi,
				     LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
			if (tid == -1) {
				ret = -1;
				goto bail;
			}
			if (tid != sampled_tid)
				lws_cancel_service_pt(wsi);
		}
	}

bail:
	return ret;
}
223
224
#if defined(LWS_WITH_SERVER)
/*
 * Enable or disable listen sockets on this pt globally...
 * it's modulated according to the pt having space for a new accept.
 */
static void
lws_accept_modulation(struct lws_context *context,
		      struct lws_context_per_thread *pt, int allow)
{
	struct lws_pollargs pa1;
	struct lws_vhost *v;

	/* walk every vhost and flip POLLIN on each of its listen sockets */
	for (v = context->vhost_list; v; v = v->vhost_next) {
		lws_start_foreach_dll(struct lws_dll2 *, d,
				      lws_dll2_get_head(&v->listen_wsi)) {
			struct lws *lwsi = lws_container_of(d, struct lws,
							    listen_list);

			_lws_change_pollfd(lwsi, allow ? 0 : LWS_POLLIN,
					   allow ? LWS_POLLIN : 0, &pa1);
		} lws_end_foreach_dll(d);
	}
}
#endif
250
251
#if _LWS_ENABLED_LOGS & LLL_WARN
/* Diagnostic dump of the pt's fds table at warn level; s labels the dump. */
void
__dump_fds(struct lws_context_per_thread *pt, const char *s)
{
	unsigned int i;

	lwsl_cx_warn(pt->context, "fds_count %u, %s", pt->fds_count, s);

	for (i = 0; i < pt->fds_count; i++) {
		struct lws *w = wsi_from_fd(pt->context, pt->fds[i].fd);

		lwsl_cx_warn(pt->context, "  %d: fd %d, wsi %s, pos_in_fds: %d",
			i + 1, pt->fds[i].fd, lws_wsi_tag(w),
			w ? w->position_in_fds_table : -1);
	}
}
#else
#define __dump_fds(x, y)
#endif
270
271
/*
 * Add wsi's socket to its service thread's pollfd table (and the fd->wsi
 * lookup), initially polling for POLLIN.
 *
 * Requires the pt lock to be already held by the caller.
 *
 * Returns 0 on success, 1 if the fd table is full or the fd is out of range,
 * -1 if the external-poll LOCK_POLL / insert callbacks veto it.
 */
int
__insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
{
#if defined(LWS_WITH_EXTERNAL_POLL)
	struct lws_pollargs pa = { wsi->desc.sockfd, LWS_POLLIN, 0 };
#endif
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	int ret = 0;

//	__dump_fds(pt, "pre insert");

	lws_pt_assert_lock_held(pt);

	lwsl_wsi_debug(wsi, "tsi=%d, sock=%d, pos-in-fds=%d",
			wsi->tsi, wsi->desc.sockfd, pt->fds_count);

	/* no free slot on this service thread */
	if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) {
		lwsl_cx_err(context, "Too many fds (%d vs %d)", context->max_fds,
				context->fd_limit_per_thread);
		return 1;
	}

#if !defined(_WIN32)
	/* when fds index directly by fd number, the fd must fit the table */
	if (!wsi->a.context->max_fds_unrelated_to_ulimit &&
	    wsi->desc.sockfd - lws_plat_socket_offset() >= (int)context->max_fds) {
		lwsl_cx_err(context, "Socket fd %d is too high (%d) offset %d",
			   wsi->desc.sockfd, context->max_fds,
			   lws_plat_socket_offset());
		return 1;
	}
#endif

	assert(wsi);

#if defined(LWS_WITH_NETLINK)
	assert(wsi->event_pipe || wsi->a.vhost || wsi == pt->context->netlink);
#else
	assert(wsi->event_pipe || wsi->a.vhost);
#endif
	assert(lws_socket_is_valid(wsi->desc.sockfd));

#if defined(LWS_WITH_EXTERNAL_POLL)

	if (wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					   wsi->user_space, (void *) &pa, 1))
		return -1;
#endif

	/* register in the fd -> wsi lookup first */
	if (insert_wsi(context, wsi))
		return -1;
	pt->count_conns++;
	/* take the next free slot at the end of the table */
	wsi->position_in_fds_table = (int)pt->fds_count;

	pt->fds[wsi->position_in_fds_table].fd = wsi->desc.sockfd;
	pt->fds[wsi->position_in_fds_table].events = LWS_POLLIN;
#if defined(LWS_WITH_EXTERNAL_POLL)
	/* fds_count is not yet incremented (plat call below does that), so
	 * this reads back the events we just set on the new slot */
	pa.events = pt->fds[pt->fds_count].events;
#endif

	lws_plat_insert_socket_into_fds(context, wsi);

#if defined(LWS_WITH_EXTERNAL_POLL)

	/* external POLL support via protocol 0 */
	if (wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD,
					   wsi->user_space, (void *) &pa, 0))
		ret =  -1;
#endif
#if defined(LWS_WITH_SERVER)
	/* if no more room, defeat accepts on this service thread */
	if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1)
		lws_accept_modulation(context, pt, 0);
#endif

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					   wsi->user_space, (void *)&pa, 1))
		ret = -1;
#endif

//	__dump_fds(pt, "post insert");

	return ret;
}
358
359
/* requires pt lock */

/*
 * Remove wsi's socket from the pt's pollfd table and the fd -> wsi lookup.
 *
 * The vacated slot is filled by moving the table's last entry into it, and
 * that entry's wsi gets its position_in_fds_table updated to match, so the
 * table stays dense.
 *
 * Returns 0 on success, 1 if the fd was out of table range, -1 if an
 * external-poll callback vetoes.
 */
int
__remove_wsi_socket_from_fds(struct lws *wsi)
{
	struct lws_context *context = wsi->a.context;
#if defined(LWS_WITH_EXTERNAL_POLL)
	struct lws_pollargs pa = { wsi->desc.sockfd, 0, 0 };
#endif
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws *end_wsi;
	int v, m, ret = 0;

	lws_pt_assert_lock_held(pt);

//	__dump_fds(pt, "pre remove");

#if !defined(_WIN32)
	/* NOTE(review): insert path rejects fd >= max_fds but this uses
	 * `>` — looks like an off-by-one inconsistency; confirm upstream */
	if (!wsi->a.context->max_fds_unrelated_to_ulimit &&
	    wsi->desc.sockfd - lws_plat_socket_offset() > (int)context->max_fds) {
		lwsl_wsi_err(wsi, "fd %d too high (%d)",
			     wsi->desc.sockfd,
			     context->max_fds);

		return 1;
	}
#endif
#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->a.vhost && wsi->a.vhost->protocols &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					   wsi->user_space, (void *)&pa, 1))
		return -1;
#endif

	/* also drop it from its vhost-protocol writable list */
	__lws_same_vh_protocol_remove(wsi);

	/* the guy who is to be deleted's slot index in pt->fds */
	m = wsi->position_in_fds_table;
	
	/* these are the only valid possibilities for position_in_fds_table */
	assert(m == LWS_NO_FDS_POS || (m >= 0 && (unsigned int)m < pt->fds_count));

	if (context->event_loop_ops->io)
		context->event_loop_ops->io(wsi, LWS_EV_STOP | LWS_EV_READ |
						 LWS_EV_WRITE);
/*
	lwsl_notice("%s: wsi=%s, skt=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
		  __func__, lws_wsi_tag(wsi), wsi->desc.sockfd, wsi->position_in_fds_table,
		  pt->fds_count, pt->fds[pt->fds_count - 1].fd); */

	if (m != LWS_NO_FDS_POS) {
		char fixup = 0;

		assert(pt->fds_count && (unsigned int)m != pt->fds_count);

		/* deletion guy's lws_lookup entry needs nuking */
		delete_from_fd(context, wsi->desc.sockfd);

		if ((unsigned int)m != pt->fds_count - 1) {
			/* have the last guy take up the now vacant slot */
			pt->fds[m] = pt->fds[pt->fds_count - 1];
			fixup = 1;
		}

		pt->fds[pt->fds_count - 1].fd = -1;

		/* this decrements pt->fds_count */
		lws_plat_delete_socket_from_fds(context, wsi, m);
		pt->count_conns--;
		if (fixup) {
			v = (int) pt->fds[m].fd;
			/* old end guy's "position in fds table" is now the
			 * deletion guy's old one */
			end_wsi = wsi_from_fd(context, v);
			if (!end_wsi) {
				lwsl_wsi_err(wsi, "no wsi for fd %d pos %d, "
						  "pt->fds_count=%d",
						  (int)pt->fds[m].fd, m,
						  pt->fds_count);
				// assert(0);
			} else
				end_wsi->position_in_fds_table = m;
		}

		/* removed wsi has no position any more */
		wsi->position_in_fds_table = LWS_NO_FDS_POS;

#if defined(LWS_WITH_EXTERNAL_POLL)
		/* remove also from external POLL support via protocol 0 */
		if (lws_socket_is_valid(wsi->desc.sockfd) && wsi->a.vhost &&
		    wsi->a.vhost->protocols[0].callback(wsi,
						    LWS_CALLBACK_DEL_POLL_FD,
						    wsi->user_space,
						    (void *) &pa, 0))
			ret = -1;
#endif
	}

#if defined(LWS_WITH_SERVER)
	if (!context->being_destroyed &&
	    /* if this made some room, accept connects on this thread */
	    (unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
		lws_accept_modulation(context, pt, 1);
#endif

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					      wsi->user_space, (void *) &pa, 1))
		ret = -1;
#endif

//	__dump_fds(pt, "post remove");

	return ret;
}
475
476
int
477
__lws_change_pollfd(struct lws *wsi, int _and, int _or)
478
0
{
479
0
  struct lws_context *context;
480
0
  struct lws_pollargs pa;
481
0
  int ret = 0;
482
483
0
  if (!wsi || (!wsi->a.protocol && !wsi->event_pipe) ||
484
0
      wsi->position_in_fds_table == LWS_NO_FDS_POS)
485
0
    return 0;
486
487
0
  context = lws_get_context(wsi);
488
0
  if (!context)
489
0
    return 1;
490
491
#if defined(LWS_WITH_EXTERNAL_POLL)
492
  if (wsi->a.vhost &&
493
      wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
494
                wsi->user_space, (void *) &pa, 0))
495
    return -1;
496
#endif
497
498
0
  ret = _lws_change_pollfd(wsi, _and, _or, &pa);
499
500
#if defined(LWS_WITH_EXTERNAL_POLL)
501
  if (wsi->a.vhost &&
502
      wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
503
             wsi->user_space, (void *) &pa, 0))
504
    ret = -1;
505
#endif
506
507
0
  return ret;
508
0
}
509
510
int
511
lws_change_pollfd(struct lws *wsi, int _and, int _or)
512
0
{
513
0
  struct lws_context_per_thread *pt;
514
0
  int ret = 0;
515
516
0
  pt = &wsi->a.context->pt[(int)wsi->tsi];
517
518
0
  lws_pt_lock(pt, __func__);
519
0
  ret = __lws_change_pollfd(wsi, _and, _or);
520
0
  lws_pt_unlock(pt);
521
522
0
  return ret;
523
0
}
524
525
int
526
lws_callback_on_writable(struct lws *wsi)
527
0
{
528
0
  struct lws *w = wsi;
529
530
0
  if (lwsi_state(wsi) == LRS_SHUTDOWN)
531
0
    return 0;
532
533
0
  if (wsi->socket_is_permanently_unusable)
534
0
    return 0;
535
536
0
  if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_callback_on_writable)) {
537
0
    int q = lws_rops_func_fidx(wsi->role_ops,
538
0
             LWS_ROPS_callback_on_writable).
539
0
                  callback_on_writable(wsi);
540
0
    if (q)
541
0
      return 1;
542
0
    w = lws_get_network_wsi(wsi);
543
0
  } else
544
0
    if (w->position_in_fds_table == LWS_NO_FDS_POS) {
545
0
      lwsl_wsi_debug(wsi, "failed to find socket %d",
546
0
              wsi->desc.sockfd);
547
0
      return -1;
548
0
    }
549
550
0
  if (__lws_change_pollfd(w, 0, LWS_POLLOUT))
551
0
    return -1;
552
553
0
  return 1;
554
0
}
555
556
557
/*
558
 * stitch protocol choice into the vh protocol linked list
559
 * We always insert ourselves at the start of the list
560
 *
561
 * X <-> B
562
 * X <-> pAn <-> pB
563
 *
564
 * Illegal to attach more than once without detach in between
565
 */
566
void
567
lws_same_vh_protocol_insert(struct lws *wsi, int n)
568
0
{
569
0
  lws_context_lock(wsi->a.context, __func__);
570
0
  lws_vhost_lock(wsi->a.vhost);
571
572
0
  lws_dll2_remove(&wsi->same_vh_protocol);
573
0
  lws_dll2_add_head(&wsi->same_vh_protocol,
574
0
        &wsi->a.vhost->same_vh_protocol_owner[n]);
575
576
0
  wsi->bound_vhost_index = (uint8_t)n;
577
578
0
  lws_vhost_unlock(wsi->a.vhost);
579
0
  lws_context_unlock(wsi->a.context);
580
0
}
581
582
void
583
__lws_same_vh_protocol_remove(struct lws *wsi)
584
0
{
585
0
  if (wsi->a.vhost && wsi->a.vhost->same_vh_protocol_owner)
586
0
    lws_dll2_remove(&wsi->same_vh_protocol);
587
0
}
588
589
void
590
lws_same_vh_protocol_remove(struct lws *wsi)
591
0
{
592
0
  if (!wsi->a.vhost)
593
0
    return;
594
595
0
  lws_context_lock(wsi->a.context, __func__);
596
0
  lws_vhost_lock(wsi->a.vhost);
597
598
0
  __lws_same_vh_protocol_remove(wsi);
599
600
0
  lws_vhost_unlock(wsi->a.vhost);
601
0
  lws_context_unlock(wsi->a.context);
602
0
}
603
604
605
int
606
lws_callback_on_writable_all_protocol_vhost(const struct lws_vhost *vhost,
607
                   const struct lws_protocols *protocol)
608
0
{
609
0
  struct lws *wsi;
610
0
  int n;
611
612
0
  if (protocol < vhost->protocols ||
613
0
      protocol >= (vhost->protocols + vhost->count_protocols)) {
614
0
    lwsl_vhost_err((struct lws_vhost *)vhost,
615
0
             "protocol %p is not from vhost %p (%p - %p)",
616
0
             protocol, vhost->protocols, vhost,
617
0
          (vhost->protocols + vhost->count_protocols));
618
619
0
    return -1;
620
0
  }
621
622
0
  n = (int)(protocol - vhost->protocols);
623
624
0
  lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
625
0
      lws_dll2_get_head(&vhost->same_vh_protocol_owner[n])) {
626
0
    wsi = lws_container_of(d, struct lws, same_vh_protocol);
627
628
0
    assert(wsi->a.protocol &&
629
0
           wsi->a.protocol->callback == protocol->callback &&
630
0
           !strcmp(protocol->name, wsi->a.protocol->name));
631
632
0
    lws_callback_on_writable(wsi);
633
634
0
  } lws_end_foreach_dll_safe(d, d1);
635
636
0
  return 0;
637
0
}
638
639
int
640
lws_callback_on_writable_all_protocol(const struct lws_context *context,
641
              const struct lws_protocols *protocol)
642
0
{
643
0
  struct lws_vhost *vhost;
644
0
  int n;
645
646
0
  if (!context)
647
0
    return 0;
648
649
0
  vhost = context->vhost_list;
650
651
0
  while (vhost) {
652
0
    for (n = 0; n < vhost->count_protocols; n++)
653
0
      if (protocol->callback ==
654
0
           vhost->protocols[n].callback &&
655
0
          !strcmp(protocol->name, vhost->protocols[n].name))
656
0
        break;
657
0
    if (n != vhost->count_protocols)
658
0
      lws_callback_on_writable_all_protocol_vhost(
659
0
        vhost, &vhost->protocols[n]);
660
661
0
    vhost = vhost->vhost_next;
662
0
  }
663
664
0
  return 0;
665
0
}