/src/libwebsockets/lib/core-net/wsi.c
Line | Count | Source |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include "private-lib-core.h" |
26 | | |
27 | 0 | const char *lws_wsi_tag(struct lws *wsi) { |
28 | 0 | if (!wsi) |
29 | 0 | return "[null wsi]"; |
30 | 0 | return lws_lc_tag(&wsi->lc); |
31 | 0 | } |
32 | | |
33 | | #if defined(_DEBUG) |
/*
 * Debug-build setter: replace the role bits of wsi->wsistate, keeping
 * every non-role bit, and log the resulting state word.
 */
void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
{
	wsi->wsistate = (wsi->wsistate & (~LWSI_ROLE_MASK)) | role;

	lwsl_wsi_debug(wsi, "state 0x%lx", (unsigned long)wsi->wsistate);
}
39 | | |
/*
 * Debug-build setter: replace the connection-state (LRS) bits of
 * wsi->wsistate, keeping the role and other bits, and log old -> new.
 */
void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
{
	lws_wsi_state_t old = wsi->wsistate;

	wsi->wsistate = (old & (unsigned int)(~LRS_MASK)) | lrs;

	lwsl_wsi_debug(wsi, "lwsi_set_state 0x%lx -> 0x%lx", (unsigned long)old,
		       (unsigned long)wsi->wsistate);
}
48 | | #endif |
49 | | |
/*
 * Log-context "prepend" hook: prefix each log line emitted against a
 * wsi object with its lifecycle tag, advancing *p within the output
 * buffer bounded by e.
 */
void lws_log_prepend_wsi(struct lws_log_cx *cx, void *obj, char **p, char *e)
{
	struct lws *wsi = (struct lws *)obj;

	*p += lws_snprintf(*p, lws_ptr_diff_size_t(e, (*p)), "%s: ", lws_wsi_tag(wsi));
}
55 | | |
56 | 0 | void lws_vhost_bind_wsi(struct lws_vhost *vh, struct lws *wsi) { |
57 | 0 | if (wsi->a.vhost == vh) |
58 | 0 | return; |
59 | | |
60 | 0 | lws_context_lock(vh->context, __func__); /* ---------- context { */ |
61 | 0 | wsi->a.vhost = vh; |
62 | |
|
63 | | #if defined(LWS_WITH_TLS_JIT_TRUST) |
64 | | if (!vh->count_bound_wsi && vh->grace_after_unref) { |
65 | | lwsl_wsi_info(wsi, "in use"); |
66 | | lws_sul_cancel(&vh->sul_unref); |
67 | | } |
68 | | #endif |
69 | |
|
70 | 0 | vh->count_bound_wsi++; |
71 | 0 | lws_context_unlock(vh->context); /* } context ---------- */ |
72 | |
|
73 | 0 | lwsl_wsi_debug(wsi, "vh %s: wsi %s/%s, count_bound_wsi %d\n", vh->name, |
74 | 0 | wsi->role_ops ? wsi->role_ops->name : "none", |
75 | 0 | wsi->a.protocol ? wsi->a.protocol->name : "none", |
76 | 0 | vh->count_bound_wsi); |
77 | 0 | assert(wsi->a.vhost->count_bound_wsi > 0); |
78 | 0 | } |
79 | | |
80 | | /* req cx lock... acquires vh lock */ |
/*
 * Unbind the wsi from its vhost, dropping the vhost's bound-wsi
 * refcount.  Caller must hold the context lock (asserted below); the
 * vhost lock is taken here.  When the last wsi leaves a vhost that is
 * being destroyed, destruction is finalized after the vh lock is
 * dropped.  No-op for an unbound wsi.
 */
void __lws_vhost_unbind_wsi(struct lws *wsi)
{
	struct lws_vhost *vh = wsi->a.vhost;

	if (!vh)
		return;

	lws_context_assert_lock_held(wsi->a.context);

	lws_vhost_lock(vh);

	assert(vh->count_bound_wsi > 0);
	vh->count_bound_wsi--;

#if defined(LWS_WITH_TLS_JIT_TRUST)
	/* last wsi gone: start the vhost's unref grace period */
	if (!vh->count_bound_wsi && vh->grace_after_unref)
		lws_tls_jit_trust_vh_start_grace(vh);
#endif

	lwsl_wsi_debug(wsi, "vh %s: count_bound_wsi %d", vh->name,
		       vh->count_bound_wsi);

	lws_vhost_unlock(vh);

	if (!vh->count_bound_wsi && vh->being_destroyed)
		/*
		 * We have closed all wsi that were bound to this vhost
		 * by any pt: nothing can be servicing any wsi belonging
		 * to it any more.
		 *
		 * Finalize the vh destruction... must drop vh lock
		 */
		__lws_vhost_destroy2(vh);

	wsi->a.vhost = NULL;
}
116 | | |
/*
 * Map a (possibly muxed h2 / mqtt substream) wsi to the wsi owning the
 * actual network socket, by walking up mux.parent_wsi to the root.  A
 * wsi that is not a mux substream maps to itself.  NULL-safe.
 */
struct lws *lws_get_network_wsi(struct lws *wsi)
{
	if (!wsi)
		return NULL;

#if defined(LWS_WITH_HTTP2) || defined(LWS_ROLE_MQTT)
	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
	    && !wsi->client_mux_substream
#endif
	    )
		return wsi;

	while (wsi->mux.parent_wsi)
		wsi = wsi->mux.parent_wsi;
#endif

	return wsi;
}
135 | | |
136 | | const struct lws_protocols *lws_vhost_name_to_protocol(struct lws_vhost *vh, |
137 | 0 | const char *name) { |
138 | 0 | int n; |
139 | |
|
140 | 0 | for (n = 0; n < vh->count_protocols; n++) |
141 | 0 | if (vh->protocols[n].name && !strcmp(name, vh->protocols[n].name)) |
142 | 0 | return &vh->protocols[n]; |
143 | | |
144 | 0 | return NULL; |
145 | 0 | } |
146 | | |
/*
 * Invoke protocol->callback with `reason` on every wsi, in every
 * service thread, whose bound protocol matches `protocol` (same
 * callback pointer AND same name).  Always returns 0.
 */
int lws_callback_all_protocol(struct lws_context *context,
			      const struct lws_protocols *protocol,
			      int reason)
{
	struct lws_context_per_thread *pt = &context->pt[0];
	unsigned int n, m = context->count_threads;
	struct lws *wsi;

	/* walk every pt's pollfd table, resolving fds back to wsi */
	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi || !wsi->a.protocol)
				continue;
			if (wsi->a.protocol->callback == protocol->callback &&
			    !strcmp(protocol->name, wsi->a.protocol->name))
				protocol->callback(wsi, (enum lws_callback_reasons)reason,
						   wsi->user_space, NULL, 0);
		}
		pt++;
	}

	return 0;
}
169 | | |
/* Return the event library's private per-thread area for this wsi's pt. */
void *lws_evlib_wsi_to_evlib_pt(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	return pt->evlib_pt;
}
175 | | |
/* Return the event library's private per-thread area for service thread tsi. */
void *lws_evlib_tsi_to_evlib_pt(struct lws_context *cx, int tsi)
{
	struct lws_context_per_thread *pt = &cx->pt[tsi];

	return pt->evlib_pt;
}
181 | | |
/*
 * Invoke `reason` on every wsi bound to vhost vh, across all service
 * threads, passing argp/len through to the callback.  When protocol is
 * non-NULL, a wsi is skipped only if BOTH its callback pointer and its
 * protocol name differ from protocol's.  Always returns 0.
 *
 * NOTE(review): that filter is looser than lws_callback_all_protocol(),
 * which requires callback AND name to match -- confirm it is intended.
 */
int lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
					 const struct lws_protocols *protocol,
					 int reason, void *argp, size_t len)
{
	struct lws_context *context = vh->context;
	struct lws_context_per_thread *pt = &context->pt[0];
	unsigned int n, m = context->count_threads;
	struct lws *wsi;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi || !wsi->a.protocol || wsi->a.vhost != vh)
				continue;

			if (protocol && wsi->a.protocol->callback != protocol->callback &&
			    strcmp(protocol->name, wsi->a.protocol->name))
				continue;

			wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason,
						  wsi->user_space, argp, len);
		}
		pt++;
	}

	return 0;
}
209 | | |
/* Convenience wrapper: lws_callback_all_protocol_vhost_args() with no args. */
int lws_callback_all_protocol_vhost(struct lws_vhost *vh,
				    const struct lws_protocols *protocol,
				    int reason)
{
	return lws_callback_all_protocol_vhost_args(vh, protocol, reason, NULL, 0);
}
215 | | |
/*
 * Offer `reason` to every protocol on this wsi's vhost, with a NULL
 * user pointer.  Returns 1 as soon as any protocol callback returns
 * nonzero, else 0.
 */
int lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in,
				 size_t len)
{
	int n;

	for (n = 0; n < wsi->a.vhost->count_protocols; n++)
		if (wsi->a.vhost->protocols[n].callback(
			    wsi, (enum lws_callback_reasons)reason, NULL, in, len))
			return 1;

	return 0;
}
227 | | |
228 | | #if defined(LWS_WITH_SYS_FAULT_INJECTION) |
229 | | /* |
230 | | * We want to inject a fault that makes it feel like the peer hung up on us, |
231 | | * or we were otherwise cut off. |
232 | | */ |
/* Sul timer callback for the injected fault: force-close the wsi now. */
void lws_wsi_fault_timedclose_cb(lws_sorted_usec_list_t *s)
{
	struct lws *wsi = lws_container_of(s, struct lws, sul_fault_timedclose);

	lwsl_wsi_warn(wsi, "force-closing");
	lws_wsi_close(wsi, LWS_TO_KILL_ASYNC);
}
239 | | #endif |
240 | | |
241 | | #if defined(LWS_WITH_SYS_FAULT_INJECTION) |
/*
 * If the "timedclose" fault is armed for this wsi, pick a delay (ms)
 * from the "timedclose_ms" fault range and schedule an async forced
 * close, simulating the peer cutting us off.
 */
void lws_wsi_fault_timedclose(struct lws *wsi)
{
	uint64_t u;

	if (!lws_fi(&wsi->fic, "timedclose"))
		return;

	if (lws_fi_range(&wsi->fic, "timedclose_ms", &u))
		return;

	lwsl_wsi_warn(wsi, "injecting close in %ums", (unsigned int)u);
	lws_sul_schedule(wsi->a.context, wsi->tsi, &wsi->sul_fault_timedclose,
			 lws_wsi_fault_timedclose_cb, (lws_usec_t)(u * 1000ull));
}
255 | | #endif |
256 | | |
257 | | /* |
258 | | * We need the context lock |
259 | | * |
260 | | * If we returned a wsi rather than NULL, it is listed on the |
261 | | * context->pre_natal_owner list of wild wsi not yet part of |
262 | | * a vhost or on the fd list. |
263 | | */ |
264 | | |
/*
 * Allocate and minimally initialize a new wsi for the given role on
 * service thread tsi.  Caller must hold the context lock (asserted).
 *
 * On success the wsi is listed on the pt's pre_natal owner (see the
 * comment above): it is not yet bound to a vhost or on the fds table.
 * Returns NULL on OOM or when the "createfail" fault is injected.
 */
struct lws *__lws_wsi_create_with_role(struct lws_context *context, int tsi,
				       const struct lws_role_ops *ops,
				       lws_log_cx_t *log_cx_template)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	size_t s = sizeof(struct lws);
	struct lws *wsi;

	assert(tsi >= 0 && tsi < LWS_MAX_SMP);

	lws_context_assert_lock_held(context);

#if defined(LWS_WITH_EVENT_LIBS)
	/* event lib's private per-wsi area lives as a trailer on the wsi */
	s += context->event_loop_ops->evlib_size_wsi;
#endif

	wsi = lws_zalloc(s, __func__);

	if (!wsi) {
		lwsl_cx_err(context, "OOM");
		return NULL;
	}

	/* use the given log context template, else the context default */
	if (log_cx_template)
		wsi->lc.log_cx = log_cx_template;
	else
		wsi->lc.log_cx = context->log_cx;

#if defined(LWS_WITH_EVENT_LIBS)
	wsi->evlib_wsi = (uint8_t *)wsi + sizeof(*wsi);
#endif
	wsi->a.context = context;
	lws_role_transition(wsi, 0, LRS_UNCONNECTED, ops);
	wsi->pending_timeout = NO_PENDING_TIMEOUT;
	wsi->a.protocol = NULL;
	wsi->tsi = (char)tsi;
	wsi->a.vhost = NULL;
	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi->position_in_fds_table = LWS_NO_FDS_POS;

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	/* seed the wsi's fault-injection rng from the context's */
	lws_xos_init(&wsi->fic.xos, lws_xos(&context->fic.xos));
#endif

	lws_fi_inherit_copy(&wsi->fic, &context->fic, "wsi", NULL);

	if (lws_fi(&wsi->fic, "createfail")) {
		/*
		 * pre_natal has not been listed yet at this point;
		 * presumably lws_dll2_remove() tolerates a detached
		 * node -- TODO confirm
		 */
		lws_dll2_remove(&wsi->pre_natal);
		lws_fi_destroy(&wsi->fic);
		lws_free(wsi);
		return NULL;
	}

	lws_pt_lock(pt, __func__); /* -------------- pt { */
	lws_dll2_add_head(&wsi->pre_natal, &pt->pre_natal_wsi_owner);
	lws_pt_unlock(pt); /* } pt --------------- */

	return wsi;
}
323 | | |
/*
 * Under the pt lock: let the event lib adopt the socket (sock_accept),
 * insert the wsi into the fds table, and delist it from the pre_natal
 * owner.  Returns 0 on success, 1 on failure.
 */
int lws_wsi_inject_to_loop(struct lws_context_per_thread *pt, struct lws *wsi)
{
	int ret = 1;

	lws_pt_lock(pt, __func__); /* -------------- pt { */

	if (pt->context->event_loop_ops->sock_accept)
		if (pt->context->event_loop_ops->sock_accept(wsi))
			goto bail;

	if (__insert_wsi_socket_into_fds(pt->context, wsi))
		goto bail;

	/* now on the fds table, it is no longer "pre-natal" */
	lws_dll2_remove(&wsi->pre_natal);
	ret = 0;

bail:
	lws_pt_unlock(pt);

	return ret;
}
344 | | |
345 | | /* |
346 | | * Take a copy of wsi->desc.sockfd before calling this, then close it |
347 | | * afterwards |
348 | | */ |
349 | | |
/*
 * Detach the wsi from the event loop and fds table.  Returns 1 when the
 * event lib only performs a logical close, meaning close / destroy
 * continues asynchronously; returns 0 when the wsi is destroyed now.
 */
int lws_wsi_extract_from_loop(struct lws *wsi)
{
	if (lws_socket_is_valid(wsi->desc.sockfd))
		__remove_wsi_socket_from_fds(wsi);

	/* evlibs with only a logical close finish destruction later */
	if (!wsi->a.context->event_loop_ops->destroy_wsi &&
	    wsi->a.context->event_loop_ops->wsi_logical_close) {
		wsi->a.context->event_loop_ops->wsi_logical_close(wsi);
		return 1; /* close / destroy continues async */
	}

	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);

	return 0; /* he is destroyed */
}
365 | | |
/*
 * Broadcast `reason` to every protocol of vh using a temporary heap
 * "fake" wsi bound to the vhost.  Returns 1 on OOM or on the first
 * nonzero callback return, else 0.
 *
 * NOTE(review): the fake wsi is bound via lws_vhost_bind_wsi(), which
 * increments vh->count_bound_wsi, but it is lws_free()d here with no
 * corresponding unbind -- confirm whether the refcount leaks.
 */
int lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason,
				       void *in, size_t len)
{
	int n;
	struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");

	if (!wsi)
		return 1;

	wsi->a.context = vh->context;
	lws_vhost_bind_wsi(vh, wsi);

	for (n = 0; n < wsi->a.vhost->count_protocols; n++) {
		wsi->a.protocol = &vh->protocols[n];
		if (wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason, NULL,
					      in, len)) {
			lws_free(wsi);
			return 1;
		}
	}

	lws_free(wsi);

	return 0;
}
390 | | |
/*
 * Enable / disable rx flow control on a wsi.
 *
 * _enable is either a simple bool (0 = throttle rx, 1 = allow rx) or,
 * when LWS_RXFLOW_REASON_APPLIES is set, a reason bitmap where
 * LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT selects clearing vs setting the
 * reason bits.  Any bit set in wsi->rxflow_bitmap means rx is blocked.
 *
 * The pollfd change is deferred via LWS_RXFLOW_PENDING_CHANGE unless
 * LWS_RXFLOW_REASON_FLAG_PROCESS_NOW is given or no user callback is
 * currently in flight (rxflow_will_be_applied).  h2 / mux substreams
 * are a no-op.  Returns 0, or the result of applying the change now.
 */
int lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0;

	lwsl_wsi_info(wsi, "0x%x", _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap & ~(en & 0xff));
	else
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap | (en & 0xff));

	/* wanted state is already what is pending: nothing to do */
	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap);

	lwsl_wsi_info(wsi, "bitmap 0x%x: en 0x%x, ch 0x%x", wsi->rxflow_bitmap, en,
		      wsi->rxflow_change_to);

	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		/* apply immediately, still under the pt lock */
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}
442 | | |
/*
 * Re-allow rx (LWS_RXFLOW_ALLOW) on every wsi, across all service
 * threads, whose protocol matches (same callback pointer and name).
 */
void lws_rx_flow_allow_all_protocol(const struct lws_context *context,
				    const struct lws_protocols *protocol)
{
	const struct lws_context_per_thread *pt = &context->pt[0];
	struct lws *wsi;
	unsigned int n, m = context->count_threads;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi || !wsi->a.protocol)
				continue;
			if (wsi->a.protocol->callback == protocol->callback &&
			    !strcmp(protocol->name, wsi->a.protocol->name))
				lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
		}
		pt++;
	}
}
461 | | |
/*
 * Run a user protocol callback with rxflow deferral active, so that
 * lws_rx_flow_control() calls made inside it are batched; afterwards
 * apply any pending rxflow change.  Returns the callback's nonzero
 * result if it failed, else the rxflow apply result.
 */
int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	int n;

	wsi->rxflow_will_be_applied = 1;
	n = callback_function(wsi, reason, user, in, len);
	wsi->rxflow_will_be_applied = 0;
	if (!n)
		n = __lws_rx_flow_control(wsi);

	return n;
}
476 | | |
/*
 * Apply any pending rxflow change to the wsi's POLLIN pollfd state,
 * recursing into changed children first.  h2 / mux substreams are a
 * no-op.  Returns 0, or -1 on pollfd change failure.  (Leading __
 * usually means the caller holds the relevant lock -- TODO confirm.)
 */
int __lws_rx_flow_control(struct lws *wsi)
{
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0;

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);

		/*
		 * NOTE(review): control deliberately falls through here and
		 * applies the state change anyway -- confirm intended
		 */
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= (~LWS_RXFLOW_PENDING_CHANGE) & 3;

	lwsl_wsi_info(wsi, "rxflow: change_to %d",
		      wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		lwsl_wsi_info(wsi, "reenable POLLIN");

		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_wsi_info(wsi, "fail");
			return -1;
		}
	} else if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
		return -1;

	return 0;
}
525 | | |
/* Return the protocol currently bound to the wsi. */
const struct lws_protocols *lws_get_protocol(struct lws *wsi)
{
	return wsi->a.protocol;
}
529 | | |
/*
 * Lazily allocate the per-connection user space for the wsi's protocol.
 *
 * Size comes from protocol->per_session_data_size, or, when that is 0,
 * from asking the protocol via LWS_CALLBACK_GET_PSS_SIZE.  Returns 0 on
 * success (or nothing to allocate), 1 on OOM.
 */
int lws_ensure_user_space(struct lws *wsi)
{
	if (!wsi->a.protocol)
		return 0;

	/* allocate the per-connection user memory (if any) */

	if (!wsi->user_space) {
		size_t s = wsi->a.protocol->per_session_data_size;

		if (!s)
			s = (size_t)wsi->a.protocol->callback(wsi,
					LWS_CALLBACK_GET_PSS_SIZE, NULL, NULL, 0);

		if (s) {
			wsi->user_space = lws_zalloc(s, "user space");
			if (!wsi->user_space) {
				lwsl_wsi_err(wsi, "OOM");
				return 1;
			}
		}
	} else
		/* already allocated: just note what we have */
		lwsl_wsi_debug(wsi, "protocol pss %lu, user_space=%p",
			       (long)wsi->a.protocol->per_session_data_size,
			       wsi->user_space);
	return 0;
}
556 | | |
/*
 * Change the protocol's per-session-data size at runtime and (re)ensure
 * this wsi's user space.  Returns the user space pointer, NULL on OOM.
 *
 * NOTE(review): casts away const to write the shared lws_protocols
 * struct, which affects every wsi using this protocol -- confirm that
 * callers expect the global effect.
 */
void *lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
{
	((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
		new_size;

	if (lws_ensure_user_space(wsi))
		return NULL;

	return wsi->user_space;
}
566 | | |
567 | 0 | int lws_get_tsi(struct lws *wsi) { return (int)wsi->tsi; } |
568 | | |
/* Nonzero if the connection uses TLS; always 0 in non-TLS builds. */
int lws_is_ssl(struct lws *wsi)
{
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;
	return 0;
#endif
}
577 | | |
578 | | #if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS) |
579 | 0 | lws_tls_conn *lws_get_ssl(struct lws *wsi) { return wsi->tls.ssl; } |
580 | | #endif |
581 | | |
/*
 * Nonzero if this wsi -- or, on h2 builds, its network wsi -- still
 * has output buffered waiting to be sent.
 */
int lws_has_buffered_out(struct lws *wsi)
{
	if (wsi->buflist_out)
		return 1;

#if defined(LWS_ROLE_H2)
	{
		struct lws *nwsi = lws_get_network_wsi(wsi);

		if (nwsi->buflist_out)
			return 1;
	}
#endif

	return 0;
}
597 | | |
598 | 0 | int lws_partial_buffered(struct lws *wsi) { return lws_has_buffered_out(wsi); } |
599 | | |
/*
 * Ask the role how much tx credit we have toward the peer; -1 when the
 * role has no tx_credit operation.
 */
lws_fileofs_t lws_get_peer_write_allowance(struct lws *wsi)
{
	if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit))
		return -1;

	return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit)
	       .tx_credit(wsi, LWSTXCR_US_TO_PEER, 0);
}
607 | | |
/*
 * Set the wsi's role and state bits in one step, optionally switching
 * role_ops (NULL ops keeps the current ops).  Debug log builds also
 * report the new state word and ops name.
 */
void lws_role_transition(struct lws *wsi, enum lwsi_role role,
			 enum lwsi_state state,
			 const struct lws_role_ops *ops)
{
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	const char *name = "(unset)";
#endif
	wsi->wsistate = (unsigned int)role | (unsigned int)state;
	if (ops)
		wsi->role_ops = ops;
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	if (wsi->role_ops)
		name = wsi->role_ops->name;
	lwsl_wsi_debug(wsi, "wsistate 0x%lx, ops %s", (unsigned long)wsi->wsistate,
		       name);
#endif
}
624 | | |
/*
 * Destructively split a URI in-place into scheme, address, port and
 * path.  *prot, *ads and *path point into (NUL-terminated pieces of) p
 * after return; *path excludes the leading '/' (it is "/" when absent).
 * *port defaults to 80 for http/ws and 443 for https/wss, overridden by
 * an explicit :port; it is left untouched for other schemes.  Handles
 * bracketed IPv6 literals and '+'-prefixed unix socket paths.  Always
 * returns 0.
 */
int lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
		  const char **path)
{
	char unix_skt = 0;
	const char *cut;

	/* locate the "://" scheme separator, if there is one */
	*prot = p;
	while (*p && (*p != ':' || p[1] != '/' || p[2] != '/'))
		p++;

	if (*p) {
		/* terminate the scheme and step past "://" */
		*p = '\0';
		p += 3;
	} else {
		/* no scheme: *prot becomes the empty string at the NUL */
		cut = p;
		p = (char *)*prot;
		*prot = cut;
	}

	if (*p == '+') /* '+' introduces a unix domain socket path */
		unix_skt = 1;

	*ads = p;
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*p == '[') {
		/* bracketed IPv6 literal: report it without the brackets */
		(*ads)++;
		while (*p && *p != ']')
			p++;
		if (*p)
			*p++ = '\0';
	} else {
		/* unix skt paths may contain '/', so only ':' ends those */
		while (*p && *p != ':' && (unix_skt || *p != '/'))
			p++;
	}

	if (*p == ':') {
		/* explicit port overrides the scheme default */
		*p++ = '\0';
		*port = atoi(p);
		while (*p && *p != '/')
			p++;
	}

	*path = "/";
	if (*p) {
		*p++ = '\0';
		if (*p)
			*path = p;
	}

	return 0;
}
676 | | |
677 | | /* ... */ |
678 | | |
/*
 * Find a URI query argument by name and copy its value into buf.
 *
 * Iterates the WSI_TOKEN_HTTP_URI_ARGS fragments; on a fragment whose
 * start matches `name`, the value is shifted to the start of buf,
 * NUL-terminated, and its length returned.  If `name` lacked a
 * trailing '=', one present in the fragment is skipped too.  Returns
 * -1 if no fragment matches (or a matching fragment cannot fit).
 */
int lws_get_urlarg_by_name_safe(struct lws *wsi, const char *name, char *buf,
				int len)
{
	int n = 0, fraglen, sl = (int)strlen(name);

	do {
		fraglen = lws_hdr_copy_fragment(wsi, buf, len, WSI_TOKEN_HTTP_URI_ARGS, n);

		if (fraglen == -1) /* no fragment or basic problem */
			break;

		if (fraglen > 0 && /* fragment could fit */
		    fraglen + 1 < len && fraglen >= sl && !strncmp(buf, name, (size_t)sl)) {
			/*
			 * If he left off the trailing =, trim it from the
			 * result
			 */

			if (name[sl - 1] != '=' && sl < fraglen && buf[sl] == '=')
				sl++;

			/* shift the value down over the "name=" prefix */
			memmove(buf, buf + sl, (size_t)(fraglen - sl));
			buf[fraglen - sl] = '\0';

			return fraglen - sl;
		}

		n++;
	} while (1);

	return -1;
}
710 | | |
711 | | const char *lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, |
712 | 0 | int len) { |
713 | 0 | int n = lws_get_urlarg_by_name_safe(wsi, name, buf, len); |
714 | |
|
715 | 0 | return n < 0 ? NULL : buf; |
716 | 0 | } |
717 | | |
718 | | #if defined(LWS_WITHOUT_EXTENSIONS) |
719 | | |
720 | | /* we need to provide dummy callbacks for internal exts |
721 | | * so user code runs when faced with a lib compiled with |
722 | | * extensions disabled. |
723 | | */ |
724 | | |
/*
 * Stub permessage-deflate extension callback for LWS_WITHOUT_EXTENSIONS
 * builds: lets user code that names the built-in extension still link
 * and run; accepts and ignores everything, returning success.
 */
int lws_extension_callback_pm_deflate(
	struct lws_context *context, const struct lws_extension *ext,
	struct lws *wsi, enum lws_extension_callback_reasons reason, void *user,
	void *in, size_t len)
{
	(void)context;
	(void)ext;
	(void)wsi;
	(void)reason;
	(void)user;
	(void)in;
	(void)len;

	return 0;
}
739 | | |
/*
 * Extensions are compiled out (LWS_WITHOUT_EXTENSIONS): there is no
 * extension to configure, so setting any option always fails with -1.
 */
int lws_set_extension_option(struct lws *wsi, const char *ext_name,
			     const char *opt_name, const char *opt_val)
{
	(void)wsi;
	(void)ext_name;
	(void)opt_name;
	(void)opt_val;

	return -1;
}
744 | | #endif |
745 | | |
/* Nonzero if the wsi has an active CGI attached; always 0 without CGI. */
int lws_is_cgi(struct lws *wsi)
{
#ifdef LWS_WITH_CGI
	return !!wsi->http.cgi;
#else
	return 0;
#endif
}
753 | | |
754 | | const struct lws_protocol_vhost_options * |
755 | | lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name) |
756 | 0 | { |
757 | 0 | while (pvo) { |
758 | 0 | if (!strcmp(pvo->name, name)) |
759 | 0 | break; |
760 | | |
761 | 0 | pvo = pvo->next; |
762 | 0 | } |
763 | |
|
764 | 0 | return pvo; |
765 | 0 | } |
766 | | |
/*
 * Look up a pvo by name in the list passed as `in`; on a match store
 * its value string in *result and return 0, else return 1 leaving
 * *result untouched.
 */
int lws_pvo_get_str(void *in, const char *name, const char **result)
{
	const struct lws_protocol_vhost_options *pv =
		lws_pvo_search((const struct lws_protocol_vhost_options *)in, name);

	if (!pv)
		return 1;

	*result = (const char *)pv->value;

	return 0;
}
779 | | |
/*
 * Deliver `reason` to every protocol of every vhost in the context,
 * using a stack "fake wsi" (lws_fakewsi_* macros) so no real connection
 * is needed.  The pt's tsi is propagated on SMP builds.  Returns the OR
 * of callback results: nonzero if any callback returned nonzero.
 */
int lws_broadcast(struct lws_context_per_thread *pt, int reason, void *in,
		  size_t len)
{
	struct lws_vhost *v = pt->context->vhost_list;
	lws_fakewsi_def_plwsa(pt);
	int n, ret = 0;

	lws_fakewsi_prep_plwsa_ctx(pt->context);
#if !defined(LWS_PLAT_FREERTOS) && LWS_MAX_SMP > 1
	/* on SMP, let the callback see which service thread this is */
	((struct lws *)plwsa)->tsi = (char)(int)(pt - &pt->context->pt[0]);
#endif

	while (v) {
		const struct lws_protocols *p = v->protocols;

		plwsa->vhost = v; /* not a real bound wsi */

		for (n = 0; n < v->count_protocols; n++) {
			plwsa->protocol = p;
			if (p->callback &&
			    p->callback((struct lws *)plwsa, (enum lws_callback_reasons)reason,
					NULL, in, len))
				ret |= 1;
			p++;
		}

		v = v->vhost_next;
	}

	return ret;
}
810 | | |
/* Return the per-connection user space pointer. */
void *lws_wsi_user(struct lws *wsi) { return wsi->user_space; }

/* Return the service thread index (tsi) of this wsi. */
int lws_wsi_tsi(struct lws *wsi) { return wsi->tsi; }
814 | | |
/*
 * Attach externally-owned user data to the wsi, freeing any user space
 * the library itself had allocated.  Afterwards the library treats the
 * user pointer as externally allocated and will not free it.
 */
void lws_set_wsi_user(struct lws *wsi, void *data)
{
	if (!wsi->user_space_externally_allocated && wsi->user_space)
		lws_free(wsi->user_space);

	wsi->user_space_externally_allocated = 1;
	wsi->user_space = data;
}
822 | | |
/* Return this wsi's parent wsi. */
struct lws *lws_get_parent(const struct lws *wsi) { return wsi->parent; }

/* Return the head of this wsi's child list. */
struct lws *lws_get_child(const struct lws *wsi) { return wsi->child_list; }
826 | | |
/* Get the opaque pointer stashed on this wsi for its parent's use. */
void *lws_get_opaque_parent_data(const struct lws *wsi)
{
	return wsi->opaque_parent_data;
}

/* Stash an opaque pointer on this wsi for its parent's use. */
void lws_set_opaque_parent_data(struct lws *wsi, void *data)
{
	wsi->opaque_parent_data = data;
}

/* Get the opaque user pointer attached to this wsi. */
void *lws_get_opaque_user_data(const struct lws *wsi)
{
	return wsi->a.opaque_user_data;
}

/* Attach an opaque user pointer to this wsi. */
void lws_set_opaque_user_data(struct lws *wsi, void *data)
{
	wsi->a.opaque_user_data = data;
}

/* Nonzero if a child asked for a callback-on-writable on the parent. */
int lws_get_child_pending_on_writable(const struct lws *wsi)
{
	return wsi->parent_pending_cb_on_writable;
}

/* Clear the child's pending callback-on-writable flag. */
void lws_clear_child_pending_on_writable(struct lws *wsi)
{
	wsi->parent_pending_cb_on_writable = 0;
}
850 | | |
/* Return the vhost's configured name. */
const char *lws_get_vhost_name(struct lws_vhost *vhost) { return vhost->name; }

/* Return the vhost's listen port. */
int lws_get_vhost_port(struct lws_vhost *vhost) { return vhost->listen_port; }

/* Return the vhost's user pointer. */
void *lws_get_vhost_user(struct lws_vhost *vhost) { return vhost->user; }

/* Return the vhost's bound network interface string. */
const char *lws_get_vhost_iface(struct lws_vhost *vhost)
{
	return vhost->iface;
}
860 | | |
/* Return the wsi's socket descriptor, or -1 for a NULL wsi. */
lws_sockfd_type lws_get_socket_fd(struct lws *wsi)
{
	if (!wsi)
		return -1;
	return wsi->desc.sockfd;
}

/* Identical to lws_get_vhost(). */
struct lws_vhost *lws_vhost_get(struct lws *wsi) { return wsi->a.vhost; }

/* Return the vhost this wsi is bound to. */
struct lws_vhost *lws_get_vhost(struct lws *wsi) { return wsi->a.vhost; }

/* Identical to lws_get_protocol(). */
const struct lws_protocols *lws_protocol_get(struct lws *wsi)
{
	return wsi->a.protocol;
}
874 | | |
875 | | #if defined(LWS_WITH_UDP) |
876 | 0 | const struct lws_udp *lws_get_udp(const struct lws *wsi) { return wsi->udp; } |
877 | | #endif |
878 | | |
/* Return the lws_context this wsi belongs to. */
struct lws_context *lws_get_context(const struct lws *wsi)
{
	return wsi->a.context;
}

/* Return the log context used by this wsi; NULL-safe. */
struct lws_log_cx *lwsl_wsi_get_cx(struct lws *wsi)
{
	if (!wsi)
		return NULL;

	return wsi->lc.log_cx;
}
889 | | |
890 | | #if defined(LWS_WITH_CLIENT) |
/*
 * Called when the current transaction on an "active client connection"
 * (the wsi that owns the real socket / tls) completes.  If other client
 * wsi are queued on it waiting to pipeline, hand the socket, tls, queue
 * and active-conn list slot over to the first queued wsi and schedule
 * ourselves for deferred close; otherwise idle with a keep-warm timeout.
 *
 * take_vh_lock: nonzero if we must take the vhost lock around the
 *               hand-off (caller does not already hold it).
 *
 * Returns 0 when no new transaction starts, 1 when *_wsi was replaced by
 * the new active wsi (a new transaction begins), -1 on fds-table error.
 */
int _lws_generic_transaction_completed_active_conn(struct lws **_wsi,
                                                   char take_vh_lock) {
  struct lws *wnew, *wsi = *_wsi;

  /*
   * Are we constitutionally capable of having a queue, ie, we are on
   * the "active client connections" list?
   *
   * If not, that's it for us.
   */

  if (lws_dll2_is_detached(&wsi->dll_cli_active_conns))
    return 0; /* no new transaction */

  /*
   * With h1 queuing, the original "active client" moves his attributes
   * like fd, ssl, queue and active client list entry to the next guy in
   * the queue before closing... it's because the user code knows the
   * individual wsi and the action must take place in the correct wsi
   * context. Note this means we don't truly pipeline headers.
   *
   * Trying to keep the original "active client" in place to do the work
   * of the wsi breaks down when dealing with queued POSTs otherwise; it's
   * also competing with the real mux child arrangements and complicating
   * the code.
   *
   * For that reason, see if we have any queued child now...
   */

  if (!wsi->dll2_cli_txn_queue_owner.head) {
    /*
     * Nothing pipelined... we should hang around a bit
     * in case something turns up... otherwise we'll close
     */
    lwsl_wsi_info(wsi, "nothing pipelined waiting");
    lwsi_set_state(wsi, LRS_IDLING);

    lws_set_timeout(wsi, PENDING_TIMEOUT_CLIENT_CONN_IDLE, wsi->keep_warm_secs);

    return 0; /* no new transaction right now */
  }

  /*
   * We have a queued child wsi we should bequeath our assets to, before
   * closing ourself
   */

  if (take_vh_lock)
    lws_vhost_lock(wsi->a.vhost);

  wnew = lws_container_of(wsi->dll2_cli_txn_queue_owner.head, struct lws,
                          dll2_cli_txn_queue);

  assert(wsi != wnew);

  /* wnew stops being "queued" from here on */
  lws_dll2_remove(&wnew->dll2_cli_txn_queue);

  assert(lws_socket_is_valid(wsi->desc.sockfd));

  __lws_change_pollfd(wsi, LWS_POLLOUT | LWS_POLLIN, 0);

  /* copy the fd */
  wnew->desc = wsi->desc;

  assert(lws_socket_is_valid(wnew->desc.sockfd));

  /* disconnect the fd from association with old wsi */

  if (__remove_wsi_socket_from_fds(wsi))
    return -1;

  sanity_assert_no_wsi_traces(wsi->a.context, wsi);
  sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd);
  wsi->desc.sockfd = LWS_SOCK_INVALID;

  /* the old wsi must not fire timeouts for the socket it gave away */
  __lws_wsi_remove_from_sul(wsi);

  /*
   * ... we're doing some magic here in terms of handing off the socket
   * that has been active to a wsi that has not yet itself been active...
   * depending on the event lib we may need to give a magic spark to the
   * new guy and snuff out the old guy's magic spark at that level as well
   */

#if defined(LWS_WITH_EVENT_LIBS)
  if (wsi->a.context->event_loop_ops->destroy_wsi)
    wsi->a.context->event_loop_ops->destroy_wsi(wsi);
  if (wsi->a.context->event_loop_ops->sock_accept)
    wsi->a.context->event_loop_ops->sock_accept(wnew);
#endif

  /* point the fd table entry to new guy */

  assert(lws_socket_is_valid(wnew->desc.sockfd));

  if (__insert_wsi_socket_into_fds(wsi->a.context, wnew))
    return -1;

#if defined(LWS_WITH_TLS)
  /* pass on the tls */

  wnew->tls = wsi->tls;
  wsi->tls.client_bio = NULL;
  wsi->tls.ssl = NULL;
  wsi->tls.use_ssl = 0;
#endif

  /* take over his copy of his endpoint as an active connection */

  if (!wnew->cli_hostname_copy && wsi->cli_hostname_copy) {
    wnew->cli_hostname_copy = wsi->cli_hostname_copy;
    wsi->cli_hostname_copy = NULL;
  }
  wnew->keep_warm_secs = wsi->keep_warm_secs;

  /*
   * selected queued guy now replaces the original leader on the
   * active client conn list
   */

  lws_dll2_remove(&wsi->dll_cli_active_conns);
  lws_dll2_add_tail(&wnew->dll_cli_active_conns,
                    &wsi->a.vhost->dll_cli_active_conns_owner);

  /* move any queued guys to queue on new active conn */

  lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
                             wsi->dll2_cli_txn_queue_owner.head) {
    struct lws *ww = lws_container_of(d, struct lws, dll2_cli_txn_queue);

    lws_dll2_remove(&ww->dll2_cli_txn_queue);
    lws_dll2_add_tail(&ww->dll2_cli_txn_queue, &wnew->dll2_cli_txn_queue_owner);
  }
  lws_end_foreach_dll_safe(d, d1);

  if (take_vh_lock)
    lws_vhost_unlock(wsi->a.vhost);

  /*
   * The original leader who passed on all his powers already can die...
   * in the call stack above us there are guys who still want to touch
   * him, so have him die next time around the event loop, not now.
   */

  wsi->already_did_cce = 1; /* so the close doesn't trigger a CCE */
  lws_set_timeout(wsi, 1, LWS_TO_KILL_ASYNC);

  /* after the first one, they can only be coming from the queue */
  wnew->transaction_from_pipeline_queue = 1;

  lwsl_wsi_info(wsi, " pipeline queue passed -> %s", lws_wsi_tag(wnew));

  *_wsi = wnew; /* inform caller we swapped */

  return 1; /* new transaction */
}
1047 | | #endif |
1048 | | |
1049 | 0 | int LWS_WARN_UNUSED_RESULT lws_raw_transaction_completed(struct lws *wsi) { |
1050 | 0 | if (lws_has_buffered_out(wsi)) { |
1051 | | /* |
1052 | | * ...so he tried to send something large, but it went out |
1053 | | * as a partial, but he immediately called us to say he wants |
1054 | | * to close the connection. |
1055 | | * |
1056 | | * Defer the close until the last part of the partial is sent. |
1057 | | * |
1058 | | */ |
1059 | |
|
1060 | 0 | lwsl_wsi_debug(wsi, "deferring due to partial"); |
1061 | 0 | wsi->close_when_buffered_out_drained = 1; |
1062 | 0 | lws_callback_on_writable(wsi); |
1063 | |
|
1064 | 0 | return 0; |
1065 | 0 | } |
1066 | | |
1067 | 0 | return -1; |
1068 | 0 | } |
1069 | | |
/*
 * Bind (or rebind) wsi to protocol p on its vhost.
 *
 * Any existing binding is first torn down: the old protocol gets its
 * "unbind" callback (reason passed through as the callback's "in"),
 * per-connection user space is freed unless externally allocated, and
 * the wsi leaves the same-vh-protocol list.  Passing p == NULL just
 * unbinds and returns 0.
 *
 * On bind, user space is (re)allocated, the wsi is inserted on the
 * same-vh-protocol list slot matching p — by pointer when p lives inside
 * the vhost's protocol array, otherwise by protocol name — and the new
 * protocol receives its "bind" callback.
 *
 * Returns 0 on success, 1 on failure (no user space, or the bind
 * callback refused the connection).
 */
int lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p,
                      const char *reason) {
  // if (wsi->a.protocol == p)
  // return 0;
  const struct lws_protocols *vp = wsi->a.vhost->protocols, *vpo;

  /* balance the earlier bind callback with an unbind, exactly once */
  if (wsi->a.protocol && wsi->protocol_bind_balance) {
    wsi->a.protocol->callback(
        wsi, wsi->role_ops->protocol_unbind_cb[!!lwsi_role_server(wsi)],
        wsi->user_space, (void *)reason, 0);
    wsi->protocol_bind_balance = 0;
  }
  if (!wsi->user_space_externally_allocated)
    lws_free_set_NULL(wsi->user_space);

  lws_same_vh_protocol_remove(wsi);

  wsi->a.protocol = p;
  if (!p)
    return 0;

  if (lws_ensure_user_space(wsi))
    return 1;

  /* p inside the vhost's own protocol array? then index by pointer math */
  if (p > vp && p < &vp[wsi->a.vhost->count_protocols])
    lws_same_vh_protocol_insert(wsi, (int)(p - vp));
  else {
    /* p lives elsewhere: find the vhost slot with the same name */
    int n = wsi->a.vhost->count_protocols;
    int hit = 0;

    vpo = vp;

    while (n--) {
      if (p->name && vp->name && !strcmp(p->name, vp->name)) {
        hit = 1;
        lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
        break;
      }
      vp++;
    }
    if (!hit)
      lwsl_err("%s: %p is not in vhost '%s' protocols list\n", __func__, p,
               wsi->a.vhost->name);
  }

  if (wsi->a.protocol->callback(
          wsi, wsi->role_ops->protocol_bind_cb[!!lwsi_role_server(wsi)],
          wsi->user_space, NULL, 0))
    return 1;

  /* remember we owe the protocol an unbind later */
  wsi->protocol_bind_balance = 1;

  return 0;
}
1124 | | |
1125 | 0 | void lws_http_close_immortal(struct lws *wsi) { |
1126 | 0 | struct lws *nwsi; |
1127 | |
|
1128 | 0 | if (!wsi->mux_substream) |
1129 | 0 | return; |
1130 | | |
1131 | 0 | assert(wsi->mux_stream_immortal); |
1132 | 0 | wsi->mux_stream_immortal = 0; |
1133 | |
|
1134 | 0 | nwsi = lws_get_network_wsi(wsi); |
1135 | 0 | lwsl_wsi_debug(wsi, "%s (%d)", lws_wsi_tag(nwsi), |
1136 | 0 | nwsi->immortal_substream_count); |
1137 | 0 | assert(nwsi->immortal_substream_count); |
1138 | 0 | nwsi->immortal_substream_count--; |
1139 | 0 | if (!nwsi->immortal_substream_count) |
1140 | | /* |
1141 | | * since we closed the only immortal stream on this nwsi, we |
1142 | | * need to reapply a normal timeout regime to the nwsi |
1143 | | */ |
1144 | 0 | lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE, |
1145 | 0 | lws_wsi_keepalive_timeout_eff(wsi)); |
1146 | 0 | } |
1147 | | |
/*
 * Exempt a mux substream (h2 / client-mux) from timeouts "forever", eg,
 * for long-lived streams like SSE.  The substream's own timeout is
 * cleared; the first immortal substream also clears the timeout on the
 * carrying network wsi.  Balanced by lws_http_close_immortal().
 * No-op for non-mux wsi or if already marked.
 */
void lws_mux_mark_immortal(struct lws *wsi) {
  struct lws *nwsi;

  lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);

  if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
      && !wsi->client_mux_substream
#endif
  ) {
    // lwsl_wsi_err(wsi, "not mux substream");
    return;
  }

  if (wsi->mux_stream_immortal)
    /* only need to handle it once per child wsi */
    return;

  nwsi = lws_get_network_wsi(wsi);
  if (!nwsi)
    return;

  lwsl_wsi_debug(wsi, "%s (%d)\n", lws_wsi_tag(nwsi),
                 nwsi->immortal_substream_count);

  wsi->mux_stream_immortal = 1;
  assert(nwsi->immortal_substream_count < 255); /* largest count */
  nwsi->immortal_substream_count++;
  /* first immortal child suspends the network connection's timeout too */
  if (nwsi->immortal_substream_count == 1)
    lws_set_timeout(nwsi, NO_PENDING_TIMEOUT, 0);
}
1179 | | |
/*
 * Put an http stream into Server-Sent Events mode: release its header
 * allocation, exempt it from timeouts via lws_mux_mark_immortal(), and
 * flag mux substreams as carrying SSE.  Tolerates NULL wsi; always
 * returns 0.
 */
int lws_http_mark_sse(struct lws *wsi) {
  if (!wsi)
    return 0;

  lws_http_headers_detach(wsi);
  lws_mux_mark_immortal(wsi);

  if (wsi->mux_substream)
    wsi->h2_stream_carries_sse = 1;

  return 0;
}
1192 | | |
1193 | | #if defined(LWS_WITH_CLIENT) |
1194 | | |
/*
 * Return a stashed client-connect string for this wsi: taken from the
 * generic client stash slot stash_idx when a stash exists, otherwise
 * (h1/h2 builds) from the header allocation via token hdr_idx; NULL when
 * neither source is available.
 */
const char *lws_wsi_client_stash_item(struct lws *wsi, int stash_idx,
                                      int hdr_idx) {
  /* try the generic client stash */
  if (wsi->stash)
    return wsi->stash->cis[stash_idx];

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
  /* if not, use the ah stash if applicable */
  return lws_hdr_simple_ptr(wsi, (enum lws_token_indexes)hdr_idx);
#else
  return NULL;
#endif
}
1208 | | #endif |
1209 | | |
1210 | 0 | int lws_wsi_keepalive_timeout_eff(struct lws *wsi) { |
1211 | 0 | int ds = wsi->a.vhost->keepalive_timeout; |
1212 | |
|
1213 | 0 | #if defined(LWS_WITH_SERVER) |
1214 | 0 | if (wsi->http.mount_specific_keepalive_timeout_secs) |
1215 | 0 | ds = (int)wsi->http.mount_specific_keepalive_timeout_secs; |
1216 | |
|
1217 | 0 | if (wsi->parent && |
1218 | 0 | (int)wsi->parent->http.mount_specific_keepalive_timeout_secs > ds) |
1219 | 0 | ds = (int)wsi->parent->http.mount_specific_keepalive_timeout_secs; |
1220 | 0 | #endif |
1221 | |
|
1222 | 0 | if (!ds) |
1223 | 0 | ds = 31; |
1224 | | |
1225 | | // lwsl_wsi_notice(wsi, "Eff keepalive_timeout %ds ===================\n", |
1226 | | // ds); |
1227 | |
|
1228 | 0 | return ds; |
1229 | 0 | } |
1230 | | |
1231 | | #if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT) |
1232 | | |
1233 | | void lws_wsi_mux_insert(struct lws *wsi, struct lws *parent_wsi, |
1234 | 0 | unsigned int sid) { |
1235 | 0 | lwsl_wsi_info(wsi, "par %s: assign sid %d (curr %d)", lws_wsi_tag(parent_wsi), |
1236 | 0 | sid, wsi->mux.my_sid); |
1237 | |
|
1238 | 0 | if (wsi->mux.my_sid && wsi->mux.my_sid != (unsigned int)sid) |
1239 | 0 | assert(0); |
1240 | |
|
1241 | 0 | wsi->mux.my_sid = sid; |
1242 | 0 | wsi->mux.parent_wsi = parent_wsi; |
1243 | 0 | wsi->role_ops = parent_wsi->role_ops; |
1244 | | |
1245 | | /* new guy's sibling is whoever was the first child before */ |
1246 | 0 | wsi->mux.sibling_list = parent_wsi->mux.child_list; |
1247 | | |
1248 | | /* first child is now the new guy */ |
1249 | 0 | parent_wsi->mux.child_list = wsi; |
1250 | |
|
1251 | 0 | parent_wsi->mux.child_count++; |
1252 | 0 | } |
1253 | | |
1254 | 0 | struct lws *lws_wsi_mux_from_id(struct lws *parent_wsi, unsigned int sid) { |
1255 | 0 | lws_start_foreach_ll(struct lws *, wsi, parent_wsi->mux.child_list) { |
1256 | 0 | if (wsi->mux.my_sid == sid) |
1257 | 0 | return wsi; |
1258 | 0 | } |
1259 | 0 | lws_end_foreach_ll(wsi, mux.sibling_list); |
1260 | |
|
1261 | 0 | return NULL; |
1262 | 0 | } |
1263 | | |
/*
 * (debug builds only) Log, at INFO level, every child on the parent's
 * mux child list (ie, this wsi's siblings and itself).  No-op when wsi
 * has no mux parent or INFO logging is not visible.
 */
void lws_wsi_mux_dump_children(struct lws *wsi) {
#if defined(_DEBUG)
  if (!wsi->mux.parent_wsi || !lwsl_visible(LLL_INFO))
    return;

  lws_start_foreach_llp(struct lws **, w, wsi->mux.parent_wsi->mux.child_list) {
    lwsl_wsi_info(wsi, "   \\---- child %s %s\n",
                  (*w)->role_ops ? (*w)->role_ops->name : "?", lws_wsi_tag(*w));
    /* a child whose sibling pointer is itself means a corrupted list */
    assert(*w != (*w)->mux.sibling_list);
  }
  lws_end_foreach_llp(w, mux.sibling_list);
#endif
}
1277 | | |
/*
 * Close every mux child stream of wsi with the given close reason.
 * Each child is detached from the sibling list and marked permanently
 * unusable before being recursively closed; the list head is rewritten
 * as we go so iteration survives the frees.
 */
void lws_wsi_mux_close_children(struct lws *wsi, int reason) {
  struct lws *wsi2;
  struct lws **w;

  if (!wsi->mux.child_list)
    return;

  w = &wsi->mux.child_list;
  while (*w) {
    lwsl_wsi_info((*w), "   closing child");
    /* disconnect from siblings */
    wsi2 = (*w)->mux.sibling_list;
    assert(wsi2 != *w);
    (*w)->mux.sibling_list = NULL;
    (*w)->socket_is_permanently_unusable = 1;
    __lws_close_free_wsi(*w, (enum lws_close_status)reason,
                         "mux child recurse");
    /* step the list head past the child we just freed */
    *w = wsi2;
  }
}
1298 | | |
/*
 * Unlink wsi from its mux parent's child (sibling) list, decrement the
 * parent's child count and clear wsi's parent pointer.  Caller must
 * guarantee wsi currently has a mux parent.
 */
void lws_wsi_mux_sibling_disconnect(struct lws *wsi) {
  struct lws *wsi2;

  lws_start_foreach_llp(struct lws **, w, wsi->mux.parent_wsi->mux.child_list) {

    /* disconnect from siblings */
    if (*w == wsi) {
      wsi2 = (*w)->mux.sibling_list;
      (*w)->mux.sibling_list = NULL;
      /* splice our old next into the place we occupied */
      *w = wsi2;
      lwsl_wsi_debug(wsi, " disentangled from sibling %s", lws_wsi_tag(wsi2));
      break;
    }
  }
  lws_end_foreach_llp(w, mux.sibling_list);
  wsi->mux.parent_wsi->mux.child_count--;

  wsi->mux.parent_wsi = NULL;
}
1318 | | |
/*
 * (debug builds only) Log each mux child of wsi, flagging with '*' the
 * ones that have requested POLLOUT service.
 */
void lws_wsi_mux_dump_waiting_children(struct lws *wsi) {
#if defined(_DEBUG)
  struct lws *w;

  lwsl_info("%s: %s: children waiting for POLLOUT service:\n", __func__,
            lws_wsi_tag(wsi));

  for (w = wsi->mux.child_list; w; w = w->mux.sibling_list)
    lwsl_wsi_info(w, "  %c sid %u: 0x%x %s %s",
                  w->mux.requested_POLLOUT ? '*' : ' ', w->mux.my_sid,
                  lwsi_state(w), w->role_ops->name,
                  w->a.protocol ? w->a.protocol->name : "noprotocol");
#endif
}
1335 | | |
1336 | 0 | int lws_wsi_mux_mark_parents_needing_writeable(struct lws *wsi) { |
1337 | 0 | struct lws /* *network_wsi = lws_get_network_wsi(wsi), */ *wsi2; |
1338 | | // int already = network_wsi->mux.requested_POLLOUT; |
1339 | | |
1340 | | /* mark everybody above him as requesting pollout */ |
1341 | |
|
1342 | 0 | wsi2 = wsi; |
1343 | 0 | while (wsi2) { |
1344 | 0 | wsi2->mux.requested_POLLOUT = 1; |
1345 | 0 | lwsl_wsi_info(wsi2, "sid %u, pending writable", wsi2->mux.my_sid); |
1346 | 0 | wsi2 = wsi2->mux.parent_wsi; |
1347 | 0 | } |
1348 | |
|
1349 | 0 | return 0; // already; |
1350 | 0 | } |
1351 | | |
/*
 * Rotate the child at *wsi2 (a pointer into a sibling list) to the tail
 * of that list, so POLLOUT service round-robins fairly over the
 * children, and clear the chosen child's requested_POLLOUT flag.
 *
 * Returns the child rotated to the tail (the former *wsi2), the existing
 * tail when *wsi2 was already last, or NULL for an empty list.
 */
struct lws *lws_wsi_mux_move_child_to_tail(struct lws **wsi2) {
  struct lws *w = *wsi2;

  /* walk to the end of the sibling list */
  while (w) {
    if (!w->mux.sibling_list) { /* w is the current last */
      lwsl_wsi_debug(w, "*wsi2 = %s\n", lws_wsi_tag(*wsi2));

      if (w == *wsi2) /* we are already last */
        break;

      /* last points to us as new last */
      w->mux.sibling_list = *wsi2;

      /* guy pointing to us until now points to
       * our old next */
      *wsi2 = (*wsi2)->mux.sibling_list;

      /* we point to nothing because we are last */
      w->mux.sibling_list->mux.sibling_list = NULL;

      /* w becomes us */
      w = w->mux.sibling_list;
      break;
    }
    w = w->mux.sibling_list;
  }

  /* clear the waiting for POLLOUT on the guy that was chosen */

  if (w)
    w->mux.requested_POLLOUT = 0;

  return w;
}
1386 | | |
1387 | 0 | int lws_wsi_mux_action_pending_writeable_reqs(struct lws *wsi) { |
1388 | 0 | struct lws *w = wsi->mux.child_list; |
1389 | |
|
1390 | 0 | while (w) { |
1391 | 0 | if (w->mux.requested_POLLOUT) { |
1392 | 0 | if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) |
1393 | 0 | return -1; |
1394 | 0 | return 0; |
1395 | 0 | } |
1396 | 0 | w = w->mux.sibling_list; |
1397 | 0 | } |
1398 | | |
1399 | 0 | if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) |
1400 | 0 | return -1; |
1401 | | |
1402 | 0 | return 0; |
1403 | 0 | } |
1404 | | |
1405 | 0 | int lws_wsi_txc_check_skint(struct lws_tx_credit *txc, int32_t tx_cr) { |
1406 | 0 | if (txc->tx_cr <= 0) { |
1407 | | /* |
1408 | | * If other side is not able to cope with us sending any DATA |
1409 | | * so no matter if we have POLLOUT on our side if it's DATA we |
1410 | | * want to send. |
1411 | | */ |
1412 | |
|
1413 | 0 | if (!txc->skint) |
1414 | 0 | lwsl_info("%s: %p: skint (%d)\n", __func__, txc, (int)txc->tx_cr); |
1415 | |
|
1416 | 0 | txc->skint = 1; |
1417 | |
|
1418 | 0 | return 1; |
1419 | 0 | } |
1420 | | |
1421 | 0 | if (txc->skint) |
1422 | 0 | lwsl_info("%s: %p: unskint (%d)\n", __func__, txc, (int)txc->tx_cr); |
1423 | |
|
1424 | 0 | txc->skint = 0; |
1425 | |
|
1426 | 0 | return 0; |
1427 | 0 | } |
1428 | | |
1429 | | #if defined(_DEBUG) |
/* (debug builds only) Log a tx-credit snapshot for stream sid at the
 * call site named by "at": skint flag, estimated peer-to-us credit and
 * our us-to-peer credit. */
void lws_wsi_txc_describe(struct lws_tx_credit *txc, const char *at,
                          uint32_t sid) {
  lwsl_info("%s: %p: %s: sid %d: %speer-to-us: %d, us-to-peer: %d\n", __func__,
            txc, at, (int)sid, txc->skint ? "SKINT, " : "",
            (int)txc->peer_tx_cr_est, (int)txc->tx_cr);
}
1436 | | #endif |
1437 | | |
/*
 * Adjust / query tx credit by dispatching to the wsi's role ops, when
 * the role provides a tx_credit operation; roles without flow control
 * return 0.
 */
int lws_wsi_tx_credit(struct lws *wsi, char peer_to_us, int add) {
  if (wsi->role_ops && lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit))
    return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit)
        .tx_credit(wsi, peer_to_us, add);

  return 0;
}
1445 | | |
/*
 * Let the protocol know about incoming tx credit window updates if it's
 * managing the flow control manually (it may want to proxy this information)
 *
 * Returns the protocol callback's result (nonzero means the connection
 * should close), or 0 when the wsi is not in manual txc mode.
 */

int lws_wsi_txc_report_manual_txcr_in(struct lws *wsi, int32_t bump) {
  if (!wsi->txc.manual)
    /*
     * If we don't care about managing it manually, no need to
     * report it
     */
    return 0;

  /* bump is delivered to the protocol as the callback "len" argument */
  return user_callback_handle_rxflow(wsi->a.protocol->callback, wsi,
                                     LWS_CALLBACK_WSI_TX_CREDIT_GET,
                                     wsi->user_space, NULL, (size_t)bump);
}
1463 | | |
1464 | | #if defined(LWS_WITH_CLIENT) |
1465 | | |
/*
 * Walk the client transaction queue parked on wsi and adopt each queued
 * wsi as a mux child of wsi (h2 stream or mqtt, depending on the role),
 * removing it from the queue.  Context and vhost locks are held across
 * the walk.  Always returns 0.
 */
int lws_wsi_mux_apply_queue(struct lws *wsi) {
  /* we have a transaction queue that wants to pipeline */

  lws_context_lock(wsi->a.context, __func__); /* -------------- cx { */
  lws_vhost_lock(wsi->a.vhost);

  lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
                             wsi->dll2_cli_txn_queue_owner.head) {
    struct lws *w = lws_container_of(d, struct lws, dll2_cli_txn_queue);

#if defined(LWS_ROLE_H2)
    if (lwsi_role_http(wsi) &&
        lwsi_state(w) == LRS_H2_WAITING_TO_SEND_HEADERS) {
      lwsl_wsi_info(w, "cli pipeq to be h2");

      lwsi_set_state(w, LRS_H1C_ISSUE_HANDSHAKE2);

      /* remove ourselves from client queue */
      lws_dll2_remove(&w->dll2_cli_txn_queue);

      /* attach ourselves as an h2 stream */
      lws_wsi_h2_adopt(wsi, w);
    }
#endif

#if defined(LWS_ROLE_MQTT)
    if (lwsi_role_mqtt(wsi) && lwsi_state(wsi) == LRS_ESTABLISHED) {
      lwsl_wsi_info(w, "cli pipeq to be mqtt\n");

      /* remove ourselves from client queue */
      lws_dll2_remove(&w->dll2_cli_txn_queue);

      /* attach ourselves as an h2 stream */
      lws_wsi_mqtt_adopt(wsi, w);
    }
#endif
  }
  lws_end_foreach_dll_safe(d, d1);

  lws_vhost_unlock(wsi->a.vhost);
  lws_context_unlock(wsi->a.context); /* } cx -------------- */

  return 0;
}
1510 | | |
1511 | | #endif |
1512 | | |
1513 | | #endif |