/src/libwebsockets/lib/core-net/wsi.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include "private-lib-core.h" |
26 | | |
27 | | const char * |
28 | | lws_wsi_tag(struct lws *wsi) |
29 | 0 | { |
30 | 0 | if (!wsi) |
31 | 0 | return "[null wsi]"; |
32 | 0 | return lws_lc_tag(&wsi->lc); |
33 | 0 | } |
34 | | |
35 | | #if defined (_DEBUG) |
/* Debug builds only: replace the role bits of wsi->wsistate, keeping the state bits */
void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
{
	wsi->wsistate = (wsi->wsistate & (~LWSI_ROLE_MASK)) | role;

	lwsl_wsi_debug(wsi, "state 0x%lx", (unsigned long)wsi->wsistate);
}
42 | | |
/* Debug builds only: replace the LRS (connection state) bits of wsi->wsistate, keeping the role bits */
void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
{
	lws_wsi_state_t old = wsi->wsistate;

	wsi->wsistate = (old & (unsigned int)(~LRS_MASK)) | lrs;

	lwsl_wsi_debug(wsi, "lwsi_set_state 0x%lx -> 0x%lx",
		       (unsigned long)old, (unsigned long)wsi->wsistate);
}
52 | | #endif |
53 | | |
54 | | |
/*
 * Log prepend helper bound into the wsi's log context: writes "<wsi tag>: "
 * into *p (bounded by e) and advances *p past what was written.
 */
void
lws_log_prepend_wsi(struct lws_log_cx *cx, void *obj, char **p, char *e)
{
	struct lws *wsi = (struct lws *)obj;

	*p += lws_snprintf(*p, lws_ptr_diff_size_t(e, (*p)), "%s: ",
			   lws_wsi_tag(wsi));
}
63 | | |
64 | | void |
65 | | lws_vhost_bind_wsi(struct lws_vhost *vh, struct lws *wsi) |
66 | 0 | { |
67 | 0 | if (wsi->a.vhost == vh) |
68 | 0 | return; |
69 | | |
70 | 0 | lws_context_lock(vh->context, __func__); /* ---------- context { */ |
71 | 0 | wsi->a.vhost = vh; |
72 | |
|
73 | | #if defined(LWS_WITH_TLS_JIT_TRUST) |
74 | | if (!vh->count_bound_wsi && vh->grace_after_unref) { |
75 | | lwsl_wsi_info(wsi, "in use"); |
76 | | lws_sul_cancel(&vh->sul_unref); |
77 | | } |
78 | | #endif |
79 | |
|
80 | 0 | vh->count_bound_wsi++; |
81 | 0 | lws_context_unlock(vh->context); /* } context ---------- */ |
82 | |
|
83 | 0 | lwsl_wsi_debug(wsi, "vh %s: wsi %s/%s, count_bound_wsi %d\n", |
84 | 0 | vh->name, wsi->role_ops ? wsi->role_ops->name : "none", |
85 | 0 | wsi->a.protocol ? wsi->a.protocol->name : "none", |
86 | 0 | vh->count_bound_wsi); |
87 | 0 | assert(wsi->a.vhost->count_bound_wsi > 0); |
88 | 0 | } |
89 | | |
90 | | |
/*
 * Unbind wsi from its vhost (if any), decrementing the vhost's bound-wsi
 * count.  Requires the context lock already held; acquires and releases
 * the vh lock itself.  If this was the last bound wsi and the vhost is
 * being destroyed, finalizes the vhost destruction.
 */
void
__lws_vhost_unbind_wsi(struct lws *wsi)
{
	struct lws_vhost *vh = wsi->a.vhost;

	if (!vh)
		return;

	lws_context_assert_lock_held(wsi->a.context);

	lws_vhost_lock(vh);

	assert(vh->count_bound_wsi > 0);
	vh->count_bound_wsi--;

#if defined(LWS_WITH_TLS_JIT_TRUST)
	/* last user gone: start the JIT trust grace period, if configured */
	if (!vh->count_bound_wsi && vh->grace_after_unref)
		lws_tls_jit_trust_vh_start_grace(vh);
#endif

	lwsl_wsi_debug(wsi, "vh %s: count_bound_wsi %d",
		       vh->name, vh->count_bound_wsi);

	lws_vhost_unlock(vh);

	if (!vh->count_bound_wsi && vh->being_destroyed)
		/*
		 * We have closed all wsi that were bound to this vhost
		 * by any pt: nothing can be servicing any wsi belonging
		 * to it any more.
		 *
		 * Finalize the vh destruction... must drop vh lock
		 */
		__lws_vhost_destroy2(vh);

	wsi->a.vhost = NULL;
}
129 | | |
/*
 * Return the wsi owning the actual network connection: for an h2 / mqtt
 * mux substream, walk up the mux parent chain to the root; otherwise wsi
 * itself.  NULL-safe.
 */
struct lws *
lws_get_network_wsi(struct lws *wsi)
{
	if (!wsi)
		return NULL;

#if defined(LWS_WITH_HTTP2) || defined(LWS_ROLE_MQTT)
	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
	    && !wsi->client_mux_substream
#endif
	    )
		return wsi;

	while (wsi->mux.parent_wsi)
		wsi = wsi->mux.parent_wsi;
#endif

	return wsi;
}
150 | | |
151 | | |
152 | | const struct lws_protocols * |
153 | | lws_vhost_name_to_protocol(struct lws_vhost *vh, const char *name) |
154 | 0 | { |
155 | 0 | int n; |
156 | |
|
157 | 0 | for (n = 0; n < vh->count_protocols; n++) |
158 | 0 | if (vh->protocols[n].name && !strcmp(name, vh->protocols[n].name)) |
159 | 0 | return &vh->protocols[n]; |
160 | | |
161 | 0 | return NULL; |
162 | 0 | } |
163 | | |
/*
 * Invoke protocol->callback with the given reason on every wsi, across
 * all service threads, whose bound protocol matches both the callback
 * pointer and the protocol name.  Callback results are ignored; returns 0.
 */
int
lws_callback_all_protocol(struct lws_context *context,
			  const struct lws_protocols *protocol, int reason)
{
	struct lws_context_per_thread *pt = &context->pt[0];
	unsigned int n, m = context->count_threads;
	struct lws *wsi;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi || !wsi->a.protocol)
				continue;
			if (wsi->a.protocol->callback == protocol->callback &&
			    !strcmp(protocol->name, wsi->a.protocol->name))
				protocol->callback(wsi,
					(enum lws_callback_reasons)reason,
					wsi->user_space, NULL, 0);
		}
		pt++;
	}

	return 0;
}
188 | | |
189 | | void * |
190 | | lws_evlib_wsi_to_evlib_pt(struct lws *wsi) |
191 | 0 | { |
192 | 0 | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
193 | |
|
194 | 0 | return pt->evlib_pt; |
195 | 0 | } |
196 | | |
197 | | void * |
198 | | lws_evlib_tsi_to_evlib_pt(struct lws_context *cx, int tsi) |
199 | 0 | { |
200 | 0 | struct lws_context_per_thread *pt = &cx->pt[tsi]; |
201 | |
|
202 | 0 | return pt->evlib_pt; |
203 | 0 | } |
204 | | |
/*
 * Invoke the protocol callback with reason on every wsi bound to vhost vh
 * across all service threads, passing argp/len through.
 *
 * NOTE(review): with a non-NULL protocol filter, a wsi is skipped only
 * when BOTH the callback pointer and the name differ — looser than
 * lws_callback_all_protocol(), which requires both to match.  Confirm
 * against upstream whether this asymmetry is intentional.
 */
int
lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
			  const struct lws_protocols *protocol, int reason,
			  void *argp, size_t len)
{
	struct lws_context *context = vh->context;
	struct lws_context_per_thread *pt = &context->pt[0];
	unsigned int n, m = context->count_threads;
	struct lws *wsi;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi || !wsi->a.protocol || wsi->a.vhost != vh)
				continue;

			if (protocol &&
			    wsi->a.protocol->callback != protocol->callback &&
			    strcmp(protocol->name, wsi->a.protocol->name))
				continue;

			wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason,
					wsi->user_space, argp, len);
		}
		pt++;
	}

	return 0;
}
235 | | |
/* Convenience wrapper: lws_callback_all_protocol_vhost_args() with no args */
int
lws_callback_all_protocol_vhost(struct lws_vhost *vh,
			  const struct lws_protocols *protocol, int reason)
{
	return lws_callback_all_protocol_vhost_args(vh, protocol, reason, NULL, 0);
}
242 | | |
/*
 * Call every protocol on wsi's vhost with reason (user arg NULL).  Stops
 * early and returns 1 if any callback returns nonzero, else 0.
 */
int
lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in, size_t len)
{
	int n;

	for (n = 0; n < wsi->a.vhost->count_protocols; n++)
		if (wsi->a.vhost->protocols[n].callback(wsi, (enum lws_callback_reasons)reason, NULL, in, len))
			return 1;

	return 0;
}
254 | | |
255 | | #if defined(LWS_WITH_SYS_FAULT_INJECTION) |
256 | | /* |
257 | | * We want to inject a fault that makes it feel like the peer hung up on us, |
258 | | * or we were otherwise cut off. |
259 | | */ |
/* sul timer fired: force-close the wsi to simulate the peer cutting us off */
void
lws_wsi_fault_timedclose_cb(lws_sorted_usec_list_t *s)
{
	struct lws *wsi = lws_container_of(s, struct lws, sul_fault_timedclose);

	lwsl_wsi_warn(wsi, "force-closing");
	lws_wsi_close(wsi, LWS_TO_KILL_ASYNC);
}
268 | | #endif |
269 | | |
270 | | #if defined(LWS_WITH_SYS_FAULT_INJECTION) |
/*
 * If the "timedclose" fault is enabled for this wsi, schedule a forced
 * close after a delay (ms) drawn from the "timedclose_ms" fault range.
 */
void
lws_wsi_fault_timedclose(struct lws *wsi)
{
	uint64_t u;

	if (!lws_fi(&wsi->fic, "timedclose"))
		return;

	if (lws_fi_range(&wsi->fic, "timedclose_ms", &u))
		return;

	lwsl_wsi_warn(wsi, "injecting close in %ums", (unsigned int)u);
	lws_sul_schedule(wsi->a.context, wsi->tsi, &wsi->sul_fault_timedclose,
			 lws_wsi_fault_timedclose_cb,
			 (lws_usec_t)(u * 1000ull)); /* ms -> us */
}
287 | | #endif |
288 | | |
289 | | |
/*
 * We need the context lock
 *
 * If we returned a wsi rather than NULL, it is listed on the
 * context->pre_natal_owner list of wild wsi not yet part of
 * a vhost or on the fd list.
 */

struct lws *
__lws_wsi_create_with_role(struct lws_context *context, int tsi,
			   const struct lws_role_ops *ops,
			   lws_log_cx_t *log_cx_template)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	size_t s = sizeof(struct lws);
	struct lws *wsi;

	assert(tsi >= 0 && tsi < LWS_MAX_SMP);

	lws_context_assert_lock_held(context);

#if defined(LWS_WITH_EVENT_LIBS)
	/* the event lib's private per-wsi area lives directly after the wsi */
	s += context->event_loop_ops->evlib_size_wsi;
#endif

	wsi = lws_zalloc(s, __func__);

	if (!wsi) {
		lwsl_cx_err(context, "OOM");
		return NULL;
	}

	/* use the supplied log context template, else the context default */
	if (log_cx_template)
		wsi->lc.log_cx = log_cx_template;
	else
		wsi->lc.log_cx = context->log_cx;

#if defined(LWS_WITH_EVENT_LIBS)
	wsi->evlib_wsi = (uint8_t *)wsi + sizeof(*wsi);
#endif
	wsi->a.context = context;
	lws_role_transition(wsi, 0, LRS_UNCONNECTED, ops);
	wsi->pending_timeout = NO_PENDING_TIMEOUT;
	wsi->a.protocol = NULL;
	wsi->tsi = (char)tsi;
	wsi->a.vhost = NULL;
	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi->position_in_fds_table = LWS_NO_FDS_POS;

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	/* seed this wsi's fault-injection rng from the context's */
	lws_xos_init(&wsi->fic.xos, lws_xos(&context->fic.xos));
#endif

	lws_fi_inherit_copy(&wsi->fic, &context->fic, "wsi", NULL);

	/* fault injection: simulate wsi creation failure */
	if (lws_fi(&wsi->fic, "createfail")) {
		lws_dll2_remove(&wsi->pre_natal);
		lws_fi_destroy(&wsi->fic);
		lws_free(wsi);
		return NULL;
	}

	lws_pt_lock(pt, __func__); /* -------------- pt { */
	lws_dll2_add_head(&wsi->pre_natal, &pt->pre_natal_wsi_owner);
	lws_pt_unlock(pt); /* } pt --------------- */

	return wsi;
}
358 | | |
/*
 * Attach a pre-natal wsi to the event loop: let the evlib adopt the socket
 * (if it has a sock_accept op), insert it into the pt fds table, and drop
 * it from the pre-natal list.  Returns 0 on success, 1 on failure.
 */
int
lws_wsi_inject_to_loop(struct lws_context_per_thread *pt, struct lws *wsi)
{
	int ret = 1;

	lws_pt_lock(pt, __func__); /* -------------- pt { */

	if (pt->context->event_loop_ops->sock_accept)
		if (pt->context->event_loop_ops->sock_accept(wsi))
			goto bail;

	if (__insert_wsi_socket_into_fds(pt->context, wsi))
		goto bail;

	lws_dll2_remove(&wsi->pre_natal);
	ret = 0;

bail:
	lws_pt_unlock(pt);

	return ret;
}
381 | | |
/*
 * Take a copy of wsi->desc.sockfd before calling this, then close it
 * afterwards
 */

/*
 * Detach wsi from the event loop / fds table.  Returns 1 when the evlib
 * completes the close asynchronously (logical close), 0 when the wsi was
 * destroyed here.
 */
int
lws_wsi_extract_from_loop(struct lws *wsi)
{
	if (lws_socket_is_valid(wsi->desc.sockfd))
		__remove_wsi_socket_from_fds(wsi);

	if (!wsi->a.context->event_loop_ops->destroy_wsi &&
	    wsi->a.context->event_loop_ops->wsi_logical_close) {
		wsi->a.context->event_loop_ops->wsi_logical_close(wsi);
		return 1; /* close / destroy continues async */
	}

	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);

	return 0; /* he is destroyed */
}
404 | | |
/*
 * Broadcast reason to every protocol of vh via a temporary heap "fake"
 * wsi bound to the vhost.  Returns 1 on OOM or if any protocol callback
 * returns nonzero, else 0.
 *
 * NOTE(review): the fake wsi is freed without unbinding from the vhost,
 * although lws_vhost_bind_wsi() incremented count_bound_wsi for it —
 * confirm the accounting against upstream before relying on it.
 */
int
lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason, void *in,
				   size_t len)
{
	int n;
	struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");

	if (!wsi)
		return 1;

	wsi->a.context = vh->context;
	lws_vhost_bind_wsi(vh, wsi);

	for (n = 0; n < wsi->a.vhost->count_protocols; n++) {
		wsi->a.protocol = &vh->protocols[n];
		if (wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason, NULL, in, len)) {
			lws_free(wsi);
			return 1;
		}
	}

	lws_free(wsi);

	return 0;
}
430 | | |
431 | | |
/*
 * Enable / disable rx flow control on wsi.  _enable is either a simple
 * bool (0 = flow-control it, 1 = allow rx), or a bitmap of reason bits
 * when LWS_RXFLOW_REASON_APPLIES is set.  Returns 0, or the result of
 * applying the change immediately when that is requested / required.
 */
int
lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	lwsl_wsi_info(wsi, "0x%x", _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap & ~(en & 0xff));
	else
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap | (en & 0xff));

	/* nothing to do if this change is already the pending one */
	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE |
				(!wsi->rxflow_bitmap);

	lwsl_wsi_info(wsi, "bitmap 0x%x: en 0x%x, ch 0x%x",
		      wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

	/* apply now if asked to, or if no callback will apply it later */
	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}
486 | | |
/*
 * Re-enable rx (LWS_RXFLOW_ALLOW) on every wsi, across all service
 * threads, whose protocol matches both callback pointer and name.
 */
void
lws_rx_flow_allow_all_protocol(const struct lws_context *context,
			       const struct lws_protocols *protocol)
{
	const struct lws_context_per_thread *pt = &context->pt[0];
	struct lws *wsi;
	unsigned int n, m = context->count_threads;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi || !wsi->a.protocol)
				continue;
			if (wsi->a.protocol->callback == protocol->callback &&
			    !strcmp(protocol->name, wsi->a.protocol->name))
				lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
		}
		pt++;
	}
}
507 | | |
/*
 * Wrap a user protocol callback so that any rx flow-control change it
 * requested is applied after it returns.  Returns the callback's nonzero
 * result, or else the result of applying the flow-control change.
 */
int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	int n;

	wsi->rxflow_will_be_applied = 1;
	n = callback_function(wsi, reason, user, in, len);
	wsi->rxflow_will_be_applied = 0;
	if (!n)
		n = __lws_rx_flow_control(wsi);

	return n;
}
523 | | |
/*
 * Apply any pending rx flow-control change on wsi (recursing first into
 * children with pending changes), adjusting POLLIN on the pollfd.
 * Returns 0, or -1 if the pollfd change failed.
 */
int
__lws_rx_flow_control(struct lws *wsi)
{
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);
		// return 0;
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= (~LWS_RXFLOW_PENDING_CHANGE) & 3;

	lwsl_wsi_info(wsi, "rxflow: change_to %d",
		      wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		lwsl_wsi_info(wsi, "reenable POLLIN");
		// lws_buflist_describe(&wsi->buflist, NULL, __func__);
		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_wsi_info(wsi, "fail");
			return -1;
		}
	} else
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

	return 0;
}
575 | | |
576 | | |
/* Protocol currently bound to wsi (NULL until one is bound) */
const struct lws_protocols *
lws_get_protocol(struct lws *wsi)
{
	return wsi->a.protocol;
}
582 | | |
583 | | |
/*
 * Allocate the per-session user memory for wsi's protocol if it declares
 * a size and none exists yet.  Returns 0 on success or no-op, 1 on OOM.
 */
int
lws_ensure_user_space(struct lws *wsi)
{
	if (!wsi->a.protocol)
		return 0;

	/* allocate the per-connection user memory (if any) */

	if (wsi->a.protocol->per_session_data_size && !wsi->user_space) {
		wsi->user_space = lws_zalloc(
			wsi->a.protocol->per_session_data_size, "user space");
		if (wsi->user_space == NULL) {
			lwsl_wsi_err(wsi, "OOM");
			return 1;
		}
	} else
		lwsl_wsi_debug(wsi, "protocol pss %lu, user_space=%p",
			       (long)wsi->a.protocol->per_session_data_size,
			       wsi->user_space);
	return 0;
}
605 | | |
/*
 * Change the bound protocol's per-session data size at runtime, then
 * (re)ensure wsi->user_space is allocated.  Returns the user space, or
 * NULL on OOM.
 *
 * NOTE(review): this casts away const on the protocols struct — only safe
 * when the vhost's protocols are in writable storage; confirm with callers.
 */
void *
lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
{
	((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
		new_size;

	if (lws_ensure_user_space(wsi))
			return NULL;

	return wsi->user_space;
}
617 | | |
/* Service thread index (tsi) this wsi is bound to */
int
lws_get_tsi(struct lws *wsi)
{
	return (int)wsi->tsi;
}
623 | | |
/* Nonzero if the connection was asked to use TLS (always 0 without LWS_WITH_TLS) */
int
lws_is_ssl(struct lws *wsi)
{
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;
	return 0;
#endif
}
634 | | |
635 | | #if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS) |
/* Underlying TLS connection object (OpenSSL-compatible builds only) */
lws_tls_conn*
lws_get_ssl(struct lws *wsi)
{
	return wsi->tls.ssl;
}
641 | | #endif |
642 | | |
/*
 * Nonzero if wsi — or, when built with h2, its network-connection wsi —
 * still has partially-sent output buffered.
 */
int
lws_has_buffered_out(struct lws *wsi)
{
	if (wsi->buflist_out)
		return 1;

#if defined(LWS_ROLE_H2)
	{
		struct lws *nwsi = lws_get_network_wsi(wsi);

		if (nwsi->buflist_out)
			return 1;
	}
#endif

	return 0;
}
660 | | |
/* Back-compat name for lws_has_buffered_out() */
int
lws_partial_buffered(struct lws *wsi)
{
	return lws_has_buffered_out(wsi);
}
666 | | |
/*
 * Remaining tx credit towards the peer for roles that implement the
 * tx_credit op, or -1 when the role has no flow-control concept.
 */
lws_fileofs_t
lws_get_peer_write_allowance(struct lws *wsi)
{
	if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit))
		return -1;

	return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit).
				tx_credit(wsi, LWSTXCR_US_TO_PEER, 0);
}
676 | | |
/*
 * Set wsi's role and connection state in one step, optionally switching
 * role_ops (ops may be NULL to keep the current ops).
 */
void
lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
		    const struct lws_role_ops *ops)
{
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	const char *name = "(unset)";
#endif
	wsi->wsistate = (unsigned int)role | (unsigned int)state;
	if (ops)
		wsi->role_ops = ops;
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	if (wsi->role_ops)
		name = wsi->role_ops->name;
	lwsl_wsi_debug(wsi, "wsistate 0x%lx, ops %s",
		       (unsigned long)wsi->wsistate, name);
#endif
}
694 | | |
/*
 * Split a url in-place into protocol, address, port and path, writing
 * NULs over the separators.  A url with no "://" leaves *prot pointing at
 * an empty string and treats the whole input as the address part.  *port
 * is set only for well-known schemes or an explicit :port.  The returned
 * path omits the leading '/' unless it is just "/".  Always returns 0.
 */
int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	const char *after;
	char is_unix_skt = 0;

	*prot = p;
	while (*p) {
		if (p[0] == ':' && p[1] == '/' && p[2] == '/')
			break;
		p++;
	}
	if (*p) {
		*p = '\0';		/* terminate the scheme piece */
		p += 3;			/* step over "://" */
	} else {
		after = p;		/* no scheme: empty protocol string */
		p = (char *)*prot;
		*prot = after;
	}

	if (*p == '+')			/* unix socket address form */
		is_unix_skt = 1;

	*ads = p;

	/* default ports implied by well-known schemes */
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*p == '[') {		/* bracketed ipv6 literal */
		(*ads)++;
		while (*p && *p != ']')
			p++;
		if (*p)
			*p++ = '\0';
	} else {
		while (*p && *p != ':' && (is_unix_skt || *p != '/'))
			p++;
	}

	if (*p == ':') {		/* explicit port overrides default */
		*p++ = '\0';
		*port = atoi(p);
		while (*p && *p != '/')
			p++;
	}

	*path = "/";
	if (*p) {
		*p++ = '\0';		/* consume the leading '/' */
		if (*p)
			*path = p;
	}

	return 0;
}
748 | | |
749 | | /* ... */ |
750 | | |
/*
 * Find URL argument "name" (with or without trailing '=') among the
 * URI_ARGS fragments and copy its value into buf, NUL-terminated.
 * Returns the value length, or -1 if not found or buf is too small.
 */
int
lws_get_urlarg_by_name_safe(struct lws *wsi, const char *name, char *buf, int len)
{
	int n = 0, fraglen, sl = (int)strlen(name);

	do {
		fraglen = lws_hdr_copy_fragment(wsi, buf, len,
						WSI_TOKEN_HTTP_URI_ARGS, n);

		if (fraglen == -1) /* no fragment or basic problem */
			break;

		if (fraglen > 0 && /* fragment could fit */
		    fraglen + 1 < len &&
		    fraglen >= sl &&
		    !strncmp(buf, name, (size_t)sl)) {
			/*
			 * If he left off the trailing =, trim it from the
			 * result
			 */

			if (name[sl - 1] != '=' &&
			    sl < fraglen &&
			    buf[sl] == '=')
				sl++;

			/* shift the value to the start of buf */
			memmove(buf, buf + sl, (size_t)(fraglen - sl));
			buf[fraglen - sl] = '\0';

			return fraglen - sl;
		}

		n++;
	} while (1);

	return -1;
}
788 | | |
789 | | const char * |
790 | | lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, int len) |
791 | 0 | { |
792 | 0 | int n = lws_get_urlarg_by_name_safe(wsi, name, buf, len); |
793 | |
|
794 | 0 | return n < 0 ? NULL : buf; |
795 | 0 | } |
796 | | |
797 | | |
798 | | #if defined(LWS_WITHOUT_EXTENSIONS) |
799 | | |
800 | | /* we need to provide dummy callbacks for internal exts |
801 | | * so user code runs when faced with a lib compiled with |
802 | | * extensions disabled. |
803 | | */ |
804 | | |
/* No-op stub used when extensions are compiled out; always succeeds */
int
lws_extension_callback_pm_deflate(struct lws_context *context,
				  const struct lws_extension *ext,
				  struct lws *wsi,
				  enum lws_extension_callback_reasons reason,
				  void *user, void *in, size_t len)
{
	(void)context;
	(void)ext;
	(void)wsi;
	(void)reason;
	(void)user;
	(void)in;
	(void)len;

	return 0;
}
822 | | |
/* Stub: extensions are compiled out, so no option can exist; always fails */
int
lws_set_extension_option(struct lws *wsi, const char *ext_name,
			 const char *opt_name, const char *opt_val)
{
	return -1;
}
829 | | #endif |
830 | | |
/* Nonzero if wsi currently has a CGI attached (always 0 without LWS_WITH_CGI) */
int
lws_is_cgi(struct lws *wsi) {
#ifdef LWS_WITH_CGI
	return !!wsi->http.cgi;
#else
	return 0;
#endif
}
839 | | |
840 | | const struct lws_protocol_vhost_options * |
841 | | lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name) |
842 | 0 | { |
843 | 0 | while (pvo) { |
844 | 0 | if (!strcmp(pvo->name, name)) |
845 | 0 | break; |
846 | | |
847 | 0 | pvo = pvo->next; |
848 | 0 | } |
849 | |
|
850 | 0 | return pvo; |
851 | 0 | } |
852 | | |
853 | | int |
854 | | lws_pvo_get_str(void *in, const char *name, const char **result) |
855 | 0 | { |
856 | 0 | const struct lws_protocol_vhost_options *pv = |
857 | 0 | lws_pvo_search((const struct lws_protocol_vhost_options *)in, |
858 | 0 | name); |
859 | |
|
860 | 0 | if (!pv) |
861 | 0 | return 1; |
862 | | |
863 | 0 | *result = (const char *)pv->value; |
864 | |
|
865 | 0 | return 0; |
866 | 0 | } |
867 | | |
/*
 * Deliver reason to every protocol of every vhost using a minimal "fake"
 * wsi (plwsa) carrying only context/vhost/protocol pointers.  ORs the
 * callbacks' nonzero results together and returns that (0 = all ok).
 */
int
lws_broadcast(struct lws_context_per_thread *pt, int reason, void *in, size_t len)
{
	struct lws_vhost *v = pt->context->vhost_list;
	lws_fakewsi_def_plwsa(pt);
	int n, ret = 0;

	lws_fakewsi_prep_plwsa_ctx(pt->context);
#if !defined(LWS_PLAT_FREERTOS) && LWS_MAX_SMP > 1
	/* keep the fake wsi associated with pt's service thread */
	((struct lws *)plwsa)->tsi = (char)(int)(pt - &pt->context->pt[0]);
#endif

	while (v) {
		const struct lws_protocols *p = v->protocols;

		plwsa->vhost = v; /* not a real bound wsi */

		for (n = 0; n < v->count_protocols; n++) {
			plwsa->protocol = p;
			if (p->callback &&
			    p->callback((struct lws *)plwsa, (enum lws_callback_reasons)reason, NULL, in, len))
				ret |= 1;
			p++;
		}

		v = v->vhost_next;
	}

	return ret;
}
898 | | |
/* Per-connection user memory pointer (may be NULL if never allocated) */
void *
lws_wsi_user(struct lws *wsi)
{
	return wsi->user_space;
}
904 | | |
/* Service thread index of wsi (same datum as lws_get_tsi()) */
int
lws_wsi_tsi(struct lws *wsi)
{
	return wsi->tsi;
}
910 | | |
911 | | |
/*
 * Point wsi->user_space at externally-owned data, freeing any previously
 * lib-allocated user space first.  lws will not free the new pointer.
 */
void
lws_set_wsi_user(struct lws *wsi, void *data)
{
	if (!wsi->user_space_externally_allocated && wsi->user_space)
		lws_free(wsi->user_space);

	wsi->user_space_externally_allocated = 1;
	wsi->user_space = data;
}
921 | | |
/* Parent wsi, or NULL if wsi has no parent */
struct lws *
lws_get_parent(const struct lws *wsi)
{
	return wsi->parent;
}
927 | | |
/* Head of wsi's child list, or NULL if it has no children */
struct lws *
lws_get_child(const struct lws *wsi)
{
	return wsi->child_list;
}
933 | | |
/* Opaque pointer stored by lws_set_opaque_parent_data() */
void *
lws_get_opaque_parent_data(const struct lws *wsi)
{
	return wsi->opaque_parent_data;
}
939 | | |
/* Store an opaque user pointer on wsi for parent/child relationships */
void
lws_set_opaque_parent_data(struct lws *wsi, void *data)
{
	wsi->opaque_parent_data = data;
}
945 | | |
/* Opaque pointer stored by lws_set_opaque_user_data() */
void *
lws_get_opaque_user_data(const struct lws *wsi)
{
	return wsi->a.opaque_user_data;
}
951 | | |
/* Store an opaque user pointer on wsi (never dereferenced by lws) */
void
lws_set_opaque_user_data(struct lws *wsi, void *data)
{
	wsi->a.opaque_user_data = data;
}
957 | | |
/* Nonzero if a child wsi has asked for a callback-on-writable via the parent */
int
lws_get_child_pending_on_writable(const struct lws *wsi)
{
	return wsi->parent_pending_cb_on_writable;
}
963 | | |
/* Clear the child's pending callback-on-writable flag on the parent */
void
lws_clear_child_pending_on_writable(struct lws *wsi)
{
	wsi->parent_pending_cb_on_writable = 0;
}
969 | | |
970 | | |
971 | | |
/* Name string of the vhost */
const char *
lws_get_vhost_name(struct lws_vhost *vhost)
{
	return vhost->name;
}
977 | | |
/* Port the vhost is listening on */
int
lws_get_vhost_port(struct lws_vhost *vhost)
{
	return vhost->listen_port;
}
983 | | |
/* Opaque user pointer attached to the vhost */
void *
lws_get_vhost_user(struct lws_vhost *vhost)
{
	return vhost->user;
}
989 | | |
/* Network interface name the vhost binds to (may be NULL) — TODO confirm NULLability with vhost creation */
const char *
lws_get_vhost_iface(struct lws_vhost *vhost)
{
	return vhost->iface;
}
995 | | |
996 | | lws_sockfd_type |
997 | | lws_get_socket_fd(struct lws *wsi) |
998 | 0 | { |
999 | 0 | if (!wsi) |
1000 | 0 | return -1; |
1001 | 0 | return wsi->desc.sockfd; |
1002 | 0 | } |
1003 | | |
1004 | | |
/* Vhost wsi is bound to (same as lws_get_vhost(); older name) */
struct lws_vhost *
lws_vhost_get(struct lws *wsi)
{
	return wsi->a.vhost;
}
1010 | | |
/* Vhost wsi is bound to (NULL before binding) */
struct lws_vhost *
lws_get_vhost(struct lws *wsi)
{
	return wsi->a.vhost;
}
1016 | | |
/* Bound protocol of wsi (same as lws_get_protocol(); older name) */
const struct lws_protocols *
lws_protocol_get(struct lws *wsi)
{
	return wsi->a.protocol;
}
1022 | | |
#if defined(LWS_WITH_UDP)
/* Accessor: the udp state object for a udp wsi (NULL for tcp wsi). */
const struct lws_udp *
lws_get_udp(const struct lws *wsi)
{
	return wsi->udp;
}
#endif
1030 | | |
/* Accessor: the lws_context this wsi was created under. */
struct lws_context *
lws_get_context(const struct lws *wsi)
{
	return wsi->a.context;
}
1036 | | |
1037 | | struct lws_log_cx * |
1038 | | lwsl_wsi_get_cx(struct lws *wsi) |
1039 | 0 | { |
1040 | 0 | if (!wsi) |
1041 | 0 | return NULL; |
1042 | | |
1043 | 0 | return wsi->lc.log_cx; |
1044 | 0 | } |
1045 | | |
1046 | | #if defined(LWS_WITH_CLIENT) |
1047 | | int |
1048 | | _lws_generic_transaction_completed_active_conn(struct lws **_wsi, char take_vh_lock) |
1049 | 0 | { |
1050 | 0 | struct lws *wnew, *wsi = *_wsi; |
1051 | | |
1052 | | /* |
1053 | | * Are we constitutionally capable of having a queue, ie, we are on |
1054 | | * the "active client connections" list? |
1055 | | * |
1056 | | * If not, that's it for us. |
1057 | | */ |
1058 | |
|
1059 | 0 | if (lws_dll2_is_detached(&wsi->dll_cli_active_conns)) |
1060 | 0 | return 0; /* no new transaction */ |
1061 | | |
1062 | | /* |
1063 | | * With h1 queuing, the original "active client" moves his attributes |
1064 | | * like fd, ssl, queue and active client list entry to the next guy in |
1065 | | * the queue before closing... it's because the user code knows the |
1066 | | * individual wsi and the action must take place in the correct wsi |
1067 | | * context. Note this means we don't truly pipeline headers. |
1068 | | * |
1069 | | * Trying to keep the original "active client" in place to do the work |
1070 | | * of the wsi breaks down when dealing with queued POSTs otherwise; it's |
1071 | | * also competing with the real mux child arrangements and complicating |
1072 | | * the code. |
1073 | | * |
1074 | | * For that reason, see if we have any queued child now... |
1075 | | */ |
1076 | | |
1077 | 0 | if (!wsi->dll2_cli_txn_queue_owner.head) { |
1078 | | /* |
1079 | | * Nothing pipelined... we should hang around a bit |
1080 | | * in case something turns up... otherwise we'll close |
1081 | | */ |
1082 | 0 | lwsl_wsi_info(wsi, "nothing pipelined waiting"); |
1083 | 0 | lwsi_set_state(wsi, LRS_IDLING); |
1084 | |
|
1085 | 0 | lws_set_timeout(wsi, PENDING_TIMEOUT_CLIENT_CONN_IDLE, |
1086 | 0 | wsi->keep_warm_secs); |
1087 | |
|
1088 | 0 | return 0; /* no new transaction right now */ |
1089 | 0 | } |
1090 | | |
1091 | | /* |
1092 | | * We have a queued child wsi we should bequeath our assets to, before |
1093 | | * closing ourself |
1094 | | */ |
1095 | | |
1096 | 0 | if (take_vh_lock) |
1097 | 0 | lws_vhost_lock(wsi->a.vhost); |
1098 | |
|
1099 | 0 | wnew = lws_container_of(wsi->dll2_cli_txn_queue_owner.head, struct lws, |
1100 | 0 | dll2_cli_txn_queue); |
1101 | |
|
1102 | 0 | assert(wsi != wnew); |
1103 | | |
1104 | 0 | lws_dll2_remove(&wnew->dll2_cli_txn_queue); |
1105 | |
|
1106 | 0 | assert(lws_socket_is_valid(wsi->desc.sockfd)); |
1107 | | |
1108 | 0 | __lws_change_pollfd(wsi, LWS_POLLOUT | LWS_POLLIN, 0); |
1109 | | |
1110 | | /* copy the fd */ |
1111 | 0 | wnew->desc = wsi->desc; |
1112 | |
|
1113 | 0 | assert(lws_socket_is_valid(wnew->desc.sockfd)); |
1114 | | |
1115 | | /* disconnect the fd from association with old wsi */ |
1116 | | |
1117 | 0 | if (__remove_wsi_socket_from_fds(wsi)) |
1118 | 0 | return -1; |
1119 | | |
1120 | 0 | sanity_assert_no_wsi_traces(wsi->a.context, wsi); |
1121 | 0 | sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd); |
1122 | 0 | wsi->desc.sockfd = LWS_SOCK_INVALID; |
1123 | |
|
1124 | 0 | __lws_wsi_remove_from_sul(wsi); |
1125 | | |
1126 | | /* |
1127 | | * ... we're doing some magic here in terms of handing off the socket |
1128 | | * that has been active to a wsi that has not yet itself been active... |
1129 | | * depending on the event lib we may need to give a magic spark to the |
1130 | | * new guy and snuff out the old guy's magic spark at that level as well |
1131 | | */ |
1132 | |
|
1133 | | #if defined(LWS_WITH_EVENT_LIBS) |
1134 | | if (wsi->a.context->event_loop_ops->destroy_wsi) |
1135 | | wsi->a.context->event_loop_ops->destroy_wsi(wsi); |
1136 | | if (wsi->a.context->event_loop_ops->sock_accept) |
1137 | | wsi->a.context->event_loop_ops->sock_accept(wnew); |
1138 | | #endif |
1139 | | |
1140 | | /* point the fd table entry to new guy */ |
1141 | |
|
1142 | 0 | assert(lws_socket_is_valid(wnew->desc.sockfd)); |
1143 | | |
1144 | 0 | if (__insert_wsi_socket_into_fds(wsi->a.context, wnew)) |
1145 | 0 | return -1; |
1146 | | |
1147 | 0 | #if defined(LWS_WITH_TLS) |
1148 | | /* pass on the tls */ |
1149 | | |
1150 | 0 | wnew->tls = wsi->tls; |
1151 | 0 | wsi->tls.client_bio = NULL; |
1152 | 0 | wsi->tls.ssl = NULL; |
1153 | 0 | wsi->tls.use_ssl = 0; |
1154 | 0 | #endif |
1155 | | |
1156 | | /* take over his copy of his endpoint as an active connection */ |
1157 | |
|
1158 | 0 | if (!wnew->cli_hostname_copy && wsi->cli_hostname_copy) { |
1159 | 0 | wnew->cli_hostname_copy = wsi->cli_hostname_copy; |
1160 | 0 | wsi->cli_hostname_copy = NULL; |
1161 | 0 | } |
1162 | 0 | wnew->keep_warm_secs = wsi->keep_warm_secs; |
1163 | | |
1164 | | /* |
1165 | | * selected queued guy now replaces the original leader on the |
1166 | | * active client conn list |
1167 | | */ |
1168 | |
|
1169 | 0 | lws_dll2_remove(&wsi->dll_cli_active_conns); |
1170 | 0 | lws_dll2_add_tail(&wnew->dll_cli_active_conns, |
1171 | 0 | &wsi->a.vhost->dll_cli_active_conns_owner); |
1172 | | |
1173 | | /* move any queued guys to queue on new active conn */ |
1174 | |
|
1175 | 0 | lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, |
1176 | 0 | wsi->dll2_cli_txn_queue_owner.head) { |
1177 | 0 | struct lws *ww = lws_container_of(d, struct lws, |
1178 | 0 | dll2_cli_txn_queue); |
1179 | |
|
1180 | 0 | lws_dll2_remove(&ww->dll2_cli_txn_queue); |
1181 | 0 | lws_dll2_add_tail(&ww->dll2_cli_txn_queue, |
1182 | 0 | &wnew->dll2_cli_txn_queue_owner); |
1183 | |
|
1184 | 0 | } lws_end_foreach_dll_safe(d, d1); |
1185 | |
|
1186 | 0 | if (take_vh_lock) |
1187 | 0 | lws_vhost_unlock(wsi->a.vhost); |
1188 | | |
1189 | | /* |
1190 | | * The original leader who passed on all his powers already can die... |
1191 | | * in the call stack above us there are guys who still want to touch |
1192 | | * him, so have him die next time around the event loop, not now. |
1193 | | */ |
1194 | |
|
1195 | 0 | wsi->already_did_cce = 1; /* so the close doesn't trigger a CCE */ |
1196 | 0 | lws_set_timeout(wsi, 1, LWS_TO_KILL_ASYNC); |
1197 | | |
1198 | | /* after the first one, they can only be coming from the queue */ |
1199 | 0 | wnew->transaction_from_pipeline_queue = 1; |
1200 | |
|
1201 | 0 | lwsl_wsi_info(wsi, " pipeline queue passed -> %s", lws_wsi_tag(wnew)); |
1202 | |
|
1203 | 0 | *_wsi = wnew; /* inform caller we swapped */ |
1204 | |
|
1205 | 0 | return 1; /* new transaction */ |
1206 | 0 | } |
1207 | | #endif |
1208 | | |
1209 | | int LWS_WARN_UNUSED_RESULT |
1210 | | lws_raw_transaction_completed(struct lws *wsi) |
1211 | 0 | { |
1212 | 0 | if (lws_has_buffered_out(wsi)) { |
1213 | | /* |
1214 | | * ...so he tried to send something large, but it went out |
1215 | | * as a partial, but he immediately called us to say he wants |
1216 | | * to close the connection. |
1217 | | * |
1218 | | * Defer the close until the last part of the partial is sent. |
1219 | | * |
1220 | | */ |
1221 | |
|
1222 | 0 | lwsl_wsi_debug(wsi, "deferring due to partial"); |
1223 | 0 | wsi->close_when_buffered_out_drained = 1; |
1224 | 0 | lws_callback_on_writable(wsi); |
1225 | |
|
1226 | 0 | return 0; |
1227 | 0 | } |
1228 | | |
1229 | 0 | return -1; |
1230 | 0 | } |
1231 | | |
/*
 * Rebind wsi to protocol p: unbind the current protocol (issuing its unbind
 * callback exactly once), free any lws-owned per-connection user space, then
 * bind p, allocate fresh user space and issue the bind callback.
 *
 * p may be NULL to just unbind.  reason is passed to the unbind callback.
 * Returns 0 on success, 1 on failure (user space alloc or bind callback).
 */
int
lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p,
		  const char *reason)
{
//	if (wsi->a.protocol == p)
//		return 0;
	const struct lws_protocols *vp = wsi->a.vhost->protocols, *vpo;

	/* tell the old protocol it is being unbound, balanced by the flag */
	if (wsi->a.protocol && wsi->protocol_bind_balance) {
		wsi->a.protocol->callback(wsi,
		       wsi->role_ops->protocol_unbind_cb[!!lwsi_role_server(wsi)],
		       wsi->user_space, (void *)reason, 0);
		wsi->protocol_bind_balance = 0;
	}
	/* per-connection user space belongs to the binding; drop it if ours */
	if (!wsi->user_space_externally_allocated)
		lws_free_set_NULL(wsi->user_space);

	lws_same_vh_protocol_remove(wsi);

	wsi->a.protocol = p;
	if (!p)
		return 0;

	if (lws_ensure_user_space(wsi))
		return 1;

	/* fast path: p points directly into this vhost's protocols array */
	if (p > vp && p < &vp[wsi->a.vhost->count_protocols])
		lws_same_vh_protocol_insert(wsi, (int)(p - vp));
	else {
		/* otherwise, look it up by name in the vhost protocol list */
		int n = wsi->a.vhost->count_protocols;
		int hit = 0;

		vpo = vp;

		while (n--) {
			if (p->name && vp->name && !strcmp(p->name, vp->name)) {
				hit = 1;
				lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
				break;
			}
			vp++;
		}
		if (!hit)
			lwsl_err("%s: %p is not in vhost '%s' protocols list\n",
				 __func__, p, wsi->a.vhost->name);
	}

	/* tell the new protocol it is now bound to this wsi */
	if (wsi->a.protocol->callback(wsi, wsi->role_ops->protocol_bind_cb[
				      !!lwsi_role_server(wsi)],
				      wsi->user_space, NULL, 0))
		return 1;

	/* mark that a matching unbind callback is now owed */
	wsi->protocol_bind_balance = 1;

	return 0;
}
1288 | | |
1289 | | void |
1290 | | lws_http_close_immortal(struct lws *wsi) |
1291 | 0 | { |
1292 | 0 | struct lws *nwsi; |
1293 | |
|
1294 | 0 | if (!wsi->mux_substream) |
1295 | 0 | return; |
1296 | | |
1297 | 0 | assert(wsi->mux_stream_immortal); |
1298 | 0 | wsi->mux_stream_immortal = 0; |
1299 | |
|
1300 | 0 | nwsi = lws_get_network_wsi(wsi); |
1301 | 0 | lwsl_wsi_debug(wsi, "%s (%d)", lws_wsi_tag(nwsi), |
1302 | 0 | nwsi->immortal_substream_count); |
1303 | 0 | assert(nwsi->immortal_substream_count); |
1304 | 0 | nwsi->immortal_substream_count--; |
1305 | 0 | if (!nwsi->immortal_substream_count) |
1306 | | /* |
1307 | | * since we closed the only immortal stream on this nwsi, we |
1308 | | * need to reapply a normal timeout regime to the nwsi |
1309 | | */ |
1310 | 0 | lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE, |
1311 | 0 | lws_wsi_keepalive_timeout_eff(wsi)); |
1312 | 0 | } |
1313 | | |
/*
 * Exempt a mux (h2 / mqtt) substream from timeout handling, eg, for SSE
 * streams that legitimately stay open indefinitely.  The first immortal
 * child also suspends the timeout on the underlying network wsi; see
 * lws_http_close_immortal() for the reverse operation.
 */
void
lws_mux_mark_immortal(struct lws *wsi)
{
	struct lws *nwsi;

	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);

	/* only meaningful for mux substreams (server, or client side) */
	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
	    && !wsi->client_mux_substream
#endif
	) {
		// lwsl_wsi_err(wsi, "not mux substream");
		return;
	}

	if (wsi->mux_stream_immortal)
		/* only need to handle it once per child wsi */
		return;

	nwsi = lws_get_network_wsi(wsi);
	if (!nwsi)
		return;

	lwsl_wsi_debug(wsi, "%s (%d)\n", lws_wsi_tag(nwsi),
			    nwsi->immortal_substream_count);

	wsi->mux_stream_immortal = 1;
	assert(nwsi->immortal_substream_count < 255); /* largest count */
	nwsi->immortal_substream_count++;
	/* first immortal child also suspends the network wsi's timeout */
	if (nwsi->immortal_substream_count == 1)
		lws_set_timeout(nwsi, NO_PENDING_TIMEOUT, 0);
}
1347 | | |
/*
 * Mark this connection as carrying Server-Sent Events: detach the header
 * allocation (no more header parsing needed) and make the stream immortal
 * since SSE stays open indefinitely.  NULL wsi is tolerated.  Returns 0.
 */
int
lws_http_mark_sse(struct lws *wsi)
{
	if (!wsi)
		return 0;

	lws_http_headers_detach(wsi);
	lws_mux_mark_immortal(wsi);

	/* on h2, note the substream carries SSE framing */
	if (wsi->mux_substream)
		wsi->h2_stream_carries_sse = 1;

	return 0;
}
1362 | | |
1363 | | #if defined(LWS_WITH_CLIENT) |
1364 | | |
/*
 * Return a client connect-time string: prefer the wsi's generic client
 * stash slot stash_idx; otherwise fall back to the ah (header allocation)
 * copy under token hdr_idx.  May return NULL when neither is present.
 */
const char *
lws_wsi_client_stash_item(struct lws *wsi, int stash_idx, int hdr_idx)
{
	/* try the generic client stash */
	if (wsi->stash)
		return wsi->stash->cis[stash_idx];

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* if not, use the ah stash if applicable */
	return lws_hdr_simple_ptr(wsi, (enum lws_token_indexes)hdr_idx);
#else
	return NULL;
#endif
}
1379 | | #endif |
1380 | | |
1381 | | int |
1382 | | lws_wsi_keepalive_timeout_eff(struct lws *wsi) |
1383 | 0 | { |
1384 | 0 | int ds = wsi->a.vhost->keepalive_timeout; |
1385 | |
|
1386 | 0 | #if defined(LWS_WITH_SERVER) |
1387 | 0 | if (wsi->http.mount_specific_keepalive_timeout_secs) |
1388 | 0 | ds = (int)wsi->http.mount_specific_keepalive_timeout_secs; |
1389 | |
|
1390 | 0 | if (wsi->parent && (int)wsi->parent->http.mount_specific_keepalive_timeout_secs > ds) |
1391 | 0 | ds = (int)wsi->parent->http.mount_specific_keepalive_timeout_secs; |
1392 | 0 | #endif |
1393 | |
|
1394 | 0 | if (!ds) |
1395 | 0 | ds = 31; |
1396 | | |
1397 | | // lwsl_wsi_notice(wsi, "Eff keepalive_timeout %ds ===================\n", ds); |
1398 | |
|
1399 | 0 | return ds; |
1400 | 0 | } |
1401 | | |
1402 | | #if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT) |
1403 | | |
/*
 * Attach wsi as a mux child (stream) of parent_wsi under stream id sid,
 * pushing it at the head of the parent's child list and adopting the
 * parent's role ops.
 */
void
lws_wsi_mux_insert(struct lws *wsi, struct lws *parent_wsi, unsigned int sid)
{
	lwsl_wsi_info(wsi, "par %s: assign sid %d (curr %d)",
			   lws_wsi_tag(parent_wsi), sid, wsi->mux.my_sid);

	/* once assigned, a wsi's stream id must never change */
	if (wsi->mux.my_sid && wsi->mux.my_sid != (unsigned int)sid)
		assert(0);

	wsi->mux.my_sid = sid;
	wsi->mux.parent_wsi = parent_wsi;
	/* the child stream speaks the same role protocol as its parent */
	wsi->role_ops = parent_wsi->role_ops;

	/* new guy's sibling is whoever was the first child before */
	wsi->mux.sibling_list = parent_wsi->mux.child_list;

	/* first child is now the new guy */
	parent_wsi->mux.child_list = wsi;

	parent_wsi->mux.child_count++;
}
1425 | | |
1426 | | struct lws * |
1427 | | lws_wsi_mux_from_id(struct lws *parent_wsi, unsigned int sid) |
1428 | 0 | { |
1429 | 0 | lws_start_foreach_ll(struct lws *, wsi, parent_wsi->mux.child_list) { |
1430 | 0 | if (wsi->mux.my_sid == sid) |
1431 | 0 | return wsi; |
1432 | 0 | } lws_end_foreach_ll(wsi, mux.sibling_list); |
1433 | |
|
1434 | 0 | return NULL; |
1435 | 0 | } |
1436 | | |
/*
 * Debug helper (debug builds only): log each sibling of wsi under its mux
 * parent at INFO level, sanity-checking the sibling list as we go.
 */
void
lws_wsi_mux_dump_children(struct lws *wsi)
{
#if defined(_DEBUG)
	/* nothing to dump without a parent, skip work if INFO is filtered */
	if (!wsi->mux.parent_wsi || !lwsl_visible(LLL_INFO))
		return;

	lws_start_foreach_llp(struct lws **, w,
			      wsi->mux.parent_wsi->mux.child_list) {
		lwsl_wsi_info(wsi, " \\---- child %s %s\n",
			      (*w)->role_ops ? (*w)->role_ops->name : "?",
			      lws_wsi_tag(*w));
		/* a child pointing at itself would mean list corruption */
		assert(*w != (*w)->mux.sibling_list);
	} lws_end_foreach_llp(w, mux.sibling_list);
#endif
}
1453 | | |
/*
 * Close and free every mux child of wsi with the given close reason.  Each
 * child is detached from the sibling list before its close, so the
 * (potentially recursive) close path never walks a half-destroyed list.
 */
void
lws_wsi_mux_close_children(struct lws *wsi, int reason)
{
	struct lws *wsi2;
	struct lws **w;

	if (!wsi->mux.child_list)
		return;

	w = &wsi->mux.child_list;
	while (*w) {
		lwsl_wsi_info((*w), " closing child");
		/* disconnect from siblings */
		wsi2 = (*w)->mux.sibling_list;
		assert (wsi2 != *w);
		(*w)->mux.sibling_list = NULL;
		/* no graceful shutdown attempt on the child's transport */
		(*w)->socket_is_permanently_unusable = 1;
		__lws_close_free_wsi(*w, (enum lws_close_status)reason, "mux child recurse");
		*w = wsi2;
	}
}
1475 | | |
1476 | | |
/*
 * Unlink wsi from its mux parent's child (sibling) list, decrement the
 * parent's child count and drop the parent relationship.  Assumes
 * wsi->mux.parent_wsi is non-NULL.
 */
void
lws_wsi_mux_sibling_disconnect(struct lws *wsi)
{
	struct lws *wsi2;

	lws_start_foreach_llp(struct lws **, w,
			      wsi->mux.parent_wsi->mux.child_list) {

		/* disconnect from siblings */
		if (*w == wsi) {
			wsi2 = (*w)->mux.sibling_list;
			(*w)->mux.sibling_list = NULL;
			*w = wsi2;
			lwsl_wsi_debug(wsi, " disentangled from sibling %s",
				       lws_wsi_tag(wsi2));
			break;
		}
	} lws_end_foreach_llp(w, mux.sibling_list);
	wsi->mux.parent_wsi->mux.child_count--;

	wsi->mux.parent_wsi = NULL;
}
1499 | | |
/*
 * Debug helper (debug builds only): list every mux child of wsi along with
 * whether it has requested POLLOUT service, its sid, state, role and
 * protocol names.
 */
void
lws_wsi_mux_dump_waiting_children(struct lws *wsi)
{
#if defined(_DEBUG)
	struct lws *w;

	lwsl_info("%s: %s: children waiting for POLLOUT service:\n",
		  __func__, lws_wsi_tag(wsi));

	for (w = wsi->mux.child_list; w; w = w->mux.sibling_list)
		lwsl_wsi_info(w, " %c sid %u: 0x%x %s %s",
			      w->mux.requested_POLLOUT ? '*' : ' ',
			      w->mux.my_sid, lwsi_state(w),
			      w->role_ops->name,
			      w->a.protocol ? w->a.protocol->name :
					      "noprotocol");
#endif
}
1519 | | |
1520 | | int |
1521 | | lws_wsi_mux_mark_parents_needing_writeable(struct lws *wsi) |
1522 | 0 | { |
1523 | 0 | struct lws /* *network_wsi = lws_get_network_wsi(wsi), */ *wsi2; |
1524 | | //int already = network_wsi->mux.requested_POLLOUT; |
1525 | | |
1526 | | /* mark everybody above him as requesting pollout */ |
1527 | |
|
1528 | 0 | wsi2 = wsi; |
1529 | 0 | while (wsi2) { |
1530 | 0 | wsi2->mux.requested_POLLOUT = 1; |
1531 | 0 | lwsl_wsi_info(wsi2, "sid %u, pending writable", |
1532 | 0 | wsi2->mux.my_sid); |
1533 | 0 | wsi2 = wsi2->mux.parent_wsi; |
1534 | 0 | } |
1535 | |
|
1536 | 0 | return 0; // already; |
1537 | 0 | } |
1538 | | |
/*
 * Fairness helper: move the child at *wsi2 to the tail of its sibling list
 * (so it is serviced last next time), clear the chosen child's POLLOUT
 * request flag and return it.  If the child is already last, it is simply
 * chosen in place.  Returns NULL if the list at *wsi2 is empty.
 */
struct lws *
lws_wsi_mux_move_child_to_tail(struct lws **wsi2)
{
	struct lws *w = *wsi2;

	/* walk to the current tail of the sibling list */
	while (w) {
		if (!w->mux.sibling_list) { /* w is the current last */
			lwsl_wsi_debug(w, "*wsi2 = %s\n", lws_wsi_tag(*wsi2));

			if (w == *wsi2) /* we are already last */
				break;

			/* last points to us as new last */
			w->mux.sibling_list = *wsi2;

			/* guy pointing to us until now points to
			 * our old next */
			*wsi2 = (*wsi2)->mux.sibling_list;

			/* we point to nothing because we are last */
			w->mux.sibling_list->mux.sibling_list = NULL;

			/* w becomes us */
			w = w->mux.sibling_list;
			break;
		}
		w = w->mux.sibling_list;
	}

	/* clear the waiting for POLLOUT on the guy that was chosen */

	if (w)
		w->mux.requested_POLLOUT = 0;

	return w;
}
1575 | | |
1576 | | int |
1577 | | lws_wsi_mux_action_pending_writeable_reqs(struct lws *wsi) |
1578 | 0 | { |
1579 | 0 | struct lws *w = wsi->mux.child_list; |
1580 | |
|
1581 | 0 | while (w) { |
1582 | 0 | if (w->mux.requested_POLLOUT) { |
1583 | 0 | if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) |
1584 | 0 | return -1; |
1585 | 0 | return 0; |
1586 | 0 | } |
1587 | 0 | w = w->mux.sibling_list; |
1588 | 0 | } |
1589 | | |
1590 | 0 | if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) |
1591 | 0 | return -1; |
1592 | | |
1593 | 0 | return 0; |
1594 | 0 | } |
1595 | | |
1596 | | int |
1597 | | lws_wsi_txc_check_skint(struct lws_tx_credit *txc, int32_t tx_cr) |
1598 | 0 | { |
1599 | 0 | if (txc->tx_cr <= 0) { |
1600 | | /* |
1601 | | * If other side is not able to cope with us sending any DATA |
1602 | | * so no matter if we have POLLOUT on our side if it's DATA we |
1603 | | * want to send. |
1604 | | */ |
1605 | |
|
1606 | 0 | if (!txc->skint) |
1607 | 0 | lwsl_info("%s: %p: skint (%d)\n", __func__, txc, |
1608 | 0 | (int)txc->tx_cr); |
1609 | |
|
1610 | 0 | txc->skint = 1; |
1611 | |
|
1612 | 0 | return 1; |
1613 | 0 | } |
1614 | | |
1615 | 0 | if (txc->skint) |
1616 | 0 | lwsl_info("%s: %p: unskint (%d)\n", __func__, txc, |
1617 | 0 | (int)txc->tx_cr); |
1618 | |
|
1619 | 0 | txc->skint = 0; |
1620 | |
|
1621 | 0 | return 0; |
1622 | 0 | } |
1623 | | |
1624 | | #if defined(_DEBUG) |
/*
 * Debug helper: log the tx credit state for stream sid at call site "at",
 * including both directions of the flow control window and skint status.
 */
void
lws_wsi_txc_describe(struct lws_tx_credit *txc, const char *at, uint32_t sid)
{
	lwsl_info("%s: %p: %s: sid %d: %speer-to-us: %d, us-to-peer: %d\n",
		  __func__, txc, at, (int)sid, txc->skint ? "SKINT, " : "",
		  (int)txc->peer_tx_cr_est, (int)txc->tx_cr);
}
1632 | | #endif |
1633 | | |
1634 | | int |
1635 | | lws_wsi_tx_credit(struct lws *wsi, char peer_to_us, int add) |
1636 | 0 | { |
1637 | 0 | if (wsi->role_ops && lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit)) |
1638 | 0 | return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit). |
1639 | 0 | tx_credit(wsi, peer_to_us, add); |
1640 | | |
1641 | 0 | return 0; |
1642 | 0 | } |
1643 | | |
1644 | | /* |
1645 | | * Let the protocol know about incoming tx credit window updates if it's |
1646 | | * managing the flow control manually (it may want to proxy this information) |
1647 | | */ |
1648 | | |
/*
 * Report an incoming tx credit window bump to the protocol via the
 * LWS_CALLBACK_WSI_TX_CREDIT_GET callback, but only when the protocol has
 * opted into manual flow control management.  Returns the callback result,
 * or 0 when not managed manually.
 */
int
lws_wsi_txc_report_manual_txcr_in(struct lws *wsi, int32_t bump)
{
	if (!wsi->txc.manual)
		/*
		 * If we don't care about managing it manually, no need to
		 * report it
		 */
		return 0;

	return user_callback_handle_rxflow(wsi->a.protocol->callback,
					   wsi, LWS_CALLBACK_WSI_TX_CREDIT_GET,
					   wsi->user_space, NULL, (size_t)bump);
}
1663 | | |
1664 | | #if defined(LWS_WITH_CLIENT) |
1665 | | |
/*
 * Promote queued pipelined client transactions on wsi to real mux child
 * streams (h2 or mqtt), under the context and vhost locks.  Each eligible
 * queued wsi is removed from the transaction queue and adopted as a child
 * stream of wsi.  Returns 0.
 */
int
lws_wsi_mux_apply_queue(struct lws *wsi)
{
	/* we have a transaction queue that wants to pipeline */

	lws_context_lock(wsi->a.context, __func__); /* -------------- cx { */
	lws_vhost_lock(wsi->a.vhost);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *w = lws_container_of(d, struct lws,
						 dll2_cli_txn_queue);

#if defined(LWS_ROLE_H2)
		if (lwsi_role_http(wsi) &&
		    lwsi_state(w) == LRS_H2_WAITING_TO_SEND_HEADERS) {
			lwsl_wsi_info(w, "cli pipeq to be h2");

			lwsi_set_state(w, LRS_H1C_ISSUE_HANDSHAKE2);

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an h2 stream */
			lws_wsi_h2_adopt(wsi, w);
		}
#endif

#if defined(LWS_ROLE_MQTT)
		/* NOTE(review): this branch gates on the parent wsi being
		 * ESTABLISHED, unlike the h2 branch which checks the queued
		 * child's state — presumed intentional, verify vs mqtt role */
		if (lwsi_role_mqtt(wsi) &&
		    lwsi_state(wsi) == LRS_ESTABLISHED) {
			lwsl_wsi_info(w, "cli pipeq to be mqtt\n");

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an mqtt stream */
			lws_wsi_mqtt_adopt(wsi, w);
		}
#endif

	} lws_end_foreach_dll_safe(d, d1);

	lws_vhost_unlock(wsi->a.vhost);
	lws_context_unlock(wsi->a.context); /* } cx -------------- */

	return 0;
}
1714 | | |
1715 | | #endif |
1716 | | |
1717 | | #endif |