/src/libwebsockets/lib/core-net/close.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include "private-lib-core.h" |
26 | | #include "private-lib-async-dns.h" |
27 | | |
#if defined(LWS_WITH_CLIENT)
/*
 * lws_dll2_foreach_safe() callback: a transaction-queue owner is going
 * down, so close each wsi still queued on it.
 *
 * \param d	list node embedded in the queued wsi (dll2_cli_txn_queue)
 * \param user	unused foreach cookie
 * \return 0 to keep iterating
 */
static int
lws_close_trans_q_leader(struct lws_dll2 *d, void *user)
{
	struct lws *queued = lws_container_of(d, struct lws,
					      dll2_cli_txn_queue);

	(void)user;

	__lws_close_free_wsi(queued, (enum lws_close_status)-1,
			     "trans q leader closing");

	return 0;
}
#endif
39 | | |
/*
 * Detach a wsi from every list it may be on and free every per-connection
 * allocation it owns, so nothing from its previous life carries over if the
 * struct is reused; the wsi itself is NOT freed here.
 *
 * NOTE(review): the __ prefix suggests the caller already holds the
 * context/pt locks — confirm against callers; only the vhost lock is taken
 * explicitly below.
 */
void
__lws_reset_wsi(struct lws *wsi)
{
	if (!wsi)
		return;

#if defined(LWS_WITH_CLIENT)

	lws_free_set_NULL(wsi->cli_hostname_copy);

#if defined(LWS_WITH_CONMON)

	/* drop any captured connection-monitoring DNS results and counters */
	if (wsi->conmon.dns_results_copy) {
		lws_conmon_addrinfo_destroy(wsi->conmon.dns_results_copy);
		wsi->conmon.dns_results_copy = NULL;
	}

	wsi->conmon.ciu_dns =
		wsi->conmon.ciu_sockconn =
		wsi->conmon.ciu_tls =
		wsi->conmon.ciu_txn_resp = 0;
#endif

	/*
	 * if we have wsi in our transaction queue, if we are closing we
	 * must go through and close all those first
	 */
	if (wsi->a.vhost) {

		/* we are no longer an active client connection that can piggyback */
		lws_dll2_remove(&wsi->dll_cli_active_conns);

		lws_dll2_foreach_safe(&wsi->dll2_cli_txn_queue_owner, NULL,
				      lws_close_trans_q_leader);

		/*
		 * !!! If we are closing, but we have pending pipelined
		 * transaction results we already sent headers for, that's going
		 * to destroy sync for HTTP/1 and leave H2 stream with no live
		 * swsi.
		 *
		 * However this is normal if we are being closed because the
		 * transaction queue leader is closing.
		 */
		lws_dll2_remove(&wsi->dll2_cli_txn_queue);
	}
#endif

	/* leave any vhost list of sockets awaiting a connection */
	if (wsi->a.vhost) {
		lws_vhost_lock(wsi->a.vhost);
		lws_dll2_remove(&wsi->vh_awaiting_socket);
		lws_vhost_unlock(wsi->a.vhost);
	}

	/*
	 * Protocol user data may be allocated either internally by lws
	 * or by specified the user. We should only free what we allocated.
	 */
	if (wsi->a.protocol && wsi->a.protocol->per_session_data_size &&
	    wsi->user_space && !wsi->user_space_externally_allocated) {
		/* confirm no sul left scheduled in user data itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->user_space,
				wsi->a.protocol->per_session_data_size, __func__);
		lws_free_set_NULL(wsi->user_space);
	}

	/*
	 * Don't let buflist content or state from the wsi's previous life
	 * carry over to the new life
	 */

	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll2_remove(&wsi->dll_buflist);
	lws_buflist_destroy_all_segments(&wsi->buflist_out);
#if defined(LWS_WITH_UDP)
	if (wsi->udp) {
		/* confirm no sul left scheduled in wsi->udp itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->udp,
				      sizeof(*wsi->udp), "close udp wsi");
		lws_free_set_NULL(wsi->udp);
	}
#endif
	wsi->retry = 0;
	wsi->mount_hit = 0;

#if defined(LWS_WITH_CLIENT)
	/*
	 * NOTE(review): these repeat removals / frees done above — presumably
	 * harmless because the helpers tolerate already-detached / NULL state,
	 * but the duplication looks like it could be consolidated.
	 */
	lws_dll2_remove(&wsi->dll2_cli_txn_queue);
	lws_dll2_remove(&wsi->dll_cli_active_conns);
	if (wsi->cli_hostname_copy)
		lws_free_set_NULL(wsi->cli_hostname_copy);
#endif

#if defined(LWS_WITH_SYS_ASYNC_DNS)
	lws_async_dns_cancel(wsi);
#endif

#if defined(LWS_WITH_HTTP_PROXY)
	if (wsi->http.buflist_post_body)
		lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body);
#endif

#if defined(LWS_WITH_HTTP_DIGEST_AUTH)
	if (wsi->http.digest_auth_hdr) {
		lws_free(wsi->http.digest_auth_hdr);
		wsi->http.digest_auth_hdr = NULL;
	}
#endif

#if defined(LWS_WITH_SERVER)
	lws_dll2_remove(&wsi->listen_list);
#endif

#if defined(LWS_WITH_CLIENT)
	if (wsi->a.vhost)
		lws_dll2_remove(&wsi->dll_cli_active_conns);
#endif

	__lws_same_vh_protocol_remove(wsi);
#if defined(LWS_WITH_CLIENT)
	//lws_free_set_NULL(wsi->stash);
	lws_free_set_NULL(wsi->cli_hostname_copy);
#endif

#if defined(LWS_WITH_PEER_LIMITS)
	lws_peer_track_wsi_close(wsi->a.context, wsi->peer);
	wsi->peer = NULL;
#endif

	/* since we will destroy the wsi, make absolutely sure now */

#if defined(LWS_WITH_OPENSSL)
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
#endif
	__lws_wsi_remove_from_sul(wsi);

	/* give the role a chance to free role-private state */
	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_destroy_role))
		lws_rops_func_fidx(wsi->role_ops,
				   LWS_ROPS_destroy_role).destroy_role(wsi);

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	__lws_header_table_detach(wsi, 0);
#endif

#if defined(LWS_ROLE_H2)
	/*
	 * Let's try to clean out the h2-ness of the wsi
	 */

	memset(&wsi->h2, 0, sizeof(wsi->h2));

	wsi->hdr_parsing_completed = wsi->mux_substream =
	wsi->upgraded_to_http2 = wsi->mux_stream_immortal =
	wsi->h2_acked_settings = wsi->seen_nonpseudoheader =
	wsi->socket_is_permanently_unusable = wsi->favoured_pollin =
	wsi->already_did_cce = wsi->told_user_closed =
	wsi->waiting_to_send_close_frame = wsi->close_needs_ack =
	wsi->parent_pending_cb_on_writable = wsi->seen_zero_length_recv =
	wsi->close_when_buffered_out_drained = wsi->could_have_pending = 0;
#endif

#if defined(LWS_WITH_CLIENT)
	/* clear all the client-transaction flag state too */
	wsi->do_ws = wsi->chunked = wsi->client_rx_avail =
	wsi->client_http_body_pending = wsi->transaction_from_pipeline_queue =
	wsi->keepalive_active = wsi->keepalive_rejected =
	wsi->redirected_to_get = wsi->client_pipeline = wsi->client_h2_alpn =
	wsi->client_mux_substream = wsi->client_mux_migrated =
	wsi->tls_session_reused = wsi->perf_done = 0;

	wsi->immortal_substream_count = 0;
#endif
}
211 | | |
/* req cx lock */

/*
 * Final destruction of a wsi: sever any Secure Streams back-pointers to us,
 * reset / detach all per-connection state via __lws_reset_wsi(), unbind from
 * the vhost (which may destroy the vhost), let the event loop release its
 * per-wsi state, and free the struct itself.
 */
void
__lws_free_wsi(struct lws *wsi)
{
	struct lws_vhost *vh;

	if (!wsi)
		return;

	lws_context_assert_lock_held(wsi->a.context);

	/* just in case */
	lws_dll2_remove(&wsi->pre_natal);

#if defined(LWS_WITH_SECURE_STREAMS)
	if (wsi->for_ss) {

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
		if (wsi->client_bound_sspc) {
			lws_sspc_handle_t *h = (lws_sspc_handle_t *)
						wsi->a.opaque_user_data;
			if (h) {
				/* the sspc handle must not point at a freed wsi */
				h->txp_path.priv_onw = NULL;
				wsi->a.opaque_user_data = NULL;
			}
		} else
#endif
		{
			/*
			 * Make certain it is disconnected from the ss by now
			 */
			lws_ss_handle_t *h = (lws_ss_handle_t *)
						wsi->a.opaque_user_data;

			if (h) {
				h->wsi = NULL;
				wsi->a.opaque_user_data = NULL;
			}
		}
	}
#endif

	/* keep the vhost pointer: __lws_reset_wsi() may clear wsi list state */
	vh = wsi->a.vhost;

	__lws_reset_wsi(wsi);
	__lws_wsi_remove_from_sul(wsi);

	if (vh)
		/* this may destroy vh */
		__lws_vhost_unbind_wsi(wsi); /* req cx + vh lock */

#if defined(LWS_WITH_CLIENT)
	if (wsi->stash)
		lws_free_set_NULL(wsi->stash);
#endif

	/* let the event loop drop any per-wsi state it holds */
	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);

	lwsl_wsi_debug(wsi, "tsi fds count %d\n",
		       wsi->a.context->pt[(int)wsi->tsi].fds_count);

	/* confirm no sul left scheduled in wsi itself */
	lws_sul_debug_zombies(wsi->a.context, wsi, sizeof(*wsi), __func__);

	__lws_lc_untag(wsi->a.context, &wsi->lc);
	lws_free(wsi);
}
281 | | |
282 | | |
283 | | void |
284 | | lws_remove_child_from_any_parent(struct lws *wsi) |
285 | 0 | { |
286 | 0 | struct lws **pwsi; |
287 | 0 | int seen = 0; |
288 | |
|
289 | 0 | if (!wsi->parent) |
290 | 0 | return; |
291 | | |
292 | | /* detach ourselves from parent's child list */ |
293 | 0 | pwsi = &wsi->parent->child_list; |
294 | 0 | while (*pwsi) { |
295 | 0 | if (*pwsi == wsi) { |
296 | 0 | lwsl_wsi_info(wsi, "detach from parent %s", |
297 | 0 | lws_wsi_tag(wsi->parent)); |
298 | |
|
299 | 0 | if (wsi->parent->a.protocol) |
300 | 0 | wsi->parent->a.protocol->callback(wsi, |
301 | 0 | LWS_CALLBACK_CHILD_CLOSING, |
302 | 0 | wsi->parent->user_space, wsi, 0); |
303 | |
|
304 | 0 | *pwsi = wsi->sibling_list; |
305 | 0 | seen = 1; |
306 | 0 | break; |
307 | 0 | } |
308 | 0 | pwsi = &(*pwsi)->sibling_list; |
309 | 0 | } |
310 | 0 | if (!seen) |
311 | 0 | lwsl_wsi_err(wsi, "failed to detach from parent"); |
312 | |
|
313 | 0 | wsi->parent = NULL; |
314 | 0 | } |
315 | | |
#if defined(LWS_WITH_CLIENT)
/*
 * Report a failed client connection attempt to user code, at most once per
 * wsi lifetime.  Any pending sorted DNS results are freed first; then
 * LWS_CALLBACK_CLIENT_CONNECTION_ERROR is issued unless no protocol is bound
 * or the wsi was marked to suppress it.
 */
void
lws_inform_client_conn_fail(struct lws *wsi, void *arg, size_t len)
{
	lws_addrinfo_clean(wsi);

	/* only ever do CONNECTION_ERROR once per wsi */
	if (wsi->already_did_cce)
		return;

	wsi->already_did_cce = 1;

	if (!wsi->a.protocol || wsi->client_suppress_CONNECTION_ERROR)
		return;

	wsi->a.protocol->callback(wsi, LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
				  wsi->user_space, arg, len);
}
#endif
336 | | |
/*
 * Free every entry on the wsi's sorted DNS results list.  Only meaningful on
 * client builds; compiles to an empty function otherwise.
 */
void
lws_addrinfo_clean(struct lws *wsi)
{
#if defined(LWS_WITH_CLIENT)
	struct lws_dll2 *d;

	/* pop entries off the head until the list is empty */
	while ((d = lws_dll2_get_head(&wsi->dns_sorted_list)) != NULL) {
		lws_dns_sort_t *entry =
			lws_container_of(d, lws_dns_sort_t, list);

		lws_dll2_remove(d);
		lws_free(entry);
	}
#endif
}
354 | | |
/* requires cx and pt lock */

/*
 * Begin closing a connection.  Depending on the wsi state this may:
 *  - defer (return) while buffered output is flushed, re-entering later,
 *  - stage a polite TLS / socket shutdown with a timeout and return,
 *  - hand the close to the role protocol (close_via_role_protocol),
 *  - or fall straight through to killing the connection, issuing the
 *    appropriate user callbacks (CLOSED vs CLIENT_CONNECTION_ERROR are
 *    mutually exclusive) and finishing via __lws_close_free_wsi_final().
 *
 * \param wsi	 connection to close (NULL tolerated)
 * \param reason close status; LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
 *		 forces the immediate path
 * \param caller textual tag of the call site, for logging
 */
void
__lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason,
		     const char *caller)
{
	struct lws_context_per_thread *pt;
	const struct lws_protocols *pro;
#if defined(LWS_WITH_SECURE_STREAMS)
	lws_ss_handle_t *hh = NULL;
#endif
	struct lws_context *context;
	struct lws *wsi1, *wsi2;
	int n, ccb;

	if (!wsi)
		return;

	lwsl_wsi_info(wsi, "caller: %s", caller);

	lws_access_log(wsi);

	if (!lws_dll2_is_detached(&wsi->dll_buflist))
		lwsl_wsi_info(wsi, "going down with stuff in buflist");

	context = wsi->a.context;
	pt = &context->pt[(int)wsi->tsi];

	/* if we were the pt's service pipe, the pt must forget us */
	if (pt->pipe_wsi == wsi)
		pt->pipe_wsi = NULL;

#if defined(LWS_WITH_SYS_METRICS) && \
    (defined(LWS_WITH_CLIENT) || defined(LWS_WITH_SERVER))
	/* wsi level: only reports if dangling caliper */
	if (wsi->cal_conn.mt && wsi->cal_conn.us_start) {
		if ((lws_metrics_priv_to_pub(wsi->cal_conn.mt)->flags) & LWSMTFL_REPORT_HIST) {
			lws_metrics_caliper_report_hist(wsi->cal_conn, (struct lws *)NULL);
		} else {
			lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO);
			lws_metrics_caliper_done(wsi->cal_conn);
		}
	} else
		lws_metrics_caliper_done(wsi->cal_conn);
#endif

#if defined(LWS_WITH_SYS_ASYNC_DNS)
	/* is this wsi handling the interface to a dns server? */
	{
		lws_async_dns_server_t *dsrv =
			__lws_async_dns_server_find_wsi(&context->async_dns, wsi);

		if (dsrv)
			dsrv->wsi = NULL;
	}
#endif

	lws_pt_assert_lock_held(pt);

#if defined(LWS_WITH_CLIENT)

	lws_free_set_NULL(wsi->cli_hostname_copy);
	/* remember mux-substream-ness across the close for later logic */
	wsi->client_mux_substream_was = wsi->client_mux_substream;

	lws_addrinfo_clean(wsi);
#endif

#if defined(LWS_WITH_HTTP2)
	if (wsi->mux_stream_immortal)
		lws_http_close_immortal(wsi);
#endif

	/* if we have children, close them first */
	if (wsi->child_list) {
		wsi2 = wsi->child_list;
		while (wsi2) {
			wsi1 = wsi2->sibling_list;
//			wsi2->parent = NULL;
			/* stop it doing shutdown processing */
			wsi2->socket_is_permanently_unusable = 1;
			__lws_close_free_wsi(wsi2, reason,
					     "general child recurse");
			wsi2 = wsi1;
		}
		wsi->child_list = NULL;
	}

#if defined(LWS_ROLE_RAW_FILE)
	/* raw file wsi: no network shutdown dance, just tell user and close */
	if (wsi->role_ops == &role_ops_raw_file) {
		lws_remove_child_from_any_parent(wsi);
		__remove_wsi_socket_from_fds(wsi);
		if (wsi->a.protocol)
			wsi->a.protocol->callback(wsi, wsi->role_ops->close_cb[0],
						  wsi->user_space, NULL, 0);
		goto async_close;
	}
#endif

	/* snapshot the state so post-close logic can ask what we were */
	wsi->wsistate_pre_close = wsi->wsistate;

#ifdef LWS_WITH_CGI
	if (wsi->role_ops == &role_ops_cgi) {

//		lwsl_debug("%s: closing stdwsi index %d\n", __func__, (int)wsi->lsp_channel);

		/* we are not a network connection, but a handler for CGI io */
		if (wsi->parent && wsi->parent->http.cgi) {

			/*
			 * We need to keep the logical cgi around so we can
			 * drain it
			 */

//			if (wsi->parent->child_list == wsi && !wsi->sibling_list)
//				lws_cgi_remove_and_kill(wsi->parent);

			/* end the binding between us and network connection */
			if (wsi->parent->http.cgi && wsi->parent->http.cgi->lsp)
				wsi->parent->http.cgi->lsp->stdwsi[(int)wsi->lsp_channel] =
									NULL;
		}
		wsi->socket_is_permanently_unusable = 1;

		goto just_kill_connection;
	}

	if (wsi->http.cgi)
		lws_cgi_remove_and_kill(wsi);
#endif

#if defined(LWS_WITH_CLIENT)
	/* keep the stash if this close is really a redirect re-connect */
	if (!wsi->close_is_redirect)
		lws_free_set_NULL(wsi->stash);
#endif

	if (wsi->role_ops == &role_ops_raw_skt) {
		wsi->socket_is_permanently_unusable = 1;
		goto just_kill_connection;
	}
#if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	if (lwsi_state(wsi) == LRS_DEAD_SOCKET)
		return;

	if (wsi->socket_is_permanently_unusable ||
	    reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY ||
	    lwsi_state(wsi) == LRS_SHUTDOWN)
		goto just_kill_connection;

	switch (lwsi_state_PRE_CLOSE(wsi)) {
	case LRS_DEAD_SOCKET:
		return;

	/* we tried the polite way... */
	case LRS_WAITING_TO_SEND_CLOSE:
	case LRS_AWAITING_CLOSE_ACK:
	case LRS_RETURNED_CLOSE:
		goto just_kill_connection;

	case LRS_FLUSHING_BEFORE_CLOSE:
		/* still buffered output: keep flushing and come back later */
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		    wsi->http.comp_ctx.may_have_more
#endif
		   ) {
			lws_callback_on_writable(wsi);
			return;
		}
		lwsl_wsi_info(wsi, " end LRS_FLUSHING_BEFORE_CLOSE");
		goto just_kill_connection;
	default:
		/* first time we see buffered output: enter the flushing state */
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		    wsi->http.comp_ctx.may_have_more
#endif
		   ) {
			lwsl_wsi_info(wsi, "LRS_FLUSHING_BEFORE_CLOSE");
			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
			__lws_set_timeout(wsi,
				PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5);
			return;
		}
		break;
	}

	if (lwsi_state(wsi) == LRS_WAITING_CONNECT ||
	    lwsi_state(wsi) == LRS_WAITING_DNS ||
	    lwsi_state(wsi) == LRS_H1C_ISSUE_HANDSHAKE)
		goto just_kill_connection;

	/* unbind the protocol while the user_space is still intact */
	if (!wsi->told_user_closed && wsi->user_space && wsi->a.protocol &&
	    wsi->protocol_bind_balance) {
		wsi->a.protocol->callback(wsi,
				wsi->role_ops->protocol_unbind_cb[
					!!lwsi_role_server(wsi)],
				wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

	/*
	 * signal we are closing, lws_write will
	 * add any necessary version-specific stuff.  If the write fails,
	 * no worries we are closing anyway.  If we didn't initiate this
	 * close, then our state has been changed to
	 * LRS_RETURNED_CLOSE and we will skip this.
	 *
	 * Likewise if it's a second call to close this connection after we
	 * sent the close indication to the peer already, we are in state
	 * LRS_AWAITING_CLOSE_ACK and will skip doing this a second time.
	 */

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol) &&
	    lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol).
					close_via_role_protocol(wsi, reason)) {
		lwsl_wsi_info(wsi, "close_via_role took over (sockfd %d)",
			      wsi->desc.sockfd);
		return;
	}

just_kill_connection:

	lwsl_wsi_debug(wsi, "real just_kill_connection A: (sockfd %d)",
		       wsi->desc.sockfd);

#if defined(LWS_WITH_THREADPOOL) && defined(LWS_HAVE_PTHREAD_H)
	lws_threadpool_wsi_closing(wsi);
#endif

#if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	lws_sul_cancel(&wsi->sul_connect_timeout);
#if defined(WIN32)
	lws_sul_cancel(&wsi->win32_sul_connect_async_check);
#endif
#if defined(LWS_WITH_SYS_ASYNC_DNS)
	lws_async_dns_cancel(wsi);
#endif

#if defined(LWS_WITH_HTTP_PROXY)
	if (wsi->http.buflist_post_body)
		lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body);
#endif
#if defined(LWS_WITH_UDP)
	if (wsi->udp) {
		/* confirm no sul left scheduled in wsi->udp itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->udp,
				      sizeof(*wsi->udp), "close udp wsi");

		lws_free_set_NULL(wsi->udp);
	}
#endif

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_kill_connection))
		lws_rops_func_fidx(wsi->role_ops,
				   LWS_ROPS_close_kill_connection).
					close_kill_connection(wsi, reason);

	n = 0;

	/* protocol unbind again, for the paths that jumped straight here */
	if (!wsi->told_user_closed && wsi->user_space &&
	    wsi->protocol_bind_balance && wsi->a.protocol) {
		lwsl_debug("%s: %s: DROP_PROTOCOL %s\n", __func__, lws_wsi_tag(wsi),
			   wsi->a.protocol ? wsi->a.protocol->name: "NULL");
		if (wsi->a.protocol)
			wsi->a.protocol->callback(wsi,
				wsi->role_ops->protocol_unbind_cb[
					!!lwsi_role_server(wsi)],
				wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

#if defined(LWS_WITH_CLIENT)
	if ((
#if defined(LWS_ROLE_WS)
	     /*
	      * If our goal is a ws upgrade, effectively we did not reach
	      * ESTABLISHED if we did not get the upgrade server reply
	      */
	     (lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY &&
	      wsi->role_ops == &role_ops_ws) ||
#endif
	     lwsi_state(wsi) == LRS_WAITING_DNS ||
	     lwsi_state(wsi) == LRS_WAITING_CONNECT) &&
	    !wsi->already_did_cce && wsi->a.protocol &&
	    !wsi->close_is_redirect) {
		static const char _reason[] = "closed before established";

		lwsl_wsi_debug(wsi, "closing in unestablished state 0x%x",
			       lwsi_state(wsi));
		wsi->socket_is_permanently_unusable = 1;

		lws_inform_client_conn_fail(wsi,
					    (void *)_reason, sizeof(_reason) - 1);
	}
#endif

	/*
	 * Testing with ab shows that we have to stage the socket close when
	 * the system is under stress... shutdown any further TX, change the
	 * state to one that won't emit anything more, and wait with a timeout
	 * for the POLLIN to show a zero-size rx before coming back and doing
	 * the actual close.
	 */
	if (wsi->role_ops != &role_ops_raw_skt && !lwsi_role_client(wsi) &&
	    lwsi_state(wsi) != LRS_SHUTDOWN &&
	    lwsi_state(wsi) != LRS_UNCONNECTED &&
	    reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY &&
	    !wsi->socket_is_permanently_unusable) {

#if defined(LWS_WITH_TLS)
		if (lws_is_ssl(wsi) && wsi->tls.ssl) {
			n = 0;
			switch (__lws_tls_shutdown(wsi)) {
			case LWS_SSL_CAPABLE_DONE:
			case LWS_SSL_CAPABLE_ERROR:
			case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
			case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
			case LWS_SSL_CAPABLE_MORE_SERVICE:
				/*
				 * NOTE(review): lsp_channel appears reused
				 * here as a shutdown-retry counter — confirm
				 * this cannot collide with its CGI use
				 */
				if (wsi->lsp_channel++ == 8) {
					lwsl_wsi_info(wsi, "avoiding shutdown spin");
					lwsi_set_state(wsi, LRS_SHUTDOWN);
				}
				break;
			}
		} else
#endif
		{
			lwsl_info("%s: shutdown conn: %s (sk %d, state 0x%x)\n",
				  __func__, lws_wsi_tag(wsi), (int)(lws_intptr_t)wsi->desc.sockfd,
				  lwsi_state(wsi));
			if (!wsi->socket_is_permanently_unusable &&
			    lws_socket_is_valid(wsi->desc.sockfd)) {
				wsi->socket_is_permanently_unusable = 1;
				/* half-close: no further TX, peer sees EOF */
				n = shutdown(wsi->desc.sockfd, SHUT_WR);
			}
		}
		if (n)
			lwsl_wsi_debug(wsi, "closing: shutdown (state 0x%x) ret %d",
				       lwsi_state(wsi), LWS_ERRNO);

		/*
		 * This causes problems on WINCE / ESP32 with disconnection
		 * when the events are half closing connection
		 */
#if !defined(_WIN32_WCE) && !defined(LWS_PLAT_FREERTOS)
		/* libuv: no event available to guarantee completion */
		if (!wsi->socket_is_permanently_unusable &&
#if defined(LWS_WITH_CLIENT)
		    !wsi->close_is_redirect &&
#endif
		    lws_socket_is_valid(wsi->desc.sockfd) &&
		    lwsi_state(wsi) != LRS_SHUTDOWN &&
		    (context->event_loop_ops->flags & LELOF_ISPOLL)) {
			__lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
			lwsi_set_state(wsi, LRS_SHUTDOWN);
			__lws_set_timeout(wsi, PENDING_TIMEOUT_SHUTDOWN_FLUSH,
					  (int)context->timeout_secs);

			return;
		}
#endif
	}

	lwsl_wsi_info(wsi, "real just_kill_connection: sockfd %d\n",
		      wsi->desc.sockfd);

#ifdef LWS_WITH_HUBBUB
	if (wsi->http.rw) {
		lws_rewrite_destroy(wsi->http.rw);
		wsi->http.rw = NULL;
	}
#endif

	if (wsi->http.pending_return_headers)
		lws_free_set_NULL(wsi->http.pending_return_headers);

	/*
	 * we won't be servicing or receiving anything further from this guy
	 * delete socket from the internal poll list if still present
	 */
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
	__lws_wsi_remove_from_sul(wsi);

	//if (wsi->told_event_loop_closed) // cgi std close case (dummy-callback)
	//	return;

	/* checking return redundant since we anyway close */
	__remove_wsi_socket_from_fds(wsi);

	lwsi_set_state(wsi, LRS_DEAD_SOCKET);
	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll2_remove(&wsi->dll_buflist);

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_role))
		lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_role).
							close_role(pt, wsi);

	/* tell the user it's all over for this guy */

	ccb = 0;
	if ((lwsi_state_est_PRE_CLOSE(wsi) ||
	     /* raw skt adopted but didn't complete tls hs should CLOSE */
	     (wsi->role_ops == &role_ops_raw_skt && !lwsi_role_client(wsi)) ||
	     lwsi_state_PRE_CLOSE(wsi) == LRS_WAITING_SERVER_REPLY) &&
	    !wsi->told_user_closed &&
	    wsi->role_ops->close_cb[lwsi_role_server(wsi)]) {
		if (!wsi->upgraded_to_http2 || !lwsi_role_client(wsi))
			ccb = 1;
		/*
		 * The network wsi for a client h2 connection shouldn't
		 * call back for its role: the child stream connections
		 * own the role.  Otherwise h2 will call back closed
		 * one too many times as the children do it and then
		 * the closing network stream.
		 */
	}

	if (!wsi->told_user_closed &&
	    !lws_dll2_is_detached(&wsi->vh_awaiting_socket))
		/*
		 * He's a guy who got started with dns, but failed or is
		 * caught with a shutdown before he got the result.  We have
		 * to issue him a close cb
		 */
		ccb = 1;

	/* (log label says "cce" but this is the close-callback flag ccb) */
	lwsl_wsi_info(wsi, "cce=%d", ccb);

	pro = wsi->a.protocol;

	if (wsi->already_did_cce)
		/*
		 * If we handled this by CLIENT_CONNECTION_ERROR, it's
		 * mutually exclusive with CLOSE
		 */
		ccb = 0;

#if defined(LWS_WITH_CLIENT)
	if (!wsi->close_is_redirect && !ccb &&
	    (lwsi_state_PRE_CLOSE(wsi) & LWSIFS_NOT_EST) &&
	    lwsi_role_client(wsi)) {
		lws_inform_client_conn_fail(wsi, "Closed before conn", 18);
	}
#endif
	if (ccb
#if defined(LWS_WITH_CLIENT)
	    && !wsi->close_is_redirect
#endif
	   ) {

		/* fall back to the vhost's default protocol if none bound */
		if (!wsi->a.protocol && wsi->a.vhost && wsi->a.vhost->protocols)
			pro = &wsi->a.vhost->protocols[0];

		if (pro && pro->callback && wsi->role_ops)
			pro->callback(wsi,
				wsi->role_ops->close_cb[lwsi_role_server(wsi)],
				wsi->user_space, NULL, 0);
		wsi->told_user_closed = 1;
	}

#if defined(LWS_ROLE_RAW_FILE)
async_close:
#endif

#if defined(LWS_WITH_SECURE_STREAMS)
	if (wsi->for_ss) {
		lwsl_wsi_debug(wsi, "for_ss");
		/*
		 * We were adopted for a particular ss, but, eg, we may not
		 * have succeeded with the connection... we are closing which is
		 * good, but we have to invalidate any pointer the related ss
		 * handle may be holding on us
		 */
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)

		if (wsi->client_proxy_onward) {
			/*
			 * We are an onward proxied wsi at the proxy,
			 * opaque is proxing "conn", we must remove its pointer
			 * to us since we are destroying
			 */
			lws_proxy_clean_conn_ss(wsi);
		} else

		if (wsi->client_bound_sspc) {
			lws_sspc_handle_t *h = (lws_sspc_handle_t *)wsi->a.opaque_user_data;

			if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {

#if defined(LWS_WITH_SYS_METRICS)
				/*
				 * If any hanging caliper measurement, dump it, and free any tags
				 */
				lws_metrics_caliper_report_hist(h->cal_txn, (struct lws *)NULL);
#endif

				h->txp_path.priv_onw = NULL;
				//wsi->a.opaque_user_data = NULL;
			}
		} else
#endif
		{
			hh = (lws_ss_handle_t *)wsi->a.opaque_user_data;

			if (hh) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {

				/*
				 * ss level: only reports if dangling caliper
				 * not already reported
				 */
				lws_metrics_caliper_report_hist(hh->cal_txn, wsi);

				hh->wsi = NULL;
				wsi->a.opaque_user_data = NULL;
			}
		}
	}
#endif


	lws_remove_child_from_any_parent(wsi);
	wsi->socket_is_permanently_unusable = 1;

	/* event loop may want to defer the final teardown; if so, stop here */
	if (wsi->a.context->event_loop_ops->wsi_logical_close)
		if (wsi->a.context->event_loop_ops->wsi_logical_close(wsi))
			return;

	__lws_close_free_wsi_final(wsi);

#if defined(LWS_WITH_SECURE_STREAMS)
	/* inform the ss of the disconnect now the wsi side is fully gone */
	if (hh && hh->ss_dangling_connected &&
	    lws_ss_event_helper(hh, LWSSSCS_DISCONNECTED) == LWSSSSRET_DESTROY_ME)
		lws_ss_destroy(&hh);
#endif
}
899 | | |
900 | | |
901 | | /* cx + vh lock */ |
902 | | |
903 | | void |
904 | | __lws_close_free_wsi_final(struct lws *wsi) |
905 | 0 | { |
906 | 0 | int n; |
907 | |
|
908 | 0 | if (!wsi->shadow && |
909 | 0 | lws_socket_is_valid(wsi->desc.sockfd) && !lws_ssl_close(wsi)) { |
910 | 0 | lwsl_wsi_debug(wsi, "fd %d", wsi->desc.sockfd); |
911 | | |
912 | | /* |
913 | | * if this is the pt pipe, skip the actual close, |
914 | | * go through the motions though so we will reach 0 open wsi |
915 | | * on the pt, and trigger the pt destroy to close the pipe fds |
916 | | */ |
917 | 0 | if (!lws_plat_pipe_is_fd_assocated(wsi->a.context, wsi->tsi, |
918 | 0 | wsi->desc.sockfd)) { |
919 | 0 | n = compatible_close(wsi->desc.sockfd); |
920 | 0 | if (n) |
921 | 0 | lwsl_wsi_debug(wsi, "closing: close ret %d", |
922 | 0 | LWS_ERRNO); |
923 | 0 | } |
924 | |
|
925 | 0 | __remove_wsi_socket_from_fds(wsi); |
926 | 0 | if (lws_socket_is_valid(wsi->desc.sockfd)) |
927 | 0 | delete_from_fd(wsi->a.context, wsi->desc.sockfd); |
928 | |
|
929 | 0 | #if !defined(LWS_PLAT_FREERTOS) && !defined(WIN32) && !defined(LWS_PLAT_OPTEE) |
930 | 0 | delete_from_fdwsi(wsi->a.context, wsi); |
931 | 0 | #endif |
932 | |
|
933 | 0 | sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd); |
934 | 0 | } |
935 | | |
936 | | /* ... if we're closing the cancel pipe, account for it */ |
937 | |
|
938 | 0 | { |
939 | 0 | struct lws_context_per_thread *pt = |
940 | 0 | &wsi->a.context->pt[(int)wsi->tsi]; |
941 | |
|
942 | 0 | if (pt->pipe_wsi == wsi) |
943 | 0 | pt->pipe_wsi = NULL; |
944 | 0 | if (pt->dummy_pipe_fds[0] == wsi->desc.sockfd) |
945 | 0 | { |
946 | 0 | #if !defined(LWS_PLAT_FREERTOS) |
947 | 0 | pt->dummy_pipe_fds[0] = LWS_SOCK_INVALID; |
948 | 0 | #endif |
949 | 0 | } |
950 | 0 | } |
951 | |
|
952 | 0 | wsi->desc.sockfd = LWS_SOCK_INVALID; |
953 | |
|
954 | 0 | #if defined(LWS_WITH_CLIENT) |
955 | 0 | lws_free_set_NULL(wsi->cli_hostname_copy); |
956 | 0 | if (wsi->close_is_redirect) { |
957 | |
|
958 | 0 | wsi->close_is_redirect = 0; |
959 | |
|
960 | 0 | lwsl_wsi_info(wsi, "picking up redirection"); |
961 | |
|
962 | 0 | lws_role_transition(wsi, LWSIFR_CLIENT, LRS_UNCONNECTED, |
963 | 0 | &role_ops_h1); |
964 | |
|
965 | 0 | #if defined(LWS_WITH_HTTP2) |
966 | 0 | if (wsi->client_mux_substream_was) |
967 | 0 | wsi->h2.END_STREAM = wsi->h2.END_HEADERS = 0; |
968 | 0 | #endif |
969 | 0 | #if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT) |
970 | 0 | if (wsi->mux.parent_wsi) { |
971 | 0 | lws_wsi_mux_sibling_disconnect(wsi); |
972 | 0 | wsi->mux.parent_wsi = NULL; |
973 | 0 | } |
974 | 0 | #endif |
975 | |
|
976 | 0 | #if defined(LWS_WITH_TLS) |
977 | 0 | memset(&wsi->tls, 0, sizeof(wsi->tls)); |
978 | 0 | #endif |
979 | | |
980 | | // wsi->a.protocol = NULL; |
981 | 0 | if (wsi->a.protocol) |
982 | 0 | lws_bind_protocol(wsi, wsi->a.protocol, "client_reset"); |
983 | 0 | wsi->pending_timeout = NO_PENDING_TIMEOUT; |
984 | 0 | wsi->hdr_parsing_completed = 0; |
985 | |
|
986 | 0 | #if defined(LWS_WITH_TLS) |
987 | 0 | if (wsi->stash->cis[CIS_ALPN]) |
988 | 0 | lws_strncpy(wsi->alpn, wsi->stash->cis[CIS_ALPN], |
989 | 0 | sizeof(wsi->alpn)); |
990 | 0 | #endif |
991 | |
|
992 | 0 | if (lws_header_table_attach(wsi, 0)) { |
993 | 0 | lwsl_wsi_err(wsi, "failed to get ah"); |
994 | 0 | return; |
995 | 0 | } |
996 | | // } |
997 | | //_lws_header_table_reset(wsi->http.ah); |
998 | | |
999 | 0 | #if defined(LWS_WITH_TLS) |
1000 | 0 | wsi->tls.use_ssl = (unsigned int)wsi->flags; |
1001 | 0 | #endif |
1002 | |
|
1003 | | #if defined(LWS_WITH_TLS_JIT_TRUST) |
1004 | | if (wsi->stash && wsi->stash->cis[CIS_ADDRESS]) { |
1005 | | struct lws_vhost *vh = NULL; |
1006 | | lws_tls_jit_trust_vhost_bind(wsi->a.context, |
1007 | | wsi->stash->cis[CIS_ADDRESS], |
1008 | | &vh); |
1009 | | if (vh) { |
1010 | | if (!vh->count_bound_wsi && vh->grace_after_unref) { |
1011 | | lwsl_wsi_info(wsi, "%s in use\n", |
1012 | | vh->lc.gutag); |
1013 | | lws_sul_cancel(&vh->sul_unref); |
1014 | | } |
1015 | | vh->count_bound_wsi++; |
1016 | | wsi->a.vhost = vh; |
1017 | | } |
1018 | | } |
1019 | | #endif |
1020 | |
|
1021 | 0 | return; |
1022 | 0 | } |
1023 | 0 | #endif |
1024 | | |
1025 | | /* outermost destroy notification for wsi (user_space still intact) */ |
1026 | 0 | if (wsi->a.vhost) |
1027 | 0 | wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_WSI_DESTROY, |
1028 | 0 | wsi->user_space, NULL, 0); |
1029 | |
|
1030 | | #ifdef LWS_WITH_CGI |
1031 | | if (wsi->http.cgi) { |
1032 | | lws_spawn_piped_destroy(&wsi->http.cgi->lsp); |
1033 | | lws_sul_cancel(&wsi->http.cgi->sul_grace); |
1034 | | lws_free_set_NULL(wsi->http.cgi); |
1035 | | } |
1036 | | #endif |
1037 | |
|
1038 | | #if defined(LWS_WITH_SYS_FAULT_INJECTION) |
1039 | | lws_fi_destroy(&wsi->fic); |
1040 | | #endif |
1041 | |
|
1042 | 0 | __lws_wsi_remove_from_sul(wsi); |
1043 | 0 | sanity_assert_no_wsi_traces(wsi->a.context, wsi); |
1044 | 0 | __lws_free_wsi(wsi); |
1045 | 0 | } |
1046 | | |
1047 | | |
1048 | | void |
1049 | | lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *caller) |
1050 | 0 | { |
1051 | 0 | struct lws_context *cx = wsi->a.context; |
1052 | 0 | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
1053 | |
|
1054 | 0 | lws_context_lock(cx, __func__); |
1055 | |
|
1056 | 0 | lws_pt_lock(pt, __func__); |
1057 | | /* may destroy vhost, cannot hold vhost lock outside it */ |
1058 | 0 | __lws_close_free_wsi(wsi, reason, caller); |
1059 | 0 | lws_pt_unlock(pt); |
1060 | |
|
1061 | 0 | lws_context_unlock(cx); |
1062 | 0 | } |
1063 | | |
1064 | | |