/src/libwebsockets/lib/core-net/close.c
Line | Count | Source |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include "private-lib-core.h" |
26 | | #include "private-lib-async-dns.h" |
27 | | |
28 | | #if defined(LWS_WITH_CLIENT) |
| | /* lws_dll2 foreach callback: force-close one wsi that was queued
| |  * behind a transaction-queue leader.  (enum lws_close_status)-1
| |  * means "no specific close status"; the foreach user arg is unused. */
29 | | static int
30 | | lws_close_trans_q_leader(struct lws_dll2 *d, void *user)
31 | 0 | {
32 | 0 | struct lws *w = lws_container_of(d, struct lws, dll2_cli_txn_queue);
33 | |
|
34 | 0 | __lws_close_free_wsi(w, (enum lws_close_status)-1, "trans q leader closing");
35 | |
|
36 | 0 | return 0;
37 | 0 | }
38 | | #endif |
39 | | |
| | /* Return a wsi to a neutral state ahead of destruction or reuse:
| |  * frees per-connection allocations (client hostname copy, protocol
| |  * user_space, udp state, digest auth header), detaches the wsi from
| |  * every dll2 list it may sit on, destroys its buflists, and clears
| |  * role / h2 / client flag state.  Safe to call with wsi == NULL. */
40 | | void
41 | | __lws_reset_wsi(struct lws *wsi)
42 | 0 | {
43 | 0 | if (!wsi)
44 | 0 | return;
45 | |
46 | 0 | #if defined(LWS_WITH_CLIENT)
47 | |
48 | 0 | lws_free_set_NULL(wsi->cli_hostname_copy);
49 | |
|
50 | 0 | #if defined(LWS_WITH_CONMON)
51 | |
|
52 | 0 | if (wsi->conmon.dns_results_copy) {
53 | 0 | lws_conmon_addrinfo_destroy(wsi->conmon.dns_results_copy);
54 | 0 | wsi->conmon.dns_results_copy = NULL;
55 | 0 | }
56 | |
|
57 | 0 | wsi->conmon.ciu_dns =
58 | 0 | wsi->conmon.ciu_sockconn =
59 | 0 | wsi->conmon.ciu_tls =
60 | 0 | wsi->conmon.ciu_txn_resp = 0;
61 | 0 | #endif
62 | |
63 | | /*
64 | | * if we have wsi in our transaction queue, if we are closing we
65 | | * must go through and close all those first
66 | | */
67 | 0 | if (wsi->a.vhost) {
68 | |
69 | | /* we are no longer an active client connection that can piggyback */
70 | 0 | lws_dll2_remove(&wsi->dll_cli_active_conns);
71 | |
|
72 | 0 | lws_dll2_foreach_safe(&wsi->dll2_cli_txn_queue_owner, NULL,
73 | 0 | lws_close_trans_q_leader);
74 | |
75 | | /*
76 | | * !!! If we are closing, but we have pending pipelined
77 | | * transaction results we already sent headers for, that's going
78 | | * to destroy sync for HTTP/1 and leave H2 stream with no live
79 | | * swsi.
80 | | *
81 | | * However this is normal if we are being closed because the
82 | | * transaction queue leader is closing.
83 | | */
84 | 0 | lws_dll2_remove(&wsi->dll2_cli_txn_queue);
85 | 0 | }
86 | 0 | #endif
87 | |
|
88 | 0 | if (wsi->a.vhost) {
89 | 0 | lws_vhost_lock(wsi->a.vhost);
90 | 0 | lws_dll2_remove(&wsi->vh_awaiting_socket);
91 | 0 | lws_vhost_unlock(wsi->a.vhost);
92 | 0 | }
93 | |
94 | | /*
95 | | * Protocol user data may be allocated either internally by lws
96 | | * or specified by the user. We should only free what we allocated.
97 | | */
98 | 0 | if (wsi->a.protocol && wsi->a.protocol->per_session_data_size &&
99 | 0 | wsi->user_space && !wsi->user_space_externally_allocated) {
100 | | /* confirm no sul left scheduled in user data itself */
101 | 0 | lws_sul_debug_zombies(wsi->a.context, wsi->user_space,
102 | 0 | wsi->a.protocol->per_session_data_size, __func__);
103 | 0 | lws_free_set_NULL(wsi->user_space);
104 | 0 | }
105 | |
106 | | /*
107 | | * Don't let buflist content or state from the wsi's previous life
108 | | * carry over to the new life
109 | | */
110 | |
|
111 | 0 | lws_buflist_destroy_all_segments(&wsi->buflist);
112 | 0 | lws_dll2_remove(&wsi->dll_buflist);
113 | 0 | lws_buflist_destroy_all_segments(&wsi->buflist_out);
114 | 0 | #if defined(LWS_WITH_UDP)
115 | 0 | if (wsi->udp) {
116 | | /* confirm no sul left scheduled in wsi->udp itself */
117 | 0 | lws_sul_debug_zombies(wsi->a.context, wsi->udp,
118 | 0 | sizeof(*wsi->udp), "close udp wsi");
119 | 0 | lws_free_set_NULL(wsi->udp);
120 | 0 | }
121 | 0 | #endif
122 | 0 | wsi->retry = 0;
123 | 0 | wsi->mount_hit = 0;
124 | |
|
125 | 0 | #if defined(LWS_WITH_CLIENT)
| | /* NOTE(review): cli_hostname_copy was already freed at the top of
| |  * this function, and these dll2 removals repeat earlier ones.
| |  * Redundant but harmless: lws_free_set_NULL() NULLs the pointer and
| |  * lws_dll2_remove() is a no-op on a detached node. */
126 | 0 | lws_dll2_remove(&wsi->dll2_cli_txn_queue);
127 | 0 | lws_dll2_remove(&wsi->dll_cli_active_conns);
128 | 0 | if (wsi->cli_hostname_copy)
129 | 0 | lws_free_set_NULL(wsi->cli_hostname_copy);
130 | 0 | #endif
131 | |
|
132 | | #if defined(LWS_WITH_SYS_ASYNC_DNS)
133 | | lws_async_dns_cancel(wsi);
134 | | #endif
135 | |
|
136 | | #if defined(LWS_WITH_HTTP_PROXY)
137 | | if (wsi->http.buflist_post_body)
138 | | lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body);
139 | | #endif
140 | |
|
141 | 0 | #if defined(LWS_WITH_HTTP_DIGEST_AUTH)
142 | 0 | if (wsi->http.digest_auth_hdr) {
143 | 0 | lws_free(wsi->http.digest_auth_hdr);
144 | 0 | wsi->http.digest_auth_hdr = NULL;
145 | 0 | }
146 | 0 | #endif
147 | |
|
148 | 0 | #if defined(LWS_WITH_SERVER)
149 | 0 | lws_dll2_remove(&wsi->listen_list);
150 | 0 | #endif
151 | |
|
152 | 0 | #if defined(LWS_WITH_CLIENT)
153 | 0 | if (wsi->a.vhost)
154 | 0 | lws_dll2_remove(&wsi->dll_cli_active_conns);
155 | 0 | #endif
156 | |
|
157 | 0 | __lws_same_vh_protocol_remove(wsi);
158 | 0 | #if defined(LWS_WITH_CLIENT)
159 | | //lws_free_set_NULL(wsi->stash);
160 | 0 | lws_free_set_NULL(wsi->cli_hostname_copy);
161 | 0 | #endif
162 | |
|
163 | | #if defined(LWS_WITH_PEER_LIMITS)
164 | | lws_peer_track_wsi_close(wsi->a.context, wsi->peer);
165 | | wsi->peer = NULL;
166 | | #endif
167 | |
168 | | /* since we will destroy the wsi, make absolutely sure now */
169 | |
|
170 | 0 | #if defined(LWS_WITH_TLS)
171 | 0 | __lws_ssl_remove_wsi_from_buffered_list(wsi);
172 | 0 | #endif
173 | 0 | __lws_wsi_remove_from_sul(wsi);
174 | |
|
| | /* give the role a chance to free role-private state */
175 | 0 | if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_destroy_role))
176 | 0 | lws_rops_func_fidx(wsi->role_ops,
177 | 0 | LWS_ROPS_destroy_role).destroy_role(wsi);
178 | |
|
179 | 0 | #if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
180 | 0 | __lws_header_table_detach(wsi, 0);
181 | 0 | #endif
182 | |
|
183 | 0 | #if defined(LWS_ROLE_H2)
184 | | /*
185 | | * Let's try to clean out the h2-ness of the wsi
186 | | */
187 | |
|
188 | 0 | memset(&wsi->h2, 0, sizeof(wsi->h2));
189 | |
|
190 | 0 | wsi->hdr_parsing_completed = wsi->mux_substream =
191 | 0 | wsi->upgraded_to_http2 = wsi->mux_stream_immortal =
192 | 0 | wsi->h2_acked_settings = wsi->seen_nonpseudoheader =
193 | 0 | wsi->socket_is_permanently_unusable = wsi->favoured_pollin =
194 | 0 | wsi->already_did_cce = wsi->told_user_closed =
195 | 0 | wsi->waiting_to_send_close_frame = wsi->close_needs_ack =
196 | 0 | wsi->parent_pending_cb_on_writable = wsi->seen_zero_length_recv =
197 | 0 | wsi->close_when_buffered_out_drained = wsi->could_have_pending = 0;
198 | 0 | #endif
199 | |
|
200 | 0 | #if defined(LWS_WITH_CLIENT)
201 | 0 | wsi->do_ws = wsi->chunked = wsi->client_rx_avail =
202 | 0 | wsi->client_http_body_pending = wsi->transaction_from_pipeline_queue =
203 | 0 | wsi->keepalive_active = wsi->keepalive_rejected =
204 | 0 | wsi->redirected_to_get = wsi->client_pipeline = wsi->client_h2_alpn =
205 | 0 | wsi->client_mux_substream = wsi->client_mux_migrated =
206 | 0 | wsi->tls_session_reused = wsi->perf_done = 0;
207 | |
|
208 | 0 | wsi->immortal_substream_count = 0;
209 | 0 | #endif
210 | 0 | }
211 | | |
212 | | /* req cx lock */ |
213 | | |
| | /* Final destruction of a wsi: requires the context lock held (see
| |  * "req cx lock" above and the assert below).  Unhooks any secure-
| |  * streams handle that still points at us, resets the wsi, unbinds
| |  * it from its vhost (which may destroy the vhost), lets the event
| |  * loop backend clean up, then frees the struct lws itself. */
214 | | void
215 | | __lws_free_wsi(struct lws *wsi)
216 | 0 | {
217 | 0 | struct lws_vhost *vh;
218 | |
|
219 | 0 | if (!wsi)
220 | 0 | return;
221 | |
222 | 0 | lws_context_assert_lock_held(wsi->a.context);
223 | |
224 | | /* just in case */
225 | 0 | lws_dll2_remove(&wsi->pre_natal);
226 | |
|
227 | 0 | #if defined(LWS_WITH_SECURE_STREAMS)
228 | 0 | if (wsi->for_ss) {
229 | |
|
230 | | #if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
231 | | if (wsi->client_bound_sspc) {
232 | | lws_sspc_handle_t *h = (lws_sspc_handle_t *)
233 | | wsi->a.opaque_user_data;
234 | | if (h) {
235 | | h->txp_path.priv_onw = NULL;
236 | | wsi->a.opaque_user_data = NULL;
237 | | }
238 | | } else
239 | | #endif
240 | 0 | {
241 | | /*
242 | | * Make certain it is disconnected from the ss by now
243 | | */
244 | 0 | lws_ss_handle_t *h = (lws_ss_handle_t *)
245 | 0 | wsi->a.opaque_user_data;
246 | |
|
247 | 0 | if (h) {
248 | 0 | h->wsi = NULL;
249 | 0 | wsi->a.opaque_user_data = NULL;
250 | 0 | }
251 | 0 | }
252 | 0 | }
253 | 0 | #endif
254 | |
|
| | /* snapshot the vhost: __lws_reset_wsi() may clear wsi list state */
255 | 0 | vh = wsi->a.vhost;
256 | |
|
257 | 0 | __lws_reset_wsi(wsi);
258 | 0 | __lws_wsi_remove_from_sul(wsi);
259 | |
|
260 | 0 | if (vh)
261 | | /* this may destroy vh */
262 | 0 | __lws_vhost_unbind_wsi(wsi); /* req cx + vh lock */
263 | |
|
264 | 0 | #if defined(LWS_WITH_CLIENT)
265 | 0 | if (wsi->stash)
266 | 0 | lws_free_set_NULL(wsi->stash);
267 | 0 | #endif
268 | |
|
269 | 0 | if (wsi->a.context->event_loop_ops->destroy_wsi)
270 | 0 | wsi->a.context->event_loop_ops->destroy_wsi(wsi);
271 | |
|
272 | 0 | lwsl_wsi_debug(wsi, "tsi fds count %d\n",
273 | 0 | wsi->a.context->pt[(int)wsi->tsi].fds_count);
274 | |
275 | | /* confirm no sul left scheduled in wsi itself */
276 | 0 | lws_sul_debug_zombies(wsi->a.context, wsi, sizeof(*wsi), __func__);
277 | |
|
278 | 0 | wsi->socket_is_permanently_unusable = 1; // !!!
279 | |
|
280 | 0 | __lws_lc_untag(wsi->a.context, &wsi->lc);
281 | 0 | lws_free(wsi);
282 | 0 | }
283 | | |
284 | | |
| | /* Unlink wsi from its parent's singly-linked child list, emitting
| |  * LWS_CALLBACK_CHILD_CLOSING to the parent's protocol first.  Logs
| |  * an error if wsi was not actually on the parent's list.  No-op
| |  * when wsi has no parent; always leaves wsi->parent == NULL. */
285 | | void
286 | | lws_remove_child_from_any_parent(struct lws *wsi)
287 | 0 | {
288 | 0 | struct lws **pwsi;
289 | 0 | int seen = 0;
290 | |
|
291 | 0 | if (!wsi->parent)
292 | 0 | return;
293 | |
294 | | /* detach ourselves from parent's child list */
295 | 0 | pwsi = &wsi->parent->child_list;
296 | 0 | while (*pwsi) {
297 | 0 | if (*pwsi == wsi) {
298 | 0 | lwsl_wsi_info(wsi, "detach from parent %s",
299 | 0 | lws_wsi_tag(wsi->parent));
300 | |
|
301 | 0 | if (wsi->parent->a.protocol)
302 | 0 | wsi->parent->a.protocol->callback(wsi,
303 | 0 | LWS_CALLBACK_CHILD_CLOSING,
304 | 0 | wsi->parent->user_space, wsi, 0);
305 | |
|
| | /* splice our siblings over the gap we leave */
306 | 0 | *pwsi = wsi->sibling_list;
307 | 0 | seen = 1;
308 | 0 | break;
309 | 0 | }
310 | 0 | pwsi = &(*pwsi)->sibling_list;
311 | 0 | }
312 | 0 | if (!seen)
313 | 0 | lwsl_wsi_err(wsi, "failed to detach from parent");
314 | |
|
315 | 0 | wsi->parent = NULL;
316 | 0 | }
317 | | |
318 | | #if defined(LWS_WITH_CLIENT) |
| | /* Inform user code that a client connection failed, at most once per
| |  * wsi (guarded by already_did_cce).  arg/len carry an optional
| |  * reason string delivered via LWS_CALLBACK_CLIENT_CONNECTION_ERROR;
| |  * suppressed when client_suppress_CONNECTION_ERROR is set. */
319 | | void
320 | | lws_inform_client_conn_fail(struct lws *wsi, void *arg, size_t len)
321 | 0 | {
322 | 0 | lws_addrinfo_clean(wsi);
323 | |
|
324 | 0 | if (wsi->already_did_cce)
325 | 0 | return;
326 | |
327 | 0 | wsi->already_did_cce = 1;
328 | |
|
329 | 0 | if (!wsi->a.protocol)
330 | 0 | return;
331 | |
332 | 0 | if (!wsi->client_suppress_CONNECTION_ERROR)
333 | 0 | wsi->a.protocol->callback(wsi,
334 | 0 | LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
335 | 0 | wsi->user_space, arg, len);
336 | 0 | }
337 | | #endif |
338 | | |
| | /* Free every entry on the wsi's sorted DNS results list, detaching
| |  * each node before freeing it.  Compiled to an empty body when
| |  * LWS_WITH_CLIENT is disabled. */
339 | | void
340 | | lws_addrinfo_clean(struct lws *wsi)
341 | 0 | {
342 | 0 | #if defined(LWS_WITH_CLIENT)
343 | 0 | struct lws_dll2 *d = lws_dll2_get_head(&wsi->dns_sorted_list), *d1;
344 | |
|
345 | 0 | while (d) {
346 | 0 | lws_dns_sort_t *r = lws_container_of(d, lws_dns_sort_t, list);
347 | |
|
| | /* save the successor before we remove/free this node */
348 | 0 | d1 = d->next;
349 | 0 | lws_dll2_remove(d);
350 | 0 | lws_free(r);
351 | |
|
352 | 0 | d = d1;
353 | 0 | }
354 | 0 | #endif
355 | 0 | }
356 | | |
357 | | #if defined(LWS_WITH_ASYNC_QUEUE) |
| | /* Spin (1ms sleeps) until any async-worker job attached to this wsi
| |  * is no longer actively running, then detach and free it under
| |  * async_worker_mutex.  Called on the close path so the worker thread
| |  * cannot touch a wsi that is being torn down.  A job is reapable when
| |  * it sits on the waiting or finished list, or was handled by main. */
358 | | static void
359 | | lws_async_worker_wait_and_reap(struct lws *wsi)
360 | | {
361 | | while (1) {
362 | | pthread_mutex_lock(&wsi->a.context->async_worker_mutex);
363 | | if (!wsi->async_worker_job) {
364 | | pthread_mutex_unlock(&wsi->a.context->async_worker_mutex);
365 | | break;
366 | | }
367 | | struct lws_async_job *job = wsi->async_worker_job;
368 | | if (job->list.owner == &wsi->a.context->async_worker_waiting ||
369 | | job->list.owner == &wsi->a.context->async_worker_finished ||
370 | | job->handled_by_main) {
371 | | /* Not actively running. We can safely detach it and reap it. */
372 | | wsi->async_worker_job = NULL;
373 | | lws_dll2_remove(&job->list);
374 | | lws_free(job);
375 | | pthread_mutex_unlock(&wsi->a.context->async_worker_mutex);
376 | | break;
377 | | }
378 | | pthread_mutex_unlock(&wsi->a.context->async_worker_mutex);
379 | | /* The background thread is actively modifying this WSI or its SSL contexts.
380 | | * It is catastrophic to continue closing or freeing this WSI until it is done.
381 | | * Because this happens very infrequently (shutdown collisions), we briefly yield.
382 | | */
383 | | usleep(1000);
384 | | }
385 | | }
386 | | #endif |
387 | | |
388 | | /* requires cx and pt lock */ |
389 | | |
390 | | void |
391 | | __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, |
392 | | const char *caller) |
393 | 0 | { |
394 | 0 | struct lws_context_per_thread *pt; |
395 | 0 | const struct lws_protocols *pro; |
396 | 0 | #if defined(LWS_WITH_SECURE_STREAMS) |
397 | 0 | lws_ss_handle_t *hh = NULL; |
398 | 0 | #endif |
399 | 0 | struct lws_context *context; |
400 | 0 | struct lws *wsi1, *wsi2; |
401 | 0 | int n, ccb; |
402 | |
|
403 | 0 | if (!wsi) |
404 | 0 | return; |
405 | | |
406 | 0 | lwsl_wsi_info(wsi, "caller: %s", caller); |
407 | |
|
408 | 0 | lws_access_log(wsi); |
409 | |
|
410 | 0 | if (!lws_dll2_is_detached(&wsi->dll_buflist)) |
411 | 0 | lwsl_wsi_info(wsi, "going down with stuff in buflist"); |
412 | |
|
413 | 0 | context = wsi->a.context; |
414 | 0 | pt = &context->pt[(int)wsi->tsi]; |
415 | |
|
416 | 0 | if (pt->pipe_wsi == wsi) { |
417 | 0 | if (lws_socket_is_valid(wsi->desc.sockfd)) { |
418 | 0 | __remove_wsi_socket_from_fds(wsi); |
419 | 0 | if (lws_socket_is_valid(wsi->desc.sockfd)) |
420 | 0 | delete_from_fd(wsi->a.context, wsi->desc.sockfd); |
421 | 0 | #if !defined(LWS_PLAT_FREERTOS) && !defined(WIN32) && !defined(LWS_PLAT_OPTEE) |
422 | 0 | delete_from_fdwsi(wsi->a.context, wsi); |
423 | 0 | #endif |
424 | 0 | } |
425 | 0 | lws_plat_pipe_close(pt->pipe_wsi); |
426 | 0 | pt->pipe_wsi = NULL; |
427 | 0 | } |
428 | |
|
429 | | #if defined(LWS_WITH_SYS_METRICS) && \ |
430 | | (defined(LWS_WITH_CLIENT) || defined(LWS_WITH_SERVER)) |
431 | | /* wsi level: only reports if dangling caliper */ |
432 | | if (wsi->cal_conn.mt && wsi->cal_conn.us_start) { |
433 | | if ((lws_metrics_priv_to_pub(wsi->cal_conn.mt)->flags) & LWSMTFL_REPORT_HIST) { |
434 | | lws_metrics_caliper_report_hist(wsi->cal_conn, (struct lws *)NULL); |
435 | | } else { |
436 | | lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO); |
437 | | lws_metrics_caliper_done(wsi->cal_conn); |
438 | | } |
439 | | } else |
440 | | lws_metrics_caliper_done(wsi->cal_conn); |
441 | | #endif |
442 | |
|
443 | | #if defined(LWS_WITH_SYS_ASYNC_DNS) |
444 | | /* is this wsi handling the interface to a dns server? */ |
445 | | { |
446 | | lws_async_dns_server_t *dsrv = |
447 | | __lws_async_dns_server_find_wsi(&context->async_dns, wsi); |
448 | | |
449 | | if (dsrv) |
450 | | dsrv->wsi = NULL; |
451 | | } |
452 | | #endif |
453 | |
|
454 | 0 | lws_pt_assert_lock_held(pt); |
455 | |
|
456 | 0 | #if defined(LWS_WITH_CLIENT) |
457 | |
|
458 | 0 | lws_free_set_NULL(wsi->cli_hostname_copy); |
459 | 0 | wsi->client_mux_substream_was = wsi->client_mux_substream; |
460 | |
|
461 | 0 | lws_addrinfo_clean(wsi); |
462 | 0 | #endif |
463 | |
|
464 | 0 | #if defined(LWS_WITH_HTTP2) |
465 | 0 | if (wsi->mux_stream_immortal) |
466 | 0 | lws_http_close_immortal(wsi); |
467 | 0 | #endif |
468 | | |
469 | | /* if we have children, close them first */ |
470 | 0 | if (wsi->child_list) { |
471 | 0 | wsi2 = wsi->child_list; |
472 | 0 | while (wsi2) { |
473 | 0 | wsi1 = wsi2->sibling_list; |
474 | | // wsi2->parent = NULL; |
475 | | /* stop it doing shutdown processing */ |
476 | 0 | wsi2->socket_is_permanently_unusable = 1; |
477 | 0 | __lws_close_free_wsi(wsi2, reason, |
478 | 0 | "general child recurse"); |
479 | 0 | wsi2 = wsi1; |
480 | 0 | } |
481 | 0 | wsi->child_list = NULL; |
482 | 0 | } |
483 | |
|
484 | 0 | #if defined(LWS_ROLE_RAW_FILE) |
485 | 0 | if (wsi->role_ops == &role_ops_raw_file) { |
486 | 0 | lws_remove_child_from_any_parent(wsi); |
487 | 0 | __remove_wsi_socket_from_fds(wsi); |
488 | 0 | if (wsi->a.protocol) |
489 | 0 | wsi->a.protocol->callback(wsi, wsi->role_ops->close_cb[0], |
490 | 0 | wsi->user_space, NULL, 0); |
491 | 0 | goto async_close; |
492 | 0 | } |
493 | 0 | #endif |
494 | | |
495 | 0 | wsi->wsistate_pre_close = wsi->wsistate; |
496 | |
|
497 | | #ifdef LWS_WITH_CGI |
498 | | if (wsi->role_ops == &role_ops_cgi) { |
499 | | |
500 | | // lwsl_debug("%s: closing stdwsi index %d\n", __func__, (int)wsi->lsp_channel); |
501 | | |
502 | | /* we are not a network connection, but a handler for CGI io */ |
503 | | if (wsi->parent && wsi->parent->http.cgi) { |
504 | | |
505 | | /* |
506 | | * We need to keep the logical cgi around so we can |
507 | | * drain it |
508 | | */ |
509 | | |
510 | | // if (wsi->parent->child_list == wsi && !wsi->sibling_list) |
511 | | // lws_cgi_remove_and_kill(wsi->parent); |
512 | | |
513 | | /* end the binding between us and network connection */ |
514 | | if (wsi->parent->http.cgi && wsi->parent->http.cgi->lsp) |
515 | | wsi->parent->http.cgi->lsp->stdwsi[(int)wsi->lsp_channel] = |
516 | | NULL; |
517 | | } |
518 | | wsi->socket_is_permanently_unusable = 1; |
519 | | |
520 | | goto just_kill_connection; |
521 | | } |
522 | | |
523 | | if (wsi->http.cgi) |
524 | | lws_cgi_remove_and_kill(wsi); |
525 | | #endif |
526 | |
|
527 | 0 | #if defined(LWS_WITH_CLIENT) |
528 | 0 | if (!wsi->close_is_redirect) |
529 | 0 | lws_free_set_NULL(wsi->stash); |
530 | 0 | #endif |
531 | |
|
532 | | #if defined(LWS_WITH_ASYNC_QUEUE) |
533 | | lws_async_worker_wait_and_reap(wsi); |
534 | | #endif |
535 | |
|
536 | 0 | if (wsi->role_ops == &role_ops_raw_skt) { |
537 | 0 | wsi->socket_is_permanently_unusable = 1; |
538 | 0 | goto just_kill_connection; |
539 | 0 | } |
540 | 0 | #if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)) |
541 | 0 | if (lwsi_role_http(wsi) && lwsi_role_server(wsi) && |
542 | 0 | wsi->http.fop_fd != NULL) |
543 | 0 | lws_vfs_file_close(&wsi->http.fop_fd); |
544 | 0 | #endif |
545 | |
|
546 | 0 | if (lwsi_state(wsi) == LRS_DEAD_SOCKET) |
547 | 0 | return; |
548 | | |
549 | 0 | if (wsi->socket_is_permanently_unusable || |
550 | 0 | reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY || |
551 | 0 | lwsi_state(wsi) == LRS_SHUTDOWN) |
552 | 0 | goto just_kill_connection; |
553 | | |
554 | 0 | switch (lwsi_state_PRE_CLOSE(wsi)) { |
555 | 0 | case LRS_DEAD_SOCKET: |
556 | 0 | return; |
557 | | |
558 | | /* we tried the polite way... */ |
559 | 0 | case LRS_WAITING_TO_SEND_CLOSE: |
560 | 0 | case LRS_AWAITING_CLOSE_ACK: |
561 | 0 | case LRS_RETURNED_CLOSE: |
562 | 0 | goto just_kill_connection; |
563 | | |
564 | 0 | case LRS_FLUSHING_BEFORE_CLOSE: |
565 | 0 | if (lws_has_buffered_out(wsi) |
566 | | #if defined(LWS_WITH_HTTP_STREAM_COMPRESSION) |
567 | | || wsi->http.comp_ctx.buflist_comp || |
568 | | wsi->http.comp_ctx.may_have_more |
569 | | #endif |
570 | 0 | ) { |
571 | 0 | lws_callback_on_writable(wsi); |
572 | 0 | return; |
573 | 0 | } |
574 | 0 | lwsl_wsi_info(wsi, " end LRS_FLUSHING_BEFORE_CLOSE"); |
575 | 0 | goto just_kill_connection; |
576 | 0 | default: |
577 | 0 | if (lws_has_buffered_out(wsi) |
578 | | #if defined(LWS_WITH_HTTP_STREAM_COMPRESSION) |
579 | | || wsi->http.comp_ctx.buflist_comp || |
580 | | wsi->http.comp_ctx.may_have_more |
581 | | #endif |
582 | 0 | ) { |
583 | 0 | lwsl_wsi_info(wsi, "LRS_FLUSHING_BEFORE_CLOSE"); |
584 | 0 | lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE); |
585 | 0 | __lws_set_timeout(wsi, |
586 | 0 | PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5); |
587 | 0 | return; |
588 | 0 | } |
589 | 0 | break; |
590 | 0 | } |
591 | | |
592 | 0 | if (lwsi_state(wsi) == LRS_WAITING_CONNECT || |
593 | 0 | lwsi_state(wsi) == LRS_WAITING_DNS || |
594 | 0 | lwsi_state(wsi) == LRS_H1C_ISSUE_HANDSHAKE) |
595 | 0 | goto just_kill_connection; |
596 | | |
597 | 0 | if (!wsi->told_user_closed && wsi->user_space && wsi->a.protocol && |
598 | 0 | wsi->protocol_bind_balance) { |
599 | 0 | wsi->a.protocol->callback(wsi, |
600 | 0 | wsi->role_ops->protocol_unbind_cb[ |
601 | 0 | !!lwsi_role_server(wsi)], |
602 | 0 | wsi->user_space, (void *)__func__, 0); |
603 | 0 | wsi->protocol_bind_balance = 0; |
604 | 0 | } |
605 | | |
606 | | /* |
607 | | * signal we are closing, lws_write will |
608 | | * add any necessary version-specific stuff. If the write fails, |
609 | | * no worries we are closing anyway. If we didn't initiate this |
610 | | * close, then our state has been changed to |
611 | | * LRS_RETURNED_CLOSE and we will skip this. |
612 | | * |
613 | | * Likewise if it's a second call to close this connection after we |
614 | | * sent the close indication to the peer already, we are in state |
615 | | * LRS_AWAITING_CLOSE_ACK and will skip doing this a second time. |
616 | | */ |
617 | |
|
618 | 0 | if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol) && |
619 | 0 | lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol). |
620 | 0 | close_via_role_protocol(wsi, reason)) { |
621 | 0 | lwsl_wsi_info(wsi, "close_via_role took over (sockfd %d)", |
622 | 0 | wsi->desc.sockfd); |
623 | 0 | return; |
624 | 0 | } |
625 | | |
626 | 0 | just_kill_connection: |
627 | |
|
628 | 0 | lwsl_wsi_debug(wsi, "real just_kill_connection A: (sockfd %d)", |
629 | 0 | wsi->desc.sockfd); |
630 | |
|
631 | | #if defined(LWS_WITH_THREADPOOL) && defined(LWS_HAVE_PTHREAD_H) |
632 | | lws_threadpool_wsi_closing(wsi); |
633 | | #endif |
634 | | |
635 | |
|
636 | 0 | #if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)) |
637 | 0 | if (lwsi_role_http(wsi) && lwsi_role_server(wsi) && |
638 | 0 | wsi->http.fop_fd != NULL) |
639 | 0 | lws_vfs_file_close(&wsi->http.fop_fd); |
640 | 0 | #endif |
641 | |
|
642 | 0 | lws_sul_cancel(&wsi->sul_connect_timeout); |
643 | | #if defined(WIN32) |
644 | | lws_sul_cancel(&wsi->win32_sul_connect_async_check); |
645 | | #endif |
646 | | #if defined(LWS_WITH_SYS_ASYNC_DNS) |
647 | | lws_async_dns_cancel(wsi); |
648 | | #endif |
649 | |
|
650 | | #if defined(LWS_WITH_HTTP_PROXY) |
651 | | if (wsi->http.buflist_post_body) |
652 | | lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body); |
653 | | #endif |
654 | |
|
655 | 0 | if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_kill_connection)) |
656 | 0 | lws_rops_func_fidx(wsi->role_ops, |
657 | 0 | LWS_ROPS_close_kill_connection). |
658 | 0 | close_kill_connection(wsi, reason); |
659 | |
|
660 | 0 | n = 0; |
661 | |
|
662 | 0 | if (!wsi->told_user_closed && wsi->user_space && |
663 | 0 | wsi->protocol_bind_balance && wsi->a.protocol) { |
664 | 0 | lwsl_debug("%s: %s: DROP_PROTOCOL %s\n", __func__, lws_wsi_tag(wsi), |
665 | 0 | wsi->a.protocol ? wsi->a.protocol->name: "NULL"); |
666 | 0 | if (wsi->a.protocol) |
667 | 0 | wsi->a.protocol->callback(wsi, |
668 | 0 | wsi->role_ops->protocol_unbind_cb[ |
669 | 0 | !!lwsi_role_server(wsi)], |
670 | 0 | wsi->user_space, (void *)__func__, 0); |
671 | 0 | wsi->protocol_bind_balance = 0; |
672 | 0 | } |
673 | |
|
674 | 0 | #if defined(LWS_WITH_CLIENT) |
675 | 0 | if (( |
676 | 0 | #if defined(LWS_ROLE_WS) |
677 | | /* |
678 | | * If our goal is a ws upgrade, effectively we did not reach |
679 | | * ESTABLISHED if we did not get the upgrade server reply |
680 | | */ |
681 | 0 | (lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY && |
682 | 0 | wsi->role_ops == &role_ops_ws) || |
683 | 0 | #endif |
684 | 0 | lwsi_state(wsi) == LRS_WAITING_DNS || |
685 | 0 | lwsi_state(wsi) == LRS_WAITING_CONNECT) && |
686 | 0 | !wsi->already_did_cce && wsi->a.protocol && |
687 | 0 | !wsi->close_is_redirect) { |
688 | 0 | static const char _reason[] = "closed before established"; |
689 | |
|
690 | 0 | lwsl_wsi_debug(wsi, "closing in unestablished state 0x%x", |
691 | 0 | lwsi_state(wsi)); |
692 | 0 | wsi->socket_is_permanently_unusable = 1; |
693 | |
|
694 | 0 | lws_inform_client_conn_fail(wsi, |
695 | 0 | (void *)_reason, sizeof(_reason) - 1); |
696 | 0 | } |
697 | 0 | #endif |
698 | | |
699 | | /* |
700 | | * Testing with ab shows that we have to stage the socket close when |
701 | | * the system is under stress... shutdown any further TX, change the |
702 | | * state to one that won't emit anything more, and wait with a timeout |
703 | | * for the POLLIN to show a zero-size rx before coming back and doing |
704 | | * the actual close. |
705 | | */ |
706 | 0 | if (wsi->role_ops != &role_ops_raw_skt && !lwsi_role_client(wsi) && |
707 | 0 | lwsi_state(wsi) != LRS_SHUTDOWN && |
708 | 0 | lwsi_state(wsi) != LRS_UNCONNECTED && |
709 | 0 | reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY && |
710 | 0 | !wsi->socket_is_permanently_unusable) { |
711 | |
|
712 | 0 | #if defined(LWS_WITH_TLS) |
713 | 0 | if (lws_is_ssl(wsi) && wsi->tls.ssl) { |
714 | 0 | n = 0; |
715 | 0 | switch (__lws_tls_shutdown(wsi)) { |
716 | 0 | case LWS_SSL_CAPABLE_DONE: |
717 | 0 | case LWS_SSL_CAPABLE_ERROR: |
718 | 0 | case LWS_SSL_CAPABLE_MORE_SERVICE_READ: |
719 | 0 | case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE: |
720 | 0 | case LWS_SSL_CAPABLE_MORE_SERVICE: |
721 | 0 | if (wsi->lsp_channel++ == 8) { |
722 | 0 | lwsl_wsi_info(wsi, "avoiding shutdown spin"); |
723 | 0 | lwsi_set_state(wsi, LRS_SHUTDOWN); |
724 | 0 | } |
725 | 0 | break; |
726 | 0 | } |
727 | 0 | } else |
728 | 0 | #endif |
729 | 0 | { |
730 | 0 | lwsl_info("%s: shutdown conn: %s (sk %d, state 0x%x)\n", |
731 | 0 | __func__, lws_wsi_tag(wsi), (int)(lws_intptr_t)wsi->desc.sockfd, |
732 | 0 | lwsi_state(wsi)); |
733 | 0 | if (!wsi->socket_is_permanently_unusable && |
734 | 0 | lws_socket_is_valid(wsi->desc.sockfd)) { |
735 | 0 | wsi->socket_is_permanently_unusable = 1; |
736 | 0 | n = shutdown(wsi->desc.sockfd, SHUT_WR); |
737 | 0 | } |
738 | 0 | } |
739 | 0 | if (n) |
740 | 0 | lwsl_wsi_debug(wsi, "closing: shutdown (state 0x%x) ret %d", |
741 | 0 | lwsi_state(wsi), LWS_ERRNO); |
742 | | |
743 | | /* |
744 | | * This causes problems on WINCE / ESP32 with disconnection |
745 | | * when the events are half closing connection |
746 | | */ |
747 | 0 | #if !defined(_WIN32_WCE) && !defined(LWS_PLAT_FREERTOS) |
748 | | /* libuv: no event available to guarantee completion */ |
749 | 0 | if (!wsi->socket_is_permanently_unusable && |
750 | 0 | #if defined(LWS_WITH_CLIENT) |
751 | 0 | !wsi->close_is_redirect && |
752 | 0 | #endif |
753 | 0 | lws_socket_is_valid(wsi->desc.sockfd) && |
754 | 0 | lwsi_state(wsi) != LRS_SHUTDOWN && |
755 | 0 | (context->event_loop_ops->flags & LELOF_ISPOLL)) { |
756 | 0 | __lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN); |
757 | 0 | lwsi_set_state(wsi, LRS_SHUTDOWN); |
758 | 0 | __lws_set_timeout(wsi, PENDING_TIMEOUT_SHUTDOWN_FLUSH, |
759 | 0 | (int)context->timeout_secs); |
760 | |
|
761 | 0 | return; |
762 | 0 | } |
763 | 0 | #endif |
764 | 0 | } |
765 | | |
766 | 0 | lwsl_wsi_info(wsi, "real just_kill_connection: sockfd %d\n", |
767 | 0 | wsi->desc.sockfd); |
768 | |
|
769 | | #ifdef LWS_WITH_HUBBUB |
770 | | if (wsi->http.rw) { |
771 | | lws_rewrite_destroy(wsi->http.rw); |
772 | | wsi->http.rw = NULL; |
773 | | } |
774 | | #endif |
775 | |
|
776 | 0 | if (wsi->http.pending_return_headers) |
777 | 0 | lws_free_set_NULL(wsi->http.pending_return_headers); |
778 | | |
779 | | /* |
780 | | * we won't be servicing or receiving anything further from this guy |
781 | | * delete socket from the internal poll list if still present |
782 | | */ |
783 | 0 | __lws_ssl_remove_wsi_from_buffered_list(wsi); |
784 | 0 | __lws_wsi_remove_from_sul(wsi); |
785 | | |
786 | | //if (wsi->told_event_loop_closed) // cgi std close case (dummy-callback) |
787 | | // return; |
788 | | |
789 | | /* checking return redundant since we anyway close */ |
790 | 0 | __remove_wsi_socket_from_fds(wsi); |
791 | |
|
792 | 0 | lwsi_set_state(wsi, LRS_DEAD_SOCKET); |
793 | 0 | lws_buflist_destroy_all_segments(&wsi->buflist); |
794 | 0 | lws_dll2_remove(&wsi->dll_buflist); |
795 | |
|
796 | 0 | if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_role)) |
797 | 0 | lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_role). |
798 | 0 | close_role(pt, wsi); |
799 | | |
800 | | /* tell the user it's all over for this guy */ |
801 | |
|
802 | 0 | ccb = 0; |
803 | 0 | if ((lwsi_state_est_PRE_CLOSE(wsi) || |
804 | | /* raw skt adopted but didn't complete tls hs should CLOSE */ |
805 | 0 | (wsi->role_ops == &role_ops_raw_skt && !lwsi_role_client(wsi)) || |
806 | 0 | lwsi_state_PRE_CLOSE(wsi) == LRS_WAITING_SERVER_REPLY) && |
807 | 0 | !wsi->told_user_closed && |
808 | 0 | wsi->role_ops->close_cb[lwsi_role_server(wsi)]) { |
809 | 0 | if (!wsi->upgraded_to_http2 || !lwsi_role_client(wsi)) |
810 | 0 | ccb = 1; |
811 | | /* |
812 | | * The network wsi for a client h2 connection shouldn't |
813 | | * call back for its role: the child stream connections |
814 | | * own the role. Otherwise h2 will call back closed |
815 | | * one too many times as the children do it and then |
816 | | * the closing network stream. |
817 | | */ |
818 | 0 | } |
819 | |
|
820 | 0 | if (!wsi->told_user_closed && |
821 | 0 | !lws_dll2_is_detached(&wsi->vh_awaiting_socket)) |
822 | | /* |
823 | | * He's a guy who got started with dns, but failed or is
824 | | * caught with a shutdown before he got the result. We have
825 | | * to issue him a close cb
826 | | */ |
827 | 0 | ccb = 1; |
828 | |
|
829 | 0 | lwsl_wsi_info(wsi, "cce=%d", ccb); |
830 | |
|
831 | 0 | pro = wsi->a.protocol; |
832 | |
|
833 | 0 | if (wsi->already_did_cce) |
834 | | /* |
835 | | * If we handled this by CLIENT_CONNECTION_ERROR, it's |
836 | | * mutually exclusive with CLOSE |
837 | | */ |
838 | 0 | ccb = 0; |
839 | |
|
840 | 0 | #if defined(LWS_WITH_CLIENT) |
841 | 0 | if (!wsi->close_is_redirect && !ccb && |
842 | 0 | (lwsi_state_PRE_CLOSE(wsi) & LWSIFS_NOT_EST) && |
843 | 0 | lwsi_role_client(wsi)) { |
844 | 0 | lws_inform_client_conn_fail(wsi, "Closed before conn", 18); |
845 | 0 | } |
846 | 0 | #endif |
847 | 0 | if (ccb |
848 | 0 | #if defined(LWS_WITH_CLIENT) |
849 | 0 | && !wsi->close_is_redirect |
850 | 0 | #endif |
851 | 0 | ) { |
852 | |
|
853 | 0 | if (!wsi->a.protocol && wsi->a.vhost && wsi->a.vhost->protocols) |
854 | 0 | pro = &wsi->a.vhost->protocols[0]; |
855 | |
|
856 | 0 | if (pro && pro->callback) |
857 | 0 | pro->callback(wsi, |
858 | 0 | wsi->role_ops->close_cb[lwsi_role_server(wsi)], |
859 | 0 | wsi->user_space, NULL, 0); |
860 | 0 | wsi->told_user_closed = 1; |
861 | 0 | } |
862 | |
|
863 | 0 | #if defined(LWS_ROLE_RAW_FILE) |
864 | 0 | async_close: |
865 | 0 | #endif |
866 | |
|
867 | 0 | #if defined(LWS_WITH_SECURE_STREAMS) |
868 | 0 | if (wsi->for_ss) { |
869 | 0 | lwsl_wsi_debug(wsi, "for_ss"); |
870 | | /* |
871 | | * We were adopted for a particular ss, but, eg, we may not |
872 | | * have succeeded with the connection... we are closing which is |
873 | | * good, but we have to invalidate any pointer the related ss |
874 | | * handle may be holding on us |
875 | | */ |
876 | | #if defined(LWS_WITH_SECURE_STREAMS_PROXY_API) |
877 | | |
878 | | if (wsi->client_proxy_onward) { |
		/*
		 * We are an onward proxied wsi at the proxy,
		 * opaque is proxying "conn", we must remove its pointer
		 * to us since we are destroying
		 */
884 | | lws_proxy_clean_conn_ss(wsi); |
885 | | } else |
886 | | |
887 | | if (wsi->client_bound_sspc) { |
888 | | lws_sspc_handle_t *h = (lws_sspc_handle_t *)wsi->a.opaque_user_data; |
889 | | |
890 | | if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) { |
891 | | |
892 | | #if defined(LWS_WITH_SYS_METRICS) |
893 | | /* |
894 | | * If any hanging caliper measurement, dump it, and free any tags |
895 | | */ |
896 | | lws_metrics_caliper_report_hist(h->cal_txn, (struct lws *)NULL); |
897 | | #endif |
898 | | |
899 | | h->txp_path.priv_onw = NULL; |
900 | | //wsi->a.opaque_user_data = NULL; |
901 | | } |
902 | | } else |
903 | | #endif |
904 | 0 | { |
905 | 0 | hh = (lws_ss_handle_t *)wsi->a.opaque_user_data; |
906 | |
|
907 | 0 | if (hh) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) { |
908 | | |
909 | | /* |
910 | | * ss level: only reports if dangling caliper |
911 | | * not already reported |
912 | | */ |
913 | 0 | lws_metrics_caliper_report_hist(hh->cal_txn, wsi); |
914 | |
|
915 | 0 | hh->wsi = NULL; |
916 | 0 | wsi->a.opaque_user_data = NULL; |
917 | 0 | } |
918 | 0 | } |
919 | 0 | } |
920 | 0 | #endif |
921 | | |
922 | |
|
923 | 0 | lws_remove_child_from_any_parent(wsi); |
924 | 0 | wsi->socket_is_permanently_unusable = 1; |
925 | |
|
926 | 0 | if (wsi->a.context->event_loop_ops->wsi_logical_close) |
927 | 0 | if (wsi->a.context->event_loop_ops->wsi_logical_close(wsi)) |
928 | 0 | return; |
929 | | |
930 | 0 | __lws_close_free_wsi_final(wsi); |
931 | |
|
932 | 0 | #if defined(LWS_WITH_SECURE_STREAMS) |
933 | 0 | if (hh && hh->ss_dangling_connected && |
934 | 0 | lws_ss_event_helper(hh, LWSSSCS_DISCONNECTED) == LWSSSSRET_DESTROY_ME) |
935 | 0 | lws_ss_destroy(&hh); |
936 | 0 | #endif |
937 | 0 | } |
938 | | |
939 | | |
940 | | /* cx + vh lock */ |
941 | | |
942 | | void |
943 | | __lws_close_free_wsi_final(struct lws *wsi) |
944 | 0 | { |
945 | 0 | int n; |
946 | |
|
947 | | #if defined(LWS_WITH_ASYNC_QUEUE) |
948 | | lws_async_worker_wait_and_reap(wsi); |
949 | | #endif |
950 | |
|
951 | 0 | if (!wsi->shadow && |
952 | 0 | lws_socket_is_valid(wsi->desc.sockfd) && !lws_ssl_close(wsi)) { |
953 | 0 | lwsl_wsi_debug(wsi, "fd %d", wsi->desc.sockfd); |
954 | |
|
955 | 0 | __remove_wsi_socket_from_fds(wsi); |
956 | 0 | if (lws_socket_is_valid(wsi->desc.sockfd)) |
957 | 0 | delete_from_fd(wsi->a.context, wsi->desc.sockfd); |
958 | | |
959 | | /* |
960 | | * if this is the pt pipe, skip the actual close, |
961 | | * go through the motions though so we will reach 0 open wsi |
962 | | * on the pt, and trigger the pt destroy to close the pipe fds |
963 | | */ |
964 | 0 | if (!lws_plat_pipe_is_fd_assocated(wsi->a.context, wsi->tsi, |
965 | 0 | wsi->desc.sockfd)) { |
966 | 0 | n = compatible_close(wsi->desc.sockfd); |
967 | 0 | if (n) |
968 | 0 | lwsl_wsi_debug(wsi, "closing: close ret %d", |
969 | 0 | LWS_ERRNO); |
970 | 0 | } |
971 | |
|
972 | 0 | #if !defined(LWS_PLAT_FREERTOS) && !defined(WIN32) && !defined(LWS_PLAT_OPTEE) |
973 | 0 | delete_from_fdwsi(wsi->a.context, wsi); |
974 | 0 | #endif |
975 | |
|
976 | 0 | sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd); |
977 | 0 | } |
978 | | |
979 | | /* ... if we're closing the cancel pipe, account for it */ |
980 | |
|
981 | 0 | { |
982 | 0 | struct lws_context_per_thread *pt = |
983 | 0 | &wsi->a.context->pt[(int)wsi->tsi]; |
984 | |
|
985 | 0 | if (pt->pipe_wsi == wsi) |
986 | 0 | pt->pipe_wsi = NULL; |
987 | 0 | if (pt->dummy_pipe_fds[0] == wsi->desc.sockfd) |
988 | 0 | { |
989 | 0 | #if !defined(LWS_PLAT_FREERTOS) |
990 | 0 | pt->dummy_pipe_fds[0] = LWS_SOCK_INVALID; |
991 | 0 | #endif |
992 | 0 | } |
993 | 0 | } |
994 | |
|
995 | 0 | wsi->desc.sockfd = LWS_SOCK_INVALID; |
996 | |
|
997 | 0 | #if defined(LWS_WITH_CLIENT) |
998 | 0 | lws_free_set_NULL(wsi->cli_hostname_copy); |
999 | 0 | if (wsi->close_is_redirect) { |
1000 | |
|
1001 | 0 | wsi->close_is_redirect = 0; |
1002 | |
|
1003 | 0 | lwsl_wsi_info(wsi, "picking up redirection"); |
1004 | |
|
1005 | 0 | lws_role_transition(wsi, LWSIFR_CLIENT, LRS_UNCONNECTED, |
1006 | 0 | &role_ops_h1); |
1007 | |
|
1008 | 0 | #if defined(LWS_WITH_HTTP2) |
1009 | 0 | if (wsi->client_mux_substream_was) |
1010 | 0 | wsi->h2.END_STREAM = wsi->h2.END_HEADERS = 0; |
1011 | 0 | #endif |
1012 | 0 | #if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT) |
1013 | 0 | if (wsi->mux.parent_wsi) { |
1014 | 0 | lws_wsi_mux_sibling_disconnect(wsi); |
1015 | 0 | wsi->mux.parent_wsi = NULL; |
1016 | 0 | } |
1017 | 0 | #endif |
1018 | |
|
1019 | 0 | #if defined(LWS_WITH_TLS) |
1020 | 0 | memset(&wsi->tls, 0, sizeof(wsi->tls)); |
1021 | 0 | #endif |
1022 | | |
1023 | | // wsi->a.protocol = NULL; |
1024 | 0 | if (wsi->a.protocol) |
1025 | 0 | lws_bind_protocol(wsi, wsi->a.protocol, "client_reset"); |
1026 | 0 | wsi->pending_timeout = NO_PENDING_TIMEOUT; |
1027 | 0 | wsi->hdr_parsing_completed = 0; |
1028 | |
|
1029 | 0 | #if defined(LWS_WITH_TLS) |
1030 | 0 | if (wsi->stash->cis[CIS_ALPN]) |
1031 | 0 | lws_strncpy(wsi->alpn, wsi->stash->cis[CIS_ALPN], |
1032 | 0 | sizeof(wsi->alpn)); |
1033 | 0 | #endif |
1034 | |
|
1035 | 0 | if (lws_header_table_attach(wsi, 0)) { |
1036 | 0 | lwsl_wsi_err(wsi, "failed to get ah"); |
1037 | 0 | return; |
1038 | 0 | } |
1039 | | // } |
1040 | | //_lws_header_table_reset(wsi->http.ah); |
1041 | | |
1042 | 0 | #if defined(LWS_WITH_TLS) |
1043 | 0 | wsi->tls.use_ssl = (unsigned int)wsi->flags; |
1044 | 0 | #endif |
1045 | |
|
1046 | | #if defined(LWS_WITH_TLS_JIT_TRUST) |
1047 | | if (wsi->stash && wsi->stash->cis[CIS_ADDRESS]) { |
1048 | | struct lws_vhost *vh = NULL; |
1049 | | lws_tls_jit_trust_vhost_bind(wsi->a.context, |
1050 | | wsi->stash->cis[CIS_ADDRESS], |
1051 | | &vh); |
1052 | | if (vh) { |
1053 | | if (!vh->count_bound_wsi && vh->grace_after_unref) { |
1054 | | lwsl_wsi_info(wsi, "%s in use\n", |
1055 | | vh->lc.gutag); |
1056 | | lws_sul_cancel(&vh->sul_unref); |
1057 | | } |
1058 | | vh->count_bound_wsi++; |
1059 | | wsi->a.vhost = vh; |
1060 | | } |
1061 | | } |
1062 | | #endif |
1063 | |
|
1064 | 0 | return; |
1065 | 0 | } |
1066 | 0 | #endif |
1067 | | |
1068 | | /* outermost destroy notification for wsi (user_space still intact) */ |
1069 | 0 | if (wsi->a.vhost) |
1070 | 0 | wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_WSI_DESTROY, |
1071 | 0 | wsi->user_space, NULL, 0); |
1072 | |
|
1073 | | #ifdef LWS_WITH_CGI |
1074 | | if (wsi->http.cgi) { |
1075 | | lws_spawn_piped_destroy(&wsi->http.cgi->lsp); |
1076 | | lws_sul_cancel(&wsi->http.cgi->sul_grace); |
1077 | | lws_free_set_NULL(wsi->http.cgi); |
1078 | | } |
1079 | | #endif |
1080 | |
|
1081 | | #if defined(LWS_WITH_SYS_FAULT_INJECTION) |
1082 | | lws_fi_destroy(&wsi->fic); |
1083 | | #endif |
1084 | |
|
1085 | 0 | __lws_wsi_remove_from_sul(wsi); |
1086 | 0 | sanity_assert_no_wsi_traces(wsi->a.context, wsi); |
1087 | 0 | __lws_free_wsi(wsi); |
1088 | 0 | } |
1089 | | |
1090 | | |
1091 | | void |
1092 | | lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *caller) |
1093 | 0 | { |
1094 | 0 | struct lws_context *cx = wsi->a.context; |
1095 | 0 | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
1096 | |
|
1097 | 0 | lws_context_lock(cx, __func__); |
1098 | |
|
1099 | 0 | lws_pt_lock(pt, __func__); |
1100 | | /* may destroy vhost, cannot hold vhost lock outside it */ |
1101 | 0 | __lws_close_free_wsi(wsi, reason, caller); |
1102 | 0 | lws_pt_unlock(pt); |
1103 | |
|
1104 | 0 | lws_context_unlock(cx); |
1105 | 0 | } |
1106 | | |
1107 | | |