/src/libwebsockets/lib/core-net/wsi-timeout.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include "private-lib-core.h" |
26 | | |
/*
 * Detach this wsi from every scheduled-event (sul) list it may be on, so no
 * timer callback can fire against it afterwards.  Used during wsi teardown.
 *
 * Caller context: the "__" prefix follows this file's convention for
 * internal helpers called with the relevant lock already held —
 * NOTE(review): confirm against callers.
 */
void
__lws_wsi_remove_from_sul(struct lws *wsi)
{
	/* each cancel is a no-op if that sul is not currently scheduled */
	lws_sul_cancel(&wsi->sul_timeout);
	lws_sul_cancel(&wsi->sul_hrtimer);
	lws_sul_cancel(&wsi->sul_validity);
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	lws_sul_cancel(&wsi->sul_fault_timedclose);
#endif
}
37 | | |
38 | | /* |
39 | | * hrtimer |
40 | | */ |
41 | | |
42 | | static void |
43 | | lws_sul_hrtimer_cb(lws_sorted_usec_list_t *sul) |
44 | 0 | { |
45 | 0 | struct lws *wsi = lws_container_of(sul, struct lws, sul_hrtimer); |
46 | |
|
47 | 0 | if (wsi->a.protocol && |
48 | 0 | wsi->a.protocol->callback(wsi, LWS_CALLBACK_TIMER, |
49 | 0 | wsi->user_space, NULL, 0)) |
50 | 0 | __lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, |
51 | 0 | "hrtimer cb errored"); |
52 | 0 | } |
53 | | |
54 | | void |
55 | | __lws_set_timer_usecs(struct lws *wsi, lws_usec_t us) |
56 | 0 | { |
57 | 0 | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
58 | |
|
59 | 0 | wsi->sul_hrtimer.cb = lws_sul_hrtimer_cb; |
60 | 0 | __lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED], |
61 | 0 | &wsi->sul_hrtimer, us); |
62 | 0 | } |
63 | | |
64 | | void |
65 | | lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs) |
66 | 0 | { |
67 | 0 | if ((int64_t)usecs == (int64_t)LWS_SET_TIMER_USEC_CANCEL) |
68 | 0 | lws_sul_cancel(&wsi->sul_hrtimer); |
69 | 0 | else |
70 | 0 | __lws_set_timer_usecs(wsi, usecs); |
71 | 0 | } |
72 | | |
73 | | /* |
74 | | * wsi timeout |
75 | | */ |
76 | | |
/*
 * Sul callback fired when the wsi's pending timeout (set via
 * lws_set_timeout() et al) expires: log what we were waiting for, mark the
 * socket unusable for most timeout reasons, and close the wsi under the
 * context + pt locks.
 */
static void
lws_sul_wsitimeout_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_timeout);
	struct lws_context *cx = wsi->a.context;
	struct lws_context_per_thread *pt = &cx->pt[(int)wsi->tsi];

	/* no need to log normal idle keepalive timeout */
	// if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_wsi_info(wsi, "TIMEDOUT WAITING %d, dhdr %d, ah %p, wl %d",
			      wsi->pending_timeout,
			      wsi->hdr_parsing_completed, wsi->http.ah,
			      pt->http.ah_wait_list_length);
#if defined(LWS_WITH_CGI)
	if (wsi->http.cgi)
		lwsl_wsi_notice(wsi, "CGI timeout: %s", wsi->http.cgi->summary);
#endif
#else
	/* no http roles built: shorter log with no header-parsing state */
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_wsi_info(wsi, "TIMEDOUT WAITING on %d ",
			      wsi->pending_timeout);
#endif
	/* cgi timeout */
	/* note: this if guards ONLY the single assignment below */
	if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
		/*
		 * Since he failed a timeout, he already had a chance to
		 * do something and was unable to... that includes
		 * situations like half closed connections.  So process
		 * this "failed timeout" close as a violent death and
		 * don't try to do protocol cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
#if defined(LWS_WITH_CLIENT)
	/* let client connection-failure consumers know why we went away */
	if (lwsi_state(wsi) == LRS_WAITING_SSL)
		lws_inform_client_conn_fail(wsi,
			(void *)"Timed out waiting SSL", 21);
	if (lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY)
		lws_inform_client_conn_fail(wsi,
			(void *)"Timed out waiting server reply", 30);
#endif

	/* context lock before pt lock, matching lws_set_timeout() ordering */
	lws_context_lock(cx, __func__);
	lws_pt_lock(pt, __func__);
	__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");
	lws_pt_unlock(pt);
	lws_context_unlock(cx);
}
126 | | |
127 | | void |
128 | | __lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs) |
129 | 0 | { |
130 | 0 | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
131 | |
|
132 | 0 | wsi->sul_timeout.cb = lws_sul_wsitimeout_cb; |
133 | 0 | __lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED], |
134 | 0 | &wsi->sul_timeout, |
135 | 0 | ((lws_usec_t)secs) * LWS_US_PER_SEC); |
136 | |
|
137 | 0 | lwsl_wsi_debug(wsi, "%d secs, reason %d\n", secs, reason); |
138 | |
|
139 | 0 | wsi->pending_timeout = (char)reason; |
140 | 0 | } |
141 | | |
/*
 * Public api: set, reset or cancel the wsi's pending timeout.
 *
 * secs == 0 cancels any pending timeout.  LWS_TO_KILL_SYNC closes and
 * frees the wsi before returning (caller must not touch wsi afterwards).
 * LWS_TO_KILL_ASYNC schedules the timeout with 0 secs, ie, asks the
 * event loop to time the wsi out as soon as it runs.
 */
void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	/* context lock held across the whole operation; pt lock only
	 * around the sul list surgery */
	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!secs)
		goto bail;

	if (secs == LWS_TO_KILL_SYNC) {
		lwsl_wsi_debug(wsi, "TO_KILL_SYNC");
		/* must drop the context lock first: the close path takes it */
		lws_context_unlock(pt->context);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "to sync kill");
		return;
	}

	if (secs == LWS_TO_KILL_ASYNC)
		secs = 0;

	// assert(!secs || !wsi->mux_stream_immortal);
	/* immortal mux streams shouldn't get timeouts; log, don't crash */
	if (secs && wsi->mux_stream_immortal)
		lwsl_wsi_err(wsi, "on immortal stream %d %d", reason, secs);

	lws_pt_lock(pt, __func__);
	__lws_set_timeout(wsi, reason, secs);
	lws_pt_unlock(pt);

bail:
	lws_context_unlock(pt->context);
}
177 | | |
/*
 * Public api: as lws_set_timeout() but with microsecond resolution;
 * us == 0 just cancels any pending timeout.
 *
 * NOTE(review): unlike lws_set_timeout() this neither takes the context
 * lock nor sets wsi->sul_timeout.cb before inserting — it appears to rely
 * on the cb having been set by an earlier __lws_set_timeout(); confirm.
 */
void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!us)
		return;

	lws_pt_lock(pt, __func__);
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_timeout, us);

	lwsl_wsi_notice(wsi, "%llu us, reason %d",
			(unsigned long long)us, reason);

	wsi->pending_timeout = (char)reason;
	lws_pt_unlock(pt);
}
200 | | |
201 | | static void |
202 | | lws_validity_cb(lws_sorted_usec_list_t *sul) |
203 | 0 | { |
204 | 0 | struct lws *wsi = lws_container_of(sul, struct lws, sul_validity); |
205 | 0 | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
206 | 0 | const lws_retry_bo_t *rbo = wsi->retry_policy; |
207 | | |
208 | | /* one of either the ping or hangup validity threshold was crossed */ |
209 | |
|
210 | 0 | if (wsi->validity_hup) { |
211 | 0 | lwsl_wsi_info(wsi, "validity too old"); |
212 | 0 | struct lws_context *cx = wsi->a.context; |
213 | 0 | struct lws_context_per_thread *pt = &cx->pt[(int)wsi->tsi]; |
214 | |
|
215 | 0 | lws_context_lock(cx, __func__); |
216 | 0 | lws_pt_lock(pt, __func__); |
217 | 0 | __lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, |
218 | 0 | "validity timeout"); |
219 | 0 | lws_pt_unlock(pt); |
220 | 0 | lws_context_unlock(cx); |
221 | 0 | return; |
222 | 0 | } |
223 | | |
224 | | /* schedule a protocol-dependent ping */ |
225 | | |
226 | 0 | lwsl_wsi_info(wsi, "scheduling validity check"); |
227 | |
|
228 | 0 | if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive)) |
229 | 0 | lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive). |
230 | 0 | issue_keepalive(wsi, 0); |
231 | | |
232 | | /* |
233 | | * We arrange to come back here after the additional ping to hangup time |
234 | | * and do the hangup, unless we get validated (by, eg, a PONG) and |
235 | | * reset the timer |
236 | | */ |
237 | |
|
238 | 0 | assert(rbo->secs_since_valid_hangup > rbo->secs_since_valid_ping); |
239 | | |
240 | 0 | wsi->validity_hup = 1; |
241 | 0 | __lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend], |
242 | 0 | &wsi->sul_validity, |
243 | 0 | ((uint64_t)rbo->secs_since_valid_hangup - |
244 | 0 | rbo->secs_since_valid_ping) * LWS_US_PER_SEC); |
245 | 0 | } |
246 | | |
247 | | /* |
248 | | * The role calls this back to actually confirm validity on a particular wsi |
249 | | * (which may not be the original wsi) |
250 | | */ |
251 | | |
252 | | void |
253 | | _lws_validity_confirmed_role(struct lws *wsi) |
254 | 0 | { |
255 | 0 | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
256 | 0 | const lws_retry_bo_t *rbo = wsi->retry_policy; |
257 | |
|
258 | 0 | if (!rbo || !rbo->secs_since_valid_hangup) |
259 | 0 | return; |
260 | | |
261 | 0 | wsi->validity_hup = 0; |
262 | 0 | wsi->sul_validity.cb = lws_validity_cb; |
263 | |
|
264 | 0 | wsi->validity_hup = rbo->secs_since_valid_ping >= |
265 | 0 | rbo->secs_since_valid_hangup; |
266 | |
|
267 | 0 | lwsl_wsi_info(wsi, "setting validity timer %ds (hup %d)", |
268 | 0 | wsi->validity_hup ? rbo->secs_since_valid_hangup : |
269 | 0 | rbo->secs_since_valid_ping, |
270 | 0 | wsi->validity_hup); |
271 | |
|
272 | 0 | __lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend], |
273 | 0 | &wsi->sul_validity, |
274 | 0 | ((uint64_t)(wsi->validity_hup ? |
275 | 0 | rbo->secs_since_valid_hangup : |
276 | 0 | rbo->secs_since_valid_ping)) * LWS_US_PER_SEC); |
277 | 0 | } |
278 | | |
279 | | void |
280 | | lws_validity_confirmed(struct lws *wsi) |
281 | 0 | { |
282 | | /* |
283 | | * This may be a stream inside a muxed network connection... leave it |
284 | | * to the role to figure out who actually needs to understand their |
285 | | * validity was confirmed. |
286 | | */ |
287 | 0 | if (!wsi->h2_stream_carries_ws && /* only if not encapsulated */ |
288 | 0 | wsi->role_ops && |
289 | 0 | lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive)) |
290 | 0 | lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive). |
291 | 0 | issue_keepalive(wsi, 1); |
292 | 0 | } |