/src/libwebsockets/lib/roles/ws/ops-ws.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include <private-lib-core.h> |
26 | | |
/*
 * Copy the NUL-terminated string str to ptr and advance ptr past the copied
 * characters (the terminating NUL is written but not stepped over).
 *
 * Wrapped in do { } while (0) so the macro behaves as a single statement in
 * unbraced if/else bodies.  Note: str is expanded twice (strcpy + strlen),
 * so do not pass an expression with side effects.
 */
#define LWS_CPYAPP(ptr, str) do { strcpy(ptr, str); ptr += strlen(str); } while (0)
28 | | |
29 | | /* |
30 | | * client-parser.c: lws_ws_client_rx_sm() needs to be roughly kept in |
31 | | * sync with changes here, esp related to ext draining |
32 | | */ |
33 | | |
/*
 * Byte-at-a-time RFC6455 rx state machine (server side; the client twin is
 * lws_ws_client_rx_sm() in client-parser.c, see note above).
 *
 * \param wsi:               the ws connection
 * \param already_processed: ALREADY_PROCESSED_* flags; IGNORE_CHAR means c
 *                           was already consumed (ext drain path), NO_CB
 *                           suppresses the user callback for this pass
 * \param c:                 next byte from the wire
 *
 * Returns 0 to continue, -1 to close the connection (a close reason may
 * have been set with lws_close_reason() first).
 */
int
lws_ws_rx_sm(struct lws *wsi, char already_processed, unsigned char c)
{
	int callback_action = LWS_CALLBACK_RECEIVE;
	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
	unsigned short close_code;
	unsigned char *pp;
	int ret = 0;
	int n = 0;
#if !defined(LWS_WITHOUT_EXTENSIONS)
	int rx_draining_ext = 0;
	int lin;
#endif

	/* empty in/out extension buffers until we have payload to offer */
	pmdrx.eb_in.token = NULL;
	pmdrx.eb_in.len = 0;
	pmdrx.eb_out.token = NULL;
	pmdrx.eb_out.len = 0;

	switch (wsi->lws_rx_parse_state) {
	case LWS_RXPS_NEW:
		/* between frames: first check for deferred ext drain work */
#if !defined(LWS_WITHOUT_EXTENSIONS)
		if (wsi->ws->rx_draining_ext) {
			pmdrx.eb_in.token = NULL;
			pmdrx.eb_in.len = 0;
			pmdrx.eb_out.token = NULL;
			pmdrx.eb_out.len = 0;
			lws_remove_wsi_from_draining_ext_list(wsi);
			rx_draining_ext = 1;
			lwsl_debug("%s: doing draining flow\n", __func__);

			goto drain_extension;
		}
#endif
		switch (wsi->ws->ietf_spec_revision) {
		case 13:
			/*
			 * no prepended frame key any more
			 */
			wsi->ws->all_zero_nonce = 1;
			goto handle_first;

		default:
			/* only RFC6455 (rev 13) framing is understood */
			lwsl_warn("lws_ws_rx_sm: unknown spec version %d\n",
				  wsi->ws->ietf_spec_revision);
			break;
		}
		break;
	case LWS_RXPS_04_mask_1:
		/* collecting the 4-byte masking key, bytes 1..3 */
		wsi->ws->mask[1] = c;
		if (c)
			wsi->ws->all_zero_nonce = 0;
		wsi->lws_rx_parse_state = LWS_RXPS_04_mask_2;
		break;
	case LWS_RXPS_04_mask_2:
		wsi->ws->mask[2] = c;
		if (c)
			wsi->ws->all_zero_nonce = 0;
		wsi->lws_rx_parse_state = LWS_RXPS_04_mask_3;
		break;
	case LWS_RXPS_04_mask_3:
		wsi->ws->mask[3] = c;
		if (c)
			wsi->ws->all_zero_nonce = 0;

		/*
		 * start from the zero'th byte in the XOR key buffer since
		 * this is the start of a frame with a new key
		 */

		wsi->ws->mask_idx = 0;

		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_1;
		break;

	/*
	 *  04 logical framing from the spec (all this is masked when incoming
	 *  and has to be unmasked)
	 *
	 * We ignore the possibility of extension data because we don't
	 * negotiate any extensions at the moment.
	 *
	 *    0                   1                   2                   3
	 *    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
	 *   +-+-+-+-+-------+-+-------------+-------------------------------+
	 *   |F|R|R|R| opcode|R| Payload len |    Extended payload length    |
	 *   |I|S|S|S|  (4)  |S|     (7)     |             (16/63)           |
	 *   |N|V|V|V|       |V|             |   (if payload len==126/127)   |
	 *   | |1|2|3|       |4|             |                               |
	 *   +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
	 *   |     Extended payload length continued, if payload len == 127  |
	 *   + - - - - - - - - - - - - - - - +-------------------------------+
	 *   |                               |         Extension data        |
	 *   +-------------------------------+ - - - - - - - - - - - - - - - +
	 *   :                                                               :
	 *   +---------------------------------------------------------------+
	 *   :                       Application data                        :
	 *   +---------------------------------------------------------------+
	 *
	 *  We pass payload through to userland as soon as we get it, ignoring
	 *  FIN.  It's up to userland to buffer it up if it wants to see a
	 *  whole unfragmented block of the original size (which may be up to
	 *  2^63 long!)
	 */

	case LWS_RXPS_04_FRAME_HDR_1:
handle_first:

		/* first header byte: FIN flag, RSV bits, opcode */
		wsi->ws->opcode = c & 0xf;
		wsi->ws->rsv = c & 0x70;
		wsi->ws->final = !!((c >> 7) & 1);
		wsi->ws->defeat_check_utf8 = 0;

		/* control frames (opcode bit 3 set) may not be fragmented */
		if (((wsi->ws->opcode) & 8) && !wsi->ws->final) {
			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
					 (uint8_t *)"frag ctl", 8);
			return -1;
		}

		switch (wsi->ws->opcode) {
		case LWSWSOPC_TEXT_FRAME:
			/* TEXT payload is utf-8 checked if the option is on */
			wsi->ws->check_utf8 = lws_check_opt(
				wsi->a.context->options,
				LWS_SERVER_OPTION_VALIDATE_UTF8);
			/* fallthru */
		case LWSWSOPC_BINARY_FRAME:
			if (wsi->ws->opcode == LWSWSOPC_BINARY_FRAME)
				wsi->ws->check_utf8 = 0;
			/* a new data message while one is still open: fatal */
			if (wsi->ws->continuation_possible) {
				lws_close_reason(wsi,
					LWS_CLOSE_STATUS_PROTOCOL_ERR,
					(uint8_t *)"bad cont", 8);
				return -1;
			}
			wsi->ws->rsv_first_msg = (c & 0x70);
#if !defined(LWS_WITHOUT_EXTENSIONS)
			/*
			 *  set the expectation that we will have to
			 * fake up the zlib trailer to the inflator for this
			 * frame
			 */
			wsi->ws->pmd_trailer_application = !!(c & 0x40);
#endif
			wsi->ws->frame_is_binary =
			     wsi->ws->opcode == LWSWSOPC_BINARY_FRAME;
			wsi->ws->first_fragment = 1;
			wsi->ws->continuation_possible = !wsi->ws->final;
			break;
		case LWSWSOPC_CONTINUATION:
			/* CONTINUATION only legal while a message is open */
			if (!wsi->ws->continuation_possible) {
				lws_close_reason(wsi,
					LWS_CLOSE_STATUS_PROTOCOL_ERR,
					(uint8_t *)"bad cont", 8);
				return -1;
			}
			break;
		case LWSWSOPC_CLOSE:
			/* CLOSE payload utf-8 state starts fresh */
			wsi->ws->check_utf8 = 0;
			wsi->ws->utf8 = 0;
			break;
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 0xb:
		case 0xc:
		case 0xd:
		case 0xe:
		case 0xf:
			/* reserved opcodes: protocol error */
			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
					 (uint8_t *)"bad opc", 7);
			lwsl_info("illegal opcode\n");
			return -1;
		}

		/* new data message may not start while a FIN is outstanding */
		if (wsi->ws->owed_a_fin &&
		    (wsi->ws->opcode == LWSWSOPC_TEXT_FRAME ||
		     wsi->ws->opcode == LWSWSOPC_BINARY_FRAME)) {
			lwsl_info("hey you owed us a FIN\n");
			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
					 (uint8_t *)"bad fin", 7);
			return -1;
		}
		if ((!(wsi->ws->opcode & 8)) && wsi->ws->final) {
			/* data frame with FIN closes the message */
			wsi->ws->continuation_possible = 0;
			wsi->ws->owed_a_fin = 0;
		}

		if (!wsi->ws->final)
			wsi->ws->owed_a_fin = 1;

		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN;
		/*
		 * RSV bits are only legal when an extension claimed them;
		 * with extensions compiled in only RSV1 (0x40, pmd) is ok,
		 * and only if at least one extension is active
		 */
		if (wsi->ws->rsv &&
		    (
#if !defined(LWS_WITHOUT_EXTENSIONS)
			    !wsi->ws->count_act_ext ||
#endif
			    (wsi->ws->rsv & ~0x40))) {
			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
					 (uint8_t *)"rsv bits", 8);
			return -1;
		}
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN:

		/* second header byte: MASK flag plus 7-bit length code */
		wsi->ws->this_frame_masked = !!(c & 0x80);

		switch (c & 0x7f) {
		case 126:
			/* control frames are not allowed to have big lengths */
			if (wsi->ws->opcode & 8)
				goto illegal_ctl_length;

			wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_2;
			break;
		case 127:
			/* control frames are not allowed to have big lengths */
			if (wsi->ws->opcode & 8)
				goto illegal_ctl_length;

			wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_8;
			break;
		default:
			/* 0..125: the length code is the payload length */
			wsi->ws->rx_packet_length = c & 0x7f;


			if (wsi->ws->this_frame_masked)
				wsi->lws_rx_parse_state =
						LWS_RXPS_07_COLLECT_FRAME_KEY_1;
			else
				if (wsi->ws->rx_packet_length) {
					wsi->lws_rx_parse_state =
					LWS_RXPS_WS_FRAME_PAYLOAD;
				} else {
					/* zero-length frame completes now */
					wsi->lws_rx_parse_state = LWS_RXPS_NEW;
					goto spill;
				}
			break;
		}
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN16_2:
		/* 16-bit extended length, network byte order */
		wsi->ws->rx_packet_length = (size_t)(c << 8);
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_1;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN16_1:
		wsi->ws->rx_packet_length |= c;
		if (wsi->ws->this_frame_masked)
			wsi->lws_rx_parse_state =
					LWS_RXPS_07_COLLECT_FRAME_KEY_1;
		else {
			wsi->lws_rx_parse_state =
				LWS_RXPS_WS_FRAME_PAYLOAD;
		}
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_8:
		/* 64-bit extended length, MSB first; top bit must be 0 */
		if (c & 0x80) {
			lwsl_warn("b63 of length must be zero\n");
			/* kill the connection */
			return -1;
		}
#if defined __LP64__
		wsi->ws->rx_packet_length = ((size_t)c) << 56;
#else
		/* 32-bit size_t cannot hold the high bytes */
		wsi->ws->rx_packet_length = 0;
#endif
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_7;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_7:
#if defined __LP64__
		wsi->ws->rx_packet_length |= ((size_t)c) << 48;
#endif
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_6;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_6:
#if defined __LP64__
		wsi->ws->rx_packet_length |= ((size_t)c) << 40;
#endif
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_5;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_5:
#if defined __LP64__
		wsi->ws->rx_packet_length |= ((size_t)c) << 32;
#endif
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_4;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_4:
		wsi->ws->rx_packet_length |= ((size_t)c) << 24;
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_3;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_3:
		wsi->ws->rx_packet_length |= ((size_t)c) << 16;
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_2;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_2:
		wsi->ws->rx_packet_length |= ((size_t)c) << 8;
		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_1;
		break;

	case LWS_RXPS_04_FRAME_HDR_LEN64_1:
		wsi->ws->rx_packet_length |= ((size_t)c);
		if (wsi->ws->this_frame_masked)
			wsi->lws_rx_parse_state =
					LWS_RXPS_07_COLLECT_FRAME_KEY_1;
		else
			wsi->lws_rx_parse_state = LWS_RXPS_WS_FRAME_PAYLOAD;
		break;

	case LWS_RXPS_07_COLLECT_FRAME_KEY_1:
		/* 4-byte masking key follows the length */
		wsi->ws->mask[0] = c;
		if (c)
			wsi->ws->all_zero_nonce = 0;
		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_2;
		break;

	case LWS_RXPS_07_COLLECT_FRAME_KEY_2:
		wsi->ws->mask[1] = c;
		if (c)
			wsi->ws->all_zero_nonce = 0;
		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_3;
		break;

	case LWS_RXPS_07_COLLECT_FRAME_KEY_3:
		wsi->ws->mask[2] = c;
		if (c)
			wsi->ws->all_zero_nonce = 0;
		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_4;
		break;

	case LWS_RXPS_07_COLLECT_FRAME_KEY_4:
		wsi->ws->mask[3] = c;
		if (c)
			wsi->ws->all_zero_nonce = 0;
		wsi->lws_rx_parse_state = LWS_RXPS_WS_FRAME_PAYLOAD;
		wsi->ws->mask_idx = 0;
		if (wsi->ws->rx_packet_length == 0) {
			/* masked zero-length frame completes immediately */
			wsi->lws_rx_parse_state = LWS_RXPS_NEW;
			goto spill;
		}
		break;


	case LWS_RXPS_WS_FRAME_PAYLOAD:
		assert(wsi->ws->rx_ubuf);

		/* never write past the allocated rx buffer */
		if (wsi->ws->rx_ubuf_head + LWS_PRE >= wsi->ws->rx_ubuf_alloc) {
			lwsl_err("Attempted overflow \n");
			return -1;
		}
		if (!(already_processed & ALREADY_PROCESSED_IGNORE_CHAR)) {
			/* unmask (XOR with rotating key) as we store */
			if (wsi->ws->all_zero_nonce)
				wsi->ws->rx_ubuf[LWS_PRE +
						 (wsi->ws->rx_ubuf_head++)] = c;
			else
				wsi->ws->rx_ubuf[LWS_PRE +
					       (wsi->ws->rx_ubuf_head++)] =
				   c ^ wsi->ws->mask[(wsi->ws->mask_idx++) & 3];

			--wsi->ws->rx_packet_length;
		}

		if (!wsi->ws->rx_packet_length) {
			lwsl_debug("%s: ws fragment length exhausted\n",
				   __func__);
			/* spill because we have the whole frame */
			wsi->lws_rx_parse_state = LWS_RXPS_NEW;
			goto spill;
		}
#if !defined(LWS_WITHOUT_EXTENSIONS)
		if (wsi->ws->rx_draining_ext) {
			lwsl_debug("%s: UNTIL_EXHAUSTED draining\n", __func__);
			goto drain_extension;
		}
#endif
		/*
		 * if there's no protocol max frame size given, we are
		 * supposed to default to context->pt_serv_buf_size
		 */
		if (!wsi->a.protocol->rx_buffer_size &&
		    wsi->ws->rx_ubuf_head != wsi->a.context->pt_serv_buf_size)
			break;

		if (wsi->a.protocol->rx_buffer_size &&
		    wsi->ws->rx_ubuf_head != wsi->a.protocol->rx_buffer_size)
			break;

		/* spill because we filled our rx buffer */
spill:
		/*
		 * is this frame a control packet we should take care of at this
		 * layer?  If so service it and hide it from the user callback
		 */

		lwsl_parser("spill on %s\n", wsi->a.protocol->name);

		switch (wsi->ws->opcode) {
		case LWSWSOPC_CLOSE:

			if (wsi->ws->peer_has_sent_close)
				break;

			wsi->ws->peer_has_sent_close = 1;

			/* close reason text (after the 2-byte code) must be
			 * valid utf-8 when validation is enabled */
			pp = &wsi->ws->rx_ubuf[LWS_PRE];
			if (lws_check_opt(wsi->a.context->options,
					  LWS_SERVER_OPTION_VALIDATE_UTF8) &&
			    wsi->ws->rx_ubuf_head > 2 &&
			    lws_check_utf8(&wsi->ws->utf8, pp + 2,
					   wsi->ws->rx_ubuf_head - 2))
				goto utf8_fail;

			/* is this an acknowledgment of our close? */
			if (lwsi_state(wsi) == LRS_AWAITING_CLOSE_ACK) {
				/*
				 * fine he has told us he is closing too, let's
				 * finish our close
				 */
				lwsl_parser("seen client close ack\n");
				return -1;
			}
			if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
				/* if he sends us 2 CLOSE, kill him */
				return -1;

			if (lws_partial_buffered(wsi)) {
				/*
				 * if we're in the middle of something,
				 * we can't do a normal close response and
				 * have to just close our end.
				 */
				wsi->socket_is_permanently_unusable = 1;
				lwsl_parser("Closing on peer close "
					    "due to pending tx\n");
				return -1;
			}

			if (wsi->ws->rx_ubuf_head >= 2) {
				/* replace reserved / invalid close codes with
				 * PROTOCOL_ERR before echoing back */
				close_code = (unsigned short)((pp[0] << 8) | pp[1]);
				if (close_code < 1000 ||
				    close_code == 1004 ||
				    close_code == 1005 ||
				    close_code == 1006 ||
				    close_code == 1012 ||
				    close_code == 1013 ||
				    close_code == 1014 ||
				    close_code == 1015 ||
				    (close_code >= 1016 && close_code < 3000)
				) {
					pp[0] = (LWS_CLOSE_STATUS_PROTOCOL_ERR >> 8) & 0xff;
					pp[1] = LWS_CLOSE_STATUS_PROTOCOL_ERR & 0xff;
				}
			}

			if (user_callback_handle_rxflow(
					wsi->a.protocol->callback, wsi,
					LWS_CALLBACK_WS_PEER_INITIATED_CLOSE,
					wsi->user_space,
					&wsi->ws->rx_ubuf[LWS_PRE],
					wsi->ws->rx_ubuf_head))
				return -1;

			lwsl_parser("server sees client close packet\n");
			lwsi_set_state(wsi, LRS_RETURNED_CLOSE);
			/* deal with the close packet contents as a PONG */
			wsi->ws->payload_is_close = 1;
			goto process_as_ping;

		case LWSWSOPC_PING:
			lwsl_info("received %d byte ping, sending pong\n",
				  (int)wsi->ws->rx_ubuf_head);

			if (wsi->ws->pong_pending_flag) {
				/*
				 * there is already a pending pong payload
				 * we should just log and drop
				 */
				lwsl_parser("DROP PING since one pending\n");
				goto ping_drop;
			}
process_as_ping:
			/* control packets can only be < 128 bytes long */
			if (wsi->ws->rx_ubuf_head > 128 - 3) {
				lwsl_parser("DROP PING payload too large\n");
				goto ping_drop;
			}

			/* stash the pong payload */
			memcpy(wsi->ws->pong_payload_buf + LWS_PRE,
			       &wsi->ws->rx_ubuf[LWS_PRE],
				wsi->ws->rx_ubuf_head);

			wsi->ws->pong_payload_len = (uint8_t)wsi->ws->rx_ubuf_head;
			wsi->ws->pong_pending_flag = 1;

			/* get it sent as soon as possible */
			lws_callback_on_writable(wsi);
ping_drop:
			wsi->ws->rx_ubuf_head = 0;
			return 0;

		case LWSWSOPC_PONG:
			lwsl_info("received pong\n");
			lwsl_hexdump(&wsi->ws->rx_ubuf[LWS_PRE],
				     wsi->ws->rx_ubuf_head);

			/* any traffic from the peer proves it is alive */
			lws_validity_confirmed(wsi);

			/* issue it */
			callback_action = LWS_CALLBACK_RECEIVE_PONG;
			break;

		case LWSWSOPC_TEXT_FRAME:
		case LWSWSOPC_BINARY_FRAME:
		case LWSWSOPC_CONTINUATION:
			break;

		default:
			lwsl_parser("unknown opc %x\n", wsi->ws->opcode);

			return -1;
		}

		/*
		 * No it's real payload, pass it up to the user callback.
		 *
		 * We have been statefully collecting it in the
		 * LWS_RXPS_WS_FRAME_PAYLOAD clause above.
		 *
		 * It's nicely buffered with the pre-padding taken care of
		 * so it can be sent straight out again using lws_write.
		 *
		 * However, now we have a chunk of it, we want to deal with it
		 * all here.  Since this may be input to permessage-deflate and
		 * there are block limits on that for input and output, we may
		 * need to iterate.
		 */

		pmdrx.eb_in.token = &wsi->ws->rx_ubuf[LWS_PRE];
		pmdrx.eb_in.len = (int)wsi->ws->rx_ubuf_head;

		/* for the non-pm-deflate case */

		pmdrx.eb_out = pmdrx.eb_in;

		if (wsi->ws->opcode == LWSWSOPC_PONG && !pmdrx.eb_in.len)
			goto already_done;
#if !defined(LWS_WITHOUT_EXTENSIONS)
drain_extension:
#endif

		do {

			// lwsl_notice("%s: pmdrx.eb_in.len: %d\n", __func__,
			//	       (int)pmdrx.eb_in.len);

			if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
			    lwsi_state(wsi) == LRS_AWAITING_CLOSE_ACK)
				goto already_done;

			n = PMDR_DID_NOTHING;

#if !defined(LWS_WITHOUT_EXTENSIONS)
			lin = pmdrx.eb_in.len;
			//if (lin)
			//	lwsl_hexdump_notice(ebuf.token, ebuf.len);
			lwsl_ext("%s: +++ passing %d %p to ext\n", __func__,
					pmdrx.eb_in.len, pmdrx.eb_in.token);

			n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &pmdrx, 0);
			lwsl_debug("%s: ext says %d / ebuf.len %d\n", __func__,
					n, pmdrx.eb_out.len);
			if (wsi->ws->rx_draining_ext)
				already_processed &= (char)~ALREADY_PROCESSED_NO_CB;
#endif

			/*
			 * ebuf may be pointing somewhere completely different
			 * now, it's the output
			 */
#if !defined(LWS_WITHOUT_EXTENSIONS)
			if (n < 0) {
				/*
				 * we may rely on this to get RX, just drop
				 * connection
				 */
				wsi->socket_is_permanently_unusable = 1;
				return -1;
			}
			if (n == PMDR_DID_NOTHING)
				/* ie, not PMDR_NOTHING_WE_SHOULD_DO */
				break;
#endif
			lwsl_debug("%s: post ext ret %d, ebuf in %d / out %d\n",
				   __func__, n, pmdrx.eb_in.len,
				   pmdrx.eb_out.len);

#if !defined(LWS_WITHOUT_EXTENSIONS)
			if (rx_draining_ext && !pmdrx.eb_out.len) {
				lwsl_debug("   --- ending drain on 0 read\n");
				goto already_done;
			}

			if (n == PMDR_HAS_PENDING)
				/*
				 * extension had more...
				 * main loop will come back
				 */
				lws_add_wsi_to_draining_ext_list(wsi);
			else
				lws_remove_wsi_from_draining_ext_list(wsi);

			rx_draining_ext = wsi->ws->rx_draining_ext;
#endif

			/* utf-8 validate the (possibly inflated) output */
			if (pmdrx.eb_out.len &&
			    wsi->ws->check_utf8 && !wsi->ws->defeat_check_utf8) {
				if (lws_check_utf8(&wsi->ws->utf8,
						   pmdrx.eb_out.token,
						   (size_t)pmdrx.eb_out.len)) {
					lws_close_reason(wsi,
						LWS_CLOSE_STATUS_INVALID_PAYLOAD,
						(uint8_t *)"bad utf8", 8);
					goto utf8_fail;
				}

				/* we are ending partway through utf-8 character? */
				if (!wsi->ws->rx_packet_length &&
				    wsi->ws->final && wsi->ws->utf8
#if !defined(LWS_WITHOUT_EXTENSIONS)
				    /* if ext not negotiated, going to be UNKNOWN */
				    && (n == PMDR_EMPTY_FINAL || n == PMDR_UNKNOWN)
#endif
				    ) {
					lwsl_info("FINAL utf8 error\n");
					lws_close_reason(wsi,
						LWS_CLOSE_STATUS_INVALID_PAYLOAD,
						(uint8_t *)"partial utf8", 12);
utf8_fail:
					lwsl_notice("utf8 error\n");
					lwsl_hexdump_notice(pmdrx.eb_out.token,
							    (size_t)pmdrx.eb_out.len);

					return -1;
				}
			}

			/* if pmd not enabled, in == out */

			if (n == PMDR_DID_NOTHING
#if !defined(LWS_WITHOUT_EXTENSIONS)
			    ||
			    n == PMDR_NOTHING_WE_SHOULD_DO ||
			    n == PMDR_UNKNOWN
#endif
			    )
				pmdrx.eb_in.len -= pmdrx.eb_out.len;

			if (!wsi->wsistate_pre_close &&
			    (pmdrx.eb_out.len >= 0 ||
			     callback_action == LWS_CALLBACK_RECEIVE_PONG ||
			     n == PMDR_EMPTY_FINAL)) {
				/* NUL just past the payload for user comfort */
				if (pmdrx.eb_out.len)
					pmdrx.eb_out.token[pmdrx.eb_out.len] = '\0';

				if (wsi->a.protocol->callback &&
				    !(already_processed & ALREADY_PROCESSED_NO_CB)) {
					if (callback_action ==
					    LWS_CALLBACK_RECEIVE_PONG)
						lwsl_info("Doing pong callback\n");

					ret = user_callback_handle_rxflow(
						wsi->a.protocol->callback, wsi,
						(enum lws_callback_reasons)
							     callback_action,
						wsi->user_space,
						pmdrx.eb_out.token,
						(size_t)pmdrx.eb_out.len);
				}
				wsi->ws->first_fragment = 0;
			}

#if !defined(LWS_WITHOUT_EXTENSIONS)
			if (!lin)
				break;
#endif

		} while (pmdrx.eb_in.len
#if !defined(LWS_WITHOUT_EXTENSIONS)
			  || rx_draining_ext
#endif
		);

already_done:
		/* this frame's payload has been fully dealt with */
		wsi->ws->rx_ubuf_head = 0;
		break;
	}

	return ret;

illegal_ctl_length:

	lwsl_warn("Control frame with xtended length is illegal\n");
	/* kill the connection */
	return -1;
}
749 | | |
750 | | |
751 | | size_t |
752 | | lws_remaining_packet_payload(struct lws *wsi) |
753 | 0 | { |
754 | 0 | return wsi->ws->rx_packet_length; |
755 | 0 | } |
756 | | |
757 | | int lws_frame_is_binary(struct lws *wsi) |
758 | 0 | { |
759 | 0 | return wsi->ws->frame_is_binary; |
760 | 0 | } |
761 | | |
762 | | void |
763 | | lws_add_wsi_to_draining_ext_list(struct lws *wsi) |
764 | 0 | { |
765 | | #if !defined(LWS_WITHOUT_EXTENSIONS) |
766 | | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
767 | | |
768 | | if (wsi->ws->rx_draining_ext) |
769 | | return; |
770 | | |
771 | | lwsl_debug("%s: RX EXT DRAINING: Adding to list\n", __func__); |
772 | | |
773 | | wsi->ws->rx_draining_ext = 1; |
774 | | wsi->ws->rx_draining_ext_list = pt->ws.rx_draining_ext_list; |
775 | | pt->ws.rx_draining_ext_list = wsi; |
776 | | #endif |
777 | 0 | } |
778 | | |
779 | | void |
780 | | lws_remove_wsi_from_draining_ext_list(struct lws *wsi) |
781 | 0 | { |
782 | | #if !defined(LWS_WITHOUT_EXTENSIONS) |
783 | | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
784 | | struct lws **w = &pt->ws.rx_draining_ext_list; |
785 | | |
786 | | if (!wsi->ws->rx_draining_ext) |
787 | | return; |
788 | | |
789 | | lwsl_debug("%s: RX EXT DRAINING: Removing from list\n", __func__); |
790 | | |
791 | | wsi->ws->rx_draining_ext = 0; |
792 | | |
793 | | /* remove us from context draining ext list */ |
794 | | while (*w) { |
795 | | if (*w == wsi) { |
796 | | /* if us, point it instead to who we were pointing to */ |
797 | | *w = wsi->ws->rx_draining_ext_list; |
798 | | break; |
799 | | } |
800 | | w = &((*w)->ws->rx_draining_ext_list); |
801 | | } |
802 | | wsi->ws->rx_draining_ext_list = NULL; |
803 | | #endif |
804 | 0 | } |
805 | | |
806 | | static int |
807 | | lws_0405_frame_mask_generate(struct lws *wsi) |
808 | 0 | { |
809 | 0 | size_t n; |
810 | | /* fetch the per-frame nonce */ |
811 | |
|
812 | 0 | n = lws_get_random(lws_get_context(wsi), wsi->ws->mask, 4); |
813 | 0 | if (n != 4) { |
814 | 0 | lwsl_parser("Unable to read from random device %s %d\n", |
815 | 0 | SYSTEM_RANDOM_FILEPATH, (int)n); |
816 | 0 | return 1; |
817 | 0 | } |
818 | | |
819 | | /* start masking from first byte of masking key buffer */ |
820 | 0 | wsi->ws->mask_idx = 0; |
821 | |
|
822 | 0 | return 0; |
823 | 0 | } |
824 | | |
/*
 * Finalize a server-side ws connection after handshake: mark it
 * ESTABLISHED, allocate the per-connection rx frame buffer and inform
 * user code via LWS_CALLBACK_ESTABLISHED.
 *
 * Returns 0 on success, 1 on OOM or if the user callback rejects the
 * connection.
 */
int
lws_server_init_wsi_for_ws(struct lws *wsi)
{
	int n;

	lwsi_set_state(wsi, LRS_ESTABLISHED);

	/*
	 * create the frame buffer for this connection according to the
	 * size mentioned in the protocol definition.  If 0 there, use
	 * a big default for compatibility
	 */

	n = (int)wsi->a.protocol->rx_buffer_size;
	if (!n)
		n = (int)wsi->a.context->pt_serv_buf_size;
	n += LWS_PRE;
	/* the +4 leaves room for faking up a zlib 0x0000ffff trailer */
	wsi->ws->rx_ubuf = lws_malloc((unsigned int)n + 4 /* 0x0000ffff zlib */, "rx_ubuf");
	if (!wsi->ws->rx_ubuf) {
		lwsl_err("Out of Mem allocating rx buffer %d\n", n);
		return 1;
	}
	wsi->ws->rx_ubuf_alloc = (uint32_t)n;

	/* notify user code that we're ready to roll */

	if (wsi->a.protocol->callback)
		if (wsi->a.protocol->callback(wsi, LWS_CALLBACK_ESTABLISHED,
					      wsi->user_space,
#ifdef LWS_WITH_TLS
					      wsi->tls.ssl,
#else
					      NULL,
#endif
					      wsi->h2_stream_carries_ws))
			return 1;

	lws_validity_confirmed(wsi);
	lwsl_debug("ws established\n");

	return 0;
}
867 | | |
868 | | |
869 | | |
/*
 * Nonzero when the frame just delivered carried FIN, its payload is fully
 * consumed, and (with extensions) no inflate drain is still outstanding.
 */
int
lws_is_final_fragment(struct lws *wsi)
{
#if !defined(LWS_WITHOUT_EXTENSIONS)
	lwsl_debug("%s: final %d, rx pk length %ld, draining %ld\n", __func__,
		   wsi->ws->final, (long)wsi->ws->rx_packet_length,
		   (long)wsi->ws->rx_draining_ext);
	return wsi->ws->final && !wsi->ws->rx_packet_length &&
	       !wsi->ws->rx_draining_ext;
#else
	return wsi->ws->final && !wsi->ws->rx_packet_length;
#endif
}
883 | | |
884 | | int |
885 | | lws_is_first_fragment(struct lws *wsi) |
886 | 0 | { |
887 | 0 | return wsi->ws->first_fragment; |
888 | 0 | } |
889 | | |
890 | | unsigned char |
891 | | lws_get_reserved_bits(struct lws *wsi) |
892 | 0 | { |
893 | 0 | return wsi->ws->rsv; |
894 | 0 | } |
895 | | |
896 | | uint8_t |
897 | | lws_get_opcode(struct lws *wsi) |
898 | 0 | { |
899 | 0 | return wsi->ws->opcode; |
900 | 0 | } |
901 | | |
902 | | int |
903 | | lws_get_close_length(struct lws *wsi) |
904 | 0 | { |
905 | 0 | return wsi->ws->close_in_ping_buffer_len; |
906 | 0 | } |
907 | | |
908 | | unsigned char * |
909 | | lws_get_close_payload(struct lws *wsi) |
910 | 0 | { |
911 | 0 | return &wsi->ws->ping_payload_buf[LWS_PRE]; |
912 | 0 | } |
913 | | |
914 | | void |
915 | | lws_close_reason(struct lws *wsi, enum lws_close_status status, |
916 | | unsigned char *buf, size_t len) |
917 | 0 | { |
918 | 0 | unsigned char *p, *start; |
919 | 0 | int budget = sizeof(wsi->ws->ping_payload_buf) - LWS_PRE; |
920 | |
|
921 | 0 | assert(lwsi_role_ws(wsi)); |
922 | | |
923 | 0 | start = p = &wsi->ws->ping_payload_buf[LWS_PRE]; |
924 | |
|
925 | 0 | *p++ = (uint8_t)((((int)status) >> 8) & 0xff); |
926 | 0 | *p++ = (uint8_t)(((int)status) & 0xff); |
927 | |
|
928 | 0 | if (buf) |
929 | 0 | while (len-- && p < start + budget) |
930 | 0 | *p++ = *buf++; |
931 | |
|
932 | 0 | wsi->ws->close_in_ping_buffer_len = (uint8_t)lws_ptr_diff(p, start); |
933 | 0 | } |
934 | | |
935 | | static int |
936 | | lws_is_ws_with_ext(struct lws *wsi) |
937 | 0 | { |
938 | 0 | #if defined(LWS_WITHOUT_EXTENSIONS) |
939 | 0 | return 0; |
940 | | #else |
941 | | return lwsi_role_ws(wsi) && !!wsi->ws->count_act_ext; |
942 | | #endif |
943 | 0 | } |
944 | | |
/*
 * POLLIN handler for the ws role.
 *
 * Services events on the wsi in strict priority order: pending POLLOUT
 * work first, then (with extensions) TX/RX extension draining, then any
 * buffered rx in the wsi buflist, and only then fresh reads from the
 * socket.  Loops back to "read:" while the tls layer reports buffered
 * pending data, bounded by a sanity counter so a misbehaving connection
 * cannot spin us forever.
 *
 * Returns one of the LWS_HPI_RET_* codes telling the caller whether the
 * event was handled, the wsi should be closed, or the wsi already died.
 */
static int
rops_handle_POLLIN_ws(struct lws_context_per_thread *pt, struct lws *wsi,
		      struct lws_pollfd *pollfd)
{
	unsigned int pending = 0;
	struct lws_tokens ebuf;
	char buffered = 0;		/* nonzero: ebuf points into buflist, not serv_buf */
	int n = 0, m, sanity = 10;	/* sanity: max read loops per service */
#if defined(LWS_WITH_HTTP2)
	struct lws *wsi1;
#endif

	if (!wsi->ws) {
		lwsl_err("ws role wsi with no ws\n");
		return LWS_HPI_RET_PLEASE_CLOSE_ME;
	}

	// lwsl_notice("%s: %s\n", __func__, wsi->a.protocol->name);

	//lwsl_info("%s: wsistate 0x%x, pollout %d\n", __func__,
	//	   wsi->wsistate, pollfd->revents & LWS_POLLOUT);

	/*
	 * something went wrong with parsing the handshake, and
	 * we ended up back in the event loop without completing it
	 */
	if (lwsi_state(wsi) == LRS_PRE_WS_SERVING_ACCEPT) {
		wsi->socket_is_permanently_unusable = 1;
		return LWS_HPI_RET_PLEASE_CLOSE_ME;
	}

	ebuf.token = NULL;
	ebuf.len = 0;

	/* client still connecting: delegate to the http client machinery */
	if (lwsi_state(wsi) == LRS_WAITING_CONNECT) {
#if defined(LWS_WITH_CLIENT)
		if ((pollfd->revents & LWS_POLLOUT) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			lwsl_debug("POLLOUT event closed it\n");
			return LWS_HPI_RET_PLEASE_CLOSE_ME;
		}

		n = lws_http_client_socket_service(wsi, pollfd);
		if (n)
			return LWS_HPI_RET_WSI_ALREADY_DIED;
#endif
		return LWS_HPI_RET_HANDLED;
	}

	/* 1: something requested a callback when it was OK to write */

	if ((pollfd->revents & LWS_POLLOUT) &&
	    lwsi_state_can_handle_POLLOUT(wsi) &&
	    lws_handle_POLLOUT_event(wsi, pollfd)) {
		if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);

		return LWS_HPI_RET_PLEASE_CLOSE_ME;
	}

	if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
	    lwsi_state(wsi) == LRS_WAITING_TO_SEND_CLOSE) {
		/*
		 * we stopped caring about anything except control
		 * packets.  Force flow control off, defeat tx
		 * draining.
		 */
		lws_rx_flow_control(wsi, 1);
#if !defined(LWS_WITHOUT_EXTENSIONS)
		if (wsi->ws)
			wsi->ws->tx_draining_ext = 0;
#endif
	}
#if !defined(LWS_WITHOUT_EXTENSIONS)
	if (wsi->ws->tx_draining_ext) {
		lws_handle_POLLOUT_event(wsi, pollfd);
		//lwsl_notice("%s: tx drain\n", __func__);
		/*
		 * We cannot deal with new RX until the TX ext path has
		 * been drained.  It's because new rx will, eg, crap on
		 * the wsi rx buf that may be needed to retain state.
		 *
		 * TX ext drain path MUST go through event loop to avoid
		 * blocking.
		 */
		lws_callback_on_writable(wsi);
		return LWS_HPI_RET_HANDLED;
	}
#endif
	if ((pollfd->revents & LWS_POLLIN) && lws_is_flowcontrolled(wsi)) {
		/* We cannot deal with any kind of new RX because we are
		 * RX-flowcontrolled.
		 */
		lwsl_info("%s: flowcontrolled, ignoring rx\n", __func__);

		/* stop POLLIN events arriving until flow control lifts */
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

		return LWS_HPI_RET_HANDLED;
	}

	if (lws_is_flowcontrolled(wsi))
		return LWS_HPI_RET_HANDLED;

#if defined(LWS_WITH_HTTP2)
	if (wsi->mux_substream || wsi->upgraded_to_http2) {
		wsi1 = lws_get_network_wsi(wsi);
		if (wsi1 && lws_has_buffered_out(wsi1))
			/* We cannot deal with any kind of new RX
			 * because we are dealing with a partial send
			 * (new RX may trigger new http_action() that
			 * expect to be able to send)
			 */
			return LWS_HPI_RET_HANDLED;
	}
#endif

#if !defined(LWS_WITHOUT_EXTENSIONS)
	/* 2: RX Extension needs to be drained
	 */

	if (wsi->ws->rx_draining_ext) {

		lwsl_debug("%s: RX EXT DRAINING: Service\n", __func__);
#if defined(LWS_WITH_CLIENT)
		if (lwsi_role_client(wsi)) {
			n = lws_ws_client_rx_sm(wsi, 0);
			if (n < 0)
				/* we closed wsi */
				return LWS_HPI_RET_PLEASE_CLOSE_ME;
		} else
#endif
			n = lws_ws_rx_sm(wsi, ALREADY_PROCESSED_IGNORE_CHAR, 0);

		return LWS_HPI_RET_HANDLED;
	}

	if (wsi->ws->rx_draining_ext)
		/*
		 * We have RX EXT content to drain, but can't do it
		 * right now.  That means we cannot do anything lower
		 * priority either.
		 */
		return LWS_HPI_RET_HANDLED;
#endif

	/* 3: buflist needs to be drained
	 */
read:
	//lws_buflist_describe(&wsi->buflist, wsi, __func__);
	ebuf.len = (int)lws_buflist_next_segment_len(&wsi->buflist,
						     &ebuf.token);
	if (ebuf.len) {
		lwsl_info("draining buflist (len %d)\n", ebuf.len);
		buffered = 1;
		goto drain;
	}

	/* nothing buffered and no readable event: nothing to do */
	if (!(pollfd->revents & pollfd->events & LWS_POLLIN) && !wsi->http.ah)
		return LWS_HPI_RET_HANDLED;

	if (lws_is_flowcontrolled(wsi)) {
		lwsl_info("%s: %p should be rxflow (bm 0x%x)..\n",
			  __func__, wsi, wsi->rxflow_bitmap);
		return LWS_HPI_RET_HANDLED;
	}

	if (!(lwsi_role_client(wsi) &&
	      (lwsi_state(wsi) != LRS_ESTABLISHED &&
	       lwsi_state(wsi) != LRS_AWAITING_CLOSE_ACK &&
	       lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS))) {
		/*
		 * In case we are going to react to this rx by scheduling
		 * writes, we need to restrict the amount of rx to the size
		 * the protocol reported for rx buffer.
		 *
		 * Otherwise we get a situation we have to absorb possibly a
		 * lot of reads before we get a chance to drain them by writing
		 * them, eg, with echo type tests in autobahn.
		 */

		buffered = 0;
		ebuf.token = pt->serv_buf;
		if (lwsi_role_ws(wsi))
			ebuf.len = (int)wsi->ws->rx_ubuf_alloc;
		else
			ebuf.len = (int)wsi->a.context->pt_serv_buf_size;

		/* never read more than the shared service buffer can hold */
		if ((unsigned int)ebuf.len > wsi->a.context->pt_serv_buf_size)
			ebuf.len = (int)wsi->a.context->pt_serv_buf_size;

		if ((int)pending > ebuf.len)
			pending = (unsigned int)ebuf.len;

		ebuf.len = lws_ssl_capable_read(wsi, ebuf.token,
						(size_t)(pending ? pending :
						(unsigned int)ebuf.len));
		switch (ebuf.len) {
		case 0:
			/* peer hung up */
			lwsl_info("%s: zero length read\n",
				  __func__);
			return LWS_HPI_RET_PLEASE_CLOSE_ME;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lwsl_info("SSL Capable more service\n");
			return LWS_HPI_RET_HANDLED;
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_info("%s: LWS_SSL_CAPABLE_ERROR\n",
				  __func__);
			return LWS_HPI_RET_PLEASE_CLOSE_ME;
		}

		/*
		 * coverity thinks ssl_capable_read() may read over
		 * 2GB.  Dissuade it...
		 */
		ebuf.len &= 0x7fffffff;
	}

drain:

	/*
	 * give any active extensions a chance to munge the buffer
	 * before parse.  We pass in a pointer to an lws_tokens struct
	 * prepared with the default buffer and content length that's in
	 * there.  Rather than rewrite the default buffer, extensions
	 * that expect to grow the buffer can adapt .token to
	 * point to their own per-connection buffer in the extension
	 * user allocation.  By default with no extensions or no
	 * extension callback handling, just the normal input buffer is
	 * used then so it is efficient.
	 */
	m = 0;
	do {

		/* service incoming data */
		//lws_buflist_describe(&wsi->buflist, wsi, __func__);
		if (ebuf.len > 0) {
#if defined(LWS_ROLE_H2)
			if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY &&
			    lwsi_state(wsi) != LRS_DISCARD_BODY)
				n = lws_read_h2(wsi, ebuf.token,
						(unsigned int)ebuf.len);
			else
#endif
				n = lws_read_h1(wsi, ebuf.token,
						(unsigned int)ebuf.len);

			if (n < 0) {
				/* we closed wsi */
				return LWS_HPI_RET_WSI_ALREADY_DIED;
			}
			//lws_buflist_describe(&wsi->buflist, wsi, __func__);
			//lwsl_notice("%s: consuming %d / %d\n", __func__, n, ebuf.len);
			/* account for what was consumed vs what must be kept */
			if (ebuf.len < 0 ||
			    lws_buflist_aware_finished_consuming(wsi, &ebuf, n,
							buffered, __func__))
				return LWS_HPI_RET_PLEASE_CLOSE_ME;
		}

		ebuf.token = NULL;
		ebuf.len = 0;
	} while (m);

	/* ws no longer needs the header allocation once established */
	if (wsi->http.ah
#if defined(LWS_WITH_CLIENT)
	    && !wsi->client_h2_alpn
#endif
	    ) {
		lwsl_info("%s: %p: detaching ah\n", __func__, wsi);
		lws_header_table_detach(wsi, 0);
	}

	pending = (unsigned int)lws_ssl_pending(wsi);

#if defined(LWS_WITH_CLIENT)
	/* force another pass when prioritizing reads and buflist has data */
	if (!pending && (wsi->flags & LCCSCF_PRIORITIZE_READS) &&
	    lws_buflist_total_len(&wsi->buflist))
		pending = 9999999;
#endif

	if (pending) {
		/* clamp the next read size to the applicable rx buffer */
		if (lws_is_ws_with_ext(wsi))
			pending = pending > wsi->ws->rx_ubuf_alloc ?
				wsi->ws->rx_ubuf_alloc : pending;
		else
			pending = pending > wsi->a.context->pt_serv_buf_size ?
				wsi->a.context->pt_serv_buf_size : pending;
		if (--sanity) {
#if !defined(LWS_WITHOUT_EXTENSIONS)
			while (wsi->ws->rx_draining_ext) {
				/* RX Extension needs to be drained before next read */
				n = lws_ws_rx_sm(wsi, ALREADY_PROCESSED_IGNORE_CHAR, 0);
				if (n < 0) {
					return LWS_HPI_RET_PLEASE_CLOSE_ME;
				}
			}
#endif
			goto read;
		}
		else
			/*
			 * Something has gone wrong, we are spinning...
			 * let's bail on this connection
			 */
			return LWS_HPI_RET_PLEASE_CLOSE_ME;
	}

	if (buffered && /* were draining, now nothing left */
	    !lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		lwsl_info("%s: %p flow buf: drained\n", __func__, wsi);
		/* having drained the rxflow buffer, can rearm POLLIN */
#if !defined(LWS_WITH_SERVER)
		n =
#endif
		__lws_rx_flow_control(wsi);
		/* n ignored, needed for NO_SERVER case */
	}

	/* n = 0 */
	return LWS_HPI_RET_HANDLED;
}
1266 | | |
1267 | | |
/*
 * POLLOUT handler for the ws role.
 *
 * Works through writable-time duties in priority order: a queued close
 * frame, then a pending pong (or close payload), then a keepalive ping,
 * then (with extensions) flushing any pending tx extension output.
 * Returns an LWS_HP_RET_* code: BAIL_OK / BAIL_DIE end service for this
 * wsi (alive / to-be-closed), USER_SERVICE lets the caller go on to give
 * the user code its writeable callback.
 */
int rops_handle_POLLOUT_ws(struct lws *wsi)
{
	int write_type = LWS_WRITE_PONG;	/* default control opcode if we send one */
#if !defined(LWS_WITHOUT_EXTENSIONS)
	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
	int ret, m;
#endif
	int n;

#if !defined(LWS_WITHOUT_EXTENSIONS)
	lwsl_debug("%s: %s: wsi->ws->tx_draining_ext %d\n", __func__,
		   wsi->a.protocol->name, wsi->ws->tx_draining_ext);
#endif

	/* Priority 3: pending control packets (pong or close)
	 *
	 * 3a: close notification packet requested from close api
	 */

	if (lwsi_state(wsi) == LRS_WAITING_TO_SEND_CLOSE) {
		lwsl_debug("sending close packet\n");
		lwsl_hexdump_debug(&wsi->ws->ping_payload_buf[LWS_PRE],
				   wsi->ws->close_in_ping_buffer_len);
		wsi->waiting_to_send_close_frame = 0;
		n = lws_write(wsi, &wsi->ws->ping_payload_buf[LWS_PRE],
			      wsi->ws->close_in_ping_buffer_len,
			      LWS_WRITE_CLOSE);
		if (n >= 0) {
			if (wsi->close_needs_ack) {
				/* wait for peer's close ack before teardown */
				lwsi_set_state(wsi, LRS_AWAITING_CLOSE_ACK);
				lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK,
						5);
				lwsl_debug("sent close, await ack\n");

				return LWS_HP_RET_BAIL_OK;
			}
			wsi->close_needs_ack = 0;
			lwsi_set_state(wsi, LRS_RETURNED_CLOSE);
		}

		return LWS_HP_RET_BAIL_DIE;
	}

	/* else, the send failed and we should just hang up */

	/* 3b: pending pong, or a close payload received during close flow */
	if ((lwsi_role_ws(wsi) && wsi->ws->pong_pending_flag) ||
	    (lwsi_state(wsi) == LRS_RETURNED_CLOSE &&
	     wsi->ws->payload_is_close)) {

		if (wsi->ws->payload_is_close)
			write_type = LWS_WRITE_CLOSE;
		else {
			if (wsi->wsistate_pre_close) {
				/* we started close flow, forget pong */
				wsi->ws->pong_pending_flag = 0;
				return LWS_HP_RET_BAIL_OK;
			}
			lwsl_info("issuing pong %d on %s\n",
				  wsi->ws->pong_payload_len, lws_wsi_tag(wsi));
		}

		n = lws_write(wsi, &wsi->ws->pong_payload_buf[LWS_PRE],
			      wsi->ws->pong_payload_len, (enum lws_write_protocol)write_type);
		if (n < 0)
			return LWS_HP_RET_BAIL_DIE;

		/* well he is sent, mark him done */
		wsi->ws->pong_pending_flag = 0;
		if (wsi->ws->payload_is_close) {
			// assert(0);
			/* oh... a close frame was it... then we are done */
			return LWS_HP_RET_BAIL_DIE;
		}

		/* otherwise for PING, leave POLLOUT active either way */
		return LWS_HP_RET_BAIL_OK;
	}

	/* 3c: validity-check ping requested by the keepalive machinery */
	if (!wsi->socket_is_permanently_unusable &&
	    wsi->ws->send_check_ping) {

		lwsl_info("%s: issuing ping on wsi %s: %s %s h2: %d\n", __func__,
			  lws_wsi_tag(wsi),
			  wsi->role_ops->name, wsi->a.protocol->name,
			  wsi->mux_substream);
		wsi->ws->send_check_ping = 0;
		n = lws_write(wsi, &wsi->ws->ping_payload_buf[LWS_PRE],
			      0, LWS_WRITE_PING);
		if (n < 0)
			return LWS_HP_RET_BAIL_DIE;

		return LWS_HP_RET_BAIL_OK;
	}

	/* Priority 4: if we are closing, not allowed to send more data frags
	 *	       which means user callback or tx ext flush banned now
	 */
	if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
		return LWS_HP_RET_USER_SERVICE;

#if !defined(LWS_WITHOUT_EXTENSIONS)
	/* Priority 5: Tx path extension with more to send
	 *
	 * These are handled as new fragments each time around
	 * So while we must block new writeable callback to enforce
	 * payload ordering, but since they are always complete
	 * fragments control packets can interleave OK.
	 */
	if (wsi->ws->tx_draining_ext) {
		lwsl_ext("SERVICING TX EXT DRAINING\n");
		if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
			return LWS_HP_RET_BAIL_DIE;
		/* leave POLLOUT active */
		return LWS_HP_RET_BAIL_OK;
	}

	/* Priority 6: extensions
	 */
	if (!wsi->ws->extension_data_pending && !wsi->ws->tx_draining_ext) {
		lwsl_ext("%s: !wsi->ws->extension_data_pending\n", __func__);
		return LWS_HP_RET_USER_SERVICE;
	}

	/*
	 * Check in on the active extensions, see if they had pending stuff to
	 * spill... they need to get the first look-in otherwise sequence will
	 * be disordered.
	 *
	 * coming here with a NULL, zero-length ebuf means just spill pending
	 */

	ret = 1;
	if (wsi->role_ops == &role_ops_raw_skt
#if defined(LWS_ROLE_RAW_FILE)
	    || wsi->role_ops == &role_ops_raw_file
#endif
	    )
		ret = 0;	/* raw roles have no ws extensions to spill */

	while (ret == 1) {

		/* default to nobody has more to spill */

		ret = 0;
		pmdrx.eb_in.token = NULL;
		pmdrx.eb_in.len = 0;

		/* give every extension a chance to spill */

		m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_TX_PRESEND,
				      &pmdrx, 0);
		if (m < 0) {
			lwsl_err("ext reports fatal error\n");
			return LWS_HP_RET_BAIL_DIE;
		}
		if (m)
			/*
			 * at least one extension told us he has more
			 * to spill, so we will go around again after
			 */
			ret = 1;

		/* assuming they gave us something to send, send it */

		if (pmdrx.eb_in.len) {
			n = lws_issue_raw(wsi, (unsigned char *)pmdrx.eb_in.token,
					  (unsigned int)pmdrx.eb_in.len);
			if (n < 0) {
				lwsl_info("closing from POLLOUT spill\n");
				return LWS_HP_RET_BAIL_DIE;
			}
			/*
			 * Keep amount spilled small to minimize chance of this
			 */
			if (n != pmdrx.eb_in.len) {
				lwsl_err("Unable to spill ext %d vs %d\n",
					 pmdrx.eb_in.len, n);
				return LWS_HP_RET_BAIL_DIE;
			}
		} else
			continue;

		/* no extension has more to spill */

		if (!ret)
			continue;

		/*
		 * There's more to spill from an extension, but we just sent
		 * something... did that leave the pipe choked?
		 */

		if (!lws_send_pipe_choked(wsi))
			/* no we could add more */
			continue;

		lwsl_info("choked in POLLOUT service\n");

		/*
		 * Yes, he's choked.  Leave the POLLOUT masked on so we will
		 * come back here when he is unchoked.  Don't call the user
		 * callback to enforce ordering of spilling, he'll get called
		 * when we come back here and there's nothing more to spill.
		 */

		return LWS_HP_RET_BAIL_OK;
	}

	wsi->ws->extension_data_pending = 0;
#endif

	return LWS_HP_RET_USER_SERVICE;
}
1481 | | |
/*
 * Fake POLLIN on any wsi that still has undelivered rx-extension output,
 * so the event loop services them even though the socket itself has no
 * new data.  Returns nonzero if at least one wsi was force-flagged.
 * No-op (returns 0) when extensions are compiled out.
 */
static int
rops_service_flag_pending_ws(struct lws_context *context, int tsi)
{
#if !defined(LWS_WITHOUT_EXTENSIONS)
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws *wsi;
	int forced = 0;

	/* POLLIN faking (the pt lock is taken by the parent) */

	/*
	 * 1) For all guys with already-available ext data to drain, if they are
	 *    not flowcontrolled, fake their POLLIN status
	 */
	wsi = pt->ws.rx_draining_ext_list;
	while (wsi && wsi->position_in_fds_table != LWS_NO_FDS_POS) {
		/* OR the wsi's POLLIN interest bit into its revents */
		pt->fds[wsi->position_in_fds_table].revents =
			(short)((short)pt->fds[wsi->position_in_fds_table].revents |
			(short)(pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN));
		if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN)
			forced = 1;

		/* next wsi on the per-pt rx drain list */
		wsi = wsi->ws->rx_draining_ext_list;
	}

	return forced;
#else
	return 0;
#endif
}
1512 | | |
1513 | | static int |
1514 | | rops_close_via_role_protocol_ws(struct lws *wsi, enum lws_close_status reason) |
1515 | 0 | { |
1516 | 0 | if (!wsi->ws) |
1517 | 0 | return 0; |
1518 | | |
1519 | 0 | if (!wsi->ws->close_in_ping_buffer_len && /* already a reason */ |
1520 | 0 | (reason == LWS_CLOSE_STATUS_NOSTATUS || |
1521 | 0 | reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY)) |
1522 | 0 | return 0; |
1523 | | |
1524 | 0 | lwsl_debug("%s: sending close indication...\n", __func__); |
1525 | | |
1526 | | /* if no prepared close reason, use 1000 and no aux data */ |
1527 | |
|
1528 | 0 | if (!wsi->ws->close_in_ping_buffer_len) { |
1529 | 0 | wsi->ws->close_in_ping_buffer_len = 2; |
1530 | 0 | wsi->ws->ping_payload_buf[LWS_PRE] = (reason >> 8) & 0xff; |
1531 | 0 | wsi->ws->ping_payload_buf[LWS_PRE + 1] = reason & 0xff; |
1532 | 0 | } |
1533 | |
|
1534 | 0 | wsi->waiting_to_send_close_frame = 1; |
1535 | 0 | wsi->close_needs_ack = 1; |
1536 | 0 | lwsi_set_state(wsi, LRS_WAITING_TO_SEND_CLOSE); |
1537 | 0 | __lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_SEND, 5); |
1538 | |
|
1539 | 0 | lws_callback_on_writable(wsi); |
1540 | |
|
1541 | 0 | return 1; |
1542 | 0 | } |
1543 | | |
/*
 * Tear down ws role state when the wsi closes: unlink the wsi from the
 * per-pt rx/tx extension drain lists, free the rx user buffer, clear
 * pending pong state and destroy any active extension contexts.
 * Always returns 0.
 */
static int
rops_close_role_ws(struct lws_context_per_thread *pt, struct lws *wsi)
{
	if (!wsi->ws)
		return 0;

#if !defined(LWS_WITHOUT_EXTENSIONS)

	if (wsi->ws->rx_draining_ext) {
		struct lws **w = &pt->ws.rx_draining_ext_list;

		wsi->ws->rx_draining_ext = 0;
		/* remove us from context draining ext list */
		while (*w) {
			if (*w == wsi) {
				/* unlink: point predecessor at our successor */
				*w = wsi->ws->rx_draining_ext_list;
				break;
			}
			w = &((*w)->ws->rx_draining_ext_list);
		}
		wsi->ws->rx_draining_ext_list = NULL;
	}

	if (wsi->ws->tx_draining_ext) {
		struct lws **w = &pt->ws.tx_draining_ext_list;
		lwsl_ext("%s: CLEARING tx_draining_ext\n", __func__);
		wsi->ws->tx_draining_ext = 0;
		/* remove us from context draining ext list */
		while (*w) {
			if (*w == wsi) {
				/* unlink: point predecessor at our successor */
				*w = wsi->ws->tx_draining_ext_list;
				break;
			}
			w = &((*w)->ws->tx_draining_ext_list);
		}
		wsi->ws->tx_draining_ext_list = NULL;
	}
#endif
	lws_free_set_NULL(wsi->ws->rx_ubuf);

	wsi->ws->pong_payload_len = 0;
	wsi->ws->pong_pending_flag = 0;

	/* deallocate any active extension contexts */

	if (lws_ext_cb_active(wsi, LWS_EXT_CB_DESTROY, NULL, 0) < 0)
		lwsl_warn("extension destruction failed\n");

	return 0;
}
1594 | | |
1595 | | static int |
1596 | | rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len, |
1597 | | enum lws_write_protocol *wp) |
1598 | 0 | { |
1599 | | #if !defined(LWS_WITHOUT_EXTENSIONS) |
1600 | | struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi]; |
1601 | | enum lws_write_protocol wpt; |
1602 | | #endif |
1603 | 0 | struct lws_ext_pm_deflate_rx_ebufs pmdrx; |
1604 | 0 | int masked7 = lwsi_role_client(wsi); |
1605 | 0 | unsigned char is_masked_bit = 0; |
1606 | 0 | unsigned char *dropmask = NULL; |
1607 | 0 | size_t orig_len = len; |
1608 | 0 | int pre = 0, n = 0; |
1609 | | |
1610 | | // lwsl_err("%s: wp 0x%x len %d\n", __func__, *wp, (int)len); |
1611 | | #if !defined(LWS_WITHOUT_EXTENSIONS) |
1612 | | if (wsi->ws->tx_draining_ext) { |
1613 | | /* remove us from the list */ |
1614 | | struct lws **w = &pt->ws.tx_draining_ext_list; |
1615 | | |
1616 | | lwsl_ext("%s: CLEARING tx_draining_ext\n", __func__); |
1617 | | wsi->ws->tx_draining_ext = 0; |
1618 | | /* remove us from context draining ext list */ |
1619 | | while (*w) { |
1620 | | if (*w == wsi) { |
1621 | | *w = wsi->ws->tx_draining_ext_list; |
1622 | | break; |
1623 | | } |
1624 | | w = &((*w)->ws->tx_draining_ext_list); |
1625 | | } |
1626 | | wsi->ws->tx_draining_ext_list = NULL; |
1627 | | |
1628 | | wpt = *wp; |
1629 | | *wp = (wsi->ws->tx_draining_stashed_wp & 0xc0) | |
1630 | | LWS_WRITE_CONTINUATION; |
1631 | | |
1632 | | /* |
1633 | | * When we are just flushing (len == 0), we can trust the |
1634 | | * stashed wp info completely. Otherwise adjust it to the |
1635 | | * FIN status of the incoming packet. |
1636 | | */ |
1637 | | |
1638 | | if (!(wpt & LWS_WRITE_NO_FIN) && len) |
1639 | | *wp &= (enum lws_write_protocol)~LWS_WRITE_NO_FIN; |
1640 | | |
1641 | | lwsl_ext("FORCED draining wp to 0x%02X " |
1642 | | "(stashed 0x%02X, incoming 0x%02X)\n", *wp, |
1643 | | wsi->ws->tx_draining_stashed_wp, wpt); |
1644 | | // assert(0); |
1645 | | } |
1646 | | #endif |
1647 | |
|
1648 | 0 | if (((*wp) & 0x1f) == LWS_WRITE_HTTP || |
1649 | 0 | ((*wp) & 0x1f) == LWS_WRITE_HTTP_FINAL || |
1650 | 0 | ((*wp) & 0x1f) == LWS_WRITE_HTTP_HEADERS_CONTINUATION || |
1651 | 0 | ((*wp) & 0x1f) == LWS_WRITE_HTTP_HEADERS) |
1652 | 0 | goto send_raw; |
1653 | | |
1654 | | |
1655 | | |
1656 | | /* if we are continuing a frame that already had its header done */ |
1657 | | |
1658 | 0 | if (wsi->ws->inside_frame) { |
1659 | 0 | lwsl_debug("INSIDE FRAME\n"); |
1660 | 0 | goto do_more_inside_frame; |
1661 | 0 | } |
1662 | | |
1663 | 0 | wsi->ws->clean_buffer = 1; |
1664 | | |
1665 | | /* |
1666 | | * give a chance to the extensions to modify payload |
1667 | | * the extension may decide to produce unlimited payload erratically |
1668 | | * (eg, compression extension), so we require only that if he produces |
1669 | | * something, it will be a complete fragment of the length known at |
1670 | | * the time (just the fragment length known), and if he has |
1671 | | * more we will come back next time he is writeable and allow him to |
1672 | | * produce more fragments until he's drained. |
1673 | | * |
1674 | | * This allows what is sent each time it is writeable to be limited to |
1675 | | * a size that can be sent without partial sends or blocking, allows |
1676 | | * interleaving of control frames and other connection service. |
1677 | | */ |
1678 | |
|
1679 | 0 | pmdrx.eb_in.token = buf; |
1680 | 0 | pmdrx.eb_in.len = (int)len; |
1681 | | |
1682 | | /* for the non-pm-deflate case */ |
1683 | |
|
1684 | 0 | pmdrx.eb_out = pmdrx.eb_in; |
1685 | |
|
1686 | 0 | switch ((int)*wp) { |
1687 | 0 | case LWS_WRITE_PING: |
1688 | 0 | case LWS_WRITE_PONG: |
1689 | 0 | case LWS_WRITE_CLOSE: |
1690 | 0 | break; |
1691 | 0 | default: |
1692 | | #if !defined(LWS_WITHOUT_EXTENSIONS) |
1693 | | n = lws_ext_cb_active(wsi, (int)LWS_EXT_CB_PAYLOAD_TX, &pmdrx, (int)*wp); |
1694 | | if (n < 0) |
1695 | | return -1; |
1696 | | lwsl_ext("%s: defl ext ret %d, ext in remaining %d, " |
1697 | | "out %d compressed (wp 0x%x)\n", __func__, n, |
1698 | | (int)pmdrx.eb_in.len, (int)pmdrx.eb_out.len, *wp); |
1699 | | |
1700 | | if (n == PMDR_HAS_PENDING) { |
1701 | | lwsl_ext("%s: HAS PENDING: write drain len %d " |
1702 | | "(wp 0x%x) SETTING tx_draining_ext " |
1703 | | "(remaining in %d)\n", __func__, |
1704 | | (int)pmdrx.eb_out.len, *wp, |
1705 | | (int)pmdrx.eb_in.len); |
1706 | | /* extension requires further draining */ |
1707 | | wsi->ws->tx_draining_ext = 1; |
1708 | | wsi->ws->tx_draining_ext_list = |
1709 | | pt->ws.tx_draining_ext_list; |
1710 | | pt->ws.tx_draining_ext_list = wsi; |
1711 | | /* we must come back to do more */ |
1712 | | lws_callback_on_writable(wsi); |
1713 | | /* |
1714 | | * keep a copy of the write type for the overall |
1715 | | * action that has provoked generation of these |
1716 | | * fragments, so the last guy can use its FIN state. |
1717 | | */ |
1718 | | wsi->ws->tx_draining_stashed_wp = (uint8_t)*wp; |
1719 | | /* |
1720 | | * Despite what we may have thought, this is definitely |
1721 | | * NOT the last fragment, because the extension asserted |
1722 | | * he has more coming. For example, the extension may |
1723 | | * be compressing, and has saved up everything until the |
1724 | | * end, where the output is larger than one chunk. |
1725 | | * |
1726 | | * Make sure this intermediate one doesn't actually |
1727 | | * go out with a FIN. |
1728 | | */ |
1729 | | *wp |= LWS_WRITE_NO_FIN; |
1730 | | } |
1731 | | #endif |
1732 | 0 | if (pmdrx.eb_out.len && wsi->ws->stashed_write_pending) { |
1733 | 0 | wsi->ws->stashed_write_pending = 0; |
1734 | 0 | *wp = (unsigned int)(((*wp) & 0xc0) | (unsigned int)wsi->ws->stashed_write_type); |
1735 | 0 | } |
1736 | 0 | } |
1737 | | |
1738 | | /* |
1739 | | * an extension did something we need to keep... for example, if |
1740 | | * compression extension, it has already updated its state according |
1741 | | * to this being issued |
1742 | | */ |
1743 | 0 | if (buf != pmdrx.eb_out.token) { |
1744 | | /* |
1745 | | * ext might eat it, but not have anything to issue yet. |
1746 | | * In that case we have to follow his lead, but stash and |
1747 | | * replace the write type that was lost here the first time. |
1748 | | */ |
1749 | 0 | if (len && !pmdrx.eb_out.len) { |
1750 | 0 | if (!wsi->ws->stashed_write_pending) |
1751 | 0 | wsi->ws->stashed_write_type = |
1752 | 0 | (char)(*wp) & 0x3f; |
1753 | 0 | wsi->ws->stashed_write_pending = 1; |
1754 | 0 | return (int)len; |
1755 | 0 | } |
1756 | | /* |
1757 | | * extension recreated it: |
1758 | | * need to buffer this if not all sent |
1759 | | */ |
1760 | 0 | wsi->ws->clean_buffer = 0; |
1761 | 0 | } |
1762 | | |
1763 | 0 | buf = pmdrx.eb_out.token; |
1764 | 0 | len = (unsigned int)pmdrx.eb_out.len; |
1765 | |
|
1766 | 0 | if (!buf) { |
1767 | 0 | lwsl_err("null buf (%d)\n", (int)len); |
1768 | 0 | return -1; |
1769 | 0 | } |
1770 | | |
1771 | 0 | switch (wsi->ws->ietf_spec_revision) { |
1772 | 0 | case 13: |
1773 | 0 | if (masked7) { |
1774 | 0 | pre += 4; |
1775 | 0 | dropmask = &buf[0 - pre]; |
1776 | 0 | is_masked_bit = 0x80; |
1777 | 0 | } |
1778 | |
|
1779 | 0 | switch ((*wp) & 0xf) { |
1780 | 0 | case LWS_WRITE_TEXT: |
1781 | 0 | n = LWSWSOPC_TEXT_FRAME; |
1782 | 0 | break; |
1783 | 0 | case LWS_WRITE_BINARY: |
1784 | 0 | n = LWSWSOPC_BINARY_FRAME; |
1785 | 0 | break; |
1786 | 0 | case LWS_WRITE_CONTINUATION: |
1787 | 0 | n = LWSWSOPC_CONTINUATION; |
1788 | 0 | break; |
1789 | | |
1790 | 0 | case LWS_WRITE_CLOSE: |
1791 | 0 | n = LWSWSOPC_CLOSE; |
1792 | 0 | break; |
1793 | 0 | case LWS_WRITE_PING: |
1794 | 0 | n = LWSWSOPC_PING; |
1795 | 0 | break; |
1796 | 0 | case LWS_WRITE_PONG: |
1797 | 0 | n = LWSWSOPC_PONG; |
1798 | 0 | break; |
1799 | 0 | default: |
1800 | 0 | lwsl_warn("lws_write: unknown write opc / wp\n"); |
1801 | 0 | return -1; |
1802 | 0 | } |
1803 | | |
1804 | 0 | if (!((*wp) & LWS_WRITE_NO_FIN)) |
1805 | 0 | n |= 1 << 7; |
1806 | |
|
1807 | 0 | if (len < 126) { |
1808 | 0 | pre += 2; |
1809 | 0 | buf[-pre] = (uint8_t)n; |
1810 | 0 | buf[-pre + 1] = (unsigned char)(len | is_masked_bit); |
1811 | 0 | } else { |
1812 | 0 | if (len < 65536) { |
1813 | 0 | pre += 4; |
1814 | 0 | buf[-pre] = (uint8_t)n; |
1815 | 0 | buf[-pre + 1] = (uint8_t)(126 | is_masked_bit); |
1816 | 0 | buf[-pre + 2] = (unsigned char)(len >> 8); |
1817 | 0 | buf[-pre + 3] = (unsigned char)len; |
1818 | 0 | } else { |
1819 | 0 | pre += 10; |
1820 | 0 | buf[-pre] = (uint8_t)n; |
1821 | 0 | buf[-pre + 1] = (uint8_t)(127 | is_masked_bit); |
1822 | 0 | #if defined __LP64__ |
1823 | 0 | buf[-pre + 2] = (len >> 56) & 0x7f; |
1824 | 0 | buf[-pre + 3] = (uint8_t)(len >> 48); |
1825 | 0 | buf[-pre + 4] = (uint8_t)(len >> 40); |
1826 | 0 | buf[-pre + 5] = (uint8_t)(len >> 32); |
1827 | | #else |
1828 | | buf[-pre + 2] = 0; |
1829 | | buf[-pre + 3] = 0; |
1830 | | buf[-pre + 4] = 0; |
1831 | | buf[-pre + 5] = 0; |
1832 | | #endif |
1833 | 0 | buf[-pre + 6] = (unsigned char)(len >> 24); |
1834 | 0 | buf[-pre + 7] = (unsigned char)(len >> 16); |
1835 | 0 | buf[-pre + 8] = (unsigned char)(len >> 8); |
1836 | 0 | buf[-pre + 9] = (unsigned char)len; |
1837 | 0 | } |
1838 | 0 | } |
1839 | 0 | break; |
1840 | 0 | } |
1841 | | |
1842 | 0 | do_more_inside_frame: |
1843 | | |
1844 | | /* |
1845 | | * Deal with masking if we are in client -> server direction and |
1846 | | * the wp demands it |
1847 | | */ |
1848 | |
|
1849 | 0 | if (masked7) { |
1850 | 0 | if (!wsi->ws->inside_frame) |
1851 | 0 | if (lws_0405_frame_mask_generate(wsi)) { |
1852 | 0 | lwsl_err("frame mask generation failed\n"); |
1853 | 0 | return -1; |
1854 | 0 | } |
1855 | | |
1856 | | /* |
1857 | | * in v7, just mask the payload |
1858 | | */ |
1859 | 0 | if (dropmask) { /* never set if already inside frame */ |
1860 | 0 | for (n = 4; n < (int)len + 4; n++) |
1861 | 0 | dropmask[n] = dropmask[n] ^ wsi->ws->mask[ |
1862 | 0 | (wsi->ws->mask_idx++) & 3]; |
1863 | | |
1864 | | /* copy the frame nonce into place */ |
1865 | 0 | memcpy(dropmask, wsi->ws->mask, 4); |
1866 | 0 | } |
1867 | 0 | } |
1868 | | |
1869 | 0 | if (lwsi_role_h2_ENCAPSULATION(wsi)) { |
1870 | 0 | struct lws *encap = lws_get_network_wsi(wsi); |
1871 | |
|
1872 | 0 | assert(encap != wsi); |
1873 | | |
1874 | 0 | return lws_rops_func_fidx(encap->role_ops, |
1875 | 0 | LWS_ROPS_write_role_protocol). |
1876 | 0 | write_role_protocol(wsi, buf - pre, |
1877 | 0 | len + (unsigned int)pre, wp); |
1878 | 0 | } |
1879 | | |
1880 | 0 | switch ((*wp) & 0x1f) { |
1881 | 0 | case LWS_WRITE_TEXT: |
1882 | 0 | case LWS_WRITE_BINARY: |
1883 | 0 | case LWS_WRITE_CONTINUATION: |
1884 | 0 | if (!wsi->h2_stream_carries_ws) { |
1885 | | |
1886 | | /* |
1887 | | * give any active extensions a chance to munge the |
1888 | | * buffer before send. We pass in a pointer to an |
1889 | | * lws_tokens struct prepared with the default buffer |
1890 | | * and content length that's in there. Rather than |
1891 | | * rewrite the default buffer, extensions that expect |
1892 | | * to grow the buffer can adapt .token to point to their |
1893 | | * own per-connection buffer in the extension user |
1894 | | * allocation. By default with no extensions or no |
1895 | | * extension callback handling, just the normal input |
1896 | | * buffer is used then so it is efficient. |
1897 | | * |
1898 | | * callback returns 1 in case it wants to spill more |
1899 | | * buffers |
1900 | | * |
1901 | | * This takes care of holding the buffer if send is |
1902 | | * incomplete, ie, if wsi->ws->clean_buffer is 0 |
1903 | | * (meaning an extension meddled with the buffer). If |
1904 | | * wsi->ws->clean_buffer is 1, it will instead return |
1905 | | * to the user code how much OF THE USER BUFFER was |
1906 | | * consumed. |
1907 | | */ |
1908 | |
|
1909 | 0 | n = lws_issue_raw_ext_access(wsi, buf - pre, len + (unsigned int)pre); |
1910 | 0 | wsi->ws->inside_frame = 1; |
1911 | 0 | if (n <= 0) |
1912 | 0 | return n; |
1913 | | |
1914 | 0 | if (n == (int)len + pre) { |
1915 | | /* everything in the buffer was handled |
1916 | | * (or rebuffered...) */ |
1917 | 0 | wsi->ws->inside_frame = 0; |
1918 | 0 | return (int)orig_len; |
1919 | 0 | } |
1920 | | |
1921 | | /* |
1922 | | * it is how many bytes of user buffer got sent... may |
1923 | | * be < orig_len in which case callback when writable |
1924 | | * has already been arranged and user code can call |
1925 | | * lws_write() again with the rest later. |
1926 | | */ |
1927 | | |
1928 | 0 | return n - pre; |
1929 | 0 | } |
1930 | 0 | break; |
1931 | 0 | default: |
1932 | 0 | break; |
1933 | 0 | } |
1934 | | |
1935 | 0 | send_raw: |
1936 | 0 | return lws_issue_raw(wsi, (unsigned char *)buf - pre, len + (unsigned int)pre); |
1937 | 0 | } |
1938 | | |
1939 | | static int |
1940 | | rops_close_kill_connection_ws(struct lws *wsi, enum lws_close_status reason) |
1941 | 0 | { |
1942 | | /* deal with ws encapsulation in h2 */ |
1943 | 0 | #if defined(LWS_WITH_HTTP2) |
1944 | 0 | if (wsi->mux_substream && wsi->h2_stream_carries_ws) |
1945 | 0 | return lws_rops_func_fidx(&role_ops_h2, |
1946 | 0 | LWS_ROPS_close_kill_connection). |
1947 | 0 | close_kill_connection(wsi, reason); |
1948 | | |
1949 | 0 | return 0; |
1950 | | #else |
1951 | | return 0; |
1952 | | #endif |
1953 | 0 | } |
1954 | | |
/*
 * Request a writable callback for a ws connection.  When the ws stream is
 * carried inside an h2 session, the request must additionally be routed
 * through the encapsulating (parent) connection's role ops so the h2 layer
 * can schedule the substream.  Returns 1 if the parent's handler reported
 * failure, else 0.
 */
static int
rops_callback_on_writable_ws(struct lws *wsi)
{
#if defined(LWS_WITH_HTTP2)
	if (lwsi_role_h2_ENCAPSULATION(wsi)) {
		/* we know then that it has an h2 parent */
		struct lws *enc = lws_rops_func_fidx(&role_ops_h2,
					LWS_ROPS_encapsulation_parent).
						encapsulation_parent(wsi);

		assert(enc);
		/*
		 * NOTE(review): the role ops are looked up on enc (the h2
		 * parent) but the child wsi is what is passed in — presumably
		 * the h2 handler resolves the substream itself.  Confirm this
		 * asymmetry is intentional; rops_issue_keepalive_ws in this
		 * file passes enc instead.
		 */
		if (lws_rops_func_fidx(enc->role_ops,
				       LWS_ROPS_callback_on_writable).
					callback_on_writable(wsi))
			return 1;
	}
#endif
	return 0;
}
1974 | | |
1975 | | static int |
1976 | | rops_init_vhost_ws(struct lws_vhost *vh, |
1977 | | const struct lws_context_creation_info *info) |
1978 | 0 | { |
1979 | | #if !defined(LWS_WITHOUT_EXTENSIONS) |
1980 | | #ifdef LWS_WITH_PLUGINS |
1981 | | struct lws_plugin *plugin; |
1982 | | int m; |
1983 | | |
1984 | | if (vh->context->plugin_extension_count) { |
1985 | | |
1986 | | m = 0; |
1987 | | while (info->extensions && info->extensions[m].callback) |
1988 | | m++; |
1989 | | |
1990 | | /* |
1991 | | * give the vhost a unified list of extensions including the |
1992 | | * ones that came from plugins |
1993 | | */ |
1994 | | vh->ws.extensions = lws_zalloc(sizeof(struct lws_extension) * |
1995 | | (unsigned int)(m + vh->context->plugin_extension_count + 1), |
1996 | | "extensions"); |
1997 | | if (!vh->ws.extensions) |
1998 | | return 1; |
1999 | | |
2000 | | memcpy((struct lws_extension *)vh->ws.extensions, info->extensions, |
2001 | | sizeof(struct lws_extension) * (unsigned int)m); |
2002 | | plugin = vh->context->plugin_list; |
2003 | | while (plugin) { |
2004 | | const lws_plugin_protocol_t *plpr = |
2005 | | (const lws_plugin_protocol_t *)plugin->hdr; |
2006 | | |
2007 | | memcpy((struct lws_extension *)&vh->ws.extensions[m], |
2008 | | plpr->extensions, |
2009 | | sizeof(struct lws_extension) * |
2010 | | (unsigned int)plpr->count_extensions); |
2011 | | m += plpr->count_extensions; |
2012 | | plugin = plugin->list; |
2013 | | } |
2014 | | } else |
2015 | | #endif |
2016 | | vh->ws.extensions = info->extensions; |
2017 | | #endif |
2018 | |
|
2019 | 0 | return 0; |
2020 | 0 | } |
2021 | | |
/*
 * Per-vhost teardown for the ws role: release the merged extension list
 * that rops_init_vhost_ws allocated when plugin extensions were in play.
 * (When no plugin extensions exist, vh->ws.extensions merely aliases the
 * caller's array and must not be freed.)
 */
static int
rops_destroy_vhost_ws(struct lws_vhost *vh)
{
#if defined(LWS_WITH_PLUGINS) && !defined(LWS_WITHOUT_EXTENSIONS)
	if (vh->context->plugin_extension_count)
		lws_free((void *)vh->ws.extensions);
#endif

	return 0;
}
2034 | | |
#if defined(LWS_WITH_HTTP_PROXY)
/* lws_dll2_foreach_safe() callback: free one buffered proxy fragment */
static int
ws_destroy_proxy_buf(struct lws_dll2 *d, void *user)
{
	(void)user; /* unused */

	lws_free(d);

	return 0;
}
#endif
2044 | | |
/*
 * Final per-connection teardown for the ws role: free any buffered proxy
 * fragments still queued on this connection, then the ws state itself.
 *
 * Order matters: proxy_owner lives inside *wsi->ws, so the proxy buffer
 * list must be walked before wsi->ws is freed.
 */
static int
rops_destroy_role_ws(struct lws *wsi)
{
#if defined(LWS_WITH_HTTP_PROXY)
	lws_dll2_foreach_safe(&wsi->ws->proxy_owner, NULL, ws_destroy_proxy_buf);
#endif

	lws_free_set_NULL(wsi->ws);

	return 0;
}
2056 | | |
2057 | | static int |
2058 | | rops_issue_keepalive_ws(struct lws *wsi, int isvalid) |
2059 | 0 | { |
2060 | 0 | uint64_t us; |
2061 | |
|
2062 | 0 | #if defined(LWS_WITH_HTTP2) |
2063 | 0 | if (lwsi_role_h2_ENCAPSULATION(wsi)) { |
2064 | | /* we know then that it has an h2 parent */ |
2065 | 0 | struct lws *enc = lws_rops_func_fidx(&role_ops_h2, |
2066 | 0 | LWS_ROPS_encapsulation_parent). |
2067 | 0 | encapsulation_parent(wsi); |
2068 | |
|
2069 | 0 | assert(enc); |
2070 | 0 | if (lws_rops_func_fidx(enc->role_ops, LWS_ROPS_issue_keepalive). |
2071 | 0 | issue_keepalive(enc, isvalid)) |
2072 | 0 | return 1; |
2073 | 0 | } |
2074 | 0 | #endif |
2075 | | |
2076 | 0 | if (isvalid) |
2077 | 0 | _lws_validity_confirmed_role(wsi); |
2078 | 0 | else { |
2079 | 0 | us = (uint64_t)lws_now_usecs(); |
2080 | 0 | memcpy(&wsi->ws->ping_payload_buf[LWS_PRE], &us, 8); |
2081 | 0 | wsi->ws->send_check_ping = 1; |
2082 | 0 | lws_callback_on_writable(wsi); |
2083 | 0 | } |
2084 | |
|
2085 | 0 | return 0; |
2086 | 0 | } |
2087 | | |
/*
 * Compressed role-ops table for the ws role.  Entry positions are
 * 1-based and are referenced by the packed nibbles in
 * role_ops_ws.rops_idx below — do not reorder without updating those.
 */
static const lws_rops_t rops_table_ws[] = {
	/*  1 */ { .init_vhost		  = rops_init_vhost_ws },
	/*  2 */ { .destroy_vhost	  = rops_destroy_vhost_ws },
	/*  3 */ { .service_flag_pending  = rops_service_flag_pending_ws },
	/*  4 */ { .handle_POLLIN	  = rops_handle_POLLIN_ws },
	/*  5 */ { .handle_POLLOUT	  = rops_handle_POLLOUT_ws },
	/*  6 */ { .callback_on_writable  = rops_callback_on_writable_ws },
	/*  7 */ { .write_role_protocol	  = rops_write_role_protocol_ws },
	/*  8 */ { .close_via_role_protocol = rops_close_via_role_protocol_ws },
	/*  9 */ { .close_role		  = rops_close_role_ws },
	/* 10 */ { .close_kill_connection = rops_close_kill_connection_ws },
	/* 11 */ { .destroy_role	  = rops_destroy_role_ws },
	/* 12 */ { .issue_keepalive	  = rops_issue_keepalive_ws },
};
2102 | | |
const struct lws_role_ops role_ops_ws = {
	/* role name */		"ws",
	/* alpn id */		NULL,

	/* rops_table */	rops_table_ws,
	/*
	 * rops_idx: each byte packs two 1-based indices into rops_table_ws,
	 * high nibble first; 0 in a nibble means "no handler for that op".
	 * E.g. 0x12 -> init_vhost = entry 1, destroy_vhost = entry 2;
	 * 0x9a -> close_role = entry 9, close_kill_connection = entry 10.
	 */
	/* rops_idx */		{
	  /* LWS_ROPS_check_upgrades */
	  /* LWS_ROPS_pt_init_destroy */		0x00,
	  /* LWS_ROPS_init_vhost */
	  /* LWS_ROPS_destroy_vhost */			0x12,
	  /* LWS_ROPS_service_flag_pending */
	  /* LWS_ROPS_handle_POLLIN */			0x34,
	  /* LWS_ROPS_handle_POLLOUT */
	  /* LWS_ROPS_perform_user_POLLOUT */		0x50,
	  /* LWS_ROPS_callback_on_writable */
	  /* LWS_ROPS_tx_credit */			0x60,
	  /* LWS_ROPS_write_role_protocol */
	  /* LWS_ROPS_encapsulation_parent */		0x70,
	  /* LWS_ROPS_alpn_negotiated */
	  /* LWS_ROPS_close_via_role_protocol */	0x08,
	  /* LWS_ROPS_close_role */
	  /* LWS_ROPS_close_kill_connection */		0x9a,
	  /* LWS_ROPS_destroy_role */
	  /* LWS_ROPS_adoption_bind */			0xb0,
	  /* LWS_ROPS_client_bind */
	  /* LWS_ROPS_issue_keepalive */		0x0c,
					},

	/* adoption_cb clnt, srv */	{ LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED,
					  LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED },
	/* rx_cb clnt, srv */		{ LWS_CALLBACK_CLIENT_RECEIVE,
					  LWS_CALLBACK_RECEIVE },
	/* writeable cb clnt, srv */	{ LWS_CALLBACK_CLIENT_WRITEABLE,
					  LWS_CALLBACK_SERVER_WRITEABLE },
	/* close cb clnt, srv */	{ LWS_CALLBACK_CLIENT_CLOSED,
					  LWS_CALLBACK_CLOSED },
	/* protocol_bind cb c, srv */	{ LWS_CALLBACK_WS_CLIENT_BIND_PROTOCOL,
					  LWS_CALLBACK_WS_SERVER_BIND_PROTOCOL },
	/* protocol_unbind cb c, srv */	{ LWS_CALLBACK_WS_CLIENT_DROP_PROTOCOL,
					  LWS_CALLBACK_WS_SERVER_DROP_PROTOCOL },
	/* file handles */		0
};