/src/libwebsockets/lib/roles/pipe/ops-pipe.c
Line | Count | Source |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include <private-lib-core.h> |
26 | | |
/*
 * POLLIN handler for the per-service-thread "pipe" wsi.
 *
 * This wsi wraps the fd (eventfd on Linux when available, otherwise the
 * read end of a pipe, or a socket pair on WIN32) that is written to in
 * order to interrupt the process-wide poll() / event-loop wait
 * (presumably by lws_cancel_service() and friends -- the writer side is
 * not visible in this file; confirm against the pt init code).
 *
 * We first drain whatever signaled us, then give each interested
 * subsystem a chance to act on the wakeup, and finally broadcast
 * LWS_CALLBACK_EVENT_WAIT_CANCELLED to all protocols.
 *
 * pt:     service thread context owning this pipe
 * wsi:    the pipe wsi itself (wsi->desc.sockfd is the readable fd)
 * pollfd: the poll result entry (not consulted on these paths)
 *
 * Returns LWS_HPI_RET_HANDLED normally, or LWS_HPI_RET_PLEASE_CLOSE_ME
 * if draining the fd failed or a protocol asked to close during the
 * broadcast.
 */
static lws_handling_result_t
rops_handle_POLLIN_pipe(struct lws_context_per_thread *pt, struct lws *wsi,
			struct lws_pollfd *pollfd)
{
#if defined(LWS_WITH_LATENCY)
	/* timestamp the start of pipe handling so we can report if it ran long */
	lws_usec_t _pipe_start = lws_now_usecs();
#endif
#if defined(LWS_HAVE_EVENTFD)
	eventfd_t value;
	int n;

	/*
	 * Drain the eventfd counter; we only care that we were signaled,
	 * not about the counter value itself.  A read failure means the
	 * signaling fd is broken, so ask to be closed.
	 */
	n = eventfd_read(wsi->desc.sockfd, &value);
	if (n < 0) {
		lwsl_notice("%s: eventfd read %d bailed errno %d\n", __func__,
			    wsi->desc.sockfd, LWS_ERRNO);
		return LWS_HPI_RET_PLEASE_CLOSE_ME;
	}
#elif !defined(WIN32) && !defined(_WIN32)
	char s[100];
	int n;

	/*
	 * discard the byte(s) that signaled us
	 * We really don't care about the number of bytes, but coverity
	 * thinks we should.
	 */
	n = (int)read(wsi->desc.sockfd, s, sizeof(s));
	(void)n;
	if (n < 0)
		return LWS_HPI_RET_PLEASE_CLOSE_ME;
#elif defined(WIN32)
	/* on windows the "pipe" is a socket pair, so drain via recv() */
	char s[100];
	int n;

	n = recv(wsi->desc.sockfd, s, sizeof(s), 0);
	if (n == SOCKET_ERROR)
		return LWS_HPI_RET_PLEASE_CLOSE_ME;
#endif

#if defined(LWS_WITH_THREADPOOL) && defined(LWS_HAVE_PTHREAD_H)
	/*
	 * threadpools that need to call for on_writable callbacks do it by
	 * marking the task as needing one for its wsi, then cancelling service.
	 *
	 * Each tsi will call this to perform the actual callback_on_writable
	 * from the correct service thread context
	 */
	lws_threadpool_tsi_context(pt->context, pt->tid);
#endif

#if defined(LWS_WITH_ASYNC_QUEUE)
	{
		/*
		 * Collect finished async-worker jobs that belong to this
		 * service thread onto a private list while holding the
		 * worker mutex, then act on them after releasing it, so we
		 * never call back into lws / TLS machinery with the worker
		 * mutex held.
		 */
		struct lws_dll2_owner handled;

		lws_dll2_owner_clear(&handled);
		pthread_mutex_lock(&pt->context->async_worker_mutex);
		if (pt->context->async_worker_finished.count) {
			lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, lws_dll2_get_head(&pt->context->async_worker_finished)) {
				struct lws_async_job *job = lws_container_of(d, struct lws_async_job, list);

				/*
				 * Orphaned job (its wsi went away): just
				 * detach and dispose of it here.
				 */
				if (!job->wsi) {
					lws_dll2_remove(d);
					if (job->type != LWS_AQ_FILE_READ)
						lws_free(job); /* file read frees itself */
					continue;
				}

				/*
				 * Only take jobs whose wsi is serviced by
				 * this thread; other tsi will collect theirs
				 * when their own pipe fires.
				 * NOTE(review): compares wsi->tsi against
				 * pt->tid -- assumes pt->tid holds this pt's
				 * service-thread index; confirm at pt init.
				 */
				if (job->wsi->tsi == pt->tid) {
					job->handled_by_main = 1;
					lws_dll2_remove(d);
					lws_dll2_add_tail(d, &handled);
				}
			} lws_end_foreach_dll_safe(d, d1);
		}
		pthread_mutex_unlock(&pt->context->async_worker_mutex);

		/* now process the collected jobs without the worker mutex */
		lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, lws_dll2_get_head(&handled)) {
			struct lws_async_job *job = lws_container_of(d, struct lws_async_job, list);

			lws_dll2_remove(d);
			if (job->type == LWS_AQ_FILE_READ) {
				/*
				 * A background file read completed: resume
				 * serving the file on the writable path.
				 */
				lwsi_set_state(job->wsi, LRS_ISSUING_FILE);
				lws_callback_on_writable(job->wsi);
			}
#if defined(LWS_WITH_TLS)
			else if (job->type == LWS_AQ_SSL_ACCEPT) {
				/* a background TLS accept finished; finalize it here */
				job->wsi->async_worker_job = NULL;

				if (lws_tls_server_accept_completed(job->wsi, job->u.ssl.status)) {
					lws_close_free_wsi(job->wsi, LWS_CLOSE_STATUS_NOSTATUS, "ssl accept failed");
				} else if (lwsi_state(job->wsi) != LRS_SSL_ACK_PENDING) {

					/* restore POLLIN which was stripped before entering async worker queue */
					if (lws_change_pollfd(job->wsi, 0, LWS_POLLIN)) {
						lws_close_free_wsi(job->wsi, LWS_CLOSE_STATUS_NOSTATUS, "ssl accept pollin failed");
					} else {
						if (lws_server_socket_service_ssl(job->wsi, job->wsi->desc.sockfd, 0))
							lwsl_notice("OOB ssl success path failed\n");

						/*
						 * OpenSSL background accept might have slurped the HTTP/2 preface
						 * into its internal BIO without generating a kernel POLLIN.
						 * Force a fake POLLIN by adding to the pending list once, but ONLY if
						 * it actually has decoded bytes. If not, it will spin WANT_READ endlessly.
						 */
						if (lws_ssl_pending(job->wsi)) {
							lws_pt_lock(pt, __func__);
							if (lws_dll2_is_detached(&job->wsi->tls.dll_pending_tls)) {
								lws_dll2_add_head(&job->wsi->tls.dll_pending_tls,
										  &pt->tls.dll_pending_tls_owner);
								lwsl_notice("ops-pipe added %s to pending tls list, pos=%d\n", lws_wsi_tag(job->wsi), job->wsi->position_in_fds_table);
							}
							lws_pt_unlock(pt);
						}
					}
				}
			}
#endif
			/* LWS_AQ_FILE_READ jobs free themselves (see above) */
			if (job->type != LWS_AQ_FILE_READ)
				lws_free(job);
		} lws_end_foreach_dll_safe(d, d1);
	}
#endif

#if LWS_MAX_SMP > 1

	/*
	 * Other pts need to take care of their own wsi bound to a vhost that
	 * is going down
	 */

	if (pt->context->owner_vh_being_destroyed.head) {

		lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
					   pt->context->owner_vh_being_destroyed.head) {
			struct lws_vhost *v =
				lws_container_of(d, struct lws_vhost,
						 vh_being_destroyed_list);

			lws_vhost_lock(v); /* -------------- vh { */
			__lws_vhost_destroy_pt_wsi_dieback_start(v);
			lws_vhost_unlock(v); /* } vh -------------- */

		} lws_end_foreach_dll_safe(d, d1);
	}

#endif

#if defined(LWS_WITH_SECURE_STREAMS)
	/* let every Secure Stream on this pt know the event wait was cancelled */
	lws_dll2_foreach_safe(&pt->ss_owner, NULL, lws_ss_cancel_notify_dll);
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API) && defined(LWS_WITH_CLIENT)
	lws_dll2_foreach_safe(&pt->ss_client_owner, NULL, lws_sspc_cancel_notify_dll);
#endif
#endif

	/*
	 * the poll() wait, or the event loop for libuv etc is a
	 * process-wide resource that we interrupted.  So let every
	 * protocol that may be interested in the pipe event know that
	 * it happened.
	 */
	if (lws_broadcast(pt, LWS_CALLBACK_EVENT_WAIT_CANCELLED, NULL, 0)) {
		lwsl_info("closed in event cancel\n");
		return LWS_HPI_RET_PLEASE_CLOSE_ME;
	}

#if defined(LWS_WITH_LATENCY)
	{
		/*
		 * Log when pipe handling took more than 2ms.
		 * NOTE(review): the 2000 passed to lws_latency_note() looks
		 * like a usec threshold matching the 2ms check -- confirm
		 * against the lws_latency_note() signature.
		 */
		unsigned int ms = (unsigned int)((lws_now_usecs() - _pipe_start) / 1000);
		if (ms > 2)
			lws_latency_note(pt, _pipe_start, 2000, "pipe:%dms", ms);
	}
#endif

	return LWS_HPI_RET_HANDLED;
}
203 | | |
/*
 * Role ops table for the pipe role: only a POLLIN handler is provided,
 * at (1-based) table entry 1, referenced by the 0x01 nibble in
 * role_ops_pipe.rops_idx below.
 */
static const lws_rops_t rops_table_pipe[] = {
	/* 1 */ { .handle_POLLIN = rops_handle_POLLIN_pipe },
};
207 | | |
208 | | |
/*
 * Role definition for the internal cancel-service "pipe" wsi.
 *
 * Each byte of rops_idx packs two op indices into rops_table_pipe; 0x00
 * means the op is not implemented and 0x01 selects table entry 1 (the
 * POLLIN handler above).  All per-role callback reasons are 0 since the
 * pipe never reaches user protocol callbacks directly.
 */
const struct lws_role_ops role_ops_pipe = {
	/* role name */		"pipe",
	/* alpn id */		NULL,

	/* rops_table */		rops_table_pipe,
	/* rops_idx */		{
	  /* LWS_ROPS_check_upgrades */
	  /* LWS_ROPS_pt_init_destroy */		0x00,
	  /* LWS_ROPS_init_vhost */
	  /* LWS_ROPS_destroy_vhost */			0x00,
	  /* LWS_ROPS_service_flag_pending */
	  /* LWS_ROPS_handle_POLLIN */			0x01,
	  /* LWS_ROPS_handle_POLLOUT */
	  /* LWS_ROPS_perform_user_POLLOUT */		0x00,
	  /* LWS_ROPS_callback_on_writable */
	  /* LWS_ROPS_tx_credit */			0x00,
	  /* LWS_ROPS_write_role_protocol */
	  /* LWS_ROPS_encapsulation_parent */		0x00,
	  /* LWS_ROPS_alpn_negotiated */
	  /* LWS_ROPS_close_via_role_protocol */	0x00,
	  /* LWS_ROPS_close_role */
	  /* LWS_ROPS_close_kill_connection */		0x00,
	  /* LWS_ROPS_destroy_role */
	  /* LWS_ROPS_adoption_bind */			0x00,
	  /* LWS_ROPS_client_bind */
	  /* LWS_ROPS_issue_keepalive */		0x00,
					},

	/* adoption_cb clnt, srv */	{ 0, 0 },
	/* rx_cb clnt, srv */		{ 0, 0 },
	/* writeable cb clnt, srv */	{ 0, 0 },
	/* close cb clnt, srv */	{ 0, 0 },
	/* protocol_bind_cb c,s */	{ 0, 0 },
	/* protocol_unbind_cb c,s */	{ 0, 0 },
#if defined(WIN32)
	/* file_handle (no, UDP) */	0,
#else
	/* file_handle */		1,
#endif
};