/src/uWebSockets/src/HttpContext.h
Line | Count | Source |
1 | | /* |
2 | | * Authored by Alex Hultman, 2018-2020. |
3 | | * Intellectual property of third-party. |
4 | | |
5 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
6 | | * you may not use this file except in compliance with the License. |
7 | | * You may obtain a copy of the License at |
8 | | |
9 | | * http://www.apache.org/licenses/LICENSE-2.0 |
10 | | |
11 | | * Unless required by applicable law or agreed to in writing, software |
12 | | * distributed under the License is distributed on an "AS IS" BASIS, |
13 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
14 | | * See the License for the specific language governing permissions and |
15 | | * limitations under the License. |
16 | | */ |
17 | | |
18 | | #ifndef UWS_HTTPCONTEXT_H |
19 | | #define UWS_HTTPCONTEXT_H |
20 | | |
21 | | /* This class defines the main behavior of HTTP and emits various events */ |
22 | | |
23 | | #include "Loop.h" |
24 | | #include "HttpContextData.h" |
25 | | #include "HttpResponseData.h" |
26 | | #include "AsyncSocket.h" |
27 | | #include "WebSocketData.h" |
28 | | |
29 | | #include <string_view> |
30 | | #include <iostream> |
31 | | #include "MoveOnlyFunction.h" |
32 | | |
33 | | namespace uWS { |
34 | | template<bool> struct HttpResponse; |
35 | | |
36 | | template <bool SSL> |
37 | | struct HttpContext { |
38 | | template<bool> friend struct TemplatedApp; |
39 | | template<bool> friend struct HttpResponse; |
40 | | private: |
/* HttpContext is never constructed directly; instances are obtained by casting an
 * existing us_socket_context_t (see getSocketContext below), hence the deleted ctor. */
41 | | HttpContext() = delete; |
42 | | |
43 | | /* Maximum delay allowed until an HTTP connection is terminated due to outstanding request or rejected data (slow loris protection) */ |
44 | | static const int HTTP_IDLE_TIMEOUT_S = 10; |
45 | | |
46 | | /* Minimum allowed receive throughput per second (clients uploading less than 16kB/sec get dropped) */ |
47 | | static const int HTTP_RECEIVE_THROUGHPUT_BYTES = 16 * 1024; |
48 | | |
49 | | us_loop_t *getLoop() { |
50 | | return us_socket_context_loop(SSL, getSocketContext()); |
51 | | } |
52 | | |
53 | 629k | us_socket_context_t *getSocketContext() { |
54 | 629k | return (us_socket_context_t *) this; |
55 | 629k | } uWS::HttpContext<true>::getSocketContext() Line | Count | Source | 53 | 162k | us_socket_context_t *getSocketContext() { | 54 | 162k | return (us_socket_context_t *) this; | 55 | 162k | } |
uWS::HttpContext<false>::getSocketContext() Line | Count | Source | 53 | 466k | us_socket_context_t *getSocketContext() { | 54 | 466k | return (us_socket_context_t *) this; | 55 | 466k | } |
|
56 | | |
57 | 13.3M | static us_socket_context_t *getSocketContext(us_socket_t *s) { |
58 | 13.3M | return (us_socket_context_t *) us_socket_context(SSL, s); |
59 | 13.3M | } uWS::HttpContext<true>::getSocketContext(us_socket_t*) Line | Count | Source | 57 | 3.17M | static us_socket_context_t *getSocketContext(us_socket_t *s) { | 58 | 3.17M | return (us_socket_context_t *) us_socket_context(SSL, s); | 59 | 3.17M | } |
uWS::HttpContext<false>::getSocketContext(us_socket_t*) Line | Count | Source | 57 | 10.1M | static us_socket_context_t *getSocketContext(us_socket_t *s) { | 58 | 10.1M | return (us_socket_context_t *) us_socket_context(SSL, s); | 59 | 10.1M | } |
|
60 | | |
61 | 447k | HttpContextData<SSL> *getSocketContextData() { |
62 | 447k | return (HttpContextData<SSL> *) us_socket_context_ext(SSL, getSocketContext()); |
63 | 447k | } uWS::HttpContext<true>::getSocketContextData() Line | Count | Source | 61 | 116k | HttpContextData<SSL> *getSocketContextData() { | 62 | 116k | return (HttpContextData<SSL> *) us_socket_context_ext(SSL, getSocketContext()); | 63 | 116k | } |
uWS::HttpContext<false>::getSocketContextData() Line | Count | Source | 61 | 331k | HttpContextData<SSL> *getSocketContextData() { | 62 | 331k | return (HttpContextData<SSL> *) us_socket_context_ext(SSL, getSocketContext()); | 63 | 331k | } |
|
64 | | |
65 | 13.3M | static HttpContextData<SSL> *getSocketContextDataS(us_socket_t *s) { |
66 | 13.3M | return (HttpContextData<SSL> *) us_socket_context_ext(SSL, getSocketContext(s)); |
67 | 13.3M | } uWS::HttpContext<true>::getSocketContextDataS(us_socket_t*) Line | Count | Source | 65 | 3.17M | static HttpContextData<SSL> *getSocketContextDataS(us_socket_t *s) { | 66 | 3.17M | return (HttpContextData<SSL> *) us_socket_context_ext(SSL, getSocketContext(s)); | 67 | 3.17M | } |
uWS::HttpContext<false>::getSocketContextDataS(us_socket_t*) Line | Count | Source | 65 | 10.1M | static HttpContextData<SSL> *getSocketContextDataS(us_socket_t *s) { | 66 | 10.1M | return (HttpContextData<SSL> *) us_socket_context_ext(SSL, getSocketContext(s)); | 67 | 10.1M | } |
|
68 | | |
69 | | /* Init the HttpContext by registering libusockets event handlers */ |
70 | 22.7k | HttpContext<SSL> *init() { |
71 | | /* Handle socket connections */ |
72 | 5.17M | us_socket_context_on_open(SSL, getSocketContext(), [](us_socket_t *s, int /*is_client*/, char */*ip*/, int /*ip_length*/) { |
73 | | /* Any connected socket should timeout until it has a request */ |
74 | 5.17M | us_socket_timeout(SSL, s, HTTP_IDLE_TIMEOUT_S); |
75 | | |
76 | | /* Init socket ext */ |
77 | 5.17M | new (us_socket_ext(SSL, s)) HttpResponseData<SSL>; |
78 | | |
79 | | /* Call filter */ |
80 | 5.17M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); |
81 | 5.17M | for (auto &f : httpContextData->filterHandlers) { |
82 | 0 | f((HttpResponse<SSL> *) s, 1); |
83 | 0 | } |
84 | | |
85 | 5.17M | return s; |
86 | 5.17M | }); uWS::HttpContext<true>::init()::{lambda(us_socket_t*, int, char*, int)#1}::operator()(us_socket_t*, int, char*, int) constLine | Count | Source | 72 | 1.11M | us_socket_context_on_open(SSL, getSocketContext(), [](us_socket_t *s, int /*is_client*/, char */*ip*/, int /*ip_length*/) { | 73 | | /* Any connected socket should timeout until it has a request */ | 74 | 1.11M | us_socket_timeout(SSL, s, HTTP_IDLE_TIMEOUT_S); | 75 | | | 76 | | /* Init socket ext */ | 77 | 1.11M | new (us_socket_ext(SSL, s)) HttpResponseData<SSL>; | 78 | | | 79 | | /* Call filter */ | 80 | 1.11M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 81 | 1.11M | for (auto &f : httpContextData->filterHandlers) { | 82 | 0 | f((HttpResponse<SSL> *) s, 1); | 83 | 0 | } | 84 | | | 85 | 1.11M | return s; | 86 | 1.11M | }); |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*, int, char*, int)#1}::operator()(us_socket_t*, int, char*, int) constLine | Count | Source | 72 | 4.05M | us_socket_context_on_open(SSL, getSocketContext(), [](us_socket_t *s, int /*is_client*/, char */*ip*/, int /*ip_length*/) { | 73 | | /* Any connected socket should timeout until it has a request */ | 74 | 4.05M | us_socket_timeout(SSL, s, HTTP_IDLE_TIMEOUT_S); | 75 | | | 76 | | /* Init socket ext */ | 77 | 4.05M | new (us_socket_ext(SSL, s)) HttpResponseData<SSL>; | 78 | | | 79 | | /* Call filter */ | 80 | 4.05M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 81 | 4.05M | for (auto &f : httpContextData->filterHandlers) { | 82 | 0 | f((HttpResponse<SSL> *) s, 1); | 83 | 0 | } | 84 | | | 85 | 4.05M | return s; | 86 | 4.05M | }); |
|
87 | | |
88 | | /* Handle socket disconnections */ |
89 | 4.83M | us_socket_context_on_close(SSL, getSocketContext(), [](us_socket_t *s, int /*code*/, void */*reason*/) { |
90 | | /* Get socket ext */ |
91 | 4.83M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); |
92 | | |
93 | | /* Call filter */ |
94 | 4.83M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); |
95 | 4.83M | for (auto &f : httpContextData->filterHandlers) { |
96 | 0 | f((HttpResponse<SSL> *) s, -1); |
97 | 0 | } |
98 | | |
99 | | /* Signal broken HTTP request only if we have a pending request */ |
100 | 4.83M | if (httpResponseData->onAborted) { |
101 | 5.69k | httpResponseData->onAborted(); |
102 | 5.69k | } |
103 | | |
104 | | /* Destruct socket ext */ |
105 | 4.83M | httpResponseData->~HttpResponseData<SSL>(); |
106 | | |
107 | 4.83M | return s; |
108 | 4.83M | }); uWS::HttpContext<true>::init()::{lambda(us_socket_t*, int, void*)#1}::operator()(us_socket_t*, int, void*) constLine | Count | Source | 89 | 1.01M | us_socket_context_on_close(SSL, getSocketContext(), [](us_socket_t *s, int /*code*/, void */*reason*/) { | 90 | | /* Get socket ext */ | 91 | 1.01M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 92 | | | 93 | | /* Call filter */ | 94 | 1.01M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 95 | 1.01M | for (auto &f : httpContextData->filterHandlers) { | 96 | 0 | f((HttpResponse<SSL> *) s, -1); | 97 | 0 | } | 98 | | | 99 | | /* Signal broken HTTP request only if we have a pending request */ | 100 | 1.01M | if (httpResponseData->onAborted) { | 101 | 0 | httpResponseData->onAborted(); | 102 | 0 | } | 103 | | | 104 | | /* Destruct socket ext */ | 105 | 1.01M | httpResponseData->~HttpResponseData<SSL>(); | 106 | | | 107 | 1.01M | return s; | 108 | 1.01M | }); |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*, int, void*)#1}::operator()(us_socket_t*, int, void*) constLine | Count | Source | 89 | 3.81M | us_socket_context_on_close(SSL, getSocketContext(), [](us_socket_t *s, int /*code*/, void */*reason*/) { | 90 | | /* Get socket ext */ | 91 | 3.81M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 92 | | | 93 | | /* Call filter */ | 94 | 3.81M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 95 | 3.81M | for (auto &f : httpContextData->filterHandlers) { | 96 | 0 | f((HttpResponse<SSL> *) s, -1); | 97 | 0 | } | 98 | | | 99 | | /* Signal broken HTTP request only if we have a pending request */ | 100 | 3.81M | if (httpResponseData->onAborted) { | 101 | 5.69k | httpResponseData->onAborted(); | 102 | 5.69k | } | 103 | | | 104 | | /* Destruct socket ext */ | 105 | 3.81M | httpResponseData->~HttpResponseData<SSL>(); | 106 | | | 107 | 3.81M | return s; | 108 | 3.81M | }); |
|
109 | | |
110 | | /* Handle HTTP data streams */ |
111 | 3.32M | us_socket_context_on_data(SSL, getSocketContext(), [](us_socket_t *s, char *data, int length) { |
112 | | |
113 | | // total overhead is about 210k down to 180k |
114 | | // ~210k req/sec is the original perf with write in data |
115 | | // ~200k req/sec is with cork and formatting |
116 | | // ~190k req/sec is with http parsing |
117 | | // ~180k - 190k req/sec is with varying routing |
118 | | |
119 | 3.32M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); |
120 | | |
121 | | /* Do not accept any data while in shutdown state */ |
122 | 3.32M | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { |
123 | 0 | return s; |
124 | 0 | } |
125 | | |
126 | 3.32M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); |
127 | | |
128 | | /* Cork this socket */ |
129 | 3.32M | ((AsyncSocket<SSL> *) s)->cork(); |
130 | | |
131 | | /* Mark that we are inside the parser now */ |
132 | 3.32M | httpContextData->isParsingHttp = true; |
133 | | |
134 | | // clients need to know the cursor after http parse, not servers! |
135 | | // how far did we read then? we need to know to continue with websocket parsing data? or? |
136 | | |
137 | 3.32M | void *proxyParser = nullptr; |
138 | | #ifdef UWS_WITH_PROXY |
139 | | proxyParser = &httpResponseData->proxyParser; |
140 | | #endif |
141 | | |
142 | | /* The return value is entirely up to us to interpret. The HttpParser only care for whether the returned value is DIFFERENT or not from passed user */ |
143 | 3.32M | auto [err, returnedSocket] = httpResponseData->consumePostPadded(data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * { |
144 | | /* For every request we reset the timeout and hang until user makes action */ |
145 | | /* Warning: if we are in shutdown state, resetting the timer is a security issue! */ |
146 | 448k | us_socket_timeout(SSL, (us_socket_t *) s, 0); |
147 | | |
148 | | /* Reset httpResponse */ |
149 | 448k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, (us_socket_t *) s); |
150 | 448k | httpResponseData->offset = 0; |
151 | | |
152 | | /* Are we not ready for another request yet? Terminate the connection. |
153 | | * Important for denying async pipelining until, if ever, we want to support it. |
154 | | * Otherwise requests can get mixed up on the same connection. We still support sync pipelining. */ |
155 | 448k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) { |
156 | 1.17k | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); |
157 | 1.17k | return nullptr; |
158 | 1.17k | } |
159 | | |
160 | | /* Mark pending request and emit it */ |
161 | 447k | httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING; |
162 | | |
163 | | /* Mark this response as connectionClose if ancient or connection: close */ |
164 | 447k | if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) { |
165 | 3.89k | httpResponseData->state |= HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE; |
166 | 3.89k | } |
167 | | |
168 | | /* Select the router based on SNI (only possible for SSL) */ |
169 | 447k | auto *selectedRouter = &httpContextData->router; |
170 | 447k | if constexpr (SSL) { |
171 | 116k | void *domainRouter = us_socket_server_name_userdata(SSL, (struct us_socket_t *) s); |
172 | 116k | if (domainRouter) { |
173 | 0 | selectedRouter = (decltype(selectedRouter)) domainRouter; |
174 | 0 | } |
175 | 116k | } |
176 | | |
177 | | /* Route the method and URL */ |
178 | 447k | selectedRouter->getUserData() = {(HttpResponse<SSL> *) s, httpRequest}; |
179 | 447k | if (!selectedRouter->route(httpRequest->getCaseSensitiveMethod(), httpRequest->getUrl())) { |
180 | | /* We have to force close this socket as we have no handler for it */ |
181 | 0 | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); |
182 | 0 | return nullptr; |
183 | 0 | } |
184 | | |
185 | | /* First of all we need to check if this socket was deleted due to upgrade */ |
186 | 447k | if (httpContextData->upgradedWebSocket) { |
187 | | /* We differ between closed and upgraded below */ |
188 | 338k | return nullptr; |
189 | 338k | } |
190 | | |
191 | | /* Was the socket closed? */ |
192 | 109k | if (us_socket_is_closed(SSL, (struct us_socket_t *) s)) { |
193 | 891 | return nullptr; |
194 | 891 | } |
195 | | |
196 | | /* We absolutely have to terminate parsing if shutdown */ |
197 | 108k | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { |
198 | 0 | return nullptr; |
199 | 0 | } |
200 | | |
201 | | /* Returning from a request handler without responding or attaching an onAborted handler is ill-use */ |
202 | 108k | if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) { |
203 | | /* Throw exception here? */ |
204 | 0 | std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl; |
205 | 0 | std::terminate(); |
206 | 0 | } |
207 | | |
208 | | /* If we have not responded and we have a data handler, we need to timeout to enforce client sending the data */ |
209 | 108k | if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) { |
210 | 3.05k | us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S); |
211 | 3.05k | } |
212 | | |
213 | | /* Continue parsing */ |
214 | 108k | return s; |
215 | | |
216 | 144k | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * {uWS::HttpContext<true>::init()::{lambda(us_socket_t*, char*, int)#1}::operator()(us_socket_t*, char*, int) const::{lambda(void*, uWS::HttpRequest*)#1}::operator()({lambda(us_socket_t*, char*, int)#1}, uWS::HttpRequest) constLine | Count | Source | 143 | 116k | auto [err, returnedSocket] = httpResponseData->consumePostPadded(data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * { | 144 | | /* For every request we reset the timeout and hang until user makes action */ | 145 | | /* Warning: if we are in shutdown state, resetting the timer is a security issue! */ | 146 | 116k | us_socket_timeout(SSL, (us_socket_t *) s, 0); | 147 | | | 148 | | /* Reset httpResponse */ | 149 | 116k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, (us_socket_t *) s); | 150 | 116k | httpResponseData->offset = 0; | 151 | | | 152 | | /* Are we not ready for another request yet? Terminate the connection. | 153 | | * Important for denying async pipelining until, if ever, we want to suppot it. | 154 | | * Otherwise requests can get mixed up on the same connection. We still support sync pipelining. 
*/ | 155 | 116k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) { | 156 | 0 | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 157 | 0 | return nullptr; | 158 | 0 | } | 159 | | | 160 | | /* Mark pending request and emit it */ | 161 | 116k | httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING; | 162 | | | 163 | | /* Mark this response as connectionClose if ancient or connection: close */ | 164 | 116k | if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) { | 165 | 865 | httpResponseData->state |= HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE; | 166 | 865 | } | 167 | | | 168 | | /* Select the router based on SNI (only possible for SSL) */ | 169 | 116k | auto *selectedRouter = &httpContextData->router; | 170 | 116k | if constexpr (SSL) { | 171 | 116k | void *domainRouter = us_socket_server_name_userdata(SSL, (struct us_socket_t *) s); | 172 | 116k | if (domainRouter) { | 173 | 0 | selectedRouter = (decltype(selectedRouter)) domainRouter; | 174 | 0 | } | 175 | 116k | } | 176 | | | 177 | | /* Route the method and URL */ | 178 | 116k | selectedRouter->getUserData() = {(HttpResponse<SSL> *) s, httpRequest}; | 179 | 116k | if (!selectedRouter->route(httpRequest->getCaseSensitiveMethod(), httpRequest->getUrl())) { | 180 | | /* We have to force close this socket as we have no handler for it */ | 181 | 0 | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 182 | 0 | return nullptr; | 183 | 0 | } | 184 | | | 185 | | /* First of all we need to check if this socket was deleted due to upgrade */ | 186 | 116k | if (httpContextData->upgradedWebSocket) { | 187 | | /* We differ between closed and upgraded below */ | 188 | 99.0k | return nullptr; | 189 | 99.0k | } | 190 | | | 191 | | /* Was the socket closed? 
*/ | 192 | 17.0k | if (us_socket_is_closed(SSL, (struct us_socket_t *) s)) { | 193 | 0 | return nullptr; | 194 | 0 | } | 195 | | | 196 | | /* We absolutely have to terminate parsing if shutdown */ | 197 | 17.0k | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 198 | 0 | return nullptr; | 199 | 0 | } | 200 | | | 201 | | /* Returning from a request handler without responding or attaching an onAborted handler is ill-use */ | 202 | 17.0k | if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) { | 203 | | /* Throw exception here? */ | 204 | 0 | std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl; | 205 | 0 | std::terminate(); | 206 | 0 | } | 207 | | | 208 | | /* If we have not responded and we have a data handler, we need to timeout to enfore client sending the data */ | 209 | 17.0k | if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) { | 210 | 0 | us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S); | 211 | 0 | } | 212 | | | 213 | | /* Continue parsing */ | 214 | 17.0k | return s; | 215 | | | 216 | 17.0k | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*, char*, int)#1}::operator()(us_socket_t*, char*, int) const::{lambda(void*, uWS::HttpRequest*)#1}::operator()({lambda(us_socket_t*, char*, int)#1}, uWS::HttpRequest) constLine | Count | Source | 143 | 332k | auto [err, returnedSocket] = httpResponseData->consumePostPadded(data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * { | 144 | | /* For every request we reset the timeout and hang until user makes action */ | 145 | | /* Warning: if we are in shutdown state, resetting the timer is a security issue! */ | 146 | 332k | us_socket_timeout(SSL, (us_socket_t *) s, 0); | 147 | | | 148 | | /* Reset httpResponse */ | 149 | 332k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, (us_socket_t *) s); | 150 | 332k | httpResponseData->offset = 0; | 151 | | | 152 | | /* Are we not ready for another request yet? Terminate the connection. | 153 | | * Important for denying async pipelining until, if ever, we want to suppot it. | 154 | | * Otherwise requests can get mixed up on the same connection. We still support sync pipelining. 
*/ | 155 | 332k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) { | 156 | 1.17k | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 157 | 1.17k | return nullptr; | 158 | 1.17k | } | 159 | | | 160 | | /* Mark pending request and emit it */ | 161 | 331k | httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING; | 162 | | | 163 | | /* Mark this response as connectionClose if ancient or connection: close */ | 164 | 331k | if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) { | 165 | 3.02k | httpResponseData->state |= HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE; | 166 | 3.02k | } | 167 | | | 168 | | /* Select the router based on SNI (only possible for SSL) */ | 169 | 331k | auto *selectedRouter = &httpContextData->router; | 170 | | if constexpr (SSL) { | 171 | | void *domainRouter = us_socket_server_name_userdata(SSL, (struct us_socket_t *) s); | 172 | | if (domainRouter) { | 173 | | selectedRouter = (decltype(selectedRouter)) domainRouter; | 174 | | } | 175 | | } | 176 | | | 177 | | /* Route the method and URL */ | 178 | 331k | selectedRouter->getUserData() = {(HttpResponse<SSL> *) s, httpRequest}; | 179 | 331k | if (!selectedRouter->route(httpRequest->getCaseSensitiveMethod(), httpRequest->getUrl())) { | 180 | | /* We have to force close this socket as we have no handler for it */ | 181 | 0 | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 182 | 0 | return nullptr; | 183 | 0 | } | 184 | | | 185 | | /* First of all we need to check if this socket was deleted due to upgrade */ | 186 | 331k | if (httpContextData->upgradedWebSocket) { | 187 | | /* We differ between closed and upgraded below */ | 188 | 239k | return nullptr; | 189 | 239k | } | 190 | | | 191 | | /* Was the socket closed? 
*/ | 192 | 92.2k | if (us_socket_is_closed(SSL, (struct us_socket_t *) s)) { | 193 | 891 | return nullptr; | 194 | 891 | } | 195 | | | 196 | | /* We absolutely have to terminate parsing if shutdown */ | 197 | 91.3k | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 198 | 0 | return nullptr; | 199 | 0 | } | 200 | | | 201 | | /* Returning from a request handler without responding or attaching an onAborted handler is ill-use */ | 202 | 91.3k | if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) { | 203 | | /* Throw exception here? */ | 204 | 0 | std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl; | 205 | 0 | std::terminate(); | 206 | 0 | } | 207 | | | 208 | | /* If we have not responded and we have a data handler, we need to timeout to enfore client sending the data */ | 209 | 91.3k | if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) { | 210 | 3.05k | us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S); | 211 | 3.05k | } | 212 | | | 213 | | /* Continue parsing */ | 214 | 91.3k | return s; | 215 | | | 216 | 91.3k | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { |
|
217 | | /* We always get an empty chunk even if there is no data */ |
218 | 144k | if (httpResponseData->inStream) { |
219 | | |
220 | | /* Todo: can this handle timeout for non-post as well? */ |
221 | 37.1k | if (fin) { |
222 | | /* If we just got the last chunk (or empty chunk), disable timeout */ |
223 | 1.26k | us_socket_timeout(SSL, (struct us_socket_t *) user, 0); |
224 | 35.8k | } else { |
225 | | /* We still have some more data coming in later, so reset timeout */ |
226 | | /* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */ |
227 | 35.8k | httpResponseData->received_bytes_per_timeout += (unsigned int) data.length(); |
228 | 35.8k | if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) { |
229 | 13 | us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S); |
230 | 13 | httpResponseData->received_bytes_per_timeout = 0; |
231 | 13 | } |
232 | 35.8k | } |
233 | | |
234 | | /* We might respond in the handler, so do not change timeout after this */ |
235 | 37.1k | httpResponseData->inStream(data, fin); |
236 | | |
237 | | /* Was the socket closed? */ |
238 | 37.1k | if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) { |
239 | 0 | return nullptr; |
240 | 0 | } |
241 | | |
242 | | /* We absolutely have to terminate parsing if shutdown */ |
243 | 37.1k | if (us_socket_is_shut_down(SSL, (us_socket_t *) user)) { |
244 | 0 | return nullptr; |
245 | 0 | } |
246 | | |
247 | | /* If we were given the last data chunk, reset data handler to ensure following |
248 | | * requests on the same socket won't trigger any previously registered behavior */ |
249 | 37.1k | if (fin) { |
250 | 1.26k | httpResponseData->inStream = nullptr; |
251 | 1.26k | } |
252 | 37.1k | } |
253 | 144k | return user; |
254 | 144k | }); uWS::HttpContext<true>::init()::{lambda(us_socket_t*, char*, int)#1}::operator()(us_socket_t*, char*, int) const::{lambda(void*, std::__1::basic_string_view<char, void*::char_traits<char> >, bool)#1}::operator()({lambda(us_socket_t*, char*, int)#1}, void*::char_traits<char>, bool) constLine | Count | Source | 216 | 17.2k | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { | 217 | | /* We always get an empty chunk even if there is no data */ | 218 | 17.2k | if (httpResponseData->inStream) { | 219 | | | 220 | | /* Todo: can this handle timeout for non-post as well? */ | 221 | 0 | if (fin) { | 222 | | /* If we just got the last chunk (or empty chunk), disable timeout */ | 223 | 0 | us_socket_timeout(SSL, (struct us_socket_t *) user, 0); | 224 | 0 | } else { | 225 | | /* We still have some more data coming in later, so reset timeout */ | 226 | | /* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */ | 227 | 0 | httpResponseData->received_bytes_per_timeout += (unsigned int) data.length(); | 228 | 0 | if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) { | 229 | 0 | us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S); | 230 | 0 | httpResponseData->received_bytes_per_timeout = 0; | 231 | 0 | } | 232 | 0 | } | 233 | | | 234 | | /* We might respond in the handler, so do not change timeout after this */ | 235 | 0 | httpResponseData->inStream(data, fin); | 236 | | | 237 | | /* Was the socket closed? 
*/ | 238 | 0 | if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) { | 239 | 0 | return nullptr; | 240 | 0 | } | 241 | | | 242 | | /* We absolutely have to terminate parsing if shutdown */ | 243 | 0 | if (us_socket_is_shut_down(SSL, (us_socket_t *) user)) { | 244 | 0 | return nullptr; | 245 | 0 | } | 246 | | | 247 | | /* If we were given the last data chunk, reset data handler to ensure following | 248 | | * requests on the same socket won't trigger any previously registered behavior */ | 249 | 0 | if (fin) { | 250 | 0 | httpResponseData->inStream = nullptr; | 251 | 0 | } | 252 | 0 | } | 253 | 17.2k | return user; | 254 | 17.2k | }); |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*, char*, int)#1}::operator()(us_socket_t*, char*, int) const::{lambda(void*, std::__1::basic_string_view<char, void*::char_traits<char> >, bool)#1}::operator()({lambda(us_socket_t*, char*, int)#1}, void*::char_traits<char>, bool) constLine | Count | Source | 216 | 126k | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { | 217 | | /* We always get an empty chunk even if there is no data */ | 218 | 126k | if (httpResponseData->inStream) { | 219 | | | 220 | | /* Todo: can this handle timeout for non-post as well? */ | 221 | 37.1k | if (fin) { | 222 | | /* If we just got the last chunk (or empty chunk), disable timeout */ | 223 | 1.26k | us_socket_timeout(SSL, (struct us_socket_t *) user, 0); | 224 | 35.8k | } else { | 225 | | /* We still have some more data coming in later, so reset timeout */ | 226 | | /* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */ | 227 | 35.8k | httpResponseData->received_bytes_per_timeout += (unsigned int) data.length(); | 228 | 35.8k | if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) { | 229 | 13 | us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S); | 230 | 13 | httpResponseData->received_bytes_per_timeout = 0; | 231 | 13 | } | 232 | 35.8k | } | 233 | | | 234 | | /* We might respond in the handler, so do not change timeout after this */ | 235 | 37.1k | httpResponseData->inStream(data, fin); | 236 | | | 237 | | /* Was the socket closed? 
*/ | 238 | 37.1k | if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) { | 239 | 0 | return nullptr; | 240 | 0 | } | 241 | | | 242 | | /* We absolutely have to terminate parsing if shutdown */ | 243 | 37.1k | if (us_socket_is_shut_down(SSL, (us_socket_t *) user)) { | 244 | 0 | return nullptr; | 245 | 0 | } | 246 | | | 247 | | /* If we were given the last data chunk, reset data handler to ensure following | 248 | | * requests on the same socket won't trigger any previously registered behavior */ | 249 | 37.1k | if (fin) { | 250 | 1.26k | httpResponseData->inStream = nullptr; | 251 | 1.26k | } | 252 | 37.1k | } | 253 | 126k | return user; | 254 | 126k | }); |
|
255 | | |
256 | | /* Mark that we are no longer parsing Http */ |
257 | 3.32M | httpContextData->isParsingHttp = false; |
258 | | |
259 | | /* If we got fullptr that means the parser wants us to close the socket from error (same as calling the errorHandler) */ |
260 | 3.32M | if (returnedSocket == FULLPTR) { |
261 | | /* For errors, we only deliver them "at most once". We don't care if they get halfways delivered or not. */ |
262 | 1.52M | us_socket_write(SSL, s, httpErrorResponses[err].data(), (int) httpErrorResponses[err].length(), false); |
263 | 1.52M | us_socket_shutdown(SSL, s); |
264 | | /* Close any socket on HTTP errors */ |
265 | 1.52M | us_socket_close(SSL, s, 0, nullptr); |
266 | | /* This just makes the following code act as if the socket was closed from error inside the parser. */ |
267 | 1.52M | returnedSocket = nullptr; |
268 | 1.52M | } |
269 | | |
270 | | /* We need to uncork in all cases, except for nullptr (closed socket, or upgraded socket) */ |
271 | 3.32M | if (returnedSocket != nullptr) { |
272 | | /* Timeout on uncork failure */ |
273 | 1.46M | auto [written, failed] = ((AsyncSocket<SSL> *) returnedSocket)->uncork(); |
274 | 1.46M | if (failed) { |
275 | | /* All Http sockets timeout by this, and this behavior match the one in HttpResponse::cork */ |
276 | | /* Warning: both HTTP_IDLE_TIMEOUT_S and HTTP_TIMEOUT_S are 10 seconds and both are used the same */ |
277 | 102k | ((AsyncSocket<SSL> *) s)->timeout(HTTP_IDLE_TIMEOUT_S); |
278 | 102k | } |
279 | | |
280 | | /* We need to check if we should close this socket here now */ |
281 | 1.46M | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { |
282 | 5.28k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { |
283 | 4.08k | if (((AsyncSocket<SSL> *) s)->getBufferedAmount() == 0) { |
284 | 915 | ((AsyncSocket<SSL> *) s)->shutdown(); |
285 | | /* We need to force close after sending FIN since we want to hinder |
286 | | * clients from keeping to send their huge data */ |
287 | 915 | ((AsyncSocket<SSL> *) s)->close(); |
288 | 915 | } |
289 | 4.08k | } |
290 | 5.28k | } |
291 | | |
292 | 1.46M | return (us_socket_t *) returnedSocket; |
293 | 1.46M | } |
294 | | |
295 | | /* If we upgraded, check here (differ between nullptr close and nullptr upgrade) */ |
296 | 1.86M | if (httpContextData->upgradedWebSocket) { |
297 | | /* This path is only for upgraded websockets */ |
298 | 338k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) httpContextData->upgradedWebSocket; |
299 | | |
300 | | /* Uncork here as well (note: what if we failed to uncork and we then pub/sub before we even upgraded?) */ |
301 | 338k | auto [written, failed] = asyncSocket->uncork(); |
302 | | |
303 | | /* If we succeeded in uncorking, check if we have sent WebSocket FIN */ |
304 | 338k | if (!failed) { |
305 | 22.2k | WebSocketData *webSocketData = (WebSocketData *) asyncSocket->getAsyncSocketData(); |
306 | 22.2k | if (webSocketData->isShuttingDown) { |
307 | | /* In that case, also send TCP FIN (this is similar to what we have in ws drain handler) */ |
308 | 0 | asyncSocket->shutdown(); |
309 | 0 | } |
310 | 22.2k | } |
311 | | |
312 | | /* Reset upgradedWebSocket before we return */ |
313 | 338k | httpContextData->upgradedWebSocket = nullptr; |
314 | | |
315 | | /* Return the new upgraded websocket */ |
316 | 338k | return (us_socket_t *) asyncSocket; |
317 | 338k | } |
318 | | |
319 | | /* It is okay to uncork a closed socket and we need to */ |
320 | 1.52M | ((AsyncSocket<SSL> *) s)->uncork(); |
321 | | |
322 | | /* We cannot return nullptr to the underlying stack in any case */ |
323 | 1.52M | return s; |
324 | 1.86M | }); uWS::HttpContext<true>::init()::{lambda(us_socket_t*, char*, int)#1}::operator()(us_socket_t*, char*, int) constLine | Count | Source | 111 | 1.03M | us_socket_context_on_data(SSL, getSocketContext(), [](us_socket_t *s, char *data, int length) { | 112 | | | 113 | | // total overhead is about 210k down to 180k | 114 | | // ~210k req/sec is the original perf with write in data | 115 | | // ~200k req/sec is with cork and formatting | 116 | | // ~190k req/sec is with http parsing | 117 | | // ~180k - 190k req/sec is with varying routing | 118 | | | 119 | 1.03M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 120 | | | 121 | | /* Do not accept any data while in shutdown state */ | 122 | 1.03M | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 123 | 0 | return s; | 124 | 0 | } | 125 | | | 126 | 1.03M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 127 | | | 128 | | /* Cork this socket */ | 129 | 1.03M | ((AsyncSocket<SSL> *) s)->cork(); | 130 | | | 131 | | /* Mark that we are inside the parser now */ | 132 | 1.03M | httpContextData->isParsingHttp = true; | 133 | | | 134 | | // clients need to know the cursor after http parse, not servers! | 135 | | // how far did we read then? we need to know to continue with websocket parsing data? or? | 136 | | | 137 | 1.03M | void *proxyParser = nullptr; | 138 | | #ifdef UWS_WITH_PROXY | 139 | | proxyParser = &httpResponseData->proxyParser; | 140 | | #endif | 141 | | | 142 | | /* The return value is entirely up to us to interpret. 
The HttpParser only care for whether the returned value is DIFFERENT or not from passed user */ | 143 | 1.03M | auto [err, returnedSocket] = httpResponseData->consumePostPadded(data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * { | 144 | | /* For every request we reset the timeout and hang until user makes action */ | 145 | | /* Warning: if we are in shutdown state, resetting the timer is a security issue! */ | 146 | 1.03M | us_socket_timeout(SSL, (us_socket_t *) s, 0); | 147 | | | 148 | | /* Reset httpResponse */ | 149 | 1.03M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, (us_socket_t *) s); | 150 | 1.03M | httpResponseData->offset = 0; | 151 | | | 152 | | /* Are we not ready for another request yet? Terminate the connection. | 153 | | * Important for denying async pipelining until, if ever, we want to suppot it. | 154 | | * Otherwise requests can get mixed up on the same connection. We still support sync pipelining. 
*/ | 155 | 1.03M | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) { | 156 | 1.03M | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 157 | 1.03M | return nullptr; | 158 | 1.03M | } | 159 | | | 160 | | /* Mark pending request and emit it */ | 161 | 1.03M | httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING; | 162 | | | 163 | | /* Mark this response as connectionClose if ancient or connection: close */ | 164 | 1.03M | if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) { | 165 | 1.03M | httpResponseData->state |= HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE; | 166 | 1.03M | } | 167 | | | 168 | | /* Select the router based on SNI (only possible for SSL) */ | 169 | 1.03M | auto *selectedRouter = &httpContextData->router; | 170 | 1.03M | if constexpr (SSL) { | 171 | 1.03M | void *domainRouter = us_socket_server_name_userdata(SSL, (struct us_socket_t *) s); | 172 | 1.03M | if (domainRouter) { | 173 | 1.03M | selectedRouter = (decltype(selectedRouter)) domainRouter; | 174 | 1.03M | } | 175 | 1.03M | } | 176 | | | 177 | | /* Route the method and URL */ | 178 | 1.03M | selectedRouter->getUserData() = {(HttpResponse<SSL> *) s, httpRequest}; | 179 | 1.03M | if (!selectedRouter->route(httpRequest->getCaseSensitiveMethod(), httpRequest->getUrl())) { | 180 | | /* We have to force close this socket as we have no handler for it */ | 181 | 1.03M | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 182 | 1.03M | return nullptr; | 183 | 1.03M | } | 184 | | | 185 | | /* First of all we need to check if this socket was deleted due to upgrade */ | 186 | 1.03M | if (httpContextData->upgradedWebSocket) { | 187 | | /* We differ between closed and upgraded below */ | 188 | 1.03M | return nullptr; | 189 | 1.03M | } | 190 | | | 191 | | /* Was the socket closed? 
*/ | 192 | 1.03M | if (us_socket_is_closed(SSL, (struct us_socket_t *) s)) { | 193 | 1.03M | return nullptr; | 194 | 1.03M | } | 195 | | | 196 | | /* We absolutely have to terminate parsing if shutdown */ | 197 | 1.03M | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 198 | 1.03M | return nullptr; | 199 | 1.03M | } | 200 | | | 201 | | /* Returning from a request handler without responding or attaching an onAborted handler is ill-use */ | 202 | 1.03M | if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) { | 203 | | /* Throw exception here? */ | 204 | 1.03M | std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl; | 205 | 1.03M | std::terminate(); | 206 | 1.03M | } | 207 | | | 208 | | /* If we have not responded and we have a data handler, we need to timeout to enfore client sending the data */ | 209 | 1.03M | if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) { | 210 | 1.03M | us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S); | 211 | 1.03M | } | 212 | | | 213 | | /* Continue parsing */ | 214 | 1.03M | return s; | 215 | | | 216 | 1.03M | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { | 217 | | /* We always get an empty chunk even if there is no data */ | 218 | 1.03M | if (httpResponseData->inStream) { | 219 | | | 220 | | /* Todo: can this handle timeout for non-post as well? 
*/ | 221 | 1.03M | if (fin) { | 222 | | /* If we just got the last chunk (or empty chunk), disable timeout */ | 223 | 1.03M | us_socket_timeout(SSL, (struct us_socket_t *) user, 0); | 224 | 1.03M | } else { | 225 | | /* We still have some more data coming in later, so reset timeout */ | 226 | | /* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */ | 227 | 1.03M | httpResponseData->received_bytes_per_timeout += (unsigned int) data.length(); | 228 | 1.03M | if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) { | 229 | 1.03M | us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S); | 230 | 1.03M | httpResponseData->received_bytes_per_timeout = 0; | 231 | 1.03M | } | 232 | 1.03M | } | 233 | | | 234 | | /* We might respond in the handler, so do not change timeout after this */ | 235 | 1.03M | httpResponseData->inStream(data, fin); | 236 | | | 237 | | /* Was the socket closed? */ | 238 | 1.03M | if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) { | 239 | 1.03M | return nullptr; | 240 | 1.03M | } | 241 | | | 242 | | /* We absolutely have to terminate parsing if shutdown */ | 243 | 1.03M | if (us_socket_is_shut_down(SSL, (us_socket_t *) user)) { | 244 | 1.03M | return nullptr; | 245 | 1.03M | } | 246 | | | 247 | | /* If we were given the last data chunk, reset data handler to ensure following | 248 | | * requests on the same socket won't trigger any previously registered behavior */ | 249 | 1.03M | if (fin) { | 250 | 1.03M | httpResponseData->inStream = nullptr; | 251 | 1.03M | } | 252 | 1.03M | } | 253 | 1.03M | return user; | 254 | 1.03M | }); | 255 | | | 256 | | /* Mark that we are no longer parsing Http */ | 257 | 1.03M | httpContextData->isParsingHttp = false; | 258 | | | 259 | | /* If we got fullptr that means the parser wants us to close the socket from error (same as calling the errorHandler) */ | 260 | 1.03M | if (returnedSocket == FULLPTR) { | 261 | 
| /* For errors, we only deliver them "at most once". We don't care if they get halfways delivered or not. */ | 262 | 464k | us_socket_write(SSL, s, httpErrorResponses[err].data(), (int) httpErrorResponses[err].length(), false); | 263 | 464k | us_socket_shutdown(SSL, s); | 264 | | /* Close any socket on HTTP errors */ | 265 | 464k | us_socket_close(SSL, s, 0, nullptr); | 266 | | /* This just makes the following code act as if the socket was closed from error inside the parser. */ | 267 | 464k | returnedSocket = nullptr; | 268 | 464k | } | 269 | | | 270 | | /* We need to uncork in all cases, except for nullptr (closed socket, or upgraded socket) */ | 271 | 1.03M | if (returnedSocket != nullptr) { | 272 | | /* Timeout on uncork failure */ | 273 | 468k | auto [written, failed] = ((AsyncSocket<SSL> *) returnedSocket)->uncork(); | 274 | 468k | if (failed) { | 275 | | /* All Http sockets timeout by this, and this behavior match the one in HttpResponse::cork */ | 276 | | /* Warning: both HTTP_IDLE_TIMEOUT_S and HTTP_TIMEOUT_S are 10 seconds and both are used the same */ | 277 | 14.4k | ((AsyncSocket<SSL> *) s)->timeout(HTTP_IDLE_TIMEOUT_S); | 278 | 14.4k | } | 279 | | | 280 | | /* We need to check if we should close this socket here now */ | 281 | 468k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 282 | 1.26k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 283 | 1.26k | if (((AsyncSocket<SSL> *) s)->getBufferedAmount() == 0) { | 284 | 194 | ((AsyncSocket<SSL> *) s)->shutdown(); | 285 | | /* We need to force close after sending FIN since we want to hinder | 286 | | * clients from keeping to send their huge data */ | 287 | 194 | ((AsyncSocket<SSL> *) s)->close(); | 288 | 194 | } | 289 | 1.26k | } | 290 | 1.26k | } | 291 | | | 292 | 468k | return (us_socket_t *) returnedSocket; | 293 | 468k | } | 294 | | | 295 | | /* If we upgraded, check here (differ between nullptr close and nullptr upgrade) */ | 
296 | 563k | if (httpContextData->upgradedWebSocket) { | 297 | | /* This path is only for upgraded websockets */ | 298 | 99.0k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) httpContextData->upgradedWebSocket; | 299 | | | 300 | | /* Uncork here as well (note: what if we failed to uncork and we then pub/sub before we even upgraded?) */ | 301 | 99.0k | auto [written, failed] = asyncSocket->uncork(); | 302 | | | 303 | | /* If we succeeded in uncorking, check if we have sent WebSocket FIN */ | 304 | 99.0k | if (!failed) { | 305 | 969 | WebSocketData *webSocketData = (WebSocketData *) asyncSocket->getAsyncSocketData(); | 306 | 969 | if (webSocketData->isShuttingDown) { | 307 | | /* In that case, also send TCP FIN (this is similar to what we have in ws drain handler) */ | 308 | 0 | asyncSocket->shutdown(); | 309 | 0 | } | 310 | 969 | } | 311 | | | 312 | | /* Reset upgradedWebSocket before we return */ | 313 | 99.0k | httpContextData->upgradedWebSocket = nullptr; | 314 | | | 315 | | /* Return the new upgraded websocket */ | 316 | 99.0k | return (us_socket_t *) asyncSocket; | 317 | 99.0k | } | 318 | | | 319 | | /* It is okay to uncork a closed socket and we need to */ | 320 | 464k | ((AsyncSocket<SSL> *) s)->uncork(); | 321 | | | 322 | | /* We cannot return nullptr to the underlying stack in any case */ | 323 | 464k | return s; | 324 | 563k | }); |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*, char*, int)#1}::operator()(us_socket_t*, char*, int) constLine | Count | Source | 111 | 2.29M | us_socket_context_on_data(SSL, getSocketContext(), [](us_socket_t *s, char *data, int length) { | 112 | | | 113 | | // total overhead is about 210k down to 180k | 114 | | // ~210k req/sec is the original perf with write in data | 115 | | // ~200k req/sec is with cork and formatting | 116 | | // ~190k req/sec is with http parsing | 117 | | // ~180k - 190k req/sec is with varying routing | 118 | | | 119 | 2.29M | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 120 | | | 121 | | /* Do not accept any data while in shutdown state */ | 122 | 2.29M | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 123 | 0 | return s; | 124 | 0 | } | 125 | | | 126 | 2.29M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 127 | | | 128 | | /* Cork this socket */ | 129 | 2.29M | ((AsyncSocket<SSL> *) s)->cork(); | 130 | | | 131 | | /* Mark that we are inside the parser now */ | 132 | 2.29M | httpContextData->isParsingHttp = true; | 133 | | | 134 | | // clients need to know the cursor after http parse, not servers! | 135 | | // how far did we read then? we need to know to continue with websocket parsing data? or? | 136 | | | 137 | 2.29M | void *proxyParser = nullptr; | 138 | | #ifdef UWS_WITH_PROXY | 139 | | proxyParser = &httpResponseData->proxyParser; | 140 | | #endif | 141 | | | 142 | | /* The return value is entirely up to us to interpret. 
The HttpParser only care for whether the returned value is DIFFERENT or not from passed user */ | 143 | 2.29M | auto [err, returnedSocket] = httpResponseData->consumePostPadded(data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * { | 144 | | /* For every request we reset the timeout and hang until user makes action */ | 145 | | /* Warning: if we are in shutdown state, resetting the timer is a security issue! */ | 146 | 2.29M | us_socket_timeout(SSL, (us_socket_t *) s, 0); | 147 | | | 148 | | /* Reset httpResponse */ | 149 | 2.29M | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, (us_socket_t *) s); | 150 | 2.29M | httpResponseData->offset = 0; | 151 | | | 152 | | /* Are we not ready for another request yet? Terminate the connection. | 153 | | * Important for denying async pipelining until, if ever, we want to suppot it. | 154 | | * Otherwise requests can get mixed up on the same connection. We still support sync pipelining. 
*/ | 155 | 2.29M | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) { | 156 | 2.29M | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 157 | 2.29M | return nullptr; | 158 | 2.29M | } | 159 | | | 160 | | /* Mark pending request and emit it */ | 161 | 2.29M | httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING; | 162 | | | 163 | | /* Mark this response as connectionClose if ancient or connection: close */ | 164 | 2.29M | if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) { | 165 | 2.29M | httpResponseData->state |= HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE; | 166 | 2.29M | } | 167 | | | 168 | | /* Select the router based on SNI (only possible for SSL) */ | 169 | 2.29M | auto *selectedRouter = &httpContextData->router; | 170 | 2.29M | if constexpr (SSL) { | 171 | 2.29M | void *domainRouter = us_socket_server_name_userdata(SSL, (struct us_socket_t *) s); | 172 | 2.29M | if (domainRouter) { | 173 | 2.29M | selectedRouter = (decltype(selectedRouter)) domainRouter; | 174 | 2.29M | } | 175 | 2.29M | } | 176 | | | 177 | | /* Route the method and URL */ | 178 | 2.29M | selectedRouter->getUserData() = {(HttpResponse<SSL> *) s, httpRequest}; | 179 | 2.29M | if (!selectedRouter->route(httpRequest->getCaseSensitiveMethod(), httpRequest->getUrl())) { | 180 | | /* We have to force close this socket as we have no handler for it */ | 181 | 2.29M | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 182 | 2.29M | return nullptr; | 183 | 2.29M | } | 184 | | | 185 | | /* First of all we need to check if this socket was deleted due to upgrade */ | 186 | 2.29M | if (httpContextData->upgradedWebSocket) { | 187 | | /* We differ between closed and upgraded below */ | 188 | 2.29M | return nullptr; | 189 | 2.29M | } | 190 | | | 191 | | /* Was the socket closed? 
*/ | 192 | 2.29M | if (us_socket_is_closed(SSL, (struct us_socket_t *) s)) { | 193 | 2.29M | return nullptr; | 194 | 2.29M | } | 195 | | | 196 | | /* We absolutely have to terminate parsing if shutdown */ | 197 | 2.29M | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 198 | 2.29M | return nullptr; | 199 | 2.29M | } | 200 | | | 201 | | /* Returning from a request handler without responding or attaching an onAborted handler is ill-use */ | 202 | 2.29M | if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) { | 203 | | /* Throw exception here? */ | 204 | 2.29M | std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl; | 205 | 2.29M | std::terminate(); | 206 | 2.29M | } | 207 | | | 208 | | /* If we have not responded and we have a data handler, we need to timeout to enfore client sending the data */ | 209 | 2.29M | if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) { | 210 | 2.29M | us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S); | 211 | 2.29M | } | 212 | | | 213 | | /* Continue parsing */ | 214 | 2.29M | return s; | 215 | | | 216 | 2.29M | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { | 217 | | /* We always get an empty chunk even if there is no data */ | 218 | 2.29M | if (httpResponseData->inStream) { | 219 | | | 220 | | /* Todo: can this handle timeout for non-post as well? 
*/ | 221 | 2.29M | if (fin) { | 222 | | /* If we just got the last chunk (or empty chunk), disable timeout */ | 223 | 2.29M | us_socket_timeout(SSL, (struct us_socket_t *) user, 0); | 224 | 2.29M | } else { | 225 | | /* We still have some more data coming in later, so reset timeout */ | 226 | | /* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */ | 227 | 2.29M | httpResponseData->received_bytes_per_timeout += (unsigned int) data.length(); | 228 | 2.29M | if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) { | 229 | 2.29M | us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S); | 230 | 2.29M | httpResponseData->received_bytes_per_timeout = 0; | 231 | 2.29M | } | 232 | 2.29M | } | 233 | | | 234 | | /* We might respond in the handler, so do not change timeout after this */ | 235 | 2.29M | httpResponseData->inStream(data, fin); | 236 | | | 237 | | /* Was the socket closed? */ | 238 | 2.29M | if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) { | 239 | 2.29M | return nullptr; | 240 | 2.29M | } | 241 | | | 242 | | /* We absolutely have to terminate parsing if shutdown */ | 243 | 2.29M | if (us_socket_is_shut_down(SSL, (us_socket_t *) user)) { | 244 | 2.29M | return nullptr; | 245 | 2.29M | } | 246 | | | 247 | | /* If we were given the last data chunk, reset data handler to ensure following | 248 | | * requests on the same socket won't trigger any previously registered behavior */ | 249 | 2.29M | if (fin) { | 250 | 2.29M | httpResponseData->inStream = nullptr; | 251 | 2.29M | } | 252 | 2.29M | } | 253 | 2.29M | return user; | 254 | 2.29M | }); | 255 | | | 256 | | /* Mark that we are no longer parsing Http */ | 257 | 2.29M | httpContextData->isParsingHttp = false; | 258 | | | 259 | | /* If we got fullptr that means the parser wants us to close the socket from error (same as calling the errorHandler) */ | 260 | 2.29M | if (returnedSocket == FULLPTR) { | 261 | 
| /* For errors, we only deliver them "at most once". We don't care if they get halfways delivered or not. */ | 262 | 1.05M | us_socket_write(SSL, s, httpErrorResponses[err].data(), (int) httpErrorResponses[err].length(), false); | 263 | 1.05M | us_socket_shutdown(SSL, s); | 264 | | /* Close any socket on HTTP errors */ | 265 | 1.05M | us_socket_close(SSL, s, 0, nullptr); | 266 | | /* This just makes the following code act as if the socket was closed from error inside the parser. */ | 267 | 1.05M | returnedSocket = nullptr; | 268 | 1.05M | } | 269 | | | 270 | | /* We need to uncork in all cases, except for nullptr (closed socket, or upgraded socket) */ | 271 | 2.29M | if (returnedSocket != nullptr) { | 272 | | /* Timeout on uncork failure */ | 273 | 998k | auto [written, failed] = ((AsyncSocket<SSL> *) returnedSocket)->uncork(); | 274 | 998k | if (failed) { | 275 | | /* All Http sockets timeout by this, and this behavior match the one in HttpResponse::cork */ | 276 | | /* Warning: both HTTP_IDLE_TIMEOUT_S and HTTP_TIMEOUT_S are 10 seconds and both are used the same */ | 277 | 87.8k | ((AsyncSocket<SSL> *) s)->timeout(HTTP_IDLE_TIMEOUT_S); | 278 | 87.8k | } | 279 | | | 280 | | /* We need to check if we should close this socket here now */ | 281 | 998k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 282 | 4.01k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 283 | 2.82k | if (((AsyncSocket<SSL> *) s)->getBufferedAmount() == 0) { | 284 | 721 | ((AsyncSocket<SSL> *) s)->shutdown(); | 285 | | /* We need to force close after sending FIN since we want to hinder | 286 | | * clients from keeping to send their huge data */ | 287 | 721 | ((AsyncSocket<SSL> *) s)->close(); | 288 | 721 | } | 289 | 2.82k | } | 290 | 4.01k | } | 291 | | | 292 | 998k | return (us_socket_t *) returnedSocket; | 293 | 998k | } | 294 | | | 295 | | /* If we upgraded, check here (differ between nullptr close and nullptr upgrade) 
*/ | 296 | 1.29M | if (httpContextData->upgradedWebSocket) { | 297 | | /* This path is only for upgraded websockets */ | 298 | 239k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) httpContextData->upgradedWebSocket; | 299 | | | 300 | | /* Uncork here as well (note: what if we failed to uncork and we then pub/sub before we even upgraded?) */ | 301 | 239k | auto [written, failed] = asyncSocket->uncork(); | 302 | | | 303 | | /* If we succeeded in uncorking, check if we have sent WebSocket FIN */ | 304 | 239k | if (!failed) { | 305 | 21.2k | WebSocketData *webSocketData = (WebSocketData *) asyncSocket->getAsyncSocketData(); | 306 | 21.2k | if (webSocketData->isShuttingDown) { | 307 | | /* In that case, also send TCP FIN (this is similar to what we have in ws drain handler) */ | 308 | 0 | asyncSocket->shutdown(); | 309 | 0 | } | 310 | 21.2k | } | 311 | | | 312 | | /* Reset upgradedWebSocket before we return */ | 313 | 239k | httpContextData->upgradedWebSocket = nullptr; | 314 | | | 315 | | /* Return the new upgraded websocket */ | 316 | 239k | return (us_socket_t *) asyncSocket; | 317 | 239k | } | 318 | | | 319 | | /* It is okay to uncork a closed socket and we need to */ | 320 | 1.05M | ((AsyncSocket<SSL> *) s)->uncork(); | 321 | | | 322 | | /* We cannot return nullptr to the underlying stack in any case */ | 323 | 1.05M | return s; | 324 | 1.29M | }); |
|
325 | | |
326 | | /* Handle HTTP write out (note: SSL_read may trigger this spuriously, the app need to handle spurious calls) */ |
327 | 22.9k | us_socket_context_on_writable(SSL, getSocketContext(), [](us_socket_t *s) { |
328 | | |
329 | 22.9k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; |
330 | 22.9k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) asyncSocket->getAsyncSocketData(); |
331 | | |
332 | | /* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). */ |
333 | 22.9k | if (httpResponseData->onWritable) { |
334 | | /* We are now writable, so hang timeout again, the user does not have to do anything so we should hang until end or tryEnd rearms timeout */ |
335 | 0 | us_socket_timeout(SSL, s, 0); |
336 | | |
337 | | /* We expect the developer to return whether or not write was successful (true). |
338 | | * If write was never called, the developer should still return true so that we may drain. */ |
339 | 0 | bool success = httpResponseData->callOnWritable(httpResponseData->offset); |
340 | | |
341 | | /* The developer indicated that their onWritable failed. */ |
342 | 0 | if (!success) { |
343 | | /* Skip testing if we can drain anything since that might perform an extra syscall */ |
344 | 0 | return s; |
345 | 0 | } |
346 | | |
347 | | /* We don't want to fall through since we don't want to mess with timeout. |
348 | | * It makes little sense to drain any backpressure when the user has registered onWritable. */ |
349 | 0 | return s; |
350 | 0 | } |
351 | | |
352 | | /* Drain any socket buffer, this might empty our backpressure and thus finish the request */ |
353 | 22.9k | /*auto [written, failed] = */asyncSocket->write(nullptr, 0, true, 0); |
354 | | |
355 | | /* Should we close this connection after a response - and is this response really done? */ |
356 | 22.9k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { |
357 | 3.98k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { |
358 | 3.58k | if (asyncSocket->getBufferedAmount() == 0) { |
359 | 1.53k | asyncSocket->shutdown(); |
360 | | /* We need to force close after sending FIN since we want to hinder |
361 | | * clients from keeping to send their huge data */ |
362 | 1.53k | asyncSocket->close(); |
363 | 1.53k | } |
364 | 3.58k | } |
365 | 3.98k | } |
366 | | |
367 | | /* Expect another writable event, or another request within the timeout */ |
368 | 22.9k | asyncSocket->timeout(HTTP_IDLE_TIMEOUT_S); |
369 | | |
370 | 22.9k | return s; |
371 | 22.9k | }); uWS::HttpContext<true>::init()::{lambda(us_socket_t*)#1}::operator()(us_socket_t*) constLine | Count | Source | 327 | 3.29k | us_socket_context_on_writable(SSL, getSocketContext(), [](us_socket_t *s) { | 328 | | | 329 | 3.29k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 330 | 3.29k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) asyncSocket->getAsyncSocketData(); | 331 | | | 332 | | /* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). */ | 333 | 3.29k | if (httpResponseData->onWritable) { | 334 | | /* We are now writable, so hang timeout again, the user does not have to do anything so we should hang until end or tryEnd rearms timeout */ | 335 | 0 | us_socket_timeout(SSL, s, 0); | 336 | | | 337 | | /* We expect the developer to return whether or not write was successful (true). | 338 | | * If write was never called, the developer should still return true so that we may drain. */ | 339 | 0 | bool success = httpResponseData->callOnWritable(httpResponseData->offset); | 340 | | | 341 | | /* The developer indicated that their onWritable failed. */ | 342 | 0 | if (!success) { | 343 | | /* Skip testing if we can drain anything since that might perform an extra syscall */ | 344 | 0 | return s; | 345 | 0 | } | 346 | | | 347 | | /* We don't want to fall through since we don't want to mess with timeout. | 348 | | * It makes little sense to drain any backpressure when the user has registered onWritable. */ | 349 | 0 | return s; | 350 | 0 | } | 351 | | | 352 | | /* Drain any socket buffer, this might empty our backpressure and thus finish the request */ | 353 | 3.29k | /*auto [written, failed] = */asyncSocket->write(nullptr, 0, true, 0); | 354 | | | 355 | | /* Should we close this connection after a response - and is this response really done? 
*/ | 356 | 3.29k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 357 | 1.02k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 358 | 1.02k | if (asyncSocket->getBufferedAmount() == 0) { | 359 | 605 | asyncSocket->shutdown(); | 360 | | /* We need to force close after sending FIN since we want to hinder | 361 | | * clients from keeping to send their huge data */ | 362 | 605 | asyncSocket->close(); | 363 | 605 | } | 364 | 1.02k | } | 365 | 1.02k | } | 366 | | | 367 | | /* Expect another writable event, or another request within the timeout */ | 368 | 3.29k | asyncSocket->timeout(HTTP_IDLE_TIMEOUT_S); | 369 | | | 370 | 3.29k | return s; | 371 | 3.29k | }); |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*)#1}::operator()(us_socket_t*) constLine | Count | Source | 327 | 19.6k | us_socket_context_on_writable(SSL, getSocketContext(), [](us_socket_t *s) { | 328 | | | 329 | 19.6k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 330 | 19.6k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) asyncSocket->getAsyncSocketData(); | 331 | | | 332 | | /* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). */ | 333 | 19.6k | if (httpResponseData->onWritable) { | 334 | | /* We are now writable, so hang timeout again, the user does not have to do anything so we should hang until end or tryEnd rearms timeout */ | 335 | 0 | us_socket_timeout(SSL, s, 0); | 336 | | | 337 | | /* We expect the developer to return whether or not write was successful (true). | 338 | | * If write was never called, the developer should still return true so that we may drain. */ | 339 | 0 | bool success = httpResponseData->callOnWritable(httpResponseData->offset); | 340 | | | 341 | | /* The developer indicated that their onWritable failed. */ | 342 | 0 | if (!success) { | 343 | | /* Skip testing if we can drain anything since that might perform an extra syscall */ | 344 | 0 | return s; | 345 | 0 | } | 346 | | | 347 | | /* We don't want to fall through since we don't want to mess with timeout. | 348 | | * It makes little sense to drain any backpressure when the user has registered onWritable. */ | 349 | 0 | return s; | 350 | 0 | } | 351 | | | 352 | | /* Drain any socket buffer, this might empty our backpressure and thus finish the request */ | 353 | 19.6k | /*auto [written, failed] = */asyncSocket->write(nullptr, 0, true, 0); | 354 | | | 355 | | /* Should we close this connection after a response - and is this response really done? 
*/ | 356 | 19.6k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 357 | 2.96k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 358 | 2.56k | if (asyncSocket->getBufferedAmount() == 0) { | 359 | 927 | asyncSocket->shutdown(); | 360 | | /* We need to force close after sending FIN since we want to hinder | 361 | | * clients from keeping to send their huge data */ | 362 | 927 | asyncSocket->close(); | 363 | 927 | } | 364 | 2.56k | } | 365 | 2.96k | } | 366 | | | 367 | | /* Expect another writable event, or another request within the timeout */ | 368 | 19.6k | asyncSocket->timeout(HTTP_IDLE_TIMEOUT_S); | 369 | | | 370 | 19.6k | return s; | 371 | 19.6k | }); |
|
372 | | |
373 | | /* Handle FIN, HTTP does not support half-closed sockets, so simply close */ |
374 | 158k | us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) { |
375 | | |
376 | | /* We do not care for half closed sockets */ |
377 | 158k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; |
378 | 158k | return asyncSocket->close(); |
379 | | |
380 | 158k | }); uWS::HttpContext<true>::init()::{lambda(us_socket_t*)#2}::operator()(us_socket_t*) constLine | Count | Source | 374 | 63.2k | us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) { | 375 | | | 376 | | /* We do not care for half closed sockets */ | 377 | 63.2k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 378 | 63.2k | return asyncSocket->close(); | 379 | | | 380 | 63.2k | }); |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*)#2}::operator()(us_socket_t*) constLine | Count | Source | 374 | 94.8k | us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) { | 375 | | | 376 | | /* We do not care for half closed sockets */ | 377 | 94.8k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 378 | 94.8k | return asyncSocket->close(); | 379 | | | 380 | 94.8k | }); |
|
381 | | |
382 | | /* Handle socket timeouts, simply close them so to not confuse client with FIN */ |
383 | 35.0k | us_socket_context_on_timeout(SSL, getSocketContext(), [](us_socket_t *s) { |
384 | | |
385 | | /* Force close rather than gracefully shutdown and risk confusing the client with a complete download */ |
386 | 35.0k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; |
387 | 35.0k | return asyncSocket->close(); |
388 | | |
389 | 35.0k | }); uWS::HttpContext<true>::init()::{lambda(us_socket_t*)#3}::operator()(us_socket_t*) constLine | Count | Source | 383 | 9.40k | us_socket_context_on_timeout(SSL, getSocketContext(), [](us_socket_t *s) { | 384 | | | 385 | | /* Force close rather than gracefully shutdown and risk confusing the client with a complete download */ | 386 | 9.40k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 387 | 9.40k | return asyncSocket->close(); | 388 | | | 389 | 9.40k | }); |
uWS::HttpContext<false>::init()::{lambda(us_socket_t*)#3}::operator()(us_socket_t*) constLine | Count | Source | 383 | 25.6k | us_socket_context_on_timeout(SSL, getSocketContext(), [](us_socket_t *s) { | 384 | | | 385 | | /* Force close rather than gracefully shutdown and risk confusing the client with a complete download */ | 386 | 25.6k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 387 | 25.6k | return asyncSocket->close(); | 388 | | | 389 | 25.6k | }); |
|
390 | | |
391 | 22.7k | return this; |
392 | 22.7k | } uWS::HttpContext<true>::init() Line | Count | Source | 70 | 5.74k | HttpContext<SSL> *init() { | 71 | | /* Handle socket connections */ | 72 | 5.74k | us_socket_context_on_open(SSL, getSocketContext(), [](us_socket_t *s, int /*is_client*/, char */*ip*/, int /*ip_length*/) { | 73 | | /* Any connected socket should timeout until it has a request */ | 74 | 5.74k | us_socket_timeout(SSL, s, HTTP_IDLE_TIMEOUT_S); | 75 | | | 76 | | /* Init socket ext */ | 77 | 5.74k | new (us_socket_ext(SSL, s)) HttpResponseData<SSL>; | 78 | | | 79 | | /* Call filter */ | 80 | 5.74k | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 81 | 5.74k | for (auto &f : httpContextData->filterHandlers) { | 82 | 5.74k | f((HttpResponse<SSL> *) s, 1); | 83 | 5.74k | } | 84 | | | 85 | 5.74k | return s; | 86 | 5.74k | }); | 87 | | | 88 | | /* Handle socket disconnections */ | 89 | 5.74k | us_socket_context_on_close(SSL, getSocketContext(), [](us_socket_t *s, int /*code*/, void */*reason*/) { | 90 | | /* Get socket ext */ | 91 | 5.74k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 92 | | | 93 | | /* Call filter */ | 94 | 5.74k | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 95 | 5.74k | for (auto &f : httpContextData->filterHandlers) { | 96 | 5.74k | f((HttpResponse<SSL> *) s, -1); | 97 | 5.74k | } | 98 | | | 99 | | /* Signal broken HTTP request only if we have a pending request */ | 100 | 5.74k | if (httpResponseData->onAborted) { | 101 | 5.74k | httpResponseData->onAborted(); | 102 | 5.74k | } | 103 | | | 104 | | /* Destruct socket ext */ | 105 | 5.74k | httpResponseData->~HttpResponseData<SSL>(); | 106 | | | 107 | 5.74k | return s; | 108 | 5.74k | }); | 109 | | | 110 | | /* Handle HTTP data streams */ | 111 | 5.74k | us_socket_context_on_data(SSL, getSocketContext(), [](us_socket_t *s, char *data, int length) { | 112 | | | 113 | | // total overhead is about 210k down to 180k | 114 | | // ~210k 
req/sec is the original perf with write in data | 115 | | // ~200k req/sec is with cork and formatting | 116 | | // ~190k req/sec is with http parsing | 117 | | // ~180k - 190k req/sec is with varying routing | 118 | | | 119 | 5.74k | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 120 | | | 121 | | /* Do not accept any data while in shutdown state */ | 122 | 5.74k | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 123 | 5.74k | return s; | 124 | 5.74k | } | 125 | | | 126 | 5.74k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 127 | | | 128 | | /* Cork this socket */ | 129 | 5.74k | ((AsyncSocket<SSL> *) s)->cork(); | 130 | | | 131 | | /* Mark that we are inside the parser now */ | 132 | 5.74k | httpContextData->isParsingHttp = true; | 133 | | | 134 | | // clients need to know the cursor after http parse, not servers! | 135 | | // how far did we read then? we need to know to continue with websocket parsing data? or? | 136 | | | 137 | 5.74k | void *proxyParser = nullptr; | 138 | | #ifdef UWS_WITH_PROXY | 139 | | proxyParser = &httpResponseData->proxyParser; | 140 | | #endif | 141 | | | 142 | | /* The return value is entirely up to us to interpret. The HttpParser only care for whether the returned value is DIFFERENT or not from passed user */ | 143 | 5.74k | auto [err, returnedSocket] = httpResponseData->consumePostPadded(data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * { | 144 | | /* For every request we reset the timeout and hang until user makes action */ | 145 | | /* Warning: if we are in shutdown state, resetting the timer is a security issue! 
*/ | 146 | 5.74k | us_socket_timeout(SSL, (us_socket_t *) s, 0); | 147 | | | 148 | | /* Reset httpResponse */ | 149 | 5.74k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, (us_socket_t *) s); | 150 | 5.74k | httpResponseData->offset = 0; | 151 | | | 152 | | /* Are we not ready for another request yet? Terminate the connection. | 153 | | * Important for denying async pipelining until, if ever, we want to suppot it. | 154 | | * Otherwise requests can get mixed up on the same connection. We still support sync pipelining. */ | 155 | 5.74k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) { | 156 | 5.74k | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 157 | 5.74k | return nullptr; | 158 | 5.74k | } | 159 | | | 160 | | /* Mark pending request and emit it */ | 161 | 5.74k | httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING; | 162 | | | 163 | | /* Mark this response as connectionClose if ancient or connection: close */ | 164 | 5.74k | if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) { | 165 | 5.74k | httpResponseData->state |= HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE; | 166 | 5.74k | } | 167 | | | 168 | | /* Select the router based on SNI (only possible for SSL) */ | 169 | 5.74k | auto *selectedRouter = &httpContextData->router; | 170 | 5.74k | if constexpr (SSL) { | 171 | 5.74k | void *domainRouter = us_socket_server_name_userdata(SSL, (struct us_socket_t *) s); | 172 | 5.74k | if (domainRouter) { | 173 | 5.74k | selectedRouter = (decltype(selectedRouter)) domainRouter; | 174 | 5.74k | } | 175 | 5.74k | } | 176 | | | 177 | | /* Route the method and URL */ | 178 | 5.74k | selectedRouter->getUserData() = {(HttpResponse<SSL> *) s, httpRequest}; | 179 | 5.74k | if (!selectedRouter->route(httpRequest->getCaseSensitiveMethod(), httpRequest->getUrl())) { | 180 | | /* We have to force close this socket as we have no handler for it */ | 181 | 
5.74k | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 182 | 5.74k | return nullptr; | 183 | 5.74k | } | 184 | | | 185 | | /* First of all we need to check if this socket was deleted due to upgrade */ | 186 | 5.74k | if (httpContextData->upgradedWebSocket) { | 187 | | /* We differ between closed and upgraded below */ | 188 | 5.74k | return nullptr; | 189 | 5.74k | } | 190 | | | 191 | | /* Was the socket closed? */ | 192 | 5.74k | if (us_socket_is_closed(SSL, (struct us_socket_t *) s)) { | 193 | 5.74k | return nullptr; | 194 | 5.74k | } | 195 | | | 196 | | /* We absolutely have to terminate parsing if shutdown */ | 197 | 5.74k | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 198 | 5.74k | return nullptr; | 199 | 5.74k | } | 200 | | | 201 | | /* Returning from a request handler without responding or attaching an onAborted handler is ill-use */ | 202 | 5.74k | if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) { | 203 | | /* Throw exception here? */ | 204 | 5.74k | std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl; | 205 | 5.74k | std::terminate(); | 206 | 5.74k | } | 207 | | | 208 | | /* If we have not responded and we have a data handler, we need to timeout to enfore client sending the data */ | 209 | 5.74k | if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) { | 210 | 5.74k | us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S); | 211 | 5.74k | } | 212 | | | 213 | | /* Continue parsing */ | 214 | 5.74k | return s; | 215 | | | 216 | 5.74k | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { | 217 | | /* We always get an empty chunk even if there is no data */ | 218 | 5.74k | if (httpResponseData->inStream) { | 219 | | | 220 | | /* Todo: can this handle timeout for non-post as well? 
*/ | 221 | 5.74k | if (fin) { | 222 | | /* If we just got the last chunk (or empty chunk), disable timeout */ | 223 | 5.74k | us_socket_timeout(SSL, (struct us_socket_t *) user, 0); | 224 | 5.74k | } else { | 225 | | /* We still have some more data coming in later, so reset timeout */ | 226 | | /* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */ | 227 | 5.74k | httpResponseData->received_bytes_per_timeout += (unsigned int) data.length(); | 228 | 5.74k | if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) { | 229 | 5.74k | us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S); | 230 | 5.74k | httpResponseData->received_bytes_per_timeout = 0; | 231 | 5.74k | } | 232 | 5.74k | } | 233 | | | 234 | | /* We might respond in the handler, so do not change timeout after this */ | 235 | 5.74k | httpResponseData->inStream(data, fin); | 236 | | | 237 | | /* Was the socket closed? */ | 238 | 5.74k | if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) { | 239 | 5.74k | return nullptr; | 240 | 5.74k | } | 241 | | | 242 | | /* We absolutely have to terminate parsing if shutdown */ | 243 | 5.74k | if (us_socket_is_shut_down(SSL, (us_socket_t *) user)) { | 244 | 5.74k | return nullptr; | 245 | 5.74k | } | 246 | | | 247 | | /* If we were given the last data chunk, reset data handler to ensure following | 248 | | * requests on the same socket won't trigger any previously registered behavior */ | 249 | 5.74k | if (fin) { | 250 | 5.74k | httpResponseData->inStream = nullptr; | 251 | 5.74k | } | 252 | 5.74k | } | 253 | 5.74k | return user; | 254 | 5.74k | }); | 255 | | | 256 | | /* Mark that we are no longer parsing Http */ | 257 | 5.74k | httpContextData->isParsingHttp = false; | 258 | | | 259 | | /* If we got fullptr that means the parser wants us to close the socket from error (same as calling the errorHandler) */ | 260 | 5.74k | if (returnedSocket == FULLPTR) { | 261 | 
| /* For errors, we only deliver them "at most once". We don't care if they get halfways delivered or not. */ | 262 | 5.74k | us_socket_write(SSL, s, httpErrorResponses[err].data(), (int) httpErrorResponses[err].length(), false); | 263 | 5.74k | us_socket_shutdown(SSL, s); | 264 | | /* Close any socket on HTTP errors */ | 265 | 5.74k | us_socket_close(SSL, s, 0, nullptr); | 266 | | /* This just makes the following code act as if the socket was closed from error inside the parser. */ | 267 | 5.74k | returnedSocket = nullptr; | 268 | 5.74k | } | 269 | | | 270 | | /* We need to uncork in all cases, except for nullptr (closed socket, or upgraded socket) */ | 271 | 5.74k | if (returnedSocket != nullptr) { | 272 | | /* Timeout on uncork failure */ | 273 | 5.74k | auto [written, failed] = ((AsyncSocket<SSL> *) returnedSocket)->uncork(); | 274 | 5.74k | if (failed) { | 275 | | /* All Http sockets timeout by this, and this behavior match the one in HttpResponse::cork */ | 276 | | /* Warning: both HTTP_IDLE_TIMEOUT_S and HTTP_TIMEOUT_S are 10 seconds and both are used the same */ | 277 | 5.74k | ((AsyncSocket<SSL> *) s)->timeout(HTTP_IDLE_TIMEOUT_S); | 278 | 5.74k | } | 279 | | | 280 | | /* We need to check if we should close this socket here now */ | 281 | 5.74k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 282 | 5.74k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 283 | 5.74k | if (((AsyncSocket<SSL> *) s)->getBufferedAmount() == 0) { | 284 | 5.74k | ((AsyncSocket<SSL> *) s)->shutdown(); | 285 | | /* We need to force close after sending FIN since we want to hinder | 286 | | * clients from keeping to send their huge data */ | 287 | 5.74k | ((AsyncSocket<SSL> *) s)->close(); | 288 | 5.74k | } | 289 | 5.74k | } | 290 | 5.74k | } | 291 | | | 292 | 5.74k | return (us_socket_t *) returnedSocket; | 293 | 5.74k | } | 294 | | | 295 | | /* If we upgraded, check here (differ between nullptr close and 
nullptr upgrade) */ | 296 | 5.74k | if (httpContextData->upgradedWebSocket) { | 297 | | /* This path is only for upgraded websockets */ | 298 | 5.74k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) httpContextData->upgradedWebSocket; | 299 | | | 300 | | /* Uncork here as well (note: what if we failed to uncork and we then pub/sub before we even upgraded?) */ | 301 | 5.74k | auto [written, failed] = asyncSocket->uncork(); | 302 | | | 303 | | /* If we succeeded in uncorking, check if we have sent WebSocket FIN */ | 304 | 5.74k | if (!failed) { | 305 | 5.74k | WebSocketData *webSocketData = (WebSocketData *) asyncSocket->getAsyncSocketData(); | 306 | 5.74k | if (webSocketData->isShuttingDown) { | 307 | | /* In that case, also send TCP FIN (this is similar to what we have in ws drain handler) */ | 308 | 5.74k | asyncSocket->shutdown(); | 309 | 5.74k | } | 310 | 5.74k | } | 311 | | | 312 | | /* Reset upgradedWebSocket before we return */ | 313 | 5.74k | httpContextData->upgradedWebSocket = nullptr; | 314 | | | 315 | | /* Return the new upgraded websocket */ | 316 | 5.74k | return (us_socket_t *) asyncSocket; | 317 | 5.74k | } | 318 | | | 319 | | /* It is okay to uncork a closed socket and we need to */ | 320 | 5.74k | ((AsyncSocket<SSL> *) s)->uncork(); | 321 | | | 322 | | /* We cannot return nullptr to the underlying stack in any case */ | 323 | 5.74k | return s; | 324 | 5.74k | }); | 325 | | | 326 | | /* Handle HTTP write out (note: SSL_read may trigger this spuriously, the app need to handle spurious calls) */ | 327 | 5.74k | us_socket_context_on_writable(SSL, getSocketContext(), [](us_socket_t *s) { | 328 | | | 329 | 5.74k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 330 | 5.74k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) asyncSocket->getAsyncSocketData(); | 331 | | | 332 | | /* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). 
*/ | 333 | 5.74k | if (httpResponseData->onWritable) { | 334 | | /* We are now writable, so hang timeout again, the user does not have to do anything so we should hang until end or tryEnd rearms timeout */ | 335 | 5.74k | us_socket_timeout(SSL, s, 0); | 336 | | | 337 | | /* We expect the developer to return whether or not write was successful (true). | 338 | | * If write was never called, the developer should still return true so that we may drain. */ | 339 | 5.74k | bool success = httpResponseData->callOnWritable(httpResponseData->offset); | 340 | | | 341 | | /* The developer indicated that their onWritable failed. */ | 342 | 5.74k | if (!success) { | 343 | | /* Skip testing if we can drain anything since that might perform an extra syscall */ | 344 | 5.74k | return s; | 345 | 5.74k | } | 346 | | | 347 | | /* We don't want to fall through since we don't want to mess with timeout. | 348 | | * It makes little sense to drain any backpressure when the user has registered onWritable. */ | 349 | 5.74k | return s; | 350 | 5.74k | } | 351 | | | 352 | | /* Drain any socket buffer, this might empty our backpressure and thus finish the request */ | 353 | | /*auto [written, failed] = */asyncSocket->write(nullptr, 0, true, 0); | 354 | | | 355 | | /* Should we close this connection after a response - and is this response really done? 
*/ | 356 | 5.74k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 357 | 5.74k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 358 | 5.74k | if (asyncSocket->getBufferedAmount() == 0) { | 359 | 5.74k | asyncSocket->shutdown(); | 360 | | /* We need to force close after sending FIN since we want to hinder | 361 | | * clients from keeping to send their huge data */ | 362 | 5.74k | asyncSocket->close(); | 363 | 5.74k | } | 364 | 5.74k | } | 365 | 5.74k | } | 366 | | | 367 | | /* Expect another writable event, or another request within the timeout */ | 368 | 5.74k | asyncSocket->timeout(HTTP_IDLE_TIMEOUT_S); | 369 | | | 370 | 5.74k | return s; | 371 | 5.74k | }); | 372 | | | 373 | | /* Handle FIN, HTTP does not support half-closed sockets, so simply close */ | 374 | 5.74k | us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) { | 375 | | | 376 | | /* We do not care for half closed sockets */ | 377 | 5.74k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 378 | 5.74k | return asyncSocket->close(); | 379 | | | 380 | 5.74k | }); | 381 | | | 382 | | /* Handle socket timeouts, simply close them so to not confuse client with FIN */ | 383 | 5.74k | us_socket_context_on_timeout(SSL, getSocketContext(), [](us_socket_t *s) { | 384 | | | 385 | | /* Force close rather than gracefully shutdown and risk confusing the client with a complete download */ | 386 | 5.74k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 387 | 5.74k | return asyncSocket->close(); | 388 | | | 389 | 5.74k | }); | 390 | | | 391 | 5.74k | return this; | 392 | 5.74k | } |
uWS::HttpContext<false>::init() Line | Count | Source | 70 | 16.9k | HttpContext<SSL> *init() { | 71 | | /* Handle socket connections */ | 72 | 16.9k | us_socket_context_on_open(SSL, getSocketContext(), [](us_socket_t *s, int /*is_client*/, char */*ip*/, int /*ip_length*/) { | 73 | | /* Any connected socket should timeout until it has a request */ | 74 | 16.9k | us_socket_timeout(SSL, s, HTTP_IDLE_TIMEOUT_S); | 75 | | | 76 | | /* Init socket ext */ | 77 | 16.9k | new (us_socket_ext(SSL, s)) HttpResponseData<SSL>; | 78 | | | 79 | | /* Call filter */ | 80 | 16.9k | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 81 | 16.9k | for (auto &f : httpContextData->filterHandlers) { | 82 | 16.9k | f((HttpResponse<SSL> *) s, 1); | 83 | 16.9k | } | 84 | | | 85 | 16.9k | return s; | 86 | 16.9k | }); | 87 | | | 88 | | /* Handle socket disconnections */ | 89 | 16.9k | us_socket_context_on_close(SSL, getSocketContext(), [](us_socket_t *s, int /*code*/, void */*reason*/) { | 90 | | /* Get socket ext */ | 91 | 16.9k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 92 | | | 93 | | /* Call filter */ | 94 | 16.9k | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 95 | 16.9k | for (auto &f : httpContextData->filterHandlers) { | 96 | 16.9k | f((HttpResponse<SSL> *) s, -1); | 97 | 16.9k | } | 98 | | | 99 | | /* Signal broken HTTP request only if we have a pending request */ | 100 | 16.9k | if (httpResponseData->onAborted) { | 101 | 16.9k | httpResponseData->onAborted(); | 102 | 16.9k | } | 103 | | | 104 | | /* Destruct socket ext */ | 105 | 16.9k | httpResponseData->~HttpResponseData<SSL>(); | 106 | | | 107 | 16.9k | return s; | 108 | 16.9k | }); | 109 | | | 110 | | /* Handle HTTP data streams */ | 111 | 16.9k | us_socket_context_on_data(SSL, getSocketContext(), [](us_socket_t *s, char *data, int length) { | 112 | | | 113 | | // total overhead is about 210k down to 180k | 114 | | // ~210k req/sec is the 
original perf with write in data | 115 | | // ~200k req/sec is with cork and formatting | 116 | | // ~190k req/sec is with http parsing | 117 | | // ~180k - 190k req/sec is with varying routing | 118 | | | 119 | 16.9k | HttpContextData<SSL> *httpContextData = getSocketContextDataS(s); | 120 | | | 121 | | /* Do not accept any data while in shutdown state */ | 122 | 16.9k | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 123 | 16.9k | return s; | 124 | 16.9k | } | 125 | | | 126 | 16.9k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s); | 127 | | | 128 | | /* Cork this socket */ | 129 | 16.9k | ((AsyncSocket<SSL> *) s)->cork(); | 130 | | | 131 | | /* Mark that we are inside the parser now */ | 132 | 16.9k | httpContextData->isParsingHttp = true; | 133 | | | 134 | | // clients need to know the cursor after http parse, not servers! | 135 | | // how far did we read then? we need to know to continue with websocket parsing data? or? | 136 | | | 137 | 16.9k | void *proxyParser = nullptr; | 138 | | #ifdef UWS_WITH_PROXY | 139 | | proxyParser = &httpResponseData->proxyParser; | 140 | | #endif | 141 | | | 142 | | /* The return value is entirely up to us to interpret. The HttpParser only care for whether the returned value is DIFFERENT or not from passed user */ | 143 | 16.9k | auto [err, returnedSocket] = httpResponseData->consumePostPadded(data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * { | 144 | | /* For every request we reset the timeout and hang until user makes action */ | 145 | | /* Warning: if we are in shutdown state, resetting the timer is a security issue! 
*/ | 146 | 16.9k | us_socket_timeout(SSL, (us_socket_t *) s, 0); | 147 | | | 148 | | /* Reset httpResponse */ | 149 | 16.9k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, (us_socket_t *) s); | 150 | 16.9k | httpResponseData->offset = 0; | 151 | | | 152 | | /* Are we not ready for another request yet? Terminate the connection. | 153 | | * Important for denying async pipelining until, if ever, we want to suppot it. | 154 | | * Otherwise requests can get mixed up on the same connection. We still support sync pipelining. */ | 155 | 16.9k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) { | 156 | 16.9k | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 157 | 16.9k | return nullptr; | 158 | 16.9k | } | 159 | | | 160 | | /* Mark pending request and emit it */ | 161 | 16.9k | httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING; | 162 | | | 163 | | /* Mark this response as connectionClose if ancient or connection: close */ | 164 | 16.9k | if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) { | 165 | 16.9k | httpResponseData->state |= HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE; | 166 | 16.9k | } | 167 | | | 168 | | /* Select the router based on SNI (only possible for SSL) */ | 169 | 16.9k | auto *selectedRouter = &httpContextData->router; | 170 | 16.9k | if constexpr (SSL) { | 171 | 16.9k | void *domainRouter = us_socket_server_name_userdata(SSL, (struct us_socket_t *) s); | 172 | 16.9k | if (domainRouter) { | 173 | 16.9k | selectedRouter = (decltype(selectedRouter)) domainRouter; | 174 | 16.9k | } | 175 | 16.9k | } | 176 | | | 177 | | /* Route the method and URL */ | 178 | 16.9k | selectedRouter->getUserData() = {(HttpResponse<SSL> *) s, httpRequest}; | 179 | 16.9k | if (!selectedRouter->route(httpRequest->getCaseSensitiveMethod(), httpRequest->getUrl())) { | 180 | | /* We have to force close this socket as we have no handler for it */ | 181 | 
16.9k | us_socket_close(SSL, (us_socket_t *) s, 0, nullptr); | 182 | 16.9k | return nullptr; | 183 | 16.9k | } | 184 | | | 185 | | /* First of all we need to check if this socket was deleted due to upgrade */ | 186 | 16.9k | if (httpContextData->upgradedWebSocket) { | 187 | | /* We differ between closed and upgraded below */ | 188 | 16.9k | return nullptr; | 189 | 16.9k | } | 190 | | | 191 | | /* Was the socket closed? */ | 192 | 16.9k | if (us_socket_is_closed(SSL, (struct us_socket_t *) s)) { | 193 | 16.9k | return nullptr; | 194 | 16.9k | } | 195 | | | 196 | | /* We absolutely have to terminate parsing if shutdown */ | 197 | 16.9k | if (us_socket_is_shut_down(SSL, (us_socket_t *) s)) { | 198 | 16.9k | return nullptr; | 199 | 16.9k | } | 200 | | | 201 | | /* Returning from a request handler without responding or attaching an onAborted handler is ill-use */ | 202 | 16.9k | if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) { | 203 | | /* Throw exception here? */ | 204 | 16.9k | std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl; | 205 | 16.9k | std::terminate(); | 206 | 16.9k | } | 207 | | | 208 | | /* If we have not responded and we have a data handler, we need to timeout to enfore client sending the data */ | 209 | 16.9k | if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) { | 210 | 16.9k | us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S); | 211 | 16.9k | } | 212 | | | 213 | | /* Continue parsing */ | 214 | 16.9k | return s; | 215 | | | 216 | 16.9k | }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * { | 217 | | /* We always get an empty chunk even if there is no data */ | 218 | 16.9k | if (httpResponseData->inStream) { | 219 | | | 220 | | /* Todo: can this handle timeout for non-post as well? 
*/ | 221 | 16.9k | if (fin) { | 222 | | /* If we just got the last chunk (or empty chunk), disable timeout */ | 223 | 16.9k | us_socket_timeout(SSL, (struct us_socket_t *) user, 0); | 224 | 16.9k | } else { | 225 | | /* We still have some more data coming in later, so reset timeout */ | 226 | | /* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */ | 227 | 16.9k | httpResponseData->received_bytes_per_timeout += (unsigned int) data.length(); | 228 | 16.9k | if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) { | 229 | 16.9k | us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S); | 230 | 16.9k | httpResponseData->received_bytes_per_timeout = 0; | 231 | 16.9k | } | 232 | 16.9k | } | 233 | | | 234 | | /* We might respond in the handler, so do not change timeout after this */ | 235 | 16.9k | httpResponseData->inStream(data, fin); | 236 | | | 237 | | /* Was the socket closed? */ | 238 | 16.9k | if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) { | 239 | 16.9k | return nullptr; | 240 | 16.9k | } | 241 | | | 242 | | /* We absolutely have to terminate parsing if shutdown */ | 243 | 16.9k | if (us_socket_is_shut_down(SSL, (us_socket_t *) user)) { | 244 | 16.9k | return nullptr; | 245 | 16.9k | } | 246 | | | 247 | | /* If we were given the last data chunk, reset data handler to ensure following | 248 | | * requests on the same socket won't trigger any previously registered behavior */ | 249 | 16.9k | if (fin) { | 250 | 16.9k | httpResponseData->inStream = nullptr; | 251 | 16.9k | } | 252 | 16.9k | } | 253 | 16.9k | return user; | 254 | 16.9k | }); | 255 | | | 256 | | /* Mark that we are no longer parsing Http */ | 257 | 16.9k | httpContextData->isParsingHttp = false; | 258 | | | 259 | | /* If we got fullptr that means the parser wants us to close the socket from error (same as calling the errorHandler) */ | 260 | 16.9k | if (returnedSocket == FULLPTR) { | 261 | 
| /* For errors, we only deliver them "at most once". We don't care if they get halfways delivered or not. */ | 262 | 16.9k | us_socket_write(SSL, s, httpErrorResponses[err].data(), (int) httpErrorResponses[err].length(), false); | 263 | 16.9k | us_socket_shutdown(SSL, s); | 264 | | /* Close any socket on HTTP errors */ | 265 | 16.9k | us_socket_close(SSL, s, 0, nullptr); | 266 | | /* This just makes the following code act as if the socket was closed from error inside the parser. */ | 267 | 16.9k | returnedSocket = nullptr; | 268 | 16.9k | } | 269 | | | 270 | | /* We need to uncork in all cases, except for nullptr (closed socket, or upgraded socket) */ | 271 | 16.9k | if (returnedSocket != nullptr) { | 272 | | /* Timeout on uncork failure */ | 273 | 16.9k | auto [written, failed] = ((AsyncSocket<SSL> *) returnedSocket)->uncork(); | 274 | 16.9k | if (failed) { | 275 | | /* All Http sockets timeout by this, and this behavior match the one in HttpResponse::cork */ | 276 | | /* Warning: both HTTP_IDLE_TIMEOUT_S and HTTP_TIMEOUT_S are 10 seconds and both are used the same */ | 277 | 16.9k | ((AsyncSocket<SSL> *) s)->timeout(HTTP_IDLE_TIMEOUT_S); | 278 | 16.9k | } | 279 | | | 280 | | /* We need to check if we should close this socket here now */ | 281 | 16.9k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 282 | 16.9k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 283 | 16.9k | if (((AsyncSocket<SSL> *) s)->getBufferedAmount() == 0) { | 284 | 16.9k | ((AsyncSocket<SSL> *) s)->shutdown(); | 285 | | /* We need to force close after sending FIN since we want to hinder | 286 | | * clients from keeping to send their huge data */ | 287 | 16.9k | ((AsyncSocket<SSL> *) s)->close(); | 288 | 16.9k | } | 289 | 16.9k | } | 290 | 16.9k | } | 291 | | | 292 | 16.9k | return (us_socket_t *) returnedSocket; | 293 | 16.9k | } | 294 | | | 295 | | /* If we upgraded, check here (differ between nullptr close and 
nullptr upgrade) */ | 296 | 16.9k | if (httpContextData->upgradedWebSocket) { | 297 | | /* This path is only for upgraded websockets */ | 298 | 16.9k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) httpContextData->upgradedWebSocket; | 299 | | | 300 | | /* Uncork here as well (note: what if we failed to uncork and we then pub/sub before we even upgraded?) */ | 301 | 16.9k | auto [written, failed] = asyncSocket->uncork(); | 302 | | | 303 | | /* If we succeeded in uncorking, check if we have sent WebSocket FIN */ | 304 | 16.9k | if (!failed) { | 305 | 16.9k | WebSocketData *webSocketData = (WebSocketData *) asyncSocket->getAsyncSocketData(); | 306 | 16.9k | if (webSocketData->isShuttingDown) { | 307 | | /* In that case, also send TCP FIN (this is similar to what we have in ws drain handler) */ | 308 | 16.9k | asyncSocket->shutdown(); | 309 | 16.9k | } | 310 | 16.9k | } | 311 | | | 312 | | /* Reset upgradedWebSocket before we return */ | 313 | 16.9k | httpContextData->upgradedWebSocket = nullptr; | 314 | | | 315 | | /* Return the new upgraded websocket */ | 316 | 16.9k | return (us_socket_t *) asyncSocket; | 317 | 16.9k | } | 318 | | | 319 | | /* It is okay to uncork a closed socket and we need to */ | 320 | 16.9k | ((AsyncSocket<SSL> *) s)->uncork(); | 321 | | | 322 | | /* We cannot return nullptr to the underlying stack in any case */ | 323 | 16.9k | return s; | 324 | 16.9k | }); | 325 | | | 326 | | /* Handle HTTP write out (note: SSL_read may trigger this spuriously, the app need to handle spurious calls) */ | 327 | 16.9k | us_socket_context_on_writable(SSL, getSocketContext(), [](us_socket_t *s) { | 328 | | | 329 | 16.9k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 330 | 16.9k | HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) asyncSocket->getAsyncSocketData(); | 331 | | | 332 | | /* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). 
*/ | 333 | 16.9k | if (httpResponseData->onWritable) { | 334 | | /* We are now writable, so hang timeout again, the user does not have to do anything so we should hang until end or tryEnd rearms timeout */ | 335 | 16.9k | us_socket_timeout(SSL, s, 0); | 336 | | | 337 | | /* We expect the developer to return whether or not write was successful (true). | 338 | | * If write was never called, the developer should still return true so that we may drain. */ | 339 | 16.9k | bool success = httpResponseData->callOnWritable(httpResponseData->offset); | 340 | | | 341 | | /* The developer indicated that their onWritable failed. */ | 342 | 16.9k | if (!success) { | 343 | | /* Skip testing if we can drain anything since that might perform an extra syscall */ | 344 | 16.9k | return s; | 345 | 16.9k | } | 346 | | | 347 | | /* We don't want to fall through since we don't want to mess with timeout. | 348 | | * It makes little sense to drain any backpressure when the user has registered onWritable. */ | 349 | 16.9k | return s; | 350 | 16.9k | } | 351 | | | 352 | | /* Drain any socket buffer, this might empty our backpressure and thus finish the request */ | 353 | | /*auto [written, failed] = */asyncSocket->write(nullptr, 0, true, 0); | 354 | | | 355 | | /* Should we close this connection after a response - and is this response really done? 
*/ | 356 | 16.9k | if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) { | 357 | 16.9k | if ((httpResponseData->state & HttpResponseData<SSL>::HTTP_RESPONSE_PENDING) == 0) { | 358 | 16.9k | if (asyncSocket->getBufferedAmount() == 0) { | 359 | 16.9k | asyncSocket->shutdown(); | 360 | | /* We need to force close after sending FIN since we want to hinder | 361 | | * clients from keeping to send their huge data */ | 362 | 16.9k | asyncSocket->close(); | 363 | 16.9k | } | 364 | 16.9k | } | 365 | 16.9k | } | 366 | | | 367 | | /* Expect another writable event, or another request within the timeout */ | 368 | 16.9k | asyncSocket->timeout(HTTP_IDLE_TIMEOUT_S); | 369 | | | 370 | 16.9k | return s; | 371 | 16.9k | }); | 372 | | | 373 | | /* Handle FIN, HTTP does not support half-closed sockets, so simply close */ | 374 | 16.9k | us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) { | 375 | | | 376 | | /* We do not care for half closed sockets */ | 377 | 16.9k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 378 | 16.9k | return asyncSocket->close(); | 379 | | | 380 | 16.9k | }); | 381 | | | 382 | | /* Handle socket timeouts, simply close them so to not confuse client with FIN */ | 383 | 16.9k | us_socket_context_on_timeout(SSL, getSocketContext(), [](us_socket_t *s) { | 384 | | | 385 | | /* Force close rather than gracefully shutdown and risk confusing the client with a complete download */ | 386 | 16.9k | AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s; | 387 | 16.9k | return asyncSocket->close(); | 388 | | | 389 | 16.9k | }); | 390 | | | 391 | 16.9k | return this; | 392 | 16.9k | } |
|
393 | | |
394 | | public: |
395 | | /* Construct a new HttpContext using specified loop */ |
396 | 22.7k | static HttpContext *create(Loop *loop, us_socket_context_options_t options = {}) { |
397 | 22.7k | HttpContext *httpContext; |
398 | | |
399 | 22.7k | httpContext = (HttpContext *) us_create_socket_context(SSL, (us_loop_t *) loop, sizeof(HttpContextData<SSL>), options); |
400 | | |
401 | 22.7k | if (!httpContext) { |
402 | 0 | return nullptr; |
403 | 0 | } |
404 | | |
405 | | /* Init socket context data */ |
406 | 22.7k | new ((HttpContextData<SSL> *) us_socket_context_ext(SSL, (us_socket_context_t *) httpContext)) HttpContextData<SSL>(); |
407 | 22.7k | return httpContext->init(); |
408 | 22.7k | } uWS::HttpContext<true>::create(uWS::Loop*, us_socket_context_options_t) Line | Count | Source | 396 | 5.74k | static HttpContext *create(Loop *loop, us_socket_context_options_t options = {}) { | 397 | 5.74k | HttpContext *httpContext; | 398 | | | 399 | 5.74k | httpContext = (HttpContext *) us_create_socket_context(SSL, (us_loop_t *) loop, sizeof(HttpContextData<SSL>), options); | 400 | | | 401 | 5.74k | if (!httpContext) { | 402 | 0 | return nullptr; | 403 | 0 | } | 404 | | | 405 | | /* Init socket context data */ | 406 | 5.74k | new ((HttpContextData<SSL> *) us_socket_context_ext(SSL, (us_socket_context_t *) httpContext)) HttpContextData<SSL>(); | 407 | 5.74k | return httpContext->init(); | 408 | 5.74k | } |
uWS::HttpContext<false>::create(uWS::Loop*, us_socket_context_options_t) Line | Count | Source | 396 | 16.9k | static HttpContext *create(Loop *loop, us_socket_context_options_t options = {}) { | 397 | 16.9k | HttpContext *httpContext; | 398 | | | 399 | 16.9k | httpContext = (HttpContext *) us_create_socket_context(SSL, (us_loop_t *) loop, sizeof(HttpContextData<SSL>), options); | 400 | | | 401 | 16.9k | if (!httpContext) { | 402 | 0 | return nullptr; | 403 | 0 | } | 404 | | | 405 | | /* Init socket context data */ | 406 | 16.9k | new ((HttpContextData<SSL> *) us_socket_context_ext(SSL, (us_socket_context_t *) httpContext)) HttpContextData<SSL>(); | 407 | 16.9k | return httpContext->init(); | 408 | 16.9k | } |
|
409 | | |
410 | | /* Destruct the HttpContext, it does not follow RAII */ |
411 | 22.7k | void free() { |
412 | | /* Destruct socket context data */ |
413 | 22.7k | HttpContextData<SSL> *httpContextData = getSocketContextData(); |
414 | 22.7k | httpContextData->~HttpContextData<SSL>(); |
415 | | |
416 | | /* Free the socket context in whole */ |
417 | 22.7k | us_socket_context_free(SSL, getSocketContext()); |
418 | 22.7k | } uWS::HttpContext<true>::free() Line | Count | Source | 411 | 5.74k | void free() { | 412 | | /* Destruct socket context data */ | 413 | 5.74k | HttpContextData<SSL> *httpContextData = getSocketContextData(); | 414 | 5.74k | httpContextData->~HttpContextData<SSL>(); | 415 | | | 416 | | /* Free the socket context in whole */ | 417 | 5.74k | us_socket_context_free(SSL, getSocketContext()); | 418 | 5.74k | } |
uWS::HttpContext<false>::free() Line | Count | Source | 411 | 16.9k | void free() { | 412 | | /* Destruct socket context data */ | 413 | 16.9k | HttpContextData<SSL> *httpContextData = getSocketContextData(); | 414 | 16.9k | httpContextData->~HttpContextData<SSL>(); | 415 | | | 416 | | /* Free the socket context in whole */ | 417 | 16.9k | us_socket_context_free(SSL, getSocketContext()); | 418 | 16.9k | } |
|
419 | | |
420 | | void filter(MoveOnlyFunction<void(HttpResponse<SSL> *, int)> &&filterHandler) { |
421 | | getSocketContextData()->filterHandlers.emplace_back(std::move(filterHandler)); |
422 | | } |
423 | | |
424 | | /* Register an HTTP route handler acording to URL pattern */ |
425 | 79.6k | void onHttp(std::string method, std::string pattern, MoveOnlyFunction<void(HttpResponse<SSL> *, HttpRequest *)> &&handler, bool upgrade = false) { |
426 | 79.6k | HttpContextData<SSL> *httpContextData = getSocketContextData(); |
427 | | |
428 | | /* Todo: This is ugly, fix */ |
429 | 79.6k | std::vector<std::string> methods; |
430 | 79.6k | if (method == "*") { |
431 | 29.5k | methods = {"*"}; |
432 | 50.1k | } else { |
433 | 50.1k | methods = {method}; |
434 | 50.1k | } |
435 | | |
436 | 79.6k | uint32_t priority = method == "*" ? httpContextData->currentRouter->LOW_PRIORITY : (upgrade ? httpContextData->currentRouter->HIGH_PRIORITY : httpContextData->currentRouter->MEDIUM_PRIORITY); |
437 | | |
438 | | /* If we are passed nullptr then remove this */ |
439 | 79.6k | if (!handler) { |
440 | 0 | httpContextData->currentRouter->remove(methods[0], pattern, priority); |
441 | 0 | return; |
442 | 0 | } |
443 | | |
444 | | /* Record this route's parameter offsets */ |
445 | 79.6k | std::map<std::string, unsigned short, std::less<>> parameterOffsets; |
446 | 79.6k | unsigned short offset = 0; |
447 | 328k | for (unsigned int i = 0; i < pattern.length(); i++) { |
448 | 248k | if (pattern[i] == ':') { |
449 | 6.80k | i++; |
450 | 6.80k | unsigned int start = i; |
451 | 40.8k | while (i < pattern.length() && pattern[i] != '/') { |
452 | 34.0k | i++; |
453 | 34.0k | } |
454 | 6.80k | parameterOffsets[std::string(pattern.data() + start, i - start)] = offset; |
455 | | //std::cout << "<" << std::string(pattern.data() + start, i - start) << "> is offset " << offset; |
456 | 6.80k | offset++; |
457 | 6.80k | } |
458 | 248k | } |
459 | | |
460 | 467k | httpContextData->currentRouter->add(methods, pattern, [handler = std::move(handler), parameterOffsets = std::move(parameterOffsets)](auto *r) mutable { |
461 | 467k | auto user = r->getUserData(); |
462 | 467k | user.httpRequest->setYield(false); |
463 | 467k | user.httpRequest->setParameters(r->getParameters()); |
464 | 467k | user.httpRequest->setParameterOffsets(¶meterOffsets); |
465 | | |
466 | | /* Middleware? Automatically respond to expectations */ |
467 | 467k | std::string_view expect = user.httpRequest->getHeader("expect"); |
468 | 467k | if (expect.length() && expect == "100-continue") { |
469 | 2.77k | user.httpResponse->writeContinue(); |
470 | 2.77k | } |
471 | | |
472 | 467k | handler(user.httpResponse, user.httpRequest); |
473 | | |
474 | | /* If any handler yielded, the router will keep looking for a suitable handler. */ |
475 | 467k | if (user.httpRequest->getYield()) { |
476 | 20.1k | return false; |
477 | 20.1k | } |
478 | 447k | return true; |
479 | 467k | }, priority); auto uWS::HttpContext<true>::onHttp(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, ofats::any_invocable<void (uWS::HttpResponse<true>*, uWS::HttpRequest*)>&&, bool)::{lambda(auto:1*)#1}::operator()<uWS::HttpRouter<uWS::HttpContextData<true>::RouterData> >(uWS::HttpRouter<uWS::HttpContextData<true>::RouterData>*)Line | Count | Source | 460 | 120k | httpContextData->currentRouter->add(methods, pattern, [handler = std::move(handler), parameterOffsets = std::move(parameterOffsets)](auto *r) mutable { | 461 | 120k | auto user = r->getUserData(); | 462 | 120k | user.httpRequest->setYield(false); | 463 | 120k | user.httpRequest->setParameters(r->getParameters()); | 464 | 120k | user.httpRequest->setParameterOffsets(¶meterOffsets); | 465 | | | 466 | | /* Middleware? Automatically respond to expectations */ | 467 | 120k | std::string_view expect = user.httpRequest->getHeader("expect"); | 468 | 120k | if (expect.length() && expect == "100-continue") { | 469 | 251 | user.httpResponse->writeContinue(); | 470 | 251 | } | 471 | | | 472 | 120k | handler(user.httpResponse, user.httpRequest); | 473 | | | 474 | | /* If any handler yielded, the router will keep looking for a suitable handler. */ | 475 | 120k | if (user.httpRequest->getYield()) { | 476 | 3.95k | return false; | 477 | 3.95k | } | 478 | 116k | return true; | 479 | 120k | }, priority); |
auto uWS::HttpContext<false>::onHttp(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, ofats::any_invocable<void (uWS::HttpResponse<false>*, uWS::HttpRequest*)>&&, bool)::{lambda(auto:1*)#1}::operator()<uWS::HttpRouter<uWS::HttpContextData<false>::RouterData> >(uWS::HttpRouter<uWS::HttpContextData<false>::RouterData>*)Line | Count | Source | 460 | 347k | httpContextData->currentRouter->add(methods, pattern, [handler = std::move(handler), parameterOffsets = std::move(parameterOffsets)](auto *r) mutable { | 461 | 347k | auto user = r->getUserData(); | 462 | 347k | user.httpRequest->setYield(false); | 463 | 347k | user.httpRequest->setParameters(r->getParameters()); | 464 | 347k | user.httpRequest->setParameterOffsets(¶meterOffsets); | 465 | | | 466 | | /* Middleware? Automatically respond to expectations */ | 467 | 347k | std::string_view expect = user.httpRequest->getHeader("expect"); | 468 | 347k | if (expect.length() && expect == "100-continue") { | 469 | 2.52k | user.httpResponse->writeContinue(); | 470 | 2.52k | } | 471 | | | 472 | 347k | handler(user.httpResponse, user.httpRequest); | 473 | | | 474 | | /* If any handler yielded, the router will keep looking for a suitable handler. */ | 475 | 347k | if (user.httpRequest->getYield()) { | 476 | 16.1k | return false; | 477 | 16.1k | } | 478 | 331k | return true; | 479 | 347k | }, priority); |
|
480 | 79.6k | } uWS::HttpContext<true>::onHttp(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, ofats::any_invocable<void (uWS::HttpResponse<true>*, uWS::HttpRequest*)>&&, bool) Line | Count | Source | 425 | 11.4k | void onHttp(std::string method, std::string pattern, MoveOnlyFunction<void(HttpResponse<SSL> *, HttpRequest *)> &&handler, bool upgrade = false) { | 426 | 11.4k | HttpContextData<SSL> *httpContextData = getSocketContextData(); | 427 | | | 428 | | /* Todo: This is ugly, fix */ | 429 | 11.4k | std::vector<std::string> methods; | 430 | 11.4k | if (method == "*") { | 431 | 5.74k | methods = {"*"}; | 432 | 5.74k | } else { | 433 | 5.74k | methods = {method}; | 434 | 5.74k | } | 435 | | | 436 | 11.4k | uint32_t priority = method == "*" ? httpContextData->currentRouter->LOW_PRIORITY : (upgrade ? httpContextData->currentRouter->HIGH_PRIORITY : httpContextData->currentRouter->MEDIUM_PRIORITY); | 437 | | | 438 | | /* If we are passed nullptr then remove this */ | 439 | 11.4k | if (!handler) { | 440 | 0 | httpContextData->currentRouter->remove(methods[0], pattern, priority); | 441 | 0 | return; | 442 | 0 | } | 443 | | | 444 | | /* Record this route's parameter offsets */ | 445 | 11.4k | std::map<std::string, unsigned short, std::less<>> parameterOffsets; | 446 | 11.4k | unsigned short offset = 0; | 447 | 34.4k | for (unsigned int i = 0; i < pattern.length(); i++) { | 448 | 22.9k | if (pattern[i] == ':') { | 449 | 0 | i++; | 450 | 0 | unsigned int start = i; | 451 | 0 | while (i < pattern.length() && pattern[i] != '/') { | 452 | 0 | i++; | 453 | 0 | } | 454 | 0 | parameterOffsets[std::string(pattern.data() + start, i - start)] = offset; | 455 | | //std::cout << "<" << std::string(pattern.data() + start, i - start) << "> is offset " << offset; | 456 | 0 | offset++; | 457 | 0 | } | 458 | 22.9k | } | 459 | | | 460 | 11.4k | 
httpContextData->currentRouter->add(methods, pattern, [handler = std::move(handler), parameterOffsets = std::move(parameterOffsets)](auto *r) mutable { | 461 | 11.4k | auto user = r->getUserData(); | 462 | 11.4k | user.httpRequest->setYield(false); | 463 | 11.4k | user.httpRequest->setParameters(r->getParameters()); | 464 | 11.4k | user.httpRequest->setParameterOffsets(¶meterOffsets); | 465 | | | 466 | | /* Middleware? Automatically respond to expectations */ | 467 | 11.4k | std::string_view expect = user.httpRequest->getHeader("expect"); | 468 | 11.4k | if (expect.length() && expect == "100-continue") { | 469 | 11.4k | user.httpResponse->writeContinue(); | 470 | 11.4k | } | 471 | | | 472 | 11.4k | handler(user.httpResponse, user.httpRequest); | 473 | | | 474 | | /* If any handler yielded, the router will keep looking for a suitable handler. */ | 475 | 11.4k | if (user.httpRequest->getYield()) { | 476 | 11.4k | return false; | 477 | 11.4k | } | 478 | 11.4k | return true; | 479 | 11.4k | }, priority); | 480 | 11.4k | } |
uWS::HttpContext<false>::onHttp(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, ofats::any_invocable<void (uWS::HttpResponse<false>*, uWS::HttpRequest*)>&&, bool) Line | Count | Source | 425 | 68.1k | void onHttp(std::string method, std::string pattern, MoveOnlyFunction<void(HttpResponse<SSL> *, HttpRequest *)> &&handler, bool upgrade = false) { | 426 | 68.1k | HttpContextData<SSL> *httpContextData = getSocketContextData(); | 427 | | | 428 | | /* Todo: This is ugly, fix */ | 429 | 68.1k | std::vector<std::string> methods; | 430 | 68.1k | if (method == "*") { | 431 | 23.7k | methods = {"*"}; | 432 | 44.3k | } else { | 433 | 44.3k | methods = {method}; | 434 | 44.3k | } | 435 | | | 436 | 68.1k | uint32_t priority = method == "*" ? httpContextData->currentRouter->LOW_PRIORITY : (upgrade ? httpContextData->currentRouter->HIGH_PRIORITY : httpContextData->currentRouter->MEDIUM_PRIORITY); | 437 | | | 438 | | /* If we are passed nullptr then remove this */ | 439 | 68.1k | if (!handler) { | 440 | 0 | httpContextData->currentRouter->remove(methods[0], pattern, priority); | 441 | 0 | return; | 442 | 0 | } | 443 | | | 444 | | /* Record this route's parameter offsets */ | 445 | 68.1k | std::map<std::string, unsigned short, std::less<>> parameterOffsets; | 446 | 68.1k | unsigned short offset = 0; | 447 | 294k | for (unsigned int i = 0; i < pattern.length(); i++) { | 448 | 225k | if (pattern[i] == ':') { | 449 | 6.80k | i++; | 450 | 6.80k | unsigned int start = i; | 451 | 40.8k | while (i < pattern.length() && pattern[i] != '/') { | 452 | 34.0k | i++; | 453 | 34.0k | } | 454 | 6.80k | parameterOffsets[std::string(pattern.data() + start, i - start)] = offset; | 455 | | //std::cout << "<" << std::string(pattern.data() + start, i - start) << "> is offset " << offset; | 456 | 6.80k | offset++; | 457 | 6.80k | } | 458 | 225k | } | 459 | | | 460 | 68.1k | 
httpContextData->currentRouter->add(methods, pattern, [handler = std::move(handler), parameterOffsets = std::move(parameterOffsets)](auto *r) mutable { | 461 | 68.1k | auto user = r->getUserData(); | 462 | 68.1k | user.httpRequest->setYield(false); | 463 | 68.1k | user.httpRequest->setParameters(r->getParameters()); | 464 | 68.1k | user.httpRequest->setParameterOffsets(¶meterOffsets); | 465 | | | 466 | | /* Middleware? Automatically respond to expectations */ | 467 | 68.1k | std::string_view expect = user.httpRequest->getHeader("expect"); | 468 | 68.1k | if (expect.length() && expect == "100-continue") { | 469 | 68.1k | user.httpResponse->writeContinue(); | 470 | 68.1k | } | 471 | | | 472 | 68.1k | handler(user.httpResponse, user.httpRequest); | 473 | | | 474 | | /* If any handler yielded, the router will keep looking for a suitable handler. */ | 475 | 68.1k | if (user.httpRequest->getYield()) { | 476 | 68.1k | return false; | 477 | 68.1k | } | 478 | 68.1k | return true; | 479 | 68.1k | }, priority); | 480 | 68.1k | } |
|
481 | | |
482 | | /* Listen to port using this HttpContext */ |
483 | 22.7k | us_listen_socket_t *listen(const char *host, int port, int options) { |
484 | 22.7k | return us_socket_context_listen(SSL, getSocketContext(), host, port, options, sizeof(HttpResponseData<SSL>)); |
485 | 22.7k | } uWS::HttpContext<true>::listen(char const*, int, int) Line | Count | Source | 483 | 5.74k | us_listen_socket_t *listen(const char *host, int port, int options) { | 484 | 5.74k | return us_socket_context_listen(SSL, getSocketContext(), host, port, options, sizeof(HttpResponseData<SSL>)); | 485 | 5.74k | } |
uWS::HttpContext<false>::listen(char const*, int, int) Line | Count | Source | 483 | 16.9k | us_listen_socket_t *listen(const char *host, int port, int options) { | 484 | 16.9k | return us_socket_context_listen(SSL, getSocketContext(), host, port, options, sizeof(HttpResponseData<SSL>)); | 485 | 16.9k | } |
|
486 | | |
487 | | /* Listen to unix domain socket using this HttpContext */ |
488 | | us_listen_socket_t *listen(const char *path, int options) { |
489 | | return us_socket_context_listen_unix(SSL, getSocketContext(), path, options, sizeof(HttpResponseData<SSL>)); |
490 | | } |
491 | | |
492 | | void onPreOpen(LIBUS_SOCKET_DESCRIPTOR (*handler)(struct us_socket_context_t *, LIBUS_SOCKET_DESCRIPTOR)) { |
493 | | us_socket_context_on_pre_open(SSL, getSocketContext(), handler); |
494 | | } |
495 | | |
496 | | /* Adopt an externally accepted socket into this HttpContext */ |
497 | | us_socket_t *adoptAcceptedSocket(LIBUS_SOCKET_DESCRIPTOR accepted_fd) { |
498 | | return us_adopt_accepted_socket(SSL, getSocketContext(), accepted_fd, sizeof(HttpResponseData<SSL>), 0, 0); |
499 | | } |
500 | | }; |
501 | | |
502 | | } |
503 | | |
504 | | #endif // UWS_HTTPCONTEXT_H |