/src/h2o/lib/handler/connect.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2021 Fastly Inc. |
3 | | * |
4 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | * of this software and associated documentation files (the "Software"), to |
6 | | * deal in the Software without restriction, including without limitation the |
7 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
8 | | * sell copies of the Software, and to permit persons to whom the Software is |
9 | | * furnished to do so, subject to the following conditions: |
10 | | * |
11 | | * The above copyright notice and this permission notice shall be included in |
12 | | * all copies or substantial portions of the Software. |
13 | | * |
14 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | | * IN THE SOFTWARE. |
21 | | */ |
22 | | #include "h2o/hostinfo.h" |
23 | | #include "h2o/memory.h" |
24 | | #include "h2o/socket.h" |
25 | | #include "h2o.h" |
26 | | #include "../probes_.h" |
27 | | |
28 | 0 | #define MODULE_NAME "lib/handler/connect.c" |
29 | | |
/* Per-pathconf CONNECT handler: proxy configuration plus the ACL applied to resolved destination addresses. */
struct st_connect_handler_t {
    h2o_handler_t super;
    h2o_proxy_config_vars_t config;
    struct {
        size_t count;
        h2o_connect_acl_entry_t entries[0]; /* trailing zero-length array (GNU extension); `count` entries are allocated inline */
    } acl;
};
38 | | |
39 | 0 | #define MAX_ADDRESSES_PER_FAMILY 4 |
40 | 0 | #define UDP_CHUNK_OVERHEAD 10 /* sufficient space to hold DATAGRAM capsule header (RFC 9297) and context ID of zero (RFC 9298) */ |
41 | | |
/* One resolved destination address; `sa` points into the request's memory pool (see `store_server_addresses`). */
struct st_server_address_t {
    struct sockaddr *sa;
    socklen_t salen;
};
46 | | |
/* State of one in-flight CONNECT / CONNECT-UDP tunnel, owned by the request's pool. */
struct st_connect_generator_t {
    h2o_generator_t super;
    /* handler from which this generator was spawned (configuration, ACL) */
    struct st_connect_handler_t *handler;
    /* the client-side request being tunneled */
    h2o_req_t *src_req;

    /* in-flight per-family name resolution requests; NULL once complete or cancelled */
    struct {
        h2o_hostinfo_getaddr_req_t *v4, *v6;
    } getaddr_req;
    /* resolved destination addresses; entries [0, used) have been attempted, [used, size) are still untried */
    struct {
        struct st_server_address_t list[MAX_ADDRESSES_PER_FAMILY * 2];
        size_t size;
        size_t used;
    } server_addresses;

    /* socket to the destination; NULL before a connection attempt succeeds in starting and after close */
    h2o_socket_t *sock;
    /**
     * Most significant and latest error that occurred, if any. Significance is represented as `class`, in descending order.
     */
    struct {
        enum error_class { ERROR_CLASS_NAME_RESOLUTION, ERROR_CLASS_ACCESS_PROHIBITED, ERROR_CLASS_CONNECT } class;
        const char *str;
    } last_error;

    /**
     * timer used to handle user-visible timeouts (i.e., connect- and io-timeout)
     */
    h2o_timer_t timeout;
    /**
     * timer used to for RFC 8305-style happy eyeballs (resolution delay and connection attempt delay)
     */
    h2o_timer_t eyeball_delay;

    /**
     * Pick v4 (or v6) address in the next connection attempt. RFC 8305 recommends trying the other family one by one.
     */
    unsigned pick_v4 : 1;
    /**
     * `h2o_process_request` was called without request streaming; all data that have to be sent is inside `h2o_req_t::entity`
     */
    unsigned no_req_streaming : 1;
    /**
     * set when the send-side is closed by the user
     */
    unsigned write_closed : 1;
    /**
     * set when h2o_send has been called to notify that the socket has been closed
     */
    unsigned read_closed : 1;
    /**
     * if socket has been closed
     */
    unsigned socket_closed : 1;
    /**
     * if connecting using TCP (or UDP)
     */
    unsigned is_tcp : 1;
    /**
     * TCP- and UDP-specific data
     */
    union {
        struct {
            h2o_buffer_t *sendbuf;
            h2o_buffer_t *recvbuf_detached;
        } tcp;
        struct {
            struct {
                h2o_buffer_t *buf; /* for datagram fragments */
                h2o_timer_t delayed;
            } egress;
            struct {
                char buf[UDP_CHUNK_OVERHEAD + 1500]; /* room for one MTU-sized datagram plus capsule framing */
            } ingress;
            /**
             * if using draft-03 style encoding rather than RFC 9298
             */
            unsigned is_draft03 : 1;
        } udp;
    };
};
126 | | |
127 | | static h2o_iovec_t get_proxy_status_identity(h2o_req_t *req) |
128 | 0 | { |
129 | 0 | h2o_iovec_t identity = req->conn->ctx->globalconf->proxy_status_identity; |
130 | 0 | if (identity.base == NULL) |
131 | 0 | identity = h2o_iovec_init(H2O_STRLIT("h2o")); |
132 | 0 | return identity; |
133 | 0 | } |
134 | | |
135 | | static const struct st_server_address_t *get_dest_addr(struct st_connect_generator_t *self) |
136 | 0 | { |
137 | 0 | if (self->server_addresses.used > 0) { |
138 | 0 | return &self->server_addresses.list[self->server_addresses.used - 1]; |
139 | 0 | } else { |
140 | 0 | return NULL; |
141 | 0 | } |
142 | 0 | } |
143 | | |
144 | | static void add_proxy_status_header(struct st_connect_handler_t *handler, h2o_req_t *req, const char *error_type, |
145 | | const char *details, const char *rcode, h2o_iovec_t dest_addr_str) |
146 | 0 | { |
147 | 0 | if (!handler->config.connect_proxy_status_enabled) |
148 | 0 | return; |
149 | | |
150 | 0 | h2o_mem_pool_t *pool = &req->pool; |
151 | 0 | h2o_iovec_t parts[10] = { |
152 | 0 | get_proxy_status_identity(req), |
153 | 0 | }; |
154 | 0 | size_t nparts = 1; |
155 | 0 | if (error_type != NULL) { |
156 | 0 | parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; error=")); |
157 | 0 | parts[nparts++] = h2o_iovec_init(error_type, strlen(error_type)); |
158 | 0 | } |
159 | 0 | if (rcode != NULL) { |
160 | 0 | parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; rcode=")); |
161 | 0 | parts[nparts++] = h2o_encode_sf_string(pool, rcode, SIZE_MAX); |
162 | 0 | } |
163 | 0 | if (details != NULL) { |
164 | 0 | parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; details=")); |
165 | 0 | parts[nparts++] = h2o_encode_sf_string(pool, details, SIZE_MAX); |
166 | 0 | } |
167 | 0 | if (dest_addr_str.base != NULL) { |
168 | 0 | parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; next-hop=\"")); |
169 | 0 | parts[nparts++] = dest_addr_str; |
170 | 0 | parts[nparts++] = h2o_iovec_init(H2O_STRLIT("\"")); |
171 | 0 | } |
172 | 0 | assert(nparts <= sizeof(parts) / sizeof(parts[0])); |
173 | 0 | h2o_iovec_t hval = h2o_concat_list(pool, parts, nparts); |
174 | 0 | h2o_add_header_by_str(pool, &req->res.headers, H2O_STRLIT("proxy-status"), 0, NULL, hval.base, hval.len); |
175 | 0 | } |
176 | | |
177 | 0 | #define TO_BITMASK(type, len) ((type) ~(((type)1 << (sizeof(type) * 8 - (len))) - 1)) |
178 | | |
179 | | static void record_error(struct st_connect_handler_t *handler, h2o_req_t *req, const struct st_server_address_t *addr, |
180 | | const char *error_type, const char *details, const char *rcode) |
181 | 0 | { |
182 | 0 | H2O_PROBE_REQUEST(CONNECT_ERROR, req, error_type, details, rcode); |
183 | |
|
184 | 0 | char dest_addr_strbuf[NI_MAXHOST]; |
185 | 0 | h2o_iovec_t dest_addr_str = h2o_iovec_init(NULL, 0); |
186 | 0 | if (addr != NULL) { |
187 | 0 | size_t len = h2o_socket_getnumerichost(addr->sa, addr->salen, dest_addr_strbuf); |
188 | 0 | if (len != SIZE_MAX) { |
189 | 0 | dest_addr_str = h2o_iovec_init(dest_addr_strbuf, len); |
190 | 0 | } |
191 | 0 | } |
192 | |
|
193 | 0 | h2o_req_log_error(req, MODULE_NAME, "%s; rcode=%s; details=%s; next-hop=%s", error_type, rcode != NULL ? rcode : "(null)", |
194 | 0 | details != NULL ? details : "(null)", dest_addr_str.base != NULL ? dest_addr_str.base : "(null)"); |
195 | |
|
196 | 0 | add_proxy_status_header(handler, req, error_type, details, rcode, dest_addr_str); |
197 | 0 | } |
198 | | |
199 | | static void record_connect_success(struct st_connect_generator_t *self) |
200 | 0 | { |
201 | 0 | const struct st_server_address_t *addr = get_dest_addr(self); |
202 | 0 | if (addr == NULL) |
203 | 0 | return; |
204 | | |
205 | 0 | H2O_PROBE_REQUEST(CONNECT_SUCCESS, self->src_req, addr->sa); |
206 | |
|
207 | 0 | char dest_addr_strbuf[NI_MAXHOST]; |
208 | 0 | size_t len = h2o_socket_getnumerichost(addr->sa, addr->salen, dest_addr_strbuf); |
209 | 0 | if (len != SIZE_MAX) { |
210 | 0 | add_proxy_status_header(self->handler, self->src_req, NULL, NULL, NULL, h2o_iovec_init(dest_addr_strbuf, len)); |
211 | 0 | } |
212 | 0 | } |
213 | | |
214 | | static void record_socket_error(struct st_connect_generator_t *self, const char *err) |
215 | 0 | { |
216 | 0 | const char *error_type; |
217 | 0 | const char *details = NULL; |
218 | 0 | if (err == h2o_socket_error_conn_refused) |
219 | 0 | error_type = "connection_refused"; |
220 | 0 | else if (err == h2o_socket_error_conn_timed_out) |
221 | 0 | error_type = "connection_timeout"; |
222 | 0 | else if (err == h2o_socket_error_network_unreachable || err == h2o_socket_error_host_unreachable) |
223 | 0 | error_type = "destination_ip_unroutable"; |
224 | 0 | else { |
225 | 0 | error_type = "proxy_internal_error"; |
226 | 0 | details = err; |
227 | 0 | } |
228 | 0 | record_error(self->handler, self->src_req, get_dest_addr(self), error_type, details, NULL); |
229 | 0 | } |
230 | | |
231 | | static void try_connect(struct st_connect_generator_t *self); |
232 | | static int tcp_start_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address); |
233 | | static int udp_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address); |
234 | | |
235 | | static h2o_loop_t *get_loop(struct st_connect_generator_t *self) |
236 | 0 | { |
237 | 0 | return self->src_req->conn->ctx->loop; |
238 | 0 | } |
239 | | |
240 | | static void stop_eyeballs(struct st_connect_generator_t *self) |
241 | 0 | { |
242 | 0 | if (self->getaddr_req.v4 != NULL) { |
243 | 0 | h2o_hostinfo_getaddr_cancel(self->getaddr_req.v4); |
244 | 0 | self->getaddr_req.v4 = NULL; |
245 | 0 | } |
246 | 0 | if (self->getaddr_req.v6 != NULL) { |
247 | 0 | h2o_hostinfo_getaddr_cancel(self->getaddr_req.v6); |
248 | 0 | self->getaddr_req.v6 = NULL; |
249 | 0 | } |
250 | 0 | if (self->eyeball_delay.cb != NULL) { |
251 | 0 | h2o_timer_unlink(&self->eyeball_delay); |
252 | 0 | self->eyeball_delay.cb = NULL; |
253 | 0 | } |
254 | 0 | } |
255 | | |
256 | | static void dispose_generator(struct st_connect_generator_t *self) |
257 | 0 | { |
258 | 0 | stop_eyeballs(self); |
259 | 0 | if (self->sock != NULL) { |
260 | 0 | h2o_socket_close(self->sock); |
261 | 0 | self->sock = NULL; |
262 | 0 | self->socket_closed = 1; |
263 | 0 | } |
264 | 0 | if (self->is_tcp) { |
265 | 0 | if (self->tcp.sendbuf != NULL) |
266 | 0 | h2o_buffer_dispose(&self->tcp.sendbuf); |
267 | 0 | if (self->tcp.recvbuf_detached != NULL) |
268 | 0 | h2o_buffer_dispose(&self->tcp.recvbuf_detached); |
269 | 0 | } else { |
270 | 0 | if (self->udp.egress.buf != NULL) |
271 | 0 | h2o_buffer_dispose(&self->udp.egress.buf); |
272 | 0 | h2o_timer_unlink(&self->udp.egress.delayed); |
273 | 0 | } |
274 | 0 | h2o_timer_unlink(&self->timeout); |
275 | 0 | } |
276 | | |
/* Closes the destination socket, preserving any data that still has to be delivered to the client.
 * For TCP, the socket's input buffer is detached into `tcp.recvbuf_detached` before the close so that buffered bytes
 * survive; a fresh empty buffer is installed in its place so `h2o_socket_close` releases the right object.
 * Returns non-zero if there is still data in flight toward the client (detached bytes for TCP; for UDP, a read that was
 * stopped mid-stream, inferred from the socket not being in reading state). */
static int close_socket(struct st_connect_generator_t *self)
{
    int send_inflight;

    if (self->is_tcp) {
        self->tcp.recvbuf_detached = self->sock->input;
        send_inflight = self->tcp.recvbuf_detached->size != 0;
    } else {
        send_inflight = !h2o_socket_is_reading(self->sock);
    }
    h2o_buffer_init(&self->sock->input, &h2o_socket_buffer_prototype);
    h2o_socket_close(self->sock);
    self->sock = NULL;
    self->socket_closed = 1;

    return send_inflight;
}
294 | | |
/* Shuts down both directions of the tunnel, notifying the client side as needed.
 * The read side is notified immediately via `h2o_send(..., FINAL)` unless data is still in flight; the write side is
 * notified via `proceed_req`. In both cases a zero-delay timer is armed first so that remaining cleanup runs as a
 * delayed task; the timer is reset if `h2o_send` indirectly invokes `dispose_generator`. */
static void close_readwrite(struct st_connect_generator_t *self)
{
    int send_inflight = 0;

    if (self->sock != NULL)
        send_inflight = close_socket(self);
    else if (self->is_tcp)
        /* socket already closed; data may remain in the detached receive buffer */
        send_inflight = self->tcp.recvbuf_detached->size != 0;

    if (h2o_timer_is_linked(&self->timeout))
        h2o_timer_unlink(&self->timeout);

    /* immediately notify read-close if necessary, setting up delayed task to for destroying other items; the timer is reset if
     * `h2o_send` indirectly invokes `dispose_generator`. */
    if (!self->read_closed && !send_inflight) {
        h2o_timer_link(get_loop(self), 0, &self->timeout);
        self->read_closed = 1;
        h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL);
        return;
    }

    /* notify write-close if necessary; see the comment above regarding the use of the timer */
    if (!self->write_closed && self->is_tcp && self->tcp.sendbuf->size != 0) {
        self->write_closed = 1;
        h2o_timer_link(get_loop(self), 0, &self->timeout);
        self->src_req->proceed_req(self->src_req, h2o_httpclient_error_io /* TODO notify as cancel? */);
        return;
    }
}
324 | | |
325 | | static void on_io_timeout(h2o_timer_t *timer) |
326 | 0 | { |
327 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, timeout, timer); |
328 | 0 | H2O_PROBE_REQUEST0(CONNECT_IO_TIMEOUT, self->src_req); |
329 | 0 | close_readwrite(self); |
330 | 0 | } |
331 | | |
332 | | static void reset_io_timeout(struct st_connect_generator_t *self) |
333 | 0 | { |
334 | 0 | if (self->sock != NULL) { |
335 | 0 | h2o_timer_unlink(&self->timeout); |
336 | 0 | h2o_timer_link(get_loop(self), self->handler->config.io_timeout, &self->timeout); |
337 | 0 | } |
338 | 0 | } |
339 | | |
340 | | static void send_connect_error(struct st_connect_generator_t *self, int code, const char *msg, const char *errstr) |
341 | 0 | { |
342 | 0 | stop_eyeballs(self); |
343 | 0 | h2o_timer_unlink(&self->timeout); |
344 | |
|
345 | 0 | if (self->sock != NULL) { |
346 | 0 | h2o_socket_close(self->sock); |
347 | 0 | self->sock = NULL; |
348 | 0 | } |
349 | |
|
350 | 0 | h2o_send_error_generic(self->src_req, code, msg, errstr, H2O_SEND_ERROR_KEEP_HEADERS); |
351 | 0 | } |
352 | | |
/* Shorthand for reporting a failed connect as a 502 response. */
static void on_connect_error(struct st_connect_generator_t *self, const char *errstr)
{
    send_connect_error(self, 502, "Gateway Error", errstr);
}
357 | | |
358 | | static void on_connect_timeout(h2o_timer_t *entry) |
359 | 0 | { |
360 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, timeout, entry); |
361 | 0 | if (self->server_addresses.size > 0) { |
362 | 0 | record_error(self->handler, self->src_req, get_dest_addr(self), "connection_timeout", NULL, NULL); |
363 | 0 | } else { |
364 | 0 | record_error(self->handler, self->src_req, NULL, "dns_timeout", NULL, NULL); |
365 | 0 | } |
366 | 0 | on_connect_error(self, h2o_httpclient_error_io_timeout); |
367 | 0 | } |
368 | | |
369 | | static void set_last_error(struct st_connect_generator_t *self, enum error_class class, const char *str) |
370 | 0 | { |
371 | 0 | if (self->last_error.class <= class) { |
372 | 0 | self->last_error.class = class; |
373 | 0 | self->last_error.str = str; |
374 | 0 | } |
375 | 0 | } |
376 | | |
377 | | static void on_resolution_delay_timeout(h2o_timer_t *entry) |
378 | 0 | { |
379 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, eyeball_delay, entry); |
380 | |
|
381 | 0 | assert(self->server_addresses.used == 0); |
382 | | |
383 | 0 | try_connect(self); |
384 | 0 | } |
385 | | |
386 | | static void on_connection_attempt_delay_timeout(h2o_timer_t *entry) |
387 | 0 | { |
388 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, eyeball_delay, entry); |
389 | | |
390 | | /* If no more addresses are available, continue trying the current attempt until the connect_timeout expires. */ |
391 | 0 | if (self->server_addresses.used == self->server_addresses.size) |
392 | 0 | return; |
393 | | |
394 | | /* close current connection attempt and try next. */ |
395 | 0 | h2o_socket_close(self->sock); |
396 | 0 | self->sock = NULL; |
397 | 0 | try_connect(self); |
398 | 0 | } |
399 | | |
400 | | static int store_server_addresses(struct st_connect_generator_t *self, struct addrinfo *res) |
401 | 0 | { |
402 | 0 | size_t num_added = 0; |
403 | | |
404 | | /* copy first entries in the response; ordering of addresses being returned by `getaddrinfo` is respected, as ordinary clients |
405 | | * (incl. forward proxy) are not expected to distribute the load among the addresses being returned. */ |
406 | 0 | do { |
407 | 0 | assert(self->server_addresses.size < PTLS_ELEMENTSOF(self->server_addresses.list)); |
408 | 0 | if (h2o_connect_lookup_acl(self->handler->acl.entries, self->handler->acl.count, res->ai_addr)) { |
409 | 0 | struct st_server_address_t *dst = self->server_addresses.list + self->server_addresses.size++; |
410 | 0 | dst->sa = h2o_mem_alloc_pool_aligned(&self->src_req->pool, H2O_ALIGNOF(struct sockaddr), res->ai_addrlen); |
411 | 0 | memcpy(dst->sa, res->ai_addr, res->ai_addrlen); |
412 | 0 | dst->salen = res->ai_addrlen; |
413 | 0 | ++num_added; |
414 | 0 | } |
415 | 0 | } while ((res = res->ai_next) != NULL && num_added < MAX_ADDRESSES_PER_FAMILY); |
416 | | |
417 | 0 | return num_added != 0; |
418 | 0 | } |
419 | | |
/* Completion callback shared by the v4 and v6 name resolution requests; drives the happy-eyeballs state machine.
 * Behavior depends on which lookup completed and in what state the generator is:
 *  - on success, stores the ACL-permitted addresses (an all-denied result is downgraded to an ACL error);
 *  - when v4 finishes first with addresses while v6 is still pending, arms the resolution delay timer (RFC 8305);
 *  - when both lookups are done and no address was stored, responds with 403 (ACL) or 502 (DNS failure);
 *  - otherwise starts (or restarts) connection attempts as appropriate. */
static void on_getaddr(h2o_hostinfo_getaddr_req_t *getaddr_req, const char *errstr, struct addrinfo *res, void *_self)
{
    struct st_connect_generator_t *self = _self;

    /* determine which of the two in-flight lookups completed, and clear its slot */
    if (getaddr_req == self->getaddr_req.v4) {
        self->getaddr_req.v4 = NULL;
    } else if (getaddr_req == self->getaddr_req.v6) {
        self->getaddr_req.v6 = NULL;
    } else {
        h2o_fatal("unexpected getaddr_req");
    }

    /* Store addresses, or convert error to ACL denial. */
    if (errstr == NULL) {
        if (self->is_tcp) {
            assert(res->ai_socktype == SOCK_STREAM);
        } else {
            assert(res->ai_socktype == SOCK_DGRAM);
        }
        assert(res != NULL && "upon successful return, getaddrinfo shall return at least one address (RFC 3493 Section 6.1)");
        if (!store_server_addresses(self, res))
            set_last_error(self, ERROR_CLASS_ACCESS_PROHIBITED, "destination_ip_prohibited");
    } else {
        set_last_error(self, ERROR_CLASS_NAME_RESOLUTION, errstr);
    }

    if (self->getaddr_req.v4 == NULL) {
        /* If v6 lookup is still running, that means that v4 lookup has *just* completed. Set the resolution delay timer if v4
         * addresses are available. */
        if (self->getaddr_req.v6 != NULL) {
            assert(self->server_addresses.used == 0);
            if (self->server_addresses.size != 0) {
                self->eyeball_delay.cb = on_resolution_delay_timeout;
                h2o_timer_link(get_loop(self), self->handler->config.happy_eyeballs.name_resolution_delay, &self->eyeball_delay);
            }
            return;
        }

        /* Both v4 and v6 lookups are complete. If the resolution delay timer is running. Reset it. */
        if (h2o_timer_is_linked(&self->eyeball_delay) && self->eyeball_delay.cb == on_resolution_delay_timeout) {
            assert(self->server_addresses.used == 0);
            h2o_timer_unlink(&self->eyeball_delay);
        }
        /* In case no addresses are available, send HTTP error. */
        if (self->server_addresses.size == 0) {
            if (self->last_error.class == ERROR_CLASS_ACCESS_PROHIBITED) {
                record_error(self->handler, self->src_req, NULL, self->last_error.str, NULL, NULL);
                send_connect_error(self, 403, "Destination IP Prohibited", "Destination IP Prohibited");
            } else {
                /* translate the hostinfo error constant (compared by pointer identity) to a DNS rcode for proxy-status */
                const char *rcode;
                if (self->last_error.str == h2o_hostinfo_error_nxdomain) {
                    rcode = "NXDOMAIN";
                } else if (self->last_error.str == h2o_hostinfo_error_nodata) {
                    rcode = "NODATA";
                } else if (self->last_error.str == h2o_hostinfo_error_refused) {
                    rcode = "REFUSED";
                } else if (self->last_error.str == h2o_hostinfo_error_servfail) {
                    rcode = "SERVFAIL";
                } else {
                    rcode = NULL;
                }
                record_error(self->handler, self->src_req, NULL, "dns_error", self->last_error.str, rcode);
                on_connect_error(self, self->last_error.str);
            }
            return;
        }
    }

    /* If the connection attempt has been under way for more than CONNECTION_ATTEMPT_DELAY_MS and the lookup that just completed
     * gave us a new address to try, then stop that connection attempt and start a new connection attempt using the new address.
     *
     * If the connection attempt has been under way for less than that, then do nothing for now. Eventually, either the timeout
     * will expire or the connection attempt will complete.
     *
     * If the connection attempt is under way but the lookup has not provided us any new address to try, then do nothing for now,
     * and wait for the connection attempt to complete. */
    if (self->sock != NULL) {
        if (h2o_timer_is_linked(&self->eyeball_delay))
            return;
        if (self->server_addresses.used == self->server_addresses.size)
            return;
        h2o_socket_close(self->sock);
        self->sock = NULL;
    }
    try_connect(self);
}
505 | | |
506 | | static struct st_server_address_t *pick_and_swap(struct st_connect_generator_t *self, size_t idx) |
507 | 0 | { |
508 | 0 | struct st_server_address_t *server_address = NULL; |
509 | |
|
510 | 0 | if (idx != self->server_addresses.used) { |
511 | 0 | struct st_server_address_t swap = self->server_addresses.list[idx]; |
512 | 0 | self->server_addresses.list[idx] = self->server_addresses.list[self->server_addresses.used]; |
513 | 0 | self->server_addresses.list[self->server_addresses.used] = swap; |
514 | 0 | } |
515 | 0 | server_address = &self->server_addresses.list[self->server_addresses.used]; |
516 | 0 | self->server_addresses.used++; |
517 | 0 | self->pick_v4 = !self->pick_v4; |
518 | 0 | return server_address; |
519 | 0 | } |
520 | | |
521 | | static struct st_server_address_t *get_next_server_address_for_connect(struct st_connect_generator_t *self) |
522 | 0 | { |
523 | 0 | struct st_server_address_t *server_address = NULL; |
524 | | |
525 | | /* Fetch the next address from the list of resolved addresses. */ |
526 | 0 | for (size_t i = self->server_addresses.used; i < self->server_addresses.size; i++) { |
527 | 0 | if (self->pick_v4 && self->server_addresses.list[i].sa->sa_family == AF_INET) { |
528 | 0 | server_address = pick_and_swap(self, i); |
529 | 0 | break; |
530 | 0 | } else if (!self->pick_v4 && self->server_addresses.list[i].sa->sa_family == AF_INET6) { |
531 | 0 | server_address = pick_and_swap(self, i); |
532 | 0 | break; |
533 | 0 | } |
534 | 0 | } |
535 | | |
536 | | /* If address of the preferred address family is not available, select one of the other family, if available. Otherwise, |
537 | | * send an HTTP error response or wait for address resolution. */ |
538 | 0 | if (server_address == NULL && self->server_addresses.used < self->server_addresses.size) { |
539 | 0 | server_address = &self->server_addresses.list[self->server_addresses.used]; |
540 | 0 | self->server_addresses.used++; |
541 | 0 | } |
542 | |
|
543 | 0 | return server_address; |
544 | 0 | } |
545 | | |
546 | | static void try_connect(struct st_connect_generator_t *self) |
547 | 0 | { |
548 | 0 | struct st_server_address_t *server_address; |
549 | |
|
550 | 0 | do { |
551 | 0 | server_address = get_next_server_address_for_connect(self); |
552 | 0 | if (server_address == NULL) { |
553 | | /* If address an is not available, send an HTTP error response or wait for address resolution. */ |
554 | 0 | if (self->getaddr_req.v4 == NULL && self->getaddr_req.v6 == NULL) { |
555 | | /* No pending address resolution, send error response. */ |
556 | 0 | assert(self->last_error.class == ERROR_CLASS_CONNECT); |
557 | 0 | record_socket_error(self, self->last_error.str); |
558 | 0 | on_connect_error(self, self->last_error.str); |
559 | 0 | } |
560 | 0 | return; |
561 | 0 | } |
562 | | |
563 | | /* Connect. Retry if the connect function returns error immediately. */ |
564 | 0 | } while (!(self->is_tcp ? tcp_start_connect : udp_connect)(self, server_address)); |
565 | 0 | } |
566 | | |
567 | | static void tcp_on_write_complete(h2o_socket_t *_sock, const char *err) |
568 | 0 | { |
569 | 0 | struct st_connect_generator_t *self = _sock->data; |
570 | |
|
571 | 0 | if (err != NULL) { |
572 | 0 | H2O_PROBE_REQUEST(CONNECT_TCP_WRITE_ERROR, self->src_req, err); |
573 | 0 | } |
574 | | |
575 | | /* until h2o_socket_t implements shutdown(SHUT_WR), do a bidirectional close when we close the write-side */ |
576 | 0 | if (err != NULL || self->write_closed) { |
577 | 0 | close_readwrite(self); |
578 | 0 | return; |
579 | 0 | } |
580 | | |
581 | 0 | reset_io_timeout(self); |
582 | |
|
583 | 0 | h2o_buffer_consume(&self->tcp.sendbuf, self->tcp.sendbuf->size); |
584 | 0 | self->src_req->proceed_req(self->src_req, NULL); |
585 | 0 | } |
586 | | |
587 | | static void tcp_do_write(struct st_connect_generator_t *self) |
588 | 0 | { |
589 | 0 | reset_io_timeout(self); |
590 | |
|
591 | 0 | h2o_iovec_t vec = h2o_iovec_init(self->tcp.sendbuf->bytes, self->tcp.sendbuf->size); |
592 | 0 | H2O_PROBE_REQUEST(CONNECT_TCP_WRITE, self->src_req, vec.len); |
593 | 0 | h2o_socket_write(self->sock, &vec, 1, tcp_on_write_complete); |
594 | 0 | } |
595 | | |
596 | | static int tcp_write(void *_self, int is_end_stream) |
597 | 0 | { |
598 | 0 | struct st_connect_generator_t *self = _self; |
599 | 0 | h2o_iovec_t chunk = self->src_req->entity; |
600 | |
|
601 | 0 | assert(!self->write_closed); |
602 | 0 | assert(self->tcp.sendbuf->size == 0); |
603 | | |
604 | | /* the socket might have been closed due to a read error */ |
605 | 0 | if (self->socket_closed) |
606 | 0 | return 1; |
607 | | |
608 | 0 | assert(self->sock != NULL && "write_req called before proceed_req is called?"); |
609 | | |
610 | | /* buffer input */ |
611 | 0 | h2o_buffer_append(&self->tcp.sendbuf, chunk.base, chunk.len); |
612 | 0 | if (is_end_stream) |
613 | 0 | self->write_closed = 1; |
614 | | |
615 | | /* write if the socket has been opened */ |
616 | 0 | if (self->sock != NULL && !h2o_socket_is_writing(self->sock)) |
617 | 0 | tcp_do_write(self); |
618 | |
|
619 | 0 | return 0; |
620 | 0 | } |
621 | | |
622 | | static void tcp_on_read(h2o_socket_t *_sock, const char *err) |
623 | 0 | { |
624 | 0 | struct st_connect_generator_t *self = _sock->data; |
625 | |
|
626 | 0 | h2o_socket_read_stop(self->sock); |
627 | 0 | h2o_timer_unlink(&self->timeout); |
628 | |
|
629 | 0 | if (err == NULL) { |
630 | 0 | h2o_iovec_t vec = h2o_iovec_init(self->sock->input->bytes, self->sock->input->size); |
631 | 0 | H2O_PROBE_REQUEST(CONNECT_TCP_READ, self->src_req, vec.len); |
632 | 0 | h2o_send(self->src_req, &vec, 1, H2O_SEND_STATE_IN_PROGRESS); |
633 | 0 | } else { |
634 | 0 | H2O_PROBE_REQUEST(CONNECT_TCP_READ_ERROR, self->src_req, err); |
635 | | /* unidirectional close is signalled using H2O_SEND_STATE_FINAL, but the write side remains open */ |
636 | 0 | self->read_closed = 1; |
637 | 0 | h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL); |
638 | 0 | } |
639 | 0 | } |
640 | | |
641 | | static void tcp_on_proceed(h2o_generator_t *_self, h2o_req_t *req) |
642 | 0 | { |
643 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, super, _self); |
644 | |
|
645 | 0 | assert(!self->read_closed); |
646 | | |
647 | 0 | if (self->sock != NULL) { |
648 | 0 | h2o_buffer_consume(&self->sock->input, self->sock->input->size); |
649 | 0 | reset_io_timeout(self); |
650 | 0 | h2o_socket_read_start(self->sock, tcp_on_read); |
651 | 0 | } else { |
652 | 0 | self->read_closed = 1; |
653 | 0 | h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL); |
654 | 0 | } |
655 | 0 | } |
656 | | |
/* Connect-completion callback for a TCP attempt. On failure, records the error and falls through to the next address
 * (happy eyeballs). On success: cancels eyeball timers, switches the timeout timer to io-timeout mode, kicks off the
 * first write, and submits the 200 response. The call ordering is significant: `write_req.cb` is invoked before
 * `h2o_start_response` / `h2o_send` so that buffered request data is on its way before response processing may
 * re-enter the generator. */
static void tcp_on_connect(h2o_socket_t *_sock, const char *err)
{
    struct st_connect_generator_t *self = _sock->data;

    assert(self->sock == _sock);

    if (err != NULL) {
        /* this attempt failed; remember the error and try the next resolved address */
        set_last_error(self, ERROR_CLASS_CONNECT, err);
        h2o_socket_close(self->sock);
        self->sock = NULL;
        try_connect(self);
        return;
    }

    stop_eyeballs(self);
    self->timeout.cb = on_io_timeout;
    reset_io_timeout(self);

    /* Start write. Once write is complete (or if there is nothing to write), `proceed_req` will be called or the socket would be
     * closed if `write_closed` is set. */
    self->src_req->write_req.cb(self, self->no_req_streaming);

    record_connect_success(self);

    /* build and submit 200 response */
    self->src_req->res.status = 200;
    h2o_start_response(self->src_req, &self->super);
    h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS);
}
686 | | |
/* Starts a non-blocking TCP connect to `server_address`. Returns 0 when the connect fails immediately (the caller will
 * try the next address), 1 when the attempt is under way. On success, arms the RFC 8305 connection-attempt-delay timer
 * so that another address can be tried if this attempt stalls. */
static int tcp_start_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address)
{
    H2O_PROBE_REQUEST(CONNECT_TCP_START, self->src_req, server_address->sa);

    const char *errstr;
    if ((self->sock = h2o_socket_connect(get_loop(self), server_address->sa, server_address->salen, tcp_on_connect, &errstr)) ==
        NULL) {
        set_last_error(self, ERROR_CLASS_CONNECT, errstr);
        return 0;
    }

    self->sock->data = self;
#if !H2O_USE_LIBUV
    /* This is the maximum amount of data that will be buffered within userspace. It is hard-coded to 64KB to balance throughput
     * and latency, and because we do not expect the need to change the value. */
    h2o_evloop_socket_set_max_read_size(self->sock, 64 * 1024);
#endif
    self->eyeball_delay.cb = on_connection_attempt_delay_timeout;
    h2o_timer_link(get_loop(self), self->handler->config.happy_eyeballs.connection_attempt_delay, &self->eyeball_delay);

    return 1;
}
709 | | |
710 | | static h2o_iovec_t udp_get_next_chunk(const char *start, size_t len, size_t *to_consume, int *skip) |
711 | 0 | { |
712 | 0 | const uint8_t *bytes = (const uint8_t *)start; |
713 | 0 | const uint8_t *end = bytes + len; |
714 | 0 | uint64_t chunk_type, chunk_length; |
715 | |
|
716 | 0 | chunk_type = ptls_decode_quicint(&bytes, end); |
717 | 0 | if (chunk_type == UINT64_MAX) |
718 | 0 | return h2o_iovec_init(NULL, 0); |
719 | 0 | chunk_length = ptls_decode_quicint(&bytes, end); |
720 | 0 | if (chunk_length == UINT64_MAX) |
721 | 0 | return h2o_iovec_init(NULL, 0); |
722 | | |
723 | | /* chunk is incomplete */ |
724 | 0 | if (end - bytes < chunk_length) |
725 | 0 | return h2o_iovec_init(NULL, 0); |
726 | | |
727 | | /* |
728 | | * https://tools.ietf.org/html/draft-ietf-masque-connect-udp-03#section-6 |
729 | | * CONNECT-UDP Stream Chunks can be used to convey UDP payloads, by |
730 | | * using a CONNECT-UDP Stream Chunk Type of UDP_PACKET (value 0x00). |
731 | | */ |
732 | 0 | *skip = chunk_type != 0; |
733 | 0 | *to_consume = (bytes + chunk_length) - (const uint8_t *)start; |
734 | |
|
735 | 0 | return h2o_iovec_init(bytes, chunk_length); |
736 | 0 | } |
737 | | |
738 | | static void udp_write_core(struct st_connect_generator_t *self, h2o_iovec_t datagram) |
739 | 0 | { |
740 | 0 | const uint8_t *src = (const uint8_t *)datagram.base, *end = src + datagram.len; |
741 | | |
742 | | /* When using RFC 9298, the payload starts with a Context ID; drop anything other than UDP packets. |
743 | | * TODO: propagate error when decoding fails? */ |
744 | 0 | if (!self->udp.is_draft03 && (ptls_decode_quicint(&src, end)) != 0) |
745 | 0 | return; |
746 | | |
747 | 0 | H2O_PROBE_REQUEST(CONNECT_UDP_WRITE, self->src_req, end - src); |
748 | 0 | while (send(h2o_socket_get_fd(self->sock), src, end - src, 0) == -1 && errno == EINTR) |
749 | 0 | ; |
750 | 0 | } |
751 | | |
752 | | static void udp_write_stream_complete_delayed(h2o_timer_t *_timer) |
753 | 0 | { |
754 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, udp.egress.delayed, _timer); |
755 | |
|
756 | 0 | if (self->write_closed) { |
757 | 0 | close_readwrite(self); |
758 | 0 | return; |
759 | 0 | } |
760 | | |
761 | 0 | self->src_req->proceed_req(self->src_req, NULL); |
762 | 0 | } |
763 | | |
/**
 * Consumes as many complete CONNECT-UDP stream chunks as possible from `chunk` (plus any previously buffered partial data),
 * forwarding UDP_PACKET payloads to the destination socket. A trailing incomplete chunk is retained in `udp.egress.buf` until
 * more data arrives. Finally schedules the zero-delay timer that calls `proceed_req` to pull the next piece of request body.
 */
static void udp_do_write_stream(struct st_connect_generator_t *self, h2o_iovec_t chunk)
{
    int from_buf = 0;
    size_t off = 0;

    reset_io_timeout(self);

    /* if a partial chunk is buffered from a previous call, append the new bytes and parse from the buffer instead */
    if (self->udp.egress.buf->size != 0) {
        from_buf = 1;
        if (chunk.len != 0)
            h2o_buffer_append(&self->udp.egress.buf, chunk.base, chunk.len);
        chunk.base = self->udp.egress.buf->bytes;
        chunk.len = self->udp.egress.buf->size;
    }
    /* forward every complete chunk; `skip` chunks (non-UDP_PACKET types) are consumed but not sent */
    do {
        int skip = 0;
        size_t to_consume;
        h2o_iovec_t datagram = udp_get_next_chunk(chunk.base + off, chunk.len - off, &to_consume, &skip);
        if (datagram.base == NULL)
            break;
        if (!skip)
            udp_write_core(self, datagram);
        off += to_consume;
    } while (1);

    /* retain the unconsumed tail: drop what was parsed out of the buffer, or stash the leftover of the caller-supplied chunk */
    if (from_buf) {
        h2o_buffer_consume(&self->udp.egress.buf, off);
    } else if (chunk.len != off) {
        h2o_buffer_append(&self->udp.egress.buf, chunk.base + off, chunk.len - off);
    }

    /* invoke `proceed_req` from a timer callback rather than synchronously, to avoid reentrancy into the protocol handler */
    h2o_timer_link(get_loop(self), 0, &self->udp.egress.delayed);
}
797 | | |
/**
 * `write_req` callback for UDP tunnels: receives the next chunk of the request body (stream-framed chunks) from the protocol
 * handler. Returns non-zero to signal the handler that the tunnel is gone.
 */
static int udp_write_stream(void *_self, int is_end_stream)
{
    struct st_connect_generator_t *self = _self;
    h2o_iovec_t chunk = self->src_req->entity;

    assert(!self->write_closed);

    /* the socket might have been closed due to a read error */
    if (self->socket_closed)
        return 1;

    assert(self->sock != NULL && "write_req called before proceed_req is called?");

    if (is_end_stream)
        self->write_closed = 1;

    /* if the socket is not yet open, buffer input and return */
    /* NOTE(review): with the assert above, this branch can only be taken in release builds (NDEBUG); looks like defensive
     * buffering for a state the code does not expect to reach — confirm whether it is still needed */
    if (self->sock == NULL) {
        h2o_buffer_append(&self->udp.egress.buf, chunk.base, chunk.len);
        return 0;
    }

    udp_do_write_stream(self, chunk);
    return 0;
}
823 | | |
824 | | static void udp_write_datagrams(h2o_req_t *_req, h2o_iovec_t *datagrams, size_t num_datagrams) |
825 | 0 | { |
826 | 0 | struct st_connect_generator_t *self = _req->write_req.ctx; |
827 | |
|
828 | 0 | reset_io_timeout(self); |
829 | |
|
830 | 0 | for (size_t i = 0; i != num_datagrams; ++i) |
831 | 0 | udp_write_core(self, datagrams[i]); |
832 | 0 | } |
833 | | |
/**
 * Read callback of the destination UDP socket. Reads one datagram and forwards it to the client, either as a raw HTTP Datagram
 * (when the protocol handler supports it) or wrapped in a Datagram Capsule on the response stream.
 */
static void udp_on_read(h2o_socket_t *_sock, const char *err)
{
    struct st_connect_generator_t *self = _sock->data;
    /* the payload is read into the ingress buffer at an offset of UDP_CHUNK_OVERHEAD bytes, leaving headroom in front for
     * prepending the capsule header / context ID below without copying */
    h2o_iovec_t payload =
        h2o_iovec_init(self->udp.ingress.buf + UDP_CHUNK_OVERHEAD, sizeof(self->udp.ingress.buf) - UDP_CHUNK_OVERHEAD);

    if (err != NULL) {
        close_readwrite(self);
        return;
    }

    { /* read UDP packet, or return */
        ssize_t rret;
        while ((rret = recv(h2o_socket_get_fd(self->sock), payload.base, payload.len, 0)) == -1 && errno == EINTR)
            ;
        if (rret == -1)
            return;
        payload.len = rret;
    }
    H2O_PROBE_REQUEST(CONNECT_UDP_READ, self->src_req, payload.len);

    /* prepend Context ID (of zero, indicating UDP packet) if RFC 9298 */
    if (!self->udp.is_draft03) {
        *--payload.base = 0;
        payload.len += 1;
    }

    /* forward UDP datagram as is; note that it might be zero-sized */
    if (self->src_req->forward_datagram.read_ != NULL) {
        self->src_req->forward_datagram.read_(self->src_req, &payload, 1);
    } else {
        /* stop reading until the generator's `proceed` callback reports that the chunk below has been sent */
        h2o_socket_read_stop(self->sock);
        h2o_timer_unlink(&self->timeout);
        { /* prepend Datagram Capsule length */
            uint8_t length_buf[8];
            size_t length_len = quicly_encodev(length_buf, payload.len) - length_buf;
            memcpy(payload.base - length_len, length_buf, length_len);
            payload.base -= length_len;
            payload.len += length_len;
        }
        /* prepend Datagram Capsule Type */
        *--payload.base = 0;
        payload.len += 1;
        /* UDP_CHUNK_OVERHEAD headroom must have been large enough to hold everything prepended above */
        assert(payload.base >= self->udp.ingress.buf);
        h2o_send(self->src_req, &payload, 1, H2O_SEND_STATE_IN_PROGRESS);
    }
}
881 | | |
882 | | static void udp_on_proceed(h2o_generator_t *_self, h2o_req_t *req) |
883 | 0 | { |
884 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, super, _self); |
885 | |
|
886 | 0 | if (self->sock != NULL) { |
887 | 0 | h2o_buffer_consume(&self->sock->input, self->sock->input->size); |
888 | 0 | reset_io_timeout(self); |
889 | 0 | h2o_socket_read_start(self->sock, udp_on_read); |
890 | 0 | } else { |
891 | 0 | self->read_closed = 1; |
892 | 0 | h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL); |
893 | 0 | } |
894 | 0 | } |
895 | | |
/**
 * Establishes the UDP "connection" to `server_address` and, on success, sends the HTTP success response, initiating transfer in
 * both directions. Returns non-zero on success; on failure records the error and returns zero so that the caller may try the
 * next address.
 */
static int udp_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address)
{
    int fd;

    assert(self->udp.egress.buf->size == 0); /* the handler does not call `proceed_req` until the connection becomes ready */

    H2O_PROBE_REQUEST(CONNECT_UDP_START, self->src_req, server_address->sa);
    /* connect; `connect` on a UDP socket merely fixes the peer address, hence completes synchronously */
    if ((fd = socket(server_address->sa->sa_family, SOCK_DGRAM, 0)) == -1 ||
        connect(fd, server_address->sa, server_address->salen) != 0) {
        const char *err = h2o_socket_error_conn_fail;
        if (fd != -1) {
            /* `connect` failed; capture errno before closing the descriptor */
            err = h2o_socket_get_error_string(errno, err);
            close(fd);
        }
        set_last_error(self, ERROR_CLASS_CONNECT, err);
        return 0;
    }

    /* connection established: cancel the other in-flight attempts and switch the timer to the I/O timeout */
    stop_eyeballs(self);
    self->timeout.cb = on_io_timeout;
    reset_io_timeout(self);

    /* setup, initiating transfer of early data */
#if H2O_USE_LIBUV
    self->sock = h2o_uv__poll_create(get_loop(self), fd, (uv_close_cb)free);
#else
    self->sock = h2o_evloop_socket_create(get_loop(self), fd, H2O_SOCKET_FLAG_DONT_READ);
#endif
    assert(self->sock != NULL);
    self->sock->data = self;
    self->src_req->write_req.cb = udp_write_stream;
    self->src_req->forward_datagram.write_ = udp_write_datagrams;
    self->src_req->write_req.ctx = self;

    record_connect_success(self);

    /* build and submit success; HTTP/1.1 RFC 9298 requests are answered with 101 Switching Protocols, everything else with 200 */
    if (self->src_req->version < 0x200 && !self->udp.is_draft03) {
        assert(self->src_req->upgrade.base != NULL);
        self->src_req->res.status = 101;
        self->src_req->res.reason = "Switching Protocols";
        h2o_add_header(&self->src_req->pool, &self->src_req->res.headers, H2O_TOKEN_UPGRADE, NULL, H2O_STRLIT("connect-udp"));
    } else {
        self->src_req->res.status = 200;
    }
    /* RFC 9298 requires advertising use of the Capsule Protocol (capsule-protocol: ?1) */
    if (!self->udp.is_draft03)
        h2o_add_header_by_str(&self->src_req->pool, &self->src_req->res.headers, H2O_STRLIT("capsule-protocol"), 0, NULL,
                              H2O_STRLIT("?1"));
    h2o_start_response(self->src_req, &self->super);
    h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS);

    /* write any data if provided, or just call the proceed_req callback */
    self->src_req->write_req.cb(self, self->no_req_streaming);

    return 1;
}
953 | | |
954 | | static void on_stop(h2o_generator_t *_self, h2o_req_t *req) |
955 | 0 | { |
956 | 0 | struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, super, _self); |
957 | 0 | dispose_generator(self); |
958 | 0 | } |
959 | | |
960 | | static void on_generator_dispose(void *_self) |
961 | 0 | { |
962 | 0 | struct st_connect_generator_t *self = _self; |
963 | 0 | H2O_PROBE_REQUEST0(CONNECT_DISPOSE, self->src_req); |
964 | 0 | dispose_generator(self); |
965 | 0 | } |
966 | | |
967 | | /** |
968 | | * expects "/host/port/" as input, where the preceding slash is optional |
969 | | */ |
970 | | static int masque_decode_hostport(h2o_mem_pool_t *pool, const char *_src, size_t _len, h2o_iovec_t *host, uint16_t *port) |
971 | 0 | { |
972 | 0 | char *src = (char *)_src; /* h2o_strtosizefwd takes non-const arg, so ... */ |
973 | 0 | const char *end = src + _len; |
974 | |
|
975 | 0 | if (src < end && src[0] == '/') |
976 | 0 | ++src; |
977 | |
|
978 | 0 | { /* extract host */ |
979 | 0 | size_t host_len; |
980 | 0 | if ((host_len = h2o_strstr(src, end - src, H2O_STRLIT("/"))) == SIZE_MAX || host_len == 0) |
981 | 0 | return 0; |
982 | 0 | if ((*host = h2o_uri_unescape(pool, src, host_len)).base == NULL) |
983 | 0 | return 0; |
984 | 0 | src += host_len + 1; |
985 | 0 | } |
986 | | |
987 | 0 | { /* parse port */ |
988 | 0 | size_t v; |
989 | 0 | if ((v = h2o_strtosizefwd(&src, end - src)) >= 65535) |
990 | 0 | return 0; |
991 | 0 | if (src == end || *src != '/') |
992 | 0 | return 0; |
993 | 0 | *port = (uint16_t)v; |
994 | 0 | } |
995 | | |
996 | 0 | return 1; |
997 | 0 | } |
998 | | |
/**
 * Common request-handling logic for all CONNECT variants: allocates and initializes the generator, then kicks off parallel
 * AAAA and A lookups of the destination host. Always returns 0 (request accepted); connection errors are reported
 * asynchronously via the resolver / connect callbacks.
 */
static int on_req_core(struct st_connect_handler_t *handler, h2o_req_t *req, h2o_iovec_t host, uint16_t port, int is_tcp,
                       int is_masque_draft03)
{
    struct st_connect_generator_t *self;
    /* only the union member in use (`tcp` or `udp`) is allocated; `offsetof` gives the size of the common prefix */
    size_t sizeof_self = offsetof(struct st_connect_generator_t, tcp) + (is_tcp ? sizeof(self->tcp) : sizeof(self->udp));
    self = h2o_mem_alloc_shared(&req->pool, sizeof_self, on_generator_dispose);
    memset(self, 0, sizeof_self);
    self->super.stop = on_stop;
    self->handler = handler;
    self->src_req = req;
    self->timeout.cb = on_connect_timeout;
    if (is_tcp) {
        self->is_tcp = 1;
        self->super.proceed = tcp_on_proceed;
        h2o_buffer_init(&self->tcp.sendbuf, &h2o_socket_buffer_prototype);
    } else {
        self->super.proceed = udp_on_proceed;
        h2o_buffer_init(&self->udp.egress.buf, &h2o_socket_buffer_prototype);
        self->udp.egress.delayed = (h2o_timer_t){.cb = udp_write_stream_complete_delayed};
        self->udp.is_draft03 = is_masque_draft03;
    }
    h2o_timer_link(get_loop(self), handler->config.connect_timeout, &self->timeout);

    /* setup write_req now, so that the protocol handler would not provide additional data until we call `proceed_req` */
    assert(req->entity.base != NULL && "CONNECT must indicate existence of payload");
    self->src_req->write_req.cb = is_tcp ? tcp_write : udp_write_stream;
    self->src_req->write_req.ctx = self;
    if (self->src_req->proceed_req == NULL)
        self->no_req_streaming = 1;

    char port_str[sizeof(H2O_UINT16_LONGEST_STR)];
    int port_strlen = sprintf(port_str, "%" PRIu16, port);

    /* resolve the destination, issuing the v6 and v4 lookups in parallel; results are raced by the eyeballs logic */
    self->getaddr_req.v6 = h2o_hostinfo_getaddr(
        &self->src_req->conn->ctx->receivers.hostinfo_getaddr, host, h2o_iovec_init(port_str, port_strlen), AF_INET6,
        is_tcp ? SOCK_STREAM : SOCK_DGRAM, is_tcp ? IPPROTO_TCP : IPPROTO_UDP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, self);
    self->getaddr_req.v4 = h2o_hostinfo_getaddr(
        &self->src_req->conn->ctx->receivers.hostinfo_getaddr, host, h2o_iovec_init(port_str, port_strlen), AF_INET,
        is_tcp ? SOCK_STREAM : SOCK_DGRAM, is_tcp ? IPPROTO_TCP : IPPROTO_UDP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, self);

    return 0;
}
1041 | | |
/**
 * Handles classic CONNECT (TCP) requests as well as masque draft-03 CONNECT-UDP requests, where the destination is conveyed in
 * the `:authority` pseudo-header. Returns -1 to decline requests that belong to other handlers.
 */
static int on_req_classic_connect(h2o_handler_t *_handler, h2o_req_t *req)
{
    struct st_connect_handler_t *handler = (void *)_handler;
    h2o_iovec_t host;
    uint16_t port;
    int is_tcp;

    /* requests carrying an Upgrade header (extended CONNECT) are not ours */
    if (req->upgrade.base != NULL) {
        return -1;
    } else if (h2o_memis(req->method.base, req->method.len, H2O_STRLIT("CONNECT"))) {
        /* old-style CONNECT */
        is_tcp = 1;
    } else if (h2o_memis(req->method.base, req->method.len, H2O_STRLIT("CONNECT-UDP"))) {
        /* masque (draft 03); host and port are stored the same way as ordinary CONNECT
         * TODO remove code once we drop support for draft-03 */
        if (!handler->config.support_masque_draft_03) {
            h2o_send_error_405(req, "Method Not Allowed", "Method Not Allowed", H2O_SEND_ERROR_KEEP_HEADERS);
            return 0;
        }
        is_tcp = 0;
    } else {
        /* it is not the task of this handler to handle non-CONNECT requests */
        return -1;
    }

    /* parse host and port from authority, unless it is handled above in the case of extended connect;
     * port 65535 is rejected because `h2o_url_parse_hostport` uses that value to indicate "no port specified" */
    if (h2o_url_parse_hostport(req->authority.base, req->authority.len, &host, &port) == NULL || port == 0 || port == 65535) {
        record_error(handler, req, NULL, "http_request_error", "invalid host:port", NULL);
        h2o_send_error_400(req, "Bad Request", "Bad Request", H2O_SEND_ERROR_KEEP_HEADERS);
        return 0;
    }

    return on_req_core((void *)handler, req, host, port, is_tcp, 1);
}
1076 | | |
/**
 * Handles RFC 9298 (connect-udp) requests, where the destination host and port are encoded in the request path. Returns -1 to
 * decline requests that belong to other handlers.
 */
static int on_req_connect_udp(h2o_handler_t *_handler, h2o_req_t *req)
{
    struct st_connect_handler_t *handler = (void *)_handler;
    h2o_iovec_t host;
    uint16_t port;

    /* reject requests wo. upgrade: connect-udp */
    if (!(req->upgrade.base != NULL && h2o_lcstris(req->upgrade.base, req->upgrade.len, H2O_STRLIT("connect-udp"))))
        return -1;

    /* check method: GET for HTTP/1.1 upgrades, extended CONNECT for HTTP/2 and later */
    if (!(req->version < 0x200 ? h2o_memis(req->method.base, req->method.len, H2O_STRLIT("GET"))
                               : h2o_memis(req->method.base, req->method.len, H2O_STRLIT("CONNECT"))))
        return -1;

    /* masque (RFC 9298); parse host/port from the portion of the path following the configured prefix */
    if (!masque_decode_hostport(&req->pool, req->path_normalized.base + req->pathconf->path.len,
                                req->path_normalized.len - req->pathconf->path.len, &host, &port)) {
        record_error(handler, req, NULL, "http_request_error", "invalid URI", NULL);
        h2o_send_error_400(req, "Bad Request", "Bad Request", H2O_SEND_ERROR_KEEP_HEADERS);
        return 0;
    }

    return on_req_core((void *)handler, req, host, port, 0, 0);
}
1105 | | |
1106 | | static void do_register(h2o_pathconf_t *pathconf, h2o_proxy_config_vars_t *config, h2o_connect_acl_entry_t *acl_entries, |
1107 | | size_t num_acl_entries, int (*on_req)(struct st_h2o_handler_t *self, h2o_req_t *req)) |
1108 | 0 | { |
1109 | 0 | assert(config->max_buffer_size != 0); |
1110 | | |
1111 | 0 | struct st_connect_handler_t *self = (void *)h2o_create_handler(pathconf, offsetof(struct st_connect_handler_t, acl.entries) + |
1112 | 0 | sizeof(*self->acl.entries) * num_acl_entries); |
1113 | |
|
1114 | 0 | self->super.on_req = on_req; |
1115 | 0 | self->super.supports_request_streaming = 1; |
1116 | 0 | self->config = *config; |
1117 | 0 | self->acl.count = num_acl_entries; |
1118 | 0 | memcpy(self->acl.entries, acl_entries, sizeof(self->acl.entries[0]) * num_acl_entries); |
1119 | 0 | } |
1120 | | |
/**
 * registers the handler serving classic CONNECT (and, when enabled, masque draft-03 CONNECT-UDP) requests
 */
void h2o_connect_register(h2o_pathconf_t *pathconf, h2o_proxy_config_vars_t *config, h2o_connect_acl_entry_t *acl_entries,
                          size_t num_acl_entries)
{
    do_register(pathconf, config, acl_entries, num_acl_entries, on_req_classic_connect);
}
1126 | | |
/**
 * registers the handler serving RFC 9298 connect-udp requests
 */
void h2o_connect_udp_register(h2o_pathconf_t *pathconf, h2o_proxy_config_vars_t *config, h2o_connect_acl_entry_t *acl_entries,
                              size_t num_acl_entries)
{
    do_register(pathconf, config, acl_entries, num_acl_entries, on_req_connect_udp);
}
1132 | | |
/**
 * Parses one ACL entry of the form "[+-]address(?::port|)(?:/netmask|)" into `output`. Returns NULL on success, or a
 * human-readable error message on failure.
 */
const char *h2o_connect_parse_acl(h2o_connect_acl_entry_t *output, const char *input)
{
    /* type: '+' allows, '-' denies */
    switch (input[0]) {
    case '+':
        output->allow_ = 1;
        break;
    case '-':
        output->allow_ = 0;
        break;
    default:
        return "ACL entry must begin with + or -";
    }

    /* extract address, port; copy the host into a NUL-terminated stack buffer for `inet_pton` / `strcmp` below */
    h2o_iovec_t host_vec;
    uint16_t port;
    const char *slash_at;
    if ((slash_at = h2o_url_parse_hostport(input + 1, strlen(input + 1), &host_vec, &port)) == NULL)
        goto GenericParseError;
    char *host = alloca(host_vec.len + 1);
    memcpy(host, host_vec.base, host_vec.len);
    host[host_vec.len] = '\0';

    /* parse netmask (or addr_mask is set to zero to indicate that mask was not specified) */
    if (*slash_at != '\0') {
        if (*slash_at != '/')
            goto GenericParseError;
        if (sscanf(slash_at + 1, "%zu", &output->addr_mask) != 1 || output->addr_mask == 0)
            return "invalid address mask";
    } else {
        output->addr_mask = 0;
    }

    /* parse address: "*" (any), an IPv4 literal, or an IPv6 literal; an unspecified mask defaults to the full address width */
    struct in_addr v4addr;
    struct in6_addr v6addr;
    if (strcmp(host, "*") == 0) {
        output->addr_family = H2O_CONNECT_ACL_ADDRESS_ANY;
        if (output->addr_mask != 0)
            return "wildcard address (*) cannot have a netmask";
    } else if (inet_pton(AF_INET, host, &v4addr) == 1) {
        output->addr_family = H2O_CONNECT_ACL_ADDRESS_V4;
        if (output->addr_mask == 0) {
            output->addr_mask = 32;
        } else if (output->addr_mask > 32) {
            return "invalid address mask";
        }
        /* store in host byte order with the bits outside the mask cleared, so lookup can compare directly */
        output->addr.v4 = ntohl(v4addr.s_addr) & TO_BITMASK(uint32_t, output->addr_mask);
    } else if (inet_pton(AF_INET6, host, &v6addr) == 1) {
        output->addr_family = H2O_CONNECT_ACL_ADDRESS_V6;
        if (output->addr_mask == 0) {
            output->addr_mask = 128;
        } else if (output->addr_mask > 128) {
            return "invalid address mask";
        }
        /* copy byte-by-byte, masking the partial byte at the boundary and zeroing everything past the mask */
        size_t i;
        for (i = 0; i < output->addr_mask / 8; ++i)
            output->addr.v6[i] = v6addr.s6_addr[i];
        if (output->addr_mask % 8 != 0)
            output->addr.v6[i] = v6addr.s6_addr[i] & TO_BITMASK(uint8_t, output->addr_mask % 8);
        for (++i; i < PTLS_ELEMENTSOF(output->addr.v6); ++i)
            output->addr.v6[i] = 0;
    } else {
        return "failed to parse address";
    }

    /* set port (for whatever reason, `h2o_url_parse_hostport` sets port to 65535 when not specified, convert that to zero) */
    output->port = port == 65535 ? 0 : port;

    return NULL;

GenericParseError:
    return "failed to parse input, expected format is: [+-]address(?::port|)(?:/netmask|)";
}
1208 | | |
1209 | | int h2o_connect_lookup_acl(h2o_connect_acl_entry_t *acl_entries, size_t num_acl_entries, struct sockaddr *target) |
1210 | 0 | { |
1211 | 0 | uint32_t target_v4addr = 0; |
1212 | 0 | uint16_t target_port; |
1213 | | |
1214 | | /* reject anything other than v4/v6, as well as converting the values to native format */ |
1215 | 0 | switch (target->sa_family) { |
1216 | 0 | case AF_INET: { |
1217 | 0 | struct sockaddr_in *sin = (void *)target; |
1218 | 0 | target_v4addr = ntohl(sin->sin_addr.s_addr); |
1219 | 0 | target_port = ntohs(sin->sin_port); |
1220 | 0 | } break; |
1221 | 0 | case AF_INET6: |
1222 | 0 | target_port = htons(((struct sockaddr_in6 *)target)->sin6_port); |
1223 | 0 | break; |
1224 | 0 | default: |
1225 | 0 | return 0; |
1226 | 0 | } |
1227 | | |
1228 | | /* check each ACL entry */ |
1229 | 0 | for (size_t i = 0; i != num_acl_entries; ++i) { |
1230 | 0 | h2o_connect_acl_entry_t *entry = acl_entries + i; |
1231 | | /* check port */ |
1232 | 0 | if (entry->port != 0 && entry->port != target_port) |
1233 | 0 | goto Next; |
1234 | | /* check address */ |
1235 | 0 | switch (entry->addr_family) { |
1236 | 0 | case H2O_CONNECT_ACL_ADDRESS_ANY: |
1237 | 0 | break; |
1238 | 0 | case H2O_CONNECT_ACL_ADDRESS_V4: { |
1239 | 0 | if (target->sa_family != AF_INET) |
1240 | 0 | goto Next; |
1241 | 0 | if (entry->addr.v4 != (target_v4addr & TO_BITMASK(uint32_t, entry->addr_mask))) |
1242 | 0 | goto Next; |
1243 | 0 | } break; |
1244 | 0 | case H2O_CONNECT_ACL_ADDRESS_V6: { |
1245 | 0 | if (target->sa_family != AF_INET6) |
1246 | 0 | continue; |
1247 | 0 | uint8_t *target_v6addr = ((struct sockaddr_in6 *)target)->sin6_addr.s6_addr; |
1248 | 0 | size_t i; |
1249 | 0 | for (i = 0; i < entry->addr_mask / 8; ++i) |
1250 | 0 | if (entry->addr.v6[i] != target_v6addr[i]) |
1251 | 0 | goto Next; |
1252 | 0 | if (entry->addr_mask % 8 != 0 && entry->addr.v6[i] != (target_v6addr[i] & TO_BITMASK(uint8_t, entry->addr_mask % 8))) |
1253 | 0 | goto Next; |
1254 | 0 | } break; |
1255 | 0 | } |
1256 | | /* match */ |
1257 | 0 | return entry->allow_; |
1258 | 0 | Next:; |
1259 | 0 | } |
1260 | | |
1261 | | /* default rule is deny */ |
1262 | 0 | return 0; |
1263 | 0 | } |