Coverage Report

Created: 2023-11-19 06:15

/src/h2o/lib/handler/connect.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (c) 2021 Fastly Inc.
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a copy
5
 * of this software and associated documentation files (the "Software"), to
6
 * deal in the Software without restriction, including without limitation the
7
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8
 * sell copies of the Software, and to permit persons to whom the Software is
9
 * furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice shall be included in
12
 * all copies or substantial portions of the Software.
13
 *
14
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20
 * IN THE SOFTWARE.
21
 */
22
#include "h2o/hostinfo.h"
23
#include "h2o/memory.h"
24
#include "h2o/socket.h"
25
#include "h2o.h"
26
#include "../probes_.h"
27
28
/* module name passed to h2o_req_log_error when reporting connect failures */
#define MODULE_NAME "lib/handler/connect.c"
29
30
/**
 * Handler serving CONNECT / CONNECT-UDP requests by tunneling traffic to the requested destination.
 */
struct st_connect_handler_t {
    h2o_handler_t super;
    h2o_proxy_config_vars_t config;
    struct {
        /* number of entries in `entries` */
        size_t count;
        /* variable-length list of ACL rules restricting permissible destinations; `[0]` is used as a
         * flexible array member (storage is allocated past the end of the struct) */
        h2o_connect_acl_entry_t entries[0];
    } acl;
};
38
39
/* maximum number of resolved addresses retained per address family (v4 / v6) */
#define MAX_ADDRESSES_PER_FAMILY 4
/* bytes reserved in front of a UDP payload for the CONNECT-UDP Stream Chunk header */
#define UDP_CHUNK_OVERHEAD 3
41
42
/**
 * One resolved destination address (socket address plus its length).
 */
struct st_server_address_t {
    struct sockaddr *sa;
    socklen_t salen;
};
46
47
/**
 * Generator implementing the tunnel for a single CONNECT (TCP) or CONNECT-UDP request.
 */
struct st_connect_generator_t {
    h2o_generator_t super;
    /* handler that created this generator */
    struct st_connect_handler_t *handler;
    /* the request being served */
    h2o_req_t *src_req;

    /* in-flight hostname lookups, one per address family; each is reset to NULL upon completion / cancellation */
    struct {
        h2o_hostinfo_getaddr_req_t *v4, *v6;
    } getaddr_req;
    /* addresses resolved so far; entries at index < `used` have already been consumed by connection attempts */
    struct {
        struct st_server_address_t list[MAX_ADDRESSES_PER_FAMILY * 2];
        size_t size;
        size_t used;
    } server_addresses;

    /* socket to the destination, or NULL before connection establishment and after close */
    h2o_socket_t *sock;
    /**
     * Most significant and latest error that occurred, if any. Significance is represented as `class`, in descending order.
     */
    struct {
        enum error_class { ERROR_CLASS_NAME_RESOLUTION, ERROR_CLASS_ACCESS_PROHIBITED, ERROR_CLASS_CONNECT } class;
        const char *str;
    } last_error;

    /**
     * timer used to handle user-visible timeouts (i.e., connect- and io-timeout)
     */
    h2o_timer_t timeout;
    /**
     * timer used for RFC 8305-style happy eyeballs (resolution delay and connection attempt delay)
     */
    h2o_timer_t eyeball_delay;

    /**
     * Pick v4 (or v6) address in the next connection attempt. RFC 8305 recommends trying the other family one by one.
     */
    unsigned pick_v4 : 1;
    /**
     * set when the send-side is closed by the user
     */
    unsigned write_closed : 1;
    /**
     * set when h2o_send has been called to notify that the socket has been closed
     */
    unsigned read_closed : 1;
    /**
     * if socket has been closed
     */
    unsigned socket_closed : 1;
    /**
     * if connecting using TCP (or UDP)
     */
    unsigned is_tcp : 1;
    /**
     * TCP- and UDP-specific data
     */
    union {
        struct {
            /* client-to-server bytes buffered until the socket write completes */
            h2o_buffer_t *sendbuf;
            /* server-to-client bytes detached from the socket upon close (see `close_socket`) */
            h2o_buffer_t *recvbuf_detached;
        } tcp;
        struct {
            struct {
                h2o_buffer_t *buf; /* for datagram fragments */
                h2o_timer_t delayed;
            } egress;
            struct {
                /* receive buffer; first UDP_CHUNK_OVERHEAD bytes are reserved for prepending a chunk header */
                uint8_t buf[UDP_CHUNK_OVERHEAD + 1500];
            } ingress;
        } udp;
    };
};
118
119
static h2o_iovec_t get_proxy_status_identity(struct st_connect_generator_t *self)
120
0
{
121
0
    h2o_iovec_t identity = self->src_req->conn->ctx->globalconf->proxy_status_identity;
122
0
    if (identity.base == NULL)
123
0
        identity = h2o_iovec_init(H2O_STRLIT("h2o"));
124
0
    return identity;
125
0
}
126
127
static const struct st_server_address_t *get_dest_addr(struct st_connect_generator_t *self)
128
0
{
129
0
    if (self->server_addresses.used > 0) {
130
0
        return &self->server_addresses.list[self->server_addresses.used - 1];
131
0
    } else {
132
0
        return NULL;
133
0
    }
134
0
}
135
136
/**
 * Appends a `proxy-status` response header (RFC 9209) describing the outcome of the connect attempt, unless disabled by
 * configuration. `error_type`, `details`, `rcode` may each be NULL, and `dest_addr_str.base` may be NULL; absent values are
 * simply omitted from the header.
 */
static void add_proxy_status_header(struct st_connect_generator_t *self, const char *error_type, const char *details,
                                    const char *rcode, h2o_iovec_t dest_addr_str)
{
    if (!self->handler->config.connect_proxy_status_enabled)
        return;

    h2o_mem_pool_t *pool = &self->src_req->pool;
    /* at most 9 parts: the identity followed by up to four optional `; key=value` sections of two parts each */
    h2o_iovec_t parts[9] = {
        get_proxy_status_identity(self),
    };
    size_t nparts = 1;
    if (error_type != NULL) {
        parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; error="));
        parts[nparts++] = h2o_iovec_init(error_type, strlen(error_type));
    }
    if (rcode != NULL) {
        parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; rcode="));
        parts[nparts++] = h2o_iovec_init(rcode, strlen(rcode));
    }
    if (details != NULL) {
        parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; details="));
        /* details may contain arbitrary error text, hence it is encoded as a structured-field string */
        parts[nparts++] = h2o_encode_sf_string(pool, details, SIZE_MAX);
    }
    if (dest_addr_str.base != NULL) {
        parts[nparts++] = h2o_iovec_init(H2O_STRLIT("; next-hop="));
        parts[nparts++] = dest_addr_str;
    }
    assert(nparts <= sizeof(parts) / sizeof(parts[0]));
    h2o_iovec_t hval = h2o_concat_list(pool, parts, nparts);
    h2o_add_header_by_str(pool, &self->src_req->res.headers, H2O_STRLIT("proxy-status"), 0, NULL, hval.base, hval.len);
}
167
168
/* Evaluates to a `type`-sized value with the uppermost `len` bits set (e.g., a network-prefix mask). */
#define TO_BITMASK(type, len) ((type) ~(((type)1 << (sizeof(type) * 8 - (len))) - 1))
169
170
/**
 * Logs a connection-level error and records it in the `proxy-status` response header. `details` and `rcode` may be NULL.
 */
static void record_error(struct st_connect_generator_t *self, const char *error_type, const char *details, const char *rcode)
{
    H2O_PROBE_REQUEST(CONNECT_ERROR, self->src_req, error_type, details, rcode);

    /* stringify the address of the latest connection attempt, if any */
    char dest_addr_strbuf[NI_MAXHOST];
    h2o_iovec_t dest_addr_str = h2o_iovec_init(NULL, 0);
    const struct st_server_address_t *addr = get_dest_addr(self);
    if (addr != NULL) {
        size_t len = h2o_socket_getnumerichost(addr->sa, addr->salen, dest_addr_strbuf);
        if (len != SIZE_MAX) {
            dest_addr_str = h2o_iovec_init(dest_addr_strbuf, len);
        }
    }

    h2o_req_log_error(self->src_req, MODULE_NAME, "%s; rcode=%s; details=%s; next-hop=%s", error_type,
                      rcode != NULL ? rcode : "(null)", details != NULL ? details : "(null)",
                      dest_addr_str.base != NULL ? dest_addr_str.base : "(null)");

    add_proxy_status_header(self, error_type, details, rcode, dest_addr_str);
}
190
191
/**
 * Records a successful connection: fires the probe and adds a `proxy-status` header carrying the destination address.
 * No-op when no connection attempt has been recorded.
 */
static void record_connect_success(struct st_connect_generator_t *self)
{
    const struct st_server_address_t *addr = get_dest_addr(self);
    if (addr == NULL)
        return;

    H2O_PROBE_REQUEST(CONNECT_SUCCESS, self->src_req, addr->sa);

    char dest_addr_strbuf[NI_MAXHOST];
    size_t len = h2o_socket_getnumerichost(addr->sa, addr->salen, dest_addr_strbuf);
    if (len != SIZE_MAX) {
        add_proxy_status_header(self, NULL, NULL, NULL, h2o_iovec_init(dest_addr_strbuf, len));
    }
}
205
206
static void record_socket_error(struct st_connect_generator_t *self, const char *err)
207
0
{
208
0
    const char *error_type;
209
0
    const char *details = NULL;
210
0
    if (err == h2o_socket_error_conn_refused)
211
0
        error_type = "connection_refused";
212
0
    else if (err == h2o_socket_error_conn_timed_out)
213
0
        error_type = "connection_timeout";
214
0
    else if (err == h2o_socket_error_network_unreachable || err == h2o_socket_error_host_unreachable)
215
0
        error_type = "destination_ip_unroutable";
216
0
    else {
217
0
        error_type = "proxy_internal_error";
218
0
        details = err;
219
0
    }
220
0
    record_error(self, error_type, details, NULL);
221
0
}
222
223
/* forward declarations; a failed attempt re-enters `try_connect`, which dispatches back to the per-protocol connectors */
static void try_connect(struct st_connect_generator_t *self);
static int tcp_start_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address);
static int udp_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address);
226
227
static h2o_loop_t *get_loop(struct st_connect_generator_t *self)
228
0
{
229
0
    return self->src_req->conn->ctx->loop;
230
0
}
231
232
/**
 * Cancels all in-flight happy-eyeballs machinery: both pending address lookups and the resolution / connection-attempt
 * delay timer.
 */
static void stop_eyeballs(struct st_connect_generator_t *self)
{
    if (self->getaddr_req.v4 != NULL) {
        h2o_hostinfo_getaddr_cancel(self->getaddr_req.v4);
        self->getaddr_req.v4 = NULL;
    }
    if (self->getaddr_req.v6 != NULL) {
        h2o_hostinfo_getaddr_cancel(self->getaddr_req.v6);
        self->getaddr_req.v6 = NULL;
    }
    if (self->eyeball_delay.cb != NULL) {
        h2o_timer_unlink(&self->eyeball_delay);
        self->eyeball_delay.cb = NULL;
    }
}
247
248
/**
 * Releases everything owned by the generator: lookups and delay timers, the destination socket, protocol-specific buffers,
 * and the timeout timer. (Pool-allocated memory is released with the request pool.)
 */
static void dispose_generator(struct st_connect_generator_t *self)
{
    stop_eyeballs(self);
    if (self->sock != NULL) {
        h2o_socket_close(self->sock);
        self->sock = NULL;
        self->socket_closed = 1;
    }
    if (self->is_tcp) {
        if (self->tcp.sendbuf != NULL)
            h2o_buffer_dispose(&self->tcp.sendbuf);
        if (self->tcp.recvbuf_detached != NULL)
            h2o_buffer_dispose(&self->tcp.recvbuf_detached);
    } else {
        if (self->udp.egress.buf != NULL)
            h2o_buffer_dispose(&self->udp.egress.buf);
        h2o_timer_unlink(&self->udp.egress.delayed);
    }
    h2o_timer_unlink(&self->timeout);
}
268
269
/**
 * Closes the destination socket. Returns whether data destined to the client is still in flight, in which case the final
 * read-close notification has to be deferred.
 */
static int close_socket(struct st_connect_generator_t *self)
{
    int send_inflight;

    if (self->is_tcp) {
        /* detach the receive buffer so that bytes already read from the server can still be delivered to the client */
        self->tcp.recvbuf_detached = self->sock->input;
        send_inflight = self->tcp.recvbuf_detached->size != 0;
    } else {
        /* NOTE(review): "not reading" appears to be used as the signal that a datagram is being forwarded to the client —
         * confirm against the read-side logic */
        send_inflight = !h2o_socket_is_reading(self->sock);
    }
    /* give the socket a fresh input buffer before closing (the TCP buffer was detached above) */
    h2o_buffer_init(&self->sock->input, &h2o_socket_buffer_prototype);
    h2o_socket_close(self->sock);
    self->sock = NULL;
    self->socket_closed = 1;

    return send_inflight;
}
286
287
/**
 * Initiates shutdown of both directions of the tunnel, closing the socket (if still open) and notifying the client of
 * read-close and / or write-close as appropriate.
 */
static void close_readwrite(struct st_connect_generator_t *self)
{
    int send_inflight = 0;

    if (self->sock != NULL)
        send_inflight = close_socket(self);
    else if (self->is_tcp)
        send_inflight = self->tcp.recvbuf_detached->size != 0;

    if (h2o_timer_is_linked(&self->timeout))
        h2o_timer_unlink(&self->timeout);

    /* immediately notify read-close if necessary, setting up a delayed task for destroying other items; the timer is reset if
     * `h2o_send` indirectly invokes `dispose_generator`. */
    if (!self->read_closed && !send_inflight) {
        h2o_timer_link(get_loop(self), 0, &self->timeout);
        self->read_closed = 1;
        h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL);
        return;
    }

    /* notify write-close if necessary; see the comment above regarding the use of the timer */
    if (!self->write_closed && self->is_tcp && self->tcp.sendbuf->size != 0) {
        self->write_closed = 1;
        h2o_timer_link(get_loop(self), 0, &self->timeout);
        self->src_req->proceed_req(self->src_req, h2o_httpclient_error_io /* TODO notify as cancel? */);
        return;
    }
}
316
317
/**
 * Timer callback fired when the I/O timeout expires; tears down both directions of the tunnel.
 */
static void on_io_timeout(h2o_timer_t *timer)
{
    struct st_connect_generator_t *generator = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, timeout, timer);

    H2O_PROBE_REQUEST0(CONNECT_IO_TIMEOUT, generator->src_req);
    close_readwrite(generator);
}
323
324
static void reset_io_timeout(struct st_connect_generator_t *self)
325
0
{
326
0
    if (self->sock != NULL) {
327
0
        h2o_timer_unlink(&self->timeout);
328
0
        h2o_timer_link(get_loop(self), self->handler->config.io_timeout, &self->timeout);
329
0
    }
330
0
}
331
332
/**
 * Aborts the connect flow and sends an HTTP error response with the given status `code`, reason `msg`, and body `errstr`,
 * keeping any headers (e.g., proxy-status) already added.
 */
static void send_connect_error(struct st_connect_generator_t *self, int code, const char *msg, const char *errstr)
{
    stop_eyeballs(self);
    h2o_timer_unlink(&self->timeout);

    if (self->sock != NULL) {
        h2o_socket_close(self->sock);
        self->sock = NULL;
    }

    h2o_send_error_generic(self->src_req, code, msg, errstr, H2O_SEND_ERROR_KEEP_HEADERS);
}
344
345
/**
 * Responds to the client with a 502 gateway error carrying `errstr`, tearing down any in-progress connection attempt.
 */
static void on_connect_error(struct st_connect_generator_t *self, const char *errstr)
{
    send_connect_error(self, 502, "Gateway Error", errstr);
}
349
350
/**
 * Timer callback fired when connection establishment takes too long. The reported error type distinguishes whether any
 * address had been resolved yet (connection timeout) or name resolution itself stalled (DNS timeout).
 */
static void on_connect_timeout(h2o_timer_t *entry)
{
    struct st_connect_generator_t *generator = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, timeout, entry);
    const char *error_type = generator->server_addresses.size > 0 ? "connection_timeout" : "dns_timeout";

    record_error(generator, error_type, NULL, NULL);
    on_connect_error(generator, h2o_httpclient_error_io_timeout);
}
360
361
static void set_last_error(struct st_connect_generator_t *self, enum error_class class, const char *str)
362
0
{
363
0
    if (self->last_error.class <= class) {
364
0
        self->last_error.class = class;
365
0
        self->last_error.str = str;
366
0
    }
367
0
}
368
369
/**
 * Fired when the RFC 8305 resolution delay expires (the v4 lookup completed with usable addresses while the v6 lookup was
 * still pending); starts connecting without further waiting for the v6 result.
 */
static void on_resolution_delay_timeout(h2o_timer_t *entry)
{
    struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, eyeball_delay, entry);

    assert(self->server_addresses.used == 0);

    try_connect(self);
}
377
378
/**
 * Fired when the RFC 8305 connection attempt delay expires; abandons the current attempt in favor of the next resolved
 * address, if one is available.
 */
static void on_connection_attempt_delay_timeout(h2o_timer_t *entry)
{
    struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, eyeball_delay, entry);

    /* If no more addresses are available, continue trying the current attempt until the connect_timeout expires. */
    if (self->server_addresses.used == self->server_addresses.size)
        return;

    /* close current connection attempt and try next. */
    h2o_socket_close(self->sock);
    self->sock = NULL;
    try_connect(self);
}
391
392
/**
 * Copies addresses from a `getaddrinfo` result into `self->server_addresses`, subject to the handler's ACL. Returns whether
 * at least one address was added.
 */
static int store_server_addresses(struct st_connect_generator_t *self, struct addrinfo *res)
{
    size_t num_added = 0;

    /* copy first entries in the response; ordering of addresses being returned by `getaddrinfo` is respected, as ordinary clients
     * (incl. forward proxy) are not expected to distribute the load among the addresses being returned. */
    do {
        assert(self->server_addresses.size < PTLS_ELEMENTSOF(self->server_addresses.list));
        if (h2o_connect_lookup_acl(self->handler->acl.entries, self->handler->acl.count, res->ai_addr)) {
            struct st_server_address_t *dst = self->server_addresses.list + self->server_addresses.size++;
            /* copy the sockaddr into the request pool so that it outlives `res` */
            dst->sa = h2o_mem_alloc_pool_aligned(&self->src_req->pool, H2O_ALIGNOF(struct sockaddr), res->ai_addrlen);
            memcpy(dst->sa, res->ai_addr, res->ai_addrlen);
            dst->salen = res->ai_addrlen;
            ++num_added;
        }
    } while ((res = res->ai_next) != NULL && num_added < MAX_ADDRESSES_PER_FAMILY);

    return num_added != 0;
}
411
412
/**
 * Completion callback shared by the v4 and v6 hostname lookups. Stores the resolved addresses (or records the error), applies
 * the RFC 8305 resolution-delay logic, and starts / restarts connection attempts as appropriate.
 *
 * Fix: the non-NULL assertion on `res` is now checked BEFORE `res->ai_socktype` is dereferenced; the original asserted the
 * socket type first, dereferencing `res` ahead of the null-check.
 */
static void on_getaddr(h2o_hostinfo_getaddr_req_t *getaddr_req, const char *errstr, struct addrinfo *res, void *_self)
{
    struct st_connect_generator_t *self = _self;

    /* determine which of the two lookups has completed, and mark it as such */
    if (getaddr_req == self->getaddr_req.v4) {
        self->getaddr_req.v4 = NULL;
    } else if (getaddr_req == self->getaddr_req.v6) {
        self->getaddr_req.v6 = NULL;
    } else {
        h2o_fatal("unexpected getaddr_req");
    }

    /* Store addresses, or convert error to ACL denial. */
    if (errstr == NULL) {
        assert(res != NULL && "upon successful return, getaddrinfo shall return at least one address (RFC 3493 Section 6.1)");
        if (self->is_tcp) {
            assert(res->ai_socktype == SOCK_STREAM);
        } else {
            assert(res->ai_socktype == SOCK_DGRAM);
        }
        if (!store_server_addresses(self, res))
            set_last_error(self, ERROR_CLASS_ACCESS_PROHIBITED, "destination_ip_prohibited");
    } else {
        set_last_error(self, ERROR_CLASS_NAME_RESOLUTION, errstr);
    }

    if (self->getaddr_req.v4 == NULL) {
        /* If v6 lookup is still running, that means that v4 lookup has *just* completed. Set the resolution delay timer if v4
         * addresses are available. */
        if (self->getaddr_req.v6 != NULL) {
            assert(self->server_addresses.used == 0);
            if (self->server_addresses.size != 0) {
                self->eyeball_delay.cb = on_resolution_delay_timeout;
                h2o_timer_link(get_loop(self), self->handler->config.happy_eyeballs.name_resolution_delay, &self->eyeball_delay);
            }
            return;
        }

        /* Both v4 and v6 lookups are complete. If the resolution delay timer is running, reset it. */
        if (h2o_timer_is_linked(&self->eyeball_delay) && self->eyeball_delay.cb == on_resolution_delay_timeout) {
            assert(self->server_addresses.used == 0);
            h2o_timer_unlink(&self->eyeball_delay);
        }
        /* In case no addresses are available, send HTTP error. */
        if (self->server_addresses.size == 0) {
            if (self->last_error.class == ERROR_CLASS_ACCESS_PROHIBITED) {
                record_error(self, self->last_error.str, NULL, NULL);
                send_connect_error(self, 403, "Destination IP Prohibited", "Destination IP Prohibited");
            } else {
                /* map well-known resolver errors onto their DNS RCODE names for the proxy-status header */
                const char *rcode;
                if (self->last_error.str == h2o_hostinfo_error_nxdomain) {
                    rcode = "NXDOMAIN";
                } else if (self->last_error.str == h2o_hostinfo_error_nodata) {
                    rcode = "NODATA";
                } else if (self->last_error.str == h2o_hostinfo_error_refused) {
                    rcode = "REFUSED";
                } else if (self->last_error.str == h2o_hostinfo_error_servfail) {
                    rcode = "SERVFAIL";
                } else {
                    rcode = NULL;
                }
                record_error(self, "dns_error", self->last_error.str, rcode);
                on_connect_error(self, self->last_error.str);
            }
            return;
        }
    }

    /* If the connection attempt has been under way for more than CONNECTION_ATTEMPT_DELAY_MS and the lookup that just completed
     * gave us a new address to try, then stop that connection attempt and start a new connection attempt using the new address.
     *
     * If the connection attempt has been under way for less than that, then do nothing for now.  Eventually, either the timeout
     * will expire or the connection attempt will complete.
     *
     * If the connection attempt is under way but the lookup has not provided us any new address to try, then do nothing for now,
     * and wait for the connection attempt to complete. */
    if (self->sock != NULL) {
        if (h2o_timer_is_linked(&self->eyeball_delay))
            return;
        if (self->server_addresses.used == self->server_addresses.size)
            return;
        h2o_socket_close(self->sock);
        self->sock = NULL;
    }
    try_connect(self);
}
497
498
/**
 * Consumes the address at `idx`: swaps it into position `server_addresses.used`, advances `used` past it, and flips the
 * preferred address family for the next attempt (per RFC 8305). Returns a pointer to the consumed entry.
 */
static struct st_server_address_t *pick_and_swap(struct st_connect_generator_t *self, size_t idx)
{
    struct st_server_address_t *server_address = NULL;

    if (idx != self->server_addresses.used) {
        struct st_server_address_t swap = self->server_addresses.list[idx];
        self->server_addresses.list[idx] = self->server_addresses.list[self->server_addresses.used];
        self->server_addresses.list[self->server_addresses.used] = swap;
    }
    server_address = &self->server_addresses.list[self->server_addresses.used];
    self->server_addresses.used++;
    self->pick_v4 = !self->pick_v4;
    return server_address;
}
512
513
static void try_connect(struct st_connect_generator_t *self)
514
0
{
515
0
    struct st_server_address_t *server_address;
516
517
0
    do {
518
0
        server_address = NULL;
519
520
        /* Fetch the next address from the list of resolved addresses. */
521
0
        for (size_t i = self->server_addresses.used; i < self->server_addresses.size; i++) {
522
0
            if (self->pick_v4 && self->server_addresses.list[i].sa->sa_family == AF_INET)
523
0
                server_address = pick_and_swap(self, i);
524
0
            else if (!self->pick_v4 && self->server_addresses.list[i].sa->sa_family == AF_INET6)
525
0
                server_address = pick_and_swap(self, i);
526
0
        }
527
528
        /* If address of the preferred address family is not available, select one of the other family, if available. Otherwise,
529
         * send an HTTP error response or wait for address resolution. */
530
0
        if (server_address == NULL) {
531
0
            if (self->server_addresses.used == self->server_addresses.size) {
532
0
                if (self->getaddr_req.v4 == NULL && self->getaddr_req.v6 == NULL) {
533
0
                    assert(self->last_error.class == ERROR_CLASS_CONNECT);
534
0
                    record_socket_error(self, self->last_error.str);
535
0
                    on_connect_error(self, self->last_error.str);
536
0
                }
537
0
                return;
538
0
            }
539
0
            server_address = &self->server_addresses.list[self->server_addresses.used];
540
0
            self->server_addresses.used++;
541
0
        }
542
543
        /* Connect. Retry if the connect function returns error immediately. */
544
0
    } while (!(self->is_tcp ? tcp_start_connect : udp_connect)(self, server_address));
545
0
}
546
547
/**
 * Completion callback for writes to the destination TCP socket; releases the written bytes and requests the next chunk from
 * the client, or tears the tunnel down on error / after the final chunk.
 */
static void tcp_on_write_complete(h2o_socket_t *_sock, const char *err)
{
    struct st_connect_generator_t *self = _sock->data;

    if (err != NULL) {
        H2O_PROBE_REQUEST(CONNECT_TCP_WRITE_ERROR, self->src_req, err);
    }

    /* until h2o_socket_t implements shutdown(SHUT_WR), do a bidirectional close when we close the write-side */
    if (err != NULL || self->write_closed) {
        close_readwrite(self);
        return;
    }

    reset_io_timeout(self);

    /* the buffered bytes have been written; drop them and ask the client for more */
    h2o_buffer_consume(&self->tcp.sendbuf, self->tcp.sendbuf->size);
    self->src_req->proceed_req(self->src_req, NULL);
}
566
567
static void tcp_do_write(struct st_connect_generator_t *self)
568
0
{
569
0
    reset_io_timeout(self);
570
571
0
    h2o_iovec_t vec = h2o_iovec_init(self->tcp.sendbuf->bytes, self->tcp.sendbuf->size);
572
0
    H2O_PROBE_REQUEST(CONNECT_TCP_WRITE, self->src_req, vec.len);
573
0
    h2o_socket_write(self->sock, &vec, 1, tcp_on_write_complete);
574
0
}
575
576
/**
 * `write_req` callback for TCP tunnels: buffers the next request-body chunk and starts writing it once the socket is open.
 * Returns 1 to reject the write when the socket has already been closed, 0 otherwise.
 */
static int tcp_write(void *_self, int is_end_stream)
{
    struct st_connect_generator_t *self = _self;
    h2o_iovec_t chunk = self->src_req->entity;

    assert(!self->write_closed);
    assert(self->tcp.sendbuf->size == 0);

    /* the socket might have been closed due to a read error */
    if (self->socket_closed)
        return 1;

    /* buffer input */
    h2o_buffer_append(&self->tcp.sendbuf, chunk.base, chunk.len);
    if (is_end_stream)
        self->write_closed = 1;

    /* write if the socket has been opened */
    if (self->sock != NULL && !h2o_socket_is_writing(self->sock))
        tcp_do_write(self);

    return 0;
}
599
600
/**
 * Read callback for the destination TCP socket; forwards received bytes to the client, pausing further reads until the
 * client acknowledges them (see `tcp_on_proceed`).
 */
static void tcp_on_read(h2o_socket_t *_sock, const char *err)
{
    struct st_connect_generator_t *self = _sock->data;

    /* stop reading and suspend the I/O timeout while the data is in flight to the client */
    h2o_socket_read_stop(self->sock);
    h2o_timer_unlink(&self->timeout);

    if (err == NULL) {
        h2o_iovec_t vec = h2o_iovec_init(self->sock->input->bytes, self->sock->input->size);
        H2O_PROBE_REQUEST(CONNECT_TCP_READ, self->src_req, vec.len);
        h2o_send(self->src_req, &vec, 1, H2O_SEND_STATE_IN_PROGRESS);
    } else {
        H2O_PROBE_REQUEST(CONNECT_TCP_READ_ERROR, self->src_req, err);
        /* unidirectional close is signalled using H2O_SEND_STATE_FINAL, but the write side remains open */
        self->read_closed = 1;
        h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL);
    }
}
618
619
/**
 * Generator `proceed` callback: invoked when the client has accepted previously sent bytes; discards them and resumes
 * reading from the destination, or signals final close when the socket is already gone.
 */
static void tcp_on_proceed(h2o_generator_t *_self, h2o_req_t *req)
{
    struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, super, _self);

    assert(!self->read_closed);

    if (self->sock != NULL) {
        /* bytes forwarded to the client have been accepted; drop them and resume reading */
        h2o_buffer_consume(&self->sock->input, self->sock->input->size);
        reset_io_timeout(self);
        h2o_socket_read_start(self->sock, tcp_on_read);
    } else {
        /* socket is already closed; notify the client that the tunnel is done */
        self->read_closed = 1;
        h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL);
    }
}
634
635
/**
 * Completion callback of `h2o_socket_connect`; on failure records the error and falls through to the next address, on
 * success switches to I/O mode and submits the 200 response that establishes the tunnel.
 */
static void tcp_on_connect(h2o_socket_t *_sock, const char *err)
{
    struct st_connect_generator_t *self = _sock->data;

    assert(self->sock == _sock);

    if (err != NULL) {
        /* connection failed; remember the error and try the next address */
        set_last_error(self, ERROR_CLASS_CONNECT, err);
        h2o_socket_close(self->sock);
        self->sock = NULL;
        try_connect(self);
        return;
    }

    /* connected; switch from the eyeballs machinery to the I/O timeout */
    stop_eyeballs(self);
    self->timeout.cb = on_io_timeout;
    reset_io_timeout(self);

    /* start the write if there's data to be sent */
    if (self->tcp.sendbuf->size != 0 || self->write_closed)
        tcp_do_write(self);

    record_connect_success(self);

    /* build and submit 200 response */
    self->src_req->res.status = 200;
    h2o_start_response(self->src_req, &self->super);
    h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS);
}
664
665
/**
 * Starts a non-blocking TCP connection attempt to `server_address`, arming the RFC 8305 connection-attempt delay timer.
 * Returns 0 on immediate failure (causing the caller to retry with the next address), 1 when an attempt is under way.
 */
static int tcp_start_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address)
{
    H2O_PROBE_REQUEST(CONNECT_TCP_START, self->src_req, server_address->sa);

    const char *errstr;
    if ((self->sock = h2o_socket_connect(get_loop(self), server_address->sa, server_address->salen, tcp_on_connect, &errstr)) ==
        NULL) {
        set_last_error(self, ERROR_CLASS_CONNECT, errstr);
        return 0;
    }

    self->sock->data = self;
#if !H2O_USE_LIBUV
    /* This is the maximum amount of data that will be buffered within userspace. It is hard-coded to 64KB to balance throughput
     * and latency, and because we do not expect the need to change the value. */
    h2o_evloop_socket_set_max_read_size(self->sock, 64 * 1024);
#endif
    self->eyeball_delay.cb = on_connection_attempt_delay_timeout;
    h2o_timer_link(get_loop(self), self->handler->config.happy_eyeballs.connection_attempt_delay, &self->eyeball_delay);

    return 1;
}
687
688
/**
 * Parses one CONNECT-UDP Stream Chunk (varint type, varint length, payload) from `start`/`len`. Returns the payload, or
 * (NULL, 0) when a complete chunk is not yet available. On success, `*to_consume` receives the total number of bytes of the
 * chunk (header included), and `*skip` is set iff the chunk is not of type UDP_PACKET and its payload must be discarded.
 *
 * Fix: the remaining-length comparison is now an explicit unsigned comparison; the original compared `end - bytes`
 * (ptrdiff_t) against `chunk_length` (uint64_t), relying on implicit signed-to-unsigned conversion.
 */
static h2o_iovec_t udp_get_next_chunk(const char *start, size_t len, size_t *to_consume, int *skip)
{
    const uint8_t *bytes = (const uint8_t *)start;
    const uint8_t *end = bytes + len;
    uint64_t chunk_type, chunk_length;

    chunk_type = ptls_decode_quicint(&bytes, end);
    if (chunk_type == UINT64_MAX)
        return h2o_iovec_init(NULL, 0);
    chunk_length = ptls_decode_quicint(&bytes, end);
    if (chunk_length == UINT64_MAX)
        return h2o_iovec_init(NULL, 0);

    /* chunk is incomplete; `end - bytes` is non-negative here as `ptls_decode_quicint` never advances past `end` */
    if ((uint64_t)(end - bytes) < chunk_length)
        return h2o_iovec_init(NULL, 0);

    /*
     * https://tools.ietf.org/html/draft-ietf-masque-connect-udp-03#section-6
     * CONNECT-UDP Stream Chunks can be used to convey UDP payloads, by
     * using a CONNECT-UDP Stream Chunk Type of UDP_PACKET (value 0x00).
     */
    *skip = chunk_type != 0;
    *to_consume = (bytes + chunk_length) - (const uint8_t *)start;

    return h2o_iovec_init(bytes, chunk_length);
}
715
716
/**
 * Sends one datagram to the connected UDP peer, retrying on EINTR. Other send failures are ignored, UDP being best-effort.
 */
static void udp_write_core(struct st_connect_generator_t *self, h2o_iovec_t datagram)
{
    H2O_PROBE_REQUEST(CONNECT_UDP_WRITE, self->src_req, datagram.len);
    while (send(h2o_socket_get_fd(self->sock), datagram.base, datagram.len, 0) == -1 && errno == EINTR)
        ;
}
722
723
/**
 * Zero-delay timer callback scheduled after a stream-chunk write; either asks the client for the next chunk, or, when the
 * final chunk has been handled, tears the tunnel down.
 */
static void udp_write_stream_complete_delayed(h2o_timer_t *_timer)
{
    struct st_connect_generator_t *generator = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, udp.egress.delayed, _timer);

    if (!generator->write_closed) {
        generator->src_req->proceed_req(generator->src_req, NULL);
        return;
    }
    close_readwrite(generator);
}
734
735
/**
 * Extracts complete CONNECT-UDP Stream Chunks from `chunk` (prepended with any previously buffered fragment) and sends each
 * UDP_PACKET payload; any trailing incomplete fragment is kept in `udp.egress.buf` until more data arrives. Completion is
 * reported asynchronously via the zero-delay `udp.egress.delayed` timer.
 */
static void udp_do_write_stream(struct st_connect_generator_t *self, h2o_iovec_t chunk)
{
    int from_buf = 0;
    size_t off = 0;

    reset_io_timeout(self);

    /* if a fragment is pending, append the new bytes to it and parse from the buffer instead */
    if (self->udp.egress.buf->size != 0) {
        from_buf = 1;
        if (chunk.len != 0)
            h2o_buffer_append(&self->udp.egress.buf, chunk.base, chunk.len);
        chunk.base = self->udp.egress.buf->bytes;
        chunk.len = self->udp.egress.buf->size;
    }
    do {
        int skip = 0;
        size_t to_consume;
        h2o_iovec_t datagram = udp_get_next_chunk(chunk.base + off, chunk.len - off, &to_consume, &skip);
        if (datagram.base == NULL)
            break;
        if (!skip)
            udp_write_core(self, datagram);
        off += to_consume;
    } while (1);

    /* retain the unconsumed tail (an incomplete chunk) for the next invocation */
    if (from_buf) {
        h2o_buffer_consume(&self->udp.egress.buf, off);
    } else if (chunk.len != off) {
        h2o_buffer_append(&self->udp.egress.buf, chunk.base + off, chunk.len - off);
    }

    h2o_timer_link(get_loop(self), 0, &self->udp.egress.delayed);
}
768
769
/**
 * `write_req.cb` for CONNECT-UDP requests: receives request-body bytes from the protocol handler. Returns non-zero to
 * reject further input (socket already gone), zero otherwise. Input arriving before the socket is connected is
 * buffered and flushed later by `udp_connect`.
 */
static int udp_write_stream(void *_self, int is_end_stream)
{
    struct st_connect_generator_t *self = _self;
    h2o_iovec_t chunk = self->src_req->entity;

    assert(!self->write_closed);

    /* the socket might have been closed due to a read error */
    if (self->socket_closed)
        return 1;

    if (is_end_stream)
        self->write_closed = 1;

    /* if the socket is not yet open, buffer input and return */
    if (self->sock == NULL) {
        h2o_buffer_append(&self->udp.egress.buf, chunk.base, chunk.len);
        return 0;
    }

    udp_do_write_stream(self, chunk);
    return 0;
}
792
793
/**
 * `forward_datagram.write_` hook: forwards datagrams received via the HTTP/3 DATAGRAM extension directly to the UDP
 * socket, bypassing the capsule-stream framing.
 */
static void udp_write_datagrams(h2o_req_t *_req, h2o_iovec_t *datagrams, size_t num_datagrams)
{
    struct st_connect_generator_t *self = _req->write_req.ctx;
    size_t i;

    reset_io_timeout(self);

    for (i = 0; i < num_datagrams; ++i)
        udp_write_core(self, datagrams[i]);
}
802
803
/**
 * Read callback for the UDP socket: receives one datagram and forwards it downstream, either as a raw datagram (when
 * the protocol handler supports `forward_datagram`) or wrapped in a capsule written onto the response stream.
 */
static void udp_on_read(h2o_socket_t *_sock, const char *err)
{
    struct st_connect_generator_t *self = _sock->data;

    if (err != NULL) {
        close_readwrite(self);
        return;
    }

    /* read UDP packet, or return; the packet is received at offset UDP_CHUNK_OVERHEAD so that the capsule header can
     * later be prepended in place without copying the payload */
    ssize_t rret;
    while ((rret = recv(h2o_socket_get_fd(self->sock), self->udp.ingress.buf + UDP_CHUNK_OVERHEAD,
                        sizeof(self->udp.ingress.buf) - UDP_CHUNK_OVERHEAD, 0)) == -1 &&
           errno == EINTR)
        ;
    if (rret == -1)
        return;
    H2O_PROBE_REQUEST(CONNECT_UDP_READ, self->src_req, (size_t)rret);

    /* forward UDP datagram as is; note that it might be zero-sized */
    if (self->src_req->forward_datagram.read_ != NULL) {
        h2o_iovec_t vec = h2o_iovec_init(self->udp.ingress.buf + UDP_CHUNK_OVERHEAD, rret);
        self->src_req->forward_datagram.read_(self->src_req, &vec, 1);
    } else {
        /* stop reading until `udp_on_proceed` restarts us; then build the capsule header (type byte + varint length)
         * in front of the payload, shifting the payload down when the varint is shorter than the reserved space */
        h2o_socket_read_stop(self->sock);
        h2o_timer_unlink(&self->timeout);
        size_t off = 0;
        self->udp.ingress.buf[off++] = 0; /* chunk type = UDP_PACKET */
        off = quicly_encodev(self->udp.ingress.buf + off, (uint64_t)rret) - self->udp.ingress.buf;
        assert(off <= UDP_CHUNK_OVERHEAD);
        if (off < UDP_CHUNK_OVERHEAD)
            memmove(self->udp.ingress.buf + off, self->udp.ingress.buf + UDP_CHUNK_OVERHEAD, rret);
        off += rret;
        h2o_iovec_t vec = h2o_iovec_init(self->udp.ingress.buf, off);
        h2o_send(self->src_req, &vec, 1, H2O_SEND_STATE_IN_PROGRESS);
    }
}
840
841
/**
 * Generator `proceed` callback for CONNECT-UDP: invoked when the previously sent response bytes have been flushed.
 * Resumes reading from the UDP socket, or finalizes the response if the socket is already gone.
 */
static void udp_on_proceed(h2o_generator_t *_self, h2o_req_t *req)
{
    struct st_connect_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, super, _self);

    if (self->sock == NULL) {
        /* socket has been closed; finish the response */
        self->read_closed = 1;
        h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_FINAL);
        return;
    }

    /* discard consumed input and resume reading the next datagram */
    h2o_buffer_consume(&self->sock->input, self->sock->input->size);
    reset_io_timeout(self);
    h2o_socket_read_start(self->sock, udp_on_read);
}
854
855
/**
 * Attempts to create and connect a UDP socket to `server_address`. Returns 0 on failure (after recording the error so
 * the eyeballs logic can try the next address), 1 on success — in which case the 200 response is submitted and any
 * buffered early data is flushed.
 */
static int udp_connect(struct st_connect_generator_t *self, struct st_server_address_t *server_address)
{
    int fd;

    H2O_PROBE_REQUEST(CONNECT_UDP_START, self->src_req, server_address->sa);
    /* connect */
    if ((fd = socket(server_address->sa->sa_family, SOCK_DGRAM, 0)) == -1 ||
        connect(fd, server_address->sa, server_address->salen) != 0) {
        const char *err = h2o_socket_error_conn_fail;
        if (fd != -1) {
            /* `connect` failed; capture errno before closing, as close(2) may clobber it */
            err = h2o_socket_get_error_string(errno, err);
            close(fd);
        }
        set_last_error(self, ERROR_CLASS_CONNECT, err);
        return 0;
    }

    /* address selection is settled; switch the timer from connect-timeout to io-timeout duty */
    stop_eyeballs(self);
    self->timeout.cb = on_io_timeout;
    reset_io_timeout(self);

    /* setup, initiating transfer of early data */
#if H2O_USE_LIBUV
    self->sock = h2o_uv__poll_create(get_loop(self), fd, (uv_close_cb)free);
#else
    self->sock = h2o_evloop_socket_create(get_loop(self), fd, H2O_SOCKET_FLAG_DONT_READ);
#endif
    assert(self->sock != NULL);
    self->sock->data = self;
    self->src_req->write_req.cb = udp_write_stream;
    self->src_req->forward_datagram.write_ = udp_write_datagrams;
    self->src_req->write_req.ctx = self;
    /* flush request data buffered while the socket was being established */
    if (self->udp.egress.buf->size != 0 || self->write_closed)
        udp_do_write_stream(self, h2o_iovec_init(NULL, 0));

    record_connect_success(self);

    /* build and submit 200 response */
    self->src_req->res.status = 200;
    h2o_start_response(self->src_req, &self->super);
    h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS);

    return 1;
}
899
900
/**
 * Generator `stop` callback: releases all resources held by the generator when the response is aborted.
 */
static void on_stop(h2o_generator_t *_self, h2o_req_t *req)
{
    struct st_connect_generator_t *generator = H2O_STRUCT_FROM_MEMBER(struct st_connect_generator_t, super, _self);
    dispose_generator(generator);
}
905
906
static void on_generator_dispose(void *_self)
907
0
{
908
0
    struct st_connect_generator_t *self = _self;
909
0
    H2O_PROBE_REQUEST0(CONNECT_DISPOSE, self->src_req);
910
0
    dispose_generator(self);
911
0
}
912
913
/**
 * Handler entry point: accepts CONNECT (TCP tunnel) and CONNECT-UDP requests, allocates the generator from the
 * request pool, and starts concurrent IPv6 / IPv4 name resolution (happy eyeballs). Returns -1 to decline the
 * request (unrecognized method), 0 once the request has been taken over.
 */
static int on_req(h2o_handler_t *_handler, h2o_req_t *req)
{
    struct st_connect_handler_t *handler = (void *)_handler;
    h2o_iovec_t host;
    uint16_t port;
    int is_tcp;

    if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("CONNECT"))) {
        is_tcp = 1;
    } else if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("CONNECT-UDP"))) {
        is_tcp = 0;
    } else {
        return -1;
    }

    /* 65535 is rejected because `h2o_url_parse_hostport` uses that value to indicate "port not specified" */
    if (h2o_url_parse_hostport(req->authority.base, req->authority.len, &host, &port) == NULL || port == 0 || port == 65535) {
        h2o_send_error_400(req, "Bad Request", "Bad Request", H2O_SEND_ERROR_KEEP_HEADERS);
        return 0;
    }

    /* allocate only the tcp or udp member of the trailing union, depending on the tunnel type */
    struct st_connect_generator_t *self;
    size_t sizeof_self = offsetof(struct st_connect_generator_t, tcp) + (is_tcp ? sizeof(self->tcp) : sizeof(self->udp));
    self = h2o_mem_alloc_shared(&req->pool, sizeof_self, on_generator_dispose);
    memset(self, 0, sizeof_self);
    self->super.stop = on_stop;
    self->handler = handler;
    self->src_req = req;
    self->timeout.cb = on_connect_timeout;
    if (is_tcp) {
        self->is_tcp = 1;
        self->super.proceed = tcp_on_proceed;
        h2o_buffer_init(&self->tcp.sendbuf, &h2o_socket_buffer_prototype);
    } else {
        self->super.proceed = udp_on_proceed;
        h2o_buffer_init(&self->udp.egress.buf, &h2o_socket_buffer_prototype);
        self->udp.egress.delayed = (h2o_timer_t){.cb = udp_write_stream_complete_delayed};
    }
    h2o_timer_link(get_loop(self), handler->config.connect_timeout, &self->timeout);

    /* setup write_req now, so that the protocol handler would not provide additional data until we call `proceed_req` */
    assert(req->entity.len == 0 && "the handler is incapable of accepting input via `write_req.cb` while writing req->entity");
    self->src_req->write_req.cb = is_tcp ? tcp_write : udp_write_stream;
    self->src_req->write_req.ctx = self;

    char port_str[sizeof(H2O_UINT16_LONGEST_STR)];
    int port_strlen = sprintf(port_str, "%" PRIu16, port);

    /* kick off v6 and v4 lookups in parallel; results are raced by `on_getaddr` */
    self->getaddr_req.v6 = h2o_hostinfo_getaddr(
        &self->src_req->conn->ctx->receivers.hostinfo_getaddr, host, h2o_iovec_init(port_str, port_strlen), AF_INET6,
        is_tcp ? SOCK_STREAM : SOCK_DGRAM, is_tcp ? IPPROTO_TCP : IPPROTO_UDP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, self);
    self->getaddr_req.v4 = h2o_hostinfo_getaddr(
        &self->src_req->conn->ctx->receivers.hostinfo_getaddr, host, h2o_iovec_init(port_str, port_strlen), AF_INET,
        is_tcp ? SOCK_STREAM : SOCK_DGRAM, is_tcp ? IPPROTO_TCP : IPPROTO_UDP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, self);

    return 0;
}
969
970
/**
 * Registers the CONNECT handler on `pathconf`, copying `config` and the ACL entries into a handler allocated with a
 * flexible trailing array large enough to hold all `num_acl_entries` entries.
 */
void h2o_connect_register(h2o_pathconf_t *pathconf, h2o_proxy_config_vars_t *config, h2o_connect_acl_entry_t *acl_entries,
                          size_t num_acl_entries)
{
    assert(config->max_buffer_size != 0);

    size_t handler_size = offsetof(struct st_connect_handler_t, acl.entries) + sizeof(h2o_connect_acl_entry_t) * num_acl_entries;
    struct st_connect_handler_t *self = (void *)h2o_create_handler(pathconf, handler_size);

    self->super.on_req = on_req;
    self->super.supports_request_streaming = 1;
    self->config = *config;
    self->acl.count = num_acl_entries;
    memcpy(self->acl.entries, acl_entries, sizeof(*acl_entries) * num_acl_entries);
}
984
985
/**
 * Parses one ACL entry of the form `[+-]address(:port)?(\/netmask)?` into `output`. Returns NULL on success, or a
 * static error-message string on failure. Used at configuration time, so the input is trusted (hence `alloca` on a
 * hostname taken from the config file is acceptable here).
 */
const char *h2o_connect_parse_acl(h2o_connect_acl_entry_t *output, const char *input)
{
    /* type */
    switch (input[0]) {
    case '+':
        output->allow_ = 1;
        break;
    case '-':
        output->allow_ = 0;
        break;
    default:
        return "ACL entry must begin with + or -";
    }

    /* extract address, port */
    h2o_iovec_t host_vec;
    uint16_t port;
    const char *slash_at;
    if ((slash_at = h2o_url_parse_hostport(input + 1, strlen(input + 1), &host_vec, &port)) == NULL)
        goto GenericParseError;
    /* NUL-terminate a stack copy of the host, as inet_pton / strcmp require a C string */
    char *host = alloca(host_vec.len + 1);
    memcpy(host, host_vec.base, host_vec.len);
    host[host_vec.len] = '\0';

    /* parse netmask (or addr_mask is set to zero to indicate that mask was not specified) */
    if (*slash_at != '\0') {
        if (*slash_at != '/')
            goto GenericParseError;
        if (sscanf(slash_at + 1, "%zu", &output->addr_mask) != 1 || output->addr_mask == 0)
            return "invalid address mask";
    } else {
        output->addr_mask = 0;
    }

    /* parse address; stored masked so that lookups only need to mask the target address */
    struct in_addr v4addr;
    struct in6_addr v6addr;
    if (strcmp(host, "*") == 0) {
        output->addr_family = H2O_CONNECT_ACL_ADDRESS_ANY;
        if (output->addr_mask != 0)
            return "wildcard address (*) cannot have a netmask";
    } else if (inet_pton(AF_INET, host, &v4addr) == 1) {
        output->addr_family = H2O_CONNECT_ACL_ADDRESS_V4;
        if (output->addr_mask == 0) {
            output->addr_mask = 32; /* no mask given: exact-match the full address */
        } else if (output->addr_mask > 32) {
            return "invalid address mask";
        }
        output->addr.v4 = ntohl(v4addr.s_addr) & TO_BITMASK(uint32_t, output->addr_mask);
    } else if (inet_pton(AF_INET6, host, &v6addr) == 1) {
        output->addr_family = H2O_CONNECT_ACL_ADDRESS_V6;
        if (output->addr_mask == 0) {
            output->addr_mask = 128; /* no mask given: exact-match the full address */
        } else if (output->addr_mask > 128) {
            return "invalid address mask";
        }
        /* copy whole bytes covered by the mask, mask the partial byte, zero the rest */
        size_t i;
        for (i = 0; i < output->addr_mask / 8; ++i)
            output->addr.v6[i] = v6addr.s6_addr[i];
        if (output->addr_mask % 8 != 0)
            output->addr.v6[i] = v6addr.s6_addr[i] & TO_BITMASK(uint8_t, output->addr_mask % 8);
        for (++i; i < PTLS_ELEMENTSOF(output->addr.v6); ++i)
            output->addr.v6[i] = 0;
    } else {
        return "failed to parse address";
    }

    /* set port (for whatever reason, `h2o_url_parse_hostport` sets port to 65535 when not specified, convert that to zero) */
    output->port = port == 65535 ? 0 : port;

    return NULL;

GenericParseError:
    return "failed to parse input, expected format is: [+-]address(?::port|)(?:/netmask|)";
}
1060
1061
/**
 * Checks `target` against the ACL, returning the `allow_` flag of the first matching entry, or 0 (deny) when nothing
 * matches or the address family is neither v4 nor v6.
 */
int h2o_connect_lookup_acl(h2o_connect_acl_entry_t *acl_entries, size_t num_acl_entries, struct sockaddr *target)
{
    uint32_t target_v4addr = 0;
    uint16_t target_port;

    /* reject anything other than v4/v6, as well as converting the values to native format */
    switch (target->sa_family) {
    case AF_INET: {
        struct sockaddr_in *sin = (void *)target;
        target_v4addr = ntohl(sin->sin_addr.s_addr);
        target_port = ntohs(sin->sin_port);
    } break;
    case AF_INET6:
        /* use ntohs (network-to-host), matching the AF_INET path above; the previous code called htons, which behaves
         * identically on all supported platforms but is semantically the wrong direction */
        target_port = ntohs(((struct sockaddr_in6 *)target)->sin6_port);
        break;
    default:
        return 0;
    }

    /* check each ACL entry */
    for (size_t i = 0; i != num_acl_entries; ++i) {
        h2o_connect_acl_entry_t *entry = acl_entries + i;
        /* check port */
        if (entry->port != 0 && entry->port != target_port)
            goto Next;
        /* check address */
        switch (entry->addr_family) {
        case H2O_CONNECT_ACL_ADDRESS_ANY:
            break;
        case H2O_CONNECT_ACL_ADDRESS_V4: {
            if (target->sa_family != AF_INET)
                goto Next;
            if (entry->addr.v4 != (target_v4addr & TO_BITMASK(uint32_t, entry->addr_mask)))
                goto Next;
        } break;
        case H2O_CONNECT_ACL_ADDRESS_V6: {
            if (target->sa_family != AF_INET6)
                goto Next; /* was `continue`; identical effect, now consistent with the sibling branches */
            uint8_t *target_v6addr = ((struct sockaddr_in6 *)target)->sin6_addr.s6_addr;
            size_t i;
            /* compare whole bytes covered by the mask, then the masked partial byte */
            for (i = 0; i < entry->addr_mask / 8; ++i)
                if (entry->addr.v6[i] != target_v6addr[i])
                    goto Next;
            if (entry->addr_mask % 8 != 0 && entry->addr.v6[i] != (target_v6addr[i] & TO_BITMASK(uint8_t, entry->addr_mask % 8)))
                goto Next;
        } break;
        }
        /* match */
        return entry->allow_;
    Next:;
    }

    /* default rule is deny */
    return 0;
}