Coverage Report

Created: 2025-07-29 06:09

/src/h2o/lib/core/request.c
/*
 * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Tatsuhiro Tsujikawa
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include "h2o.h"
#include "h2o/socket.h"

#ifndef IOV_MAX
#define IOV_MAX UIO_MAXIOV
#endif

#define INITIAL_INBUFSZ 8192

struct st_deferred_request_action_t {
    h2o_timer_t timeout;
    h2o_req_t *req;
};

struct st_reprocess_request_deferred_t {
    struct st_deferred_request_action_t super;
    h2o_iovec_t method;
    const h2o_url_scheme_t *scheme;
    h2o_iovec_t authority;
    h2o_iovec_t path;
    h2o_req_overrides_t *overrides;
    int is_delegated;
};

struct st_send_error_deferred_t {
    h2o_req_t *req;
    int status;
    const char *reason;
    const char *body;
    int flags;
    h2o_timer_t _timeout;
};

static void on_deferred_action_dispose(void *_action)
{
    struct st_deferred_request_action_t *action = _action;
    h2o_timer_unlink(&action->timeout);
}

static struct st_deferred_request_action_t *create_deferred_action(h2o_req_t *req, size_t sz, h2o_timer_cb cb)
{
    struct st_deferred_request_action_t *action = h2o_mem_alloc_shared(&req->pool, sz, on_deferred_action_dispose);
    action->req = req;
    h2o_timer_init(&action->timeout, cb);
    h2o_timer_link(req->conn->ctx->loop, 0, &action->timeout);
    return action;
}
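
/* Illustrative sketch, not part of the original file: a caller-defined deferred
 * action embeds struct st_deferred_request_action_t as its first member so the
 * timer callback can recover both the request and any extra state, in the same
 * way st_reprocess_request_deferred_t is used further below. The names
 * example_action_t, on_example_action_cb and example_defer_action are
 * hypothetical. */
struct example_action_t {
    struct st_deferred_request_action_t super;
    int extra_state;
};

static void on_example_action_cb(h2o_timer_t *entry)
{
    struct example_action_t *args = H2O_STRUCT_FROM_MEMBER(struct example_action_t, super.timeout, entry);
    h2o_req_log_error(args->super.req, "lib/core/request.c", "deferred action ran with state %d", args->extra_state);
}

static void example_defer_action(h2o_req_t *req, int extra_state)
{
    struct example_action_t *args = (struct example_action_t *)create_deferred_action(req, sizeof(*args), on_example_action_cb);
    args->extra_state = extra_state;
}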

static h2o_hostconf_t *find_hostconf(h2o_hostconf_t **hostconfs, h2o_iovec_t authority, uint16_t default_port,
                                     h2o_iovec_t *wildcard_match)
{
    h2o_iovec_t hostname;
    uint16_t port;
    char *hostname_lc;

    /* safe-guard for alloca */
    if (authority.len >= 65536)
        return NULL;

    /* extract the specified hostname and port */
    if (h2o_url_parse_hostport(authority.base, authority.len, &hostname, &port) == NULL)
        return NULL;
    if (port == 65535)
        port = default_port;

    /* convert supplied hostname to lower-case */
    hostname_lc = alloca(hostname.len);
    h2o_strcopytolower(hostname_lc, hostname.base, hostname.len);

    do {
        h2o_hostconf_t *hostconf = *hostconfs;
        if (hostconf->authority.port == port || (hostconf->authority.port == 65535 && port == default_port)) {
            if (hostconf->authority.host.base[0] == '*') {
                /* matching against "*.foo.bar" */
                size_t cmplen = hostconf->authority.host.len - 1;
                if (cmplen < hostname.len &&
                    memcmp(hostconf->authority.host.base + 1, hostname_lc + hostname.len - cmplen, cmplen) == 0) {
                    *wildcard_match = h2o_iovec_init(hostname.base, hostname.len - cmplen);
                    return hostconf;
                }
            } else {
                /* exact match */
                if (h2o_memis(hostconf->authority.host.base, hostconf->authority.host.len, hostname_lc, hostname.len))
                    return hostconf;
            }
        }
    } while (*++hostconfs != NULL);

    return NULL;
}
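
/* Worked example for the wildcard branch above (illustrative values): with a
 * configured host of "*.example.com" (len 13), cmplen is 12, so for the
 * authority "img.example.com" (len 15) the last 12 bytes of the lower-cased
 * hostname (".example.com") are compared against the configured suffix, and on
 * a match wildcard_match is set to the leading 3 bytes, "img". */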

static h2o_hostconf_t *find_default_hostconf(h2o_hostconf_t **hostconfs)
{
    h2o_hostconf_t *fallback_host = hostconfs[0]->global->fallback_host;

    do {
        h2o_hostconf_t *hostconf = *hostconfs;
        if (!hostconf->strict_match)
            return hostconf;
    } while (*++hostconfs != NULL);

    return fallback_host;
}

h2o_hostconf_t *h2o_req_setup(h2o_req_t *req)
{
    h2o_context_t *ctx = req->conn->ctx;
    h2o_hostconf_t *hostconf;

    req->processed_at = h2o_get_timestamp(ctx, &req->pool);

    /* find the host context (or use the default if authority is missing or is of zero-length) */
    if (req->input.authority.len != 0) {
        if (req->conn->hosts[1] == NULL ||
            (hostconf = find_hostconf(req->conn->hosts, req->input.authority, req->input.scheme->default_port,
                                      &req->authority_wildcard_match)) == NULL)
            hostconf = find_default_hostconf(req->conn->hosts);
    } else {
        hostconf = find_default_hostconf(req->conn->hosts);
        req->input.authority = hostconf->authority.hostport;
    }

    req->scheme = req->input.scheme;
    req->method = req->input.method;
    req->authority = req->input.authority;
    req->path = req->input.path;
    req->path_normalized =
        h2o_url_normalize_path(&req->pool, req->input.path.base, req->input.path.len, &req->query_at, &req->norm_indexes);
    req->input.query_at = req->query_at; /* we can do this since input.path == path */

    return hostconf;
}
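
/* Note: req->input.* preserves the request target exactly as received from the
 * client, while req->method, req->scheme, req->authority and req->path are the
 * working copies that later internal delegation or reprocessing (see
 * h2o_reprocess_request below) may overwrite. */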

static void call_handlers(h2o_req_t *req, h2o_handler_t **handler)
{
    h2o_handler_t **end = req->pathconf->handlers.entries + req->pathconf->handlers.size;

    for (; handler != end; ++handler) {
        req->handler = *handler;
        if ((*handler)->on_req(*handler, req) == 0)
            return;
    }

    h2o_send_error_404(req, "File Not Found", "not found", 0);
}
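
/* Illustrative sketch (the handler and its registration are hypothetical, not
 * part of this file): the contract used by call_handlers() above is that a
 * handler's on_req callback returns 0 once it has taken ownership of the
 * request, and any non-zero value to decline so that the next handler of the
 * pathconf is tried. */
static int example_on_req(h2o_handler_t *self, h2o_req_t *req)
{
    if (!h2o_memis(req->method.base, req->method.len, H2O_STRLIT("GET")))
        return -1; /* decline; call_handlers() moves on to the next handler */
    req->res.status = 200;
    req->res.reason = "OK";
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, H2O_STRLIT("text/plain; charset=utf-8"));
    h2o_send_inline(req, H2O_STRLIT("hello world\n"));
    return 0;
}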

static void setup_pathconf(h2o_req_t *req, h2o_hostconf_t *hostconf)
{
    h2o_pathconf_t *selected_pathconf = &hostconf->fallback_path;
    size_t i;

    /* setup pathconf, or redirect to "path/" */
    for (i = 0; i != hostconf->paths.size; ++i) {
        h2o_pathconf_t *candidate = hostconf->paths.entries[i];
        if (req->path_normalized.len >= candidate->path.len &&
            memcmp(req->path_normalized.base, candidate->path.base, candidate->path.len) == 0 &&
            (candidate->path.base[candidate->path.len - 1] == '/' || req->path_normalized.len == candidate->path.len ||
             req->path_normalized.base[candidate->path.len] == '/')) {
            selected_pathconf = candidate;
            break;
        }
    }
    h2o_req_bind_conf(req, hostconf, selected_pathconf);
}
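
/* Worked example for the prefix test above (illustrative paths): a pathconf
 * registered at "/assets" matches "/assets" and "/assets/app.js" but not
 * "/assetsfoo", because the byte that follows the configured prefix must
 * either not exist or be '/'; a pathconf registered at "/" matches every
 * request since its last byte is '/'. */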

static void deferred_proceed_cb(h2o_timer_t *entry)
{
    h2o_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_req_t, _timeout_entry, entry);
    h2o_proceed_response(req);
}

static void close_generator_and_filters(h2o_req_t *req)
{
    /* close the generator if it is still open */
    if (req->_generator != NULL) {
        /* close generator */
        if (req->_generator->stop != NULL)
            req->_generator->stop(req->_generator, req);
        req->_generator = NULL;
    }
    /* close the ostreams still open */
    while (req->_ostr_top->next != NULL) {
        if (req->_ostr_top->stop != NULL)
            req->_ostr_top->stop(req->_ostr_top, req);
        req->_ostr_top = req->_ostr_top->next;
    }
}

static void reset_response(h2o_req_t *req)
{
    req->res = (h2o_res_t){0, NULL, SIZE_MAX};
    req->res.reason = "OK";
    req->_next_filter_index = 0;
    req->bytes_sent = 0;
}

static void retain_original_response(h2o_req_t *req)
{
    if (req->res.original.status != 0)
        return;

    req->res.original.status = req->res.status;
    h2o_vector_reserve(&req->pool, &req->res.original.headers, req->res.headers.size);
    h2o_memcpy(req->res.original.headers.entries, req->res.headers.entries,
               sizeof(req->res.headers.entries[0]) * req->res.headers.size);
    req->res.original.headers.size = req->res.headers.size;
}

void h2o_write_error_log(h2o_iovec_t prefix, h2o_iovec_t msg)
{
    /* use writev(2) to emit error atomically */
    struct iovec vecs[] = {{prefix.base, prefix.len}, {msg.base, msg.len}, {"\n", 1}};
    H2O_BUILD_ASSERT(sizeof(vecs) / sizeof(vecs[0]) <= IOV_MAX);
    writev(2, vecs, sizeof(vecs) / sizeof(vecs[0]));
}

static void on_default_error_callback(void *data, h2o_iovec_t prefix, h2o_iovec_t msg)
{
    h2o_req_t *req = (void *)data;
    if (req->error_logs == NULL)
        h2o_buffer_init(&req->error_logs, &h2o_socket_buffer_prototype);
    h2o_buffer_append(&req->error_logs, prefix.base, prefix.len);
    h2o_buffer_append(&req->error_logs, msg.base, msg.len);

    if (req->pathconf->error_log.emit_request_errors) {
        h2o_write_error_log(prefix, msg);
    }
}

void h2o_init_request(h2o_req_t *req, h2o_conn_t *conn, h2o_req_t *src)
{
    /* clear all memory (except the memory pool, since it is large) */
    memset(req, 0, offsetof(h2o_req_t, pool));

    /* init memory pool (before others, since it may be used) */
    h2o_mem_init_pool(&req->pool);

    /* init properties that should be initialized to non-zero */
    req->conn = conn;
    req->_timeout_entry.cb = deferred_proceed_cb;
    req->res.reason = "OK"; /* default to "OK" regardless of the status value, it's not important after all (never sent in HTTP2) */
    req->res.content_length = SIZE_MAX;
    req->preferred_chunk_size = SIZE_MAX;
    req->content_length = SIZE_MAX;
    req->remaining_delegations = conn == NULL ? 0 : conn->ctx->globalconf->max_delegations;
    req->remaining_reprocesses = conn == NULL ? 0 : conn->ctx->globalconf->max_reprocesses;
    req->error_log_delegate.cb = on_default_error_callback;
    req->error_log_delegate.data = req;

    if (src != NULL) {
        size_t i;
#define COPY(buf)                                                           \
    do {                                                                    \
        req->buf.base = h2o_mem_alloc_pool(&req->pool, char, src->buf.len); \
        memcpy(req->buf.base, src->buf.base, src->buf.len);                 \
        req->buf.len = src->buf.len;                                        \
    } while (0)
        COPY(input.authority);
        COPY(input.method);
        COPY(input.path);
        req->input.scheme = src->input.scheme;
        req->version = src->version;
        req->entity = src->entity;
        req->http1_is_persistent = src->http1_is_persistent;
        req->timestamps = src->timestamps;
        if (src->upgrade.base != NULL) {
            COPY(upgrade);
        } else {
            req->upgrade.base = NULL;
            req->upgrade.len = 0;
        }
#undef COPY
        h2o_vector_reserve(&req->pool, &req->headers, src->headers.size);
        req->headers.size = src->headers.size;
        for (i = 0; i != src->headers.size; ++i) {
            h2o_header_t *dst_header = req->headers.entries + i, *src_header = src->headers.entries + i;
            if (h2o_iovec_is_token(src_header->name)) {
                dst_header->name = src_header->name;
            } else {
                dst_header->name = h2o_mem_alloc_pool(&req->pool, *dst_header->name, 1);
                *dst_header->name = h2o_strdup(&req->pool, src_header->name->base, src_header->name->len);
            }
            dst_header->value = h2o_strdup(&req->pool, src_header->value.base, src_header->value.len);
            dst_header->flags = src_header->flags;
            if (!src_header->orig_name)
                dst_header->orig_name = NULL;
            else
                dst_header->orig_name = h2o_strdup(&req->pool, src_header->orig_name, src_header->name->len).base;
        }
        if (src->env.size != 0) {
            h2o_vector_reserve(&req->pool, &req->env, src->env.size);
            req->env.size = src->env.size;
            for (i = 0; i != req->env.size; ++i)
                req->env.entries[i] = h2o_strdup(&req->pool, src->env.entries[i].base, src->env.entries[i].len);
        }
    }
}

void h2o_dispose_request(h2o_req_t *req)
{
    close_generator_and_filters(req);

    h2o_timer_unlink(&req->_timeout_entry);

    if (req->pathconf != NULL && req->num_loggers != 0) {
        for (h2o_logger_t **logger = req->loggers, **end = logger + req->num_loggers; logger != end; ++logger) {
            (*logger)->log_access((*logger), req);
        }
    }

    if (req->error_logs != NULL)
        h2o_buffer_dispose(&req->error_logs);

    h2o_mem_clear_pool(&req->pool);
}
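
/* Hedged lifecycle sketch (the function name and the surrounding connection
 * handling are hypothetical; real protocol layers embed h2o_req_t in their
 * connection or stream objects and drive this asynchronously): a protocol
 * layer initializes the request, fills in req->input.*, hands it to
 * h2o_process_request(), and calls h2o_dispose_request() once the response has
 * been fully sent. */
static void example_drive_request(h2o_conn_t *conn, h2o_req_t *req)
{
    h2o_init_request(req, conn, NULL);
    req->input.method = h2o_iovec_init(H2O_STRLIT("GET"));
    req->input.path = h2o_iovec_init(H2O_STRLIT("/"));
    req->input.scheme = &H2O_URL_SCHEME_HTTP;
    req->input.authority = h2o_iovec_init(H2O_STRLIT("example.com"));
    req->version = 0x101; /* 0xMMmm encoding; 0x101 is HTTP/1.1 */
    h2o_process_request(req);
    /* ... the response is produced asynchronously; after it completes: */
    h2o_dispose_request(req);
}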

int h2o_req_validate_pseudo_headers(h2o_req_t *req)
{
    if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("CONNECT-UDP"))) {
        /* The draft requires "masque" in `:scheme` but we need to support clients that put "https" there instead. */
        if (req->input.scheme != &H2O_URL_SCHEME_MASQUE && req->input.scheme != &H2O_URL_SCHEME_HTTPS)
            return 0;
        if (!h2o_memis(req->input.path.base, req->input.path.len, H2O_STRLIT("/")))
            return 0;
    } else {
        if (req->input.scheme == &H2O_URL_SCHEME_MASQUE)
            return 0;
    }

    return 1;
}

h2o_handler_t *h2o_get_first_handler(h2o_req_t *req)
{
    if (req->pathconf == NULL) {
        h2o_hostconf_t *hostconf = h2o_req_setup(req);
        setup_pathconf(req, hostconf);
    }
    return req->pathconf->handlers.size != 0 ? req->pathconf->handlers.entries[0] : NULL;
}

void h2o_process_request(h2o_req_t *req)
{
    assert(!req->process_called);
    req->process_called = 1;

    if (req->pathconf == NULL) {
        h2o_hostconf_t *hostconf = h2o_req_setup(req);
        setup_pathconf(req, hostconf);
    }
    call_handlers(req, req->pathconf->handlers.entries);
}

void h2o_delegate_request(h2o_req_t *req)
{
    h2o_handler_t **handler = req->pathconf->handlers.entries, **end = handler + req->pathconf->handlers.size;
    for (;; ++handler) {
        assert(handler != end);
        if (*handler == req->handler)
            break;
    }
    ++handler;
    call_handlers(req, handler);
}

static void on_delegate_request_cb(h2o_timer_t *entry)
{
    struct st_deferred_request_action_t *args = H2O_STRUCT_FROM_MEMBER(struct st_deferred_request_action_t, timeout, entry);
    h2o_delegate_request(args->req);
}

void h2o_delegate_request_deferred(h2o_req_t *req)
{
    create_deferred_action(req, sizeof(struct st_deferred_request_action_t), on_delegate_request_cb);
}
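
/* The *_deferred variants above and below exist so that a handler can hand the
 * request over (delegate, reprocess, or replay it) without re-entering handler
 * code from inside its own callbacks; linking the timer with a delay of zero
 * simply postpones the call to the next iteration of the event loop. */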

static void process_resolved_request(h2o_req_t *req, h2o_hostconf_t **hosts)
{
    h2o_hostconf_t *hostconf;
    if (req->overrides == NULL &&
        (hostconf = find_hostconf(hosts, req->authority, req->scheme->default_port, &req->authority_wildcard_match)) != NULL) {
        setup_pathconf(req, hostconf);
        call_handlers(req, req->pathconf->handlers.entries);
        return;
    }

    /* uses the current pathconf, in other words, proxy uses the previous pathconf for building filters */
    h2o__proxy_process_request(req);
}

void h2o_reprocess_request(h2o_req_t *req, h2o_iovec_t method, const h2o_url_scheme_t *scheme, h2o_iovec_t authority,
                           h2o_iovec_t path, h2o_req_overrides_t *overrides, int is_delegated)
{
    retain_original_response(req);

    /* close generators and filters that are already running */
    close_generator_and_filters(req);

    /* setup the request/response parameters */
    req->handler = NULL;
    req->method = method;
    req->scheme = scheme;
    req->authority = authority;
    req->path = path;
    req->path_normalized = h2o_url_normalize_path(&req->pool, req->path.base, req->path.len, &req->query_at, &req->norm_indexes);
    req->authority_wildcard_match = h2o_iovec_init(NULL, 0);
    req->overrides = overrides;
    req->res_is_delegated |= is_delegated;
    req->reprocess_if_too_early = 0;
    reset_response(req);

    /* check the delegation (or reprocess) counter */
    if (req->res_is_delegated) {
        if (req->remaining_delegations == 0) {
            /* TODO log */
            h2o_send_error_502(req, "Gateway Error", "too many internal delegations", 0);
            return;
        }
        --req->remaining_delegations;
    } else {
        if (req->remaining_reprocesses == 0) {
            /* TODO log */
            h2o_send_error_502(req, "Gateway Error", "too many internal reprocesses", 0);
            return;
        }
        --req->remaining_reprocesses;
    }

    process_resolved_request(req, req->conn->ctx->globalconf->hosts);
}

static void on_reprocess_request_cb(h2o_timer_t *entry)
{
    struct st_reprocess_request_deferred_t *args =
        H2O_STRUCT_FROM_MEMBER(struct st_reprocess_request_deferred_t, super.timeout, entry);
    h2o_reprocess_request(args->super.req, args->method, args->scheme, args->authority, args->path, args->overrides,
                          args->is_delegated);
}

void h2o_reprocess_request_deferred(h2o_req_t *req, h2o_iovec_t method, const h2o_url_scheme_t *scheme, h2o_iovec_t authority,
                                    h2o_iovec_t path, h2o_req_overrides_t *overrides, int is_delegated)
{
    struct st_reprocess_request_deferred_t *args =
        (struct st_reprocess_request_deferred_t *)create_deferred_action(req, sizeof(*args), on_reprocess_request_cb);
    args->method = method;
    args->scheme = scheme;
    args->authority = authority;
    args->path = path;
    args->overrides = overrides;
    args->is_delegated = is_delegated;
}
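
/* Hedged sketch (the handler name and authority are hypothetical): a handler
 * may reroute a request to a different authority without telling the client by
 * scheduling a deferred reprocess; the counters checked in
 * h2o_reprocess_request() above protect against rewrite loops. */
static int example_rewrite_on_req(h2o_handler_t *self, h2o_req_t *req)
{
    h2o_reprocess_request_deferred(req, req->method, req->scheme,
                                   h2o_iovec_init(H2O_STRLIT("backend.example.com:8080")), req->path,
                                   NULL /* overrides */, 0 /* not delegated */);
    return 0;
}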

void h2o_replay_request(h2o_req_t *req)
{
    close_generator_and_filters(req);
    reset_response(req);

    if (req->handler != NULL) {
        h2o_handler_t **handler = req->pathconf->handlers.entries, **end = handler + req->pathconf->handlers.size;
        for (;; ++handler) {
            assert(handler != end);
            if (*handler == req->handler)
                break;
        }
        call_handlers(req, handler);
    } else {
        process_resolved_request(req, req->conn->hosts);
    }
}

static void on_replay_request_cb(h2o_timer_t *entry)
{
    struct st_deferred_request_action_t *args = H2O_STRUCT_FROM_MEMBER(struct st_deferred_request_action_t, timeout, entry);
    h2o_replay_request(args->req);
}

void h2o_replay_request_deferred(h2o_req_t *req)
{
    create_deferred_action(req, sizeof(struct st_deferred_request_action_t), on_replay_request_cb);
}

void h2o_start_response(h2o_req_t *req, h2o_generator_t *generator)
{
    retain_original_response(req);

    /* set generator */
    assert(req->_generator == NULL);
    req->_generator = generator;

    if (req->is_tunnel_req && (req->res.status == 101 || req->res.status == 200)) {
        /* a tunnel has been established; forward response as is */
    } else {
        /* setup response filters */
        if (req->prefilters != NULL) {
            req->prefilters->on_setup_ostream(req->prefilters, req, &req->_ostr_top);
        } else {
            h2o_setup_next_ostream(req, &req->_ostr_top);
        }
    }
}

static void do_sendvec(h2o_req_t *req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    assert(req->_generator != NULL);

    if (!h2o_send_state_is_in_progress(state))
        req->_generator = NULL;

    req->_ostr_top->do_send(req->_ostr_top, req, bufs, bufcnt, state);
}

void h2o_send(h2o_req_t *req, h2o_iovec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    h2o_sendvec_t *vecs = alloca(sizeof(*vecs) * bufcnt);
    size_t i;

    for (i = 0; i != bufcnt; ++i)
        h2o_sendvec_init_raw(vecs + i, bufs[i].base, bufs[i].len);

    do_sendvec(req, vecs, bufcnt, state);
}
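
/* Hedged sketch of a streaming generator (all names are hypothetical): a
 * handler that produces its body in several chunks passes
 * H2O_SEND_STATE_IN_PROGRESS to h2o_send() and waits for the protocol layer to
 * invoke the generator's proceed callback before emitting the next chunk; the
 * final chunk uses H2O_SEND_STATE_FINAL, which detaches the generator in
 * do_sendvec() above. */
struct example_streamer_t {
    h2o_generator_t super;
    size_t remaining;
};

static void example_streamer_proceed(h2o_generator_t *self, h2o_req_t *req)
{
    struct example_streamer_t *gen = (void *)self;
    h2o_iovec_t chunk = h2o_iovec_init(H2O_STRLIT("chunk\n"));
    h2o_send(req, &chunk, 1, --gen->remaining == 0 ? H2O_SEND_STATE_FINAL : H2O_SEND_STATE_IN_PROGRESS);
}

static int example_streamer_on_req(h2o_handler_t *self, h2o_req_t *req)
{
    struct example_streamer_t *gen = h2o_mem_alloc_pool(&req->pool, *gen, 1);
    gen->super = (h2o_generator_t){example_streamer_proceed, NULL /* stop */};
    gen->remaining = 3;
    req->res.status = 200;
    req->res.reason = "OK";
    h2o_start_response(req, &gen->super);
    example_streamer_proceed(&gen->super, req);
    return 0;
}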

void h2o_sendvec(h2o_req_t *req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    assert(bufcnt == 0 || (bufs[0].callbacks->read_ == &h2o_sendvec_read_raw || bufcnt == 1));
    do_sendvec(req, bufs, bufcnt, state);
}

static int from_pipe_read(h2o_sendvec_t *vec, void *dst, size_t len)
{
    int fd = vec->cb_arg[0];

    while (len != 0) {
        ssize_t ret;
        while ((ret = read(fd, dst, len)) == -1 && errno == EINTR)
            ;
        if (ret <= 0) {
            assert(errno != EAGAIN);
            return 0;
        }
        dst += ret;
        len -= ret;
        vec->len -= ret;
    }

    return 1;
}

#ifdef __linux__
static size_t from_pipe_send(h2o_sendvec_t *vec, int sockfd, size_t len)
{
    int fd = vec->cb_arg[0];

    ssize_t bytes_sent;
    while ((bytes_sent = splice(fd, NULL, sockfd, NULL, len, SPLICE_F_NONBLOCK)) == -1 && errno == EINTR)
        ;
    if (bytes_sent == -1 && errno == EAGAIN)
        return 0;
    if (bytes_sent <= 0)
        return SIZE_MAX;

    vec->len -= bytes_sent;

    return bytes_sent;
}
#else
#define from_pipe_send NULL
#endif

void h2o_send_from_pipe(h2o_req_t *req, int pipefd, size_t len, h2o_send_state_t send_state)
{
    static const h2o_sendvec_callbacks_t callbacks = {.read_ = from_pipe_read, .send_ = from_pipe_send};
    h2o_sendvec_t vec = {.callbacks = &callbacks, .len = len, .cb_arg[0] = pipefd};

    h2o_sendvec(req, &vec, 1, send_state);
}

h2o_req_prefilter_t *h2o_add_prefilter(h2o_req_t *req, size_t alignment, size_t sz)
{
    h2o_req_prefilter_t *prefilter = h2o_mem_alloc_pool_aligned(&req->pool, alignment, sz);
    prefilter->next = req->prefilters;
    req->prefilters = prefilter;
    return prefilter;
}

h2o_ostream_t *h2o_add_ostream(h2o_req_t *req, size_t alignment, size_t sz, h2o_ostream_t **slot)
{
    h2o_ostream_t *ostr = h2o_mem_alloc_pool_aligned(&req->pool, alignment, sz);
    ostr->next = *slot;
    ostr->do_send = NULL;
    ostr->stop = NULL;
    ostr->send_informational = NULL;

    *slot = ostr;

    return ostr;
}
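
/* Hedged sketch of an ostream that counts bytes before passing them on (the
 * names are hypothetical; H2O_ALIGNOF is the alignment macro from
 * h2o/memory.h): a filter's on_setup_ostream callback would typically call
 * h2o_add_ostream() like this and forward data downstream via
 * h2o_ostream_send_next(). */
struct example_counting_ostream_t {
    h2o_ostream_t super;
    size_t bytes_seen;
};

static void example_counting_do_send(h2o_ostream_t *self, h2o_req_t *req, h2o_sendvec_t *bufs, size_t bufcnt,
                                     h2o_send_state_t state)
{
    struct example_counting_ostream_t *ostr = (void *)self;
    for (size_t i = 0; i != bufcnt; ++i)
        ostr->bytes_seen += bufs[i].len;
    h2o_ostream_send_next(&ostr->super, req, bufs, bufcnt, state);
}

static void example_setup_counting_ostream(h2o_req_t *req, h2o_ostream_t **slot)
{
    struct example_counting_ostream_t *ostr = (void *)h2o_add_ostream(req, H2O_ALIGNOF(*ostr), sizeof(*ostr), slot);
    ostr->bytes_seen = 0;
    ostr->super.do_send = example_counting_do_send;
}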

void h2o_req_apply_env(h2o_req_t *req, h2o_envconf_t *env)
{
    size_t i;

    if (env->parent != NULL)
        h2o_req_apply_env(req, env->parent);
    for (i = 0; i != env->unsets.size; ++i)
        h2o_req_unsetenv(req, env->unsets.entries[i].base, env->unsets.entries[i].len);
    for (i = 0; i != env->sets.size; i += 2)
        *h2o_req_getenv(req, env->sets.entries[i].base, env->sets.entries[i].len, 1) = env->sets.entries[i + 1];
}

void h2o_req_bind_conf(h2o_req_t *req, h2o_hostconf_t *hostconf, h2o_pathconf_t *pathconf)
{
    req->hostconf = hostconf;
    req->pathconf = pathconf;

    /* copy filters and loggers */
    req->filters = pathconf->_filters.entries;
    req->num_filters = pathconf->_filters.size;
    req->loggers = pathconf->_loggers.entries;
    req->num_loggers = pathconf->_loggers.size;

    if (pathconf->env != NULL)
        h2o_req_apply_env(req, pathconf->env);
}

void h2o_proceed_response_deferred(h2o_req_t *req)
{
    h2o_timer_link(req->conn->ctx->loop, 0, &req->_timeout_entry);
}

void h2o_ostream_send_next(h2o_ostream_t *ostream, h2o_req_t *req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    if (!h2o_send_state_is_in_progress(state)) {
        assert(req->_ostr_top == ostream);
        req->_ostr_top = ostream->next;
    }
    ostream->next->do_send(ostream->next, req, bufs, bufcnt, state);
}

void h2o_req_fill_mime_attributes(h2o_req_t *req)
{
    ssize_t content_type_index;
    h2o_mimemap_type_t *mime;

    if (req->res.mime_attr != NULL)
        return;

    if ((content_type_index = h2o_find_header(&req->res.headers, H2O_TOKEN_CONTENT_TYPE, -1)) != -1 &&
        (mime = h2o_mimemap_get_type_by_mimetype(req->pathconf->mimemap, req->res.headers.entries[content_type_index].value, 0)) !=
            NULL)
        req->res.mime_attr = &mime->data.attr;
    else
        req->res.mime_attr = &h2o_mime_attributes_as_is;
}

void h2o_send_inline(h2o_req_t *req, const char *body, size_t len)
{
    static h2o_generator_t generator = {NULL, NULL};

    h2o_iovec_t buf = h2o_strdup(&req->pool, body, len);
    /* the function intentionally does not set the content length, since it may be used for generating 304 response, etc. */
    /* req->res.content_length = buf.len; */

    h2o_start_response(req, &generator);

    if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD")))
        h2o_send(req, NULL, 0, H2O_SEND_STATE_FINAL);
    else
        h2o_send(req, &buf, 1, H2O_SEND_STATE_FINAL);
}

void h2o_send_error_generic(h2o_req_t *req, int status, const char *reason, const char *body, int flags)
{
    if (req->pathconf == NULL) {
        h2o_hostconf_t *hostconf = h2o_req_setup(req);
        h2o_req_bind_conf(req, hostconf, &hostconf->fallback_path);
    }

    /* If the request is broken or incomplete, do not apply filters, as it would be dangerous to do so. Legitimate clients would not
     * send broken requests, so we do not need to decorate error responses using errordoc handler or anything else. */
    if ((flags & H2O_SEND_ERROR_BROKEN_REQUEST) != 0)
        req->_next_filter_index = SIZE_MAX;

    if ((flags & H2O_SEND_ERROR_HTTP1_CLOSE_CONNECTION) != 0)
        req->http1_is_persistent = 0;

    req->res.status = status;
    req->res.reason = reason;
    req->res.content_length = strlen(body);

    if ((flags & H2O_SEND_ERROR_KEEP_HEADERS) == 0)
        memset(&req->res.headers, 0, sizeof(req->res.headers));

    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, H2O_STRLIT("text/plain; charset=utf-8"));

    h2o_send_inline(req, body, SIZE_MAX);
}

#define DECL_SEND_ERROR_DEFERRED(status_)                                                                                 \
    static void send_error_deferred_cb_##status_(h2o_timer_t *entry)                                                     \
    {                                                                                                                     \
        struct st_send_error_deferred_t *args = H2O_STRUCT_FROM_MEMBER(struct st_send_error_deferred_t, _timeout, entry); \
        reset_response(args->req);                                                                                        \
        args->req->conn->ctx->emitted_error_status[H2O_STATUS_ERROR_##status_]++;                                         \
        h2o_send_error_generic(args->req, args->status, args->reason, args->body, args->flags);                           \
    }                                                                                                                     \
                                                                                                                          \
    static void h2o_send_error_deferred_##status_(h2o_req_t *req, const char *reason, const char *body, int flags)       \
    {                                                                                                                     \
        struct st_send_error_deferred_t *args = h2o_mem_alloc_pool(&req->pool, *args, 1);                                 \
        *args = (struct st_send_error_deferred_t){req, status_, reason, body, flags};                                     \
        h2o_timer_init(&args->_timeout, send_error_deferred_cb_##status_);                                                \
        h2o_timer_link(req->conn->ctx->loop, 0, &args->_timeout);                                                         \
    }

DECL_SEND_ERROR_DEFERRED(502)

#undef DECL_SEND_ERROR_DEFERRED

static size_t append_with_limit(char *dst, h2o_iovec_t input, size_t limit)
{
    if (input.len < limit) {
        memcpy(dst, input.base, input.len);
        return input.len;
    } else {
        memcpy(dst, input.base, (limit - 3));
        memcpy(dst + (limit - 3), "...", 3);
        return limit;
    }
}
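
/* Worked example (illustrative values): with limit 32 and a 40-byte input,
 * append_with_limit() copies the first 29 bytes, appends "...", and returns
 * 32; with a 10-byte input it copies all 10 bytes and returns 10. */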

void h2o_req_log_error(h2o_req_t *req, const char *module, const char *fmt, ...)
{
#define INITIAL_BUF_SIZE 256

    char *errbuf = h2o_mem_alloc_pool(&req->pool, char, INITIAL_BUF_SIZE);
    int errlen;
    va_list args;

    va_start(args, fmt);
    errlen = vsnprintf(errbuf, INITIAL_BUF_SIZE, fmt, args);
    va_end(args);

    if (errlen >= INITIAL_BUF_SIZE) {
        errbuf = h2o_mem_alloc_pool(&req->pool, char, errlen + 1);
        va_start(args, fmt);
        errlen = vsnprintf(errbuf, errlen + 1, fmt, args);
        va_end(args);
    }
    h2o_iovec_t msg = h2o_iovec_init(errbuf, errlen);

#undef INITIAL_BUF_SIZE

    /* build prefix */
    char *pbuf = h2o_mem_alloc_pool(&req->pool, char, sizeof("[] in request::") + strlen(module) + 64 + 32);
    char *p = pbuf;
    p += sprintf(p, "[%s] in request:", module);
    p += append_with_limit(p, req->authority, 64);
    p += append_with_limit(p, req->path, 32);
    *p++ = ':';
    h2o_iovec_t prefix = h2o_iovec_init(pbuf, p - pbuf);

    /* run error callback (save and emit the log if needed) */
    req->error_log_delegate.cb(req->error_log_delegate.data, prefix, msg);
}
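
/* Typical call site (the module string and values are hypothetical):
 *
 *     h2o_req_log_error(req, "lib/handler/example.c", "unexpected status %d", 502);
 *
 * which produces a line of the form
 * "[lib/handler/example.c] in request:example.com/:unexpected status 502",
 * stored in req->error_logs and, if emit_request_errors is enabled for the
 * pathconf, also written to stderr via h2o_write_error_log(). */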

void h2o_send_redirect(h2o_req_t *req, int status, const char *reason, const char *url, size_t url_len)
{
    if (req->res_is_delegated) {
        h2o_iovec_t method = h2o_get_redirect_method(req->method, status);
        h2o_send_redirect_internal(req, method, url, url_len, 0);
        return;
    }

    static h2o_generator_t generator = {NULL, NULL};
    static const h2o_iovec_t body_prefix = {H2O_STRLIT("<!DOCTYPE html><TITLE>Moved</TITLE><P>The document has moved <A HREF=\"")};
    static const h2o_iovec_t body_suffix = {H2O_STRLIT("\">here</A>")};

    /* build and send response */
    h2o_iovec_t bufs[3];
    size_t bufcnt;
    if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD"))) {
        req->res.content_length = SIZE_MAX;
        bufcnt = 0;
    } else {
        bufs[0] = body_prefix;
        bufs[1] = h2o_htmlescape(&req->pool, url, url_len);
        bufs[2] = body_suffix;
        bufcnt = 3;
        req->res.content_length = body_prefix.len + bufs[1].len + body_suffix.len;
    }
    req->res.status = status;
    req->res.reason = reason;
    req->res.headers = (h2o_headers_t){NULL};
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_LOCATION, NULL, url, url_len);
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, H2O_STRLIT("text/html; charset=utf-8"));
    h2o_start_response(req, &generator);
    h2o_send(req, bufs, bufcnt, H2O_SEND_STATE_FINAL);
}

void h2o_send_redirect_internal(h2o_req_t *req, h2o_iovec_t method, const char *url_str, size_t url_len, int preserve_overrides)
{
    h2o_url_t url;

    /* parse the location URL */
    if (h2o_url_parse_relative(&req->pool, url_str, url_len, &url) != 0) {
        /* TODO log h2o_error_printf("[proxy] cannot handle location header: %.*s\n", (int)url_len, url); */
        h2o_send_error_deferred_502(req, "Gateway Error", "internal error", 0);
        return;
    }
    /* convert the location to absolute (while creating copies of the values passed to the deferred call) */
    if (url.scheme == NULL)
        url.scheme = req->scheme;
    if (url.authority.base == NULL) {
        if (req->hostconf != NULL)
            url.authority = req->hostconf->authority.hostport;
        else
            url.authority = req->authority;
    } else {
        if (h2o_lcstris(url.authority.base, url.authority.len, req->authority.base, req->authority.len)) {
            url.authority = req->authority;
        } else {
            url.authority = h2o_strdup(&req->pool, url.authority.base, url.authority.len);
            preserve_overrides = 0;
        }
    }
    h2o_iovec_t base_path = req->path;
    h2o_url_resolve_path(&base_path, &url.path);
    url.path = h2o_concat(&req->pool, base_path, url.path);

    h2o_reprocess_request_deferred(req, method, url.scheme, url.authority, url.path, preserve_overrides ? req->overrides : NULL, 1);
}

h2o_iovec_t h2o_get_redirect_method(h2o_iovec_t method, int status)
{
    if (h2o_memis(method.base, method.len, H2O_STRLIT("POST")) && !(status == 307 || status == 308))
        method = h2o_iovec_init(H2O_STRLIT("GET"));
    return method;
}

static void do_push_path(void *_req, const char *path, size_t path_len, int is_critical)
{
    h2o_req_t *req = _req;

    if (req->conn->callbacks->push_path != NULL)
        req->conn->callbacks->push_path(req, path, path_len, is_critical);
}

h2o_iovec_t h2o_push_path_in_link_header(h2o_req_t *req, const char *value, size_t value_len)
{
    h2o_iovec_t ret = h2o_iovec_init(value, value_len);

    h2o_extract_push_path_from_link_header(&req->pool, value, value_len, req->path_normalized, req->input.scheme,
                                           req->input.authority, req->res_is_delegated ? req->scheme : NULL,
                                           req->res_is_delegated ? &req->authority : NULL, do_push_path, req, &ret,
                                           req->hostconf->http2.allow_cross_origin_push);

    return ret;
}

void h2o_resp_add_date_header(h2o_req_t *req)
{
    h2o_timestamp_t ts = h2o_get_timestamp(req->conn->ctx, &req->pool);
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_DATE, NULL, ts.str->rfc1123, strlen(ts.str->rfc1123));
}

void h2o_send_informational(h2o_req_t *req)
{
    /* 1xx must be sent before h2o_start_response is called */
    assert(req->_generator == NULL);
    assert(req->_ostr_top->next == NULL);
    assert(100 <= req->res.status && req->res.status <= 199 && req->res.status != 101);

    if (req->_ostr_top->send_informational == NULL)
        goto Clear;

    size_t index;
    if ((index = h2o_find_header(&req->headers, H2O_TOKEN_NO_EARLY_HINTS, -1)) != -1) {
        h2o_iovec_t value = req->headers.entries[index].value;
        if (value.len == 1 && value.base[0] == '1')
            goto Clear;
    }

    int i = 0;
    for (i = 0; i != req->num_filters; ++i) {
        h2o_filter_t *filter = req->filters[i];
        if (filter->on_informational != NULL)
            filter->on_informational(filter, req);
    }

    if (req->res.status == 103 && req->res.headers.size == 0)
        goto Clear;

    req->_ostr_top->send_informational(req->_ostr_top, req);

Clear:
    /* clear status and headers */
    req->res.status = 0;
    req->res.headers = (h2o_headers_t){NULL, 0, 0};
}
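
/* Hedged sketch (the function name is hypothetical): a handler can emit a
 * "103 Early Hints" interim response before producing the final one; note that
 * h2o_send_informational() clears res.status and res.headers on return, so the
 * final status and headers must be set afterwards. */
static void example_send_early_hints(h2o_req_t *req)
{
    req->res.status = 103;
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_LINK, NULL,
                   H2O_STRLIT("</style.css>; rel=preload; as=style"));
    h2o_send_informational(req);
}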

int h2o_req_resolve_internal_redirect_url(h2o_req_t *req, h2o_iovec_t dest, h2o_url_t *resolved)
{
    h2o_url_t input;

    /* resolve the URL */
    if (h2o_url_parse_relative(&req->pool, dest.base, dest.len, &input) != 0) {
        return -1;
    }
    if (input.scheme != NULL && input.authority.base != NULL) {
        *resolved = input;
    } else {
        h2o_url_t base;
        /* we MUST set authority to that of hostconf, or internal redirect might create a TCP connection */
        if (h2o_url_init(&base, req->scheme, req->hostconf->authority.hostport, req->path) != 0) {
            return -1;
        }
        h2o_url_resolve(&req->pool, &base, &input, resolved);
    }

    return 0;
}