/src/h2o/lib/core/context.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2014 DeNA Co., Ltd. |
3 | | * |
4 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | * of this software and associated documentation files (the "Software"), to |
6 | | * deal in the Software without restriction, including without limitation the |
7 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
8 | | * sell copies of the Software, and to permit persons to whom the Software is |
9 | | * furnished to do so, subject to the following conditions: |
10 | | * |
11 | | * The above copyright notice and this permission notice shall be included in |
12 | | * all copies or substantial portions of the Software. |
13 | | * |
14 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | | * IN THE SOFTWARE. |
21 | | */ |
22 | | #include <fcntl.h> |
23 | | #include <stddef.h> |
24 | | #include <stdlib.h> |
25 | | #include <sys/time.h> |
26 | | #include "cloexec.h" |
27 | | #include "h2o.h" |
28 | | #include "h2o/memcached.h" |
29 | | |
30 | | void h2o_context_init_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf) |
31 | 11 | { |
32 | | /* add pathconf to the inited list (or return if already inited) */ |
33 | 11 | size_t i; |
34 | 26 | for (i = 0; i != ctx->_pathconfs_inited.size; ++i) |
35 | 15 | if (ctx->_pathconfs_inited.entries[i] == pathconf) |
36 | 0 | return; |
37 | 11 | h2o_vector_reserve(NULL, &ctx->_pathconfs_inited, ctx->_pathconfs_inited.size + 1); |
38 | 11 | ctx->_pathconfs_inited.entries[ctx->_pathconfs_inited.size++] = pathconf; |
39 | | |
40 | 11 | #define DOIT(type, list) \ |
41 | 33 | do { \ |
42 | 33 | size_t i; \ |
43 | 41 | for (i = 0; i != pathconf->list.size; ++i) { \ |
44 | 8 | type *o = pathconf->list.entries[i]; \ |
45 | 8 | if (o->on_context_init != NULL) \ |
46 | 8 | o->on_context_init(o, ctx); \ |
47 | 8 | } \ |
48 | 33 | } while (0) |
49 | | |
50 | 11 | DOIT(h2o_handler_t, handlers); |
51 | 11 | DOIT(h2o_filter_t, _filters); |
52 | 11 | DOIT(h2o_logger_t, _loggers); |
53 | | |
54 | 11 | #undef DOIT |
55 | 11 | } |
56 | | |
57 | | void h2o_context_dispose_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf) |
58 | 0 | { |
59 | | /* nullify pathconf in the inited list (or return if already disposed) */ |
60 | 0 | size_t i; |
61 | 0 | for (i = 0; i != ctx->_pathconfs_inited.size; ++i) |
62 | 0 | if (ctx->_pathconfs_inited.entries[i] == pathconf) |
63 | 0 | break; |
64 | 0 | if (i == ctx->_pathconfs_inited.size) |
65 | 0 | return; |
66 | 0 | ctx->_pathconfs_inited.entries[i] = NULL; |
67 | |
68 | 0 | #define DOIT(type, list) \ |
69 | 0 | do { \ |
70 | 0 | size_t i; \ |
71 | 0 | for (i = 0; i != pathconf->list.size; ++i) { \ |
72 | 0 | type *o = pathconf->list.entries[i]; \ |
73 | 0 | if (o->on_context_dispose != NULL) \ |
74 | 0 | o->on_context_dispose(o, ctx); \ |
75 | 0 | } \ |
76 | 0 | } while (0) |
77 | |
78 | 0 | DOIT(h2o_handler_t, handlers); |
79 | 0 | DOIT(h2o_filter_t, _filters); |
80 | 0 | DOIT(h2o_logger_t, _loggers); |
81 | |
82 | 0 | #undef DOIT |
83 | 0 | } |
84 | | |
85 | | void h2o_context_init(h2o_context_t *ctx, h2o_loop_t *loop, h2o_globalconf_t *config) |
86 | 3 | { |
87 | 3 | size_t i, j; |
88 | | |
89 | 3 | assert(config->hosts[0] != NULL); |
90 | | |
91 | 3 | memset(ctx, 0, sizeof(*ctx)); |
92 | 3 | ctx->loop = loop; |
93 | 3 | ctx->globalconf = config; |
94 | 3 | ctx->queue = h2o_multithread_create_queue(loop); |
95 | 3 | h2o_multithread_register_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr, h2o_hostinfo_getaddr_receiver); |
96 | 3 | ctx->filecache = h2o_filecache_create(config->filecache.capacity); |
97 | 3 | ctx->spare_pipes.pipes = h2o_mem_alloc(sizeof(ctx->spare_pipes.pipes[0]) * config->max_spare_pipes); |
98 | | |
99 | 3 | h2o_linklist_init_anchor(&ctx->_conns.active); |
100 | 3 | h2o_linklist_init_anchor(&ctx->_conns.idle); |
101 | 3 | h2o_linklist_init_anchor(&ctx->_conns.shutdown); |
102 | 3 | ctx->proxy.client_ctx.loop = loop; |
103 | 3 | ctx->proxy.client_ctx.io_timeout = ctx->globalconf->proxy.io_timeout; |
104 | 3 | ctx->proxy.client_ctx.connect_timeout = ctx->globalconf->proxy.connect_timeout; |
105 | 3 | ctx->proxy.client_ctx.first_byte_timeout = ctx->globalconf->proxy.first_byte_timeout; |
106 | 3 | ctx->proxy.client_ctx.keepalive_timeout = ctx->globalconf->proxy.keepalive_timeout; |
107 | 3 | ctx->proxy.client_ctx.getaddr_receiver = &ctx->receivers.hostinfo_getaddr; |
108 | 3 | ctx->proxy.client_ctx.http2.latency_optimization = ctx->globalconf->http2.latency_optimization; |
109 | 3 | ctx->proxy.client_ctx.max_buffer_size = ctx->globalconf->proxy.max_buffer_size; |
110 | 3 | ctx->proxy.client_ctx.http2.max_concurrent_streams = ctx->globalconf->proxy.http2.max_concurrent_streams; |
111 | 3 | ctx->proxy.client_ctx.protocol_selector.ratio.http2 = ctx->globalconf->proxy.protocol_ratio.http2; |
112 | 3 | ctx->proxy.client_ctx.protocol_selector.ratio.http3 = ctx->globalconf->proxy.protocol_ratio.http3; |
113 | 3 | ctx->proxy.connpool.socketpool = &ctx->globalconf->proxy.global_socketpool; |
114 | 3 | h2o_linklist_init_anchor(&ctx->proxy.connpool.http2.conns); |
115 | | |
116 | 3 | ctx->_module_configs = h2o_mem_alloc(sizeof(*ctx->_module_configs) * config->_num_config_slots); |
117 | 3 | memset(ctx->_module_configs, 0, sizeof(*ctx->_module_configs) * config->_num_config_slots); |
118 | | |
119 | 3 | static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; |
120 | 3 | pthread_mutex_lock(&mutex); |
121 | | |
122 | 3 | h2o_socketpool_register_loop(&ctx->globalconf->proxy.global_socketpool, loop); |
123 | | |
124 | 6 | for (i = 0; config->hosts[i] != NULL; ++i) { |
125 | 3 | h2o_hostconf_t *hostconf = config->hosts[i]; |
126 | 11 | for (j = 0; j != hostconf->paths.size; ++j) { |
127 | 8 | h2o_pathconf_t *pathconf = hostconf->paths.entries[j]; |
128 | 8 | h2o_context_init_pathconf_context(ctx, pathconf); |
129 | 8 | } |
130 | 3 | h2o_context_init_pathconf_context(ctx, &hostconf->fallback_path); |
131 | 3 | } |
132 | | |
133 | 3 | pthread_mutex_unlock(&mutex); |
134 | 3 | } |
135 | | |
136 | | void h2o_context_dispose(h2o_context_t *ctx) |
137 | 0 | { |
138 | 0 | h2o_globalconf_t *config = ctx->globalconf; |
139 | |
140 | 0 | h2o_socketpool_unregister_loop(&ctx->globalconf->proxy.global_socketpool, ctx->loop); |
141 | |
142 | 0 | for (size_t i = 0; config->hosts[i] != NULL; ++i) { |
143 | 0 | h2o_hostconf_t *hostconf = config->hosts[i]; |
144 | 0 | for (size_t j = 0; j != hostconf->paths.size; ++j) { |
145 | 0 | h2o_pathconf_t *pathconf = hostconf->paths.entries[j]; |
146 | 0 | h2o_context_dispose_pathconf_context(ctx, pathconf); |
147 | 0 | } |
148 | 0 | h2o_context_dispose_pathconf_context(ctx, &hostconf->fallback_path); |
149 | 0 | } |
150 | 0 | free(ctx->_pathconfs_inited.entries); |
151 | 0 | free(ctx->_module_configs); |
152 | | /* what should we do here? assert(!h2o_linklist_is_empty(&ctx->http2._conns)); */ |
153 | |
154 | 0 | for (size_t i = 0; i < ctx->spare_pipes.count; ++i) { |
155 | 0 | close(ctx->spare_pipes.pipes[i][0]); |
156 | 0 | close(ctx->spare_pipes.pipes[i][1]); |
157 | 0 | } |
158 | 0 | free(ctx->spare_pipes.pipes); |
159 | |
160 | 0 | h2o_filecache_destroy(ctx->filecache); |
161 | 0 | ctx->filecache = NULL; |
162 | | |
163 | | /* clear storage */ |
164 | 0 | for (size_t i = 0; i != ctx->storage.size; ++i) { |
165 | 0 | h2o_context_storage_item_t *item = ctx->storage.entries + i; |
166 | 0 | if (item->dispose != NULL) { |
167 | 0 | item->dispose(item->data); |
168 | 0 | } |
169 | 0 | } |
170 | 0 | free(ctx->storage.entries); |
171 | | |
172 | | /* TODO assert that all the getaddrinfo threads are idle */ |
173 | 0 | h2o_multithread_unregister_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr); |
174 | 0 | h2o_multithread_destroy_queue(ctx->queue); |
175 | |
176 | 0 | if (ctx->_timestamp_cache.value != NULL) |
177 | 0 | h2o_mem_release_shared(ctx->_timestamp_cache.value); |
178 | 0 | } |
179 | | |
180 | | void h2o_context_request_shutdown(h2o_context_t *ctx) |
181 | 0 | { |
182 | 0 | ctx->shutdown_requested = 1; |
183 | |
184 | 0 | H2O_CONN_LIST_FOREACH(h2o_conn_t * conn, ({&ctx->_conns.active, &ctx->_conns.idle}), { |
185 | 0 | if (conn->callbacks->request_shutdown != NULL) { |
186 | 0 | conn->callbacks->request_shutdown(conn); |
187 | 0 | } |
188 | 0 | }); |
189 | 0 | } |
190 | | |
191 | | void h2o_context_update_timestamp_string_cache(h2o_context_t *ctx) |
192 | 230 | { |
193 | 230 | struct tm gmt; |
194 | 230 | if (ctx->_timestamp_cache.value != NULL) |
195 | 227 | h2o_mem_release_shared(ctx->_timestamp_cache.value); |
196 | 230 | ctx->_timestamp_cache.value = h2o_mem_alloc_shared(NULL, sizeof(h2o_timestamp_string_t), NULL); |
197 | 230 | gmtime_r(&ctx->_timestamp_cache.tv_at.tv_sec, &gmt); |
198 | 230 | h2o_time2str_rfc1123(ctx->_timestamp_cache.value->rfc1123, &gmt); |
199 | 230 | h2o_time2str_log(ctx->_timestamp_cache.value->log, ctx->_timestamp_cache.tv_at.tv_sec); |
200 | 230 | } |
201 | | |
202 | | void h2o_context_close_idle_connections(h2o_context_t *ctx, size_t max_connections_to_close, uint64_t min_age) |
203 | 0 | { |
204 | 0 | if (max_connections_to_close <= 0) |
205 | 0 | return; |
206 | | |
207 | 0 | size_t closed = ctx->_conns.num_conns.shutdown; |
208 | |
209 | 0 | if (closed >= max_connections_to_close) |
210 | 0 | return; |
211 | | |
212 | 0 | H2O_CONN_LIST_FOREACH(h2o_conn_t * conn, ({&ctx->_conns.idle}), { |
213 | 0 | struct timeval now = h2o_gettimeofday(ctx->loop); |
214 | 0 | if (h2o_timeval_subtract(&conn->connected_at, &now) < (min_age * 1000)) |
215 | 0 | continue; |
216 | 0 | ctx->connection_stats.idle_closed++; |
217 | 0 | conn->callbacks->close_idle_connection(conn); |
218 | 0 | closed++; |
219 | 0 | if (closed == max_connections_to_close) |
220 | 0 | return; |
221 | 0 | }); |
222 | 0 | } |
223 | | |
224 | | static size_t *get_connection_state_counter(h2o_context_t *ctx, h2o_conn_state_t state) |
225 | 108k | { |
226 | 108k | return ctx->_conns.num_conns.counters + (size_t)state; |
227 | 108k | } |
228 | | |
229 | | static void unlink_conn(h2o_conn_t *conn) |
230 | 54.0k | { |
231 | 54.0k | --*get_connection_state_counter(conn->ctx, conn->state); |
232 | 54.0k | h2o_linklist_unlink(&conn->_conns); |
233 | 54.0k | } |
234 | | |
235 | | static void link_conn(h2o_conn_t *conn) |
236 | 54.0k | { |
237 | 54.0k | switch (conn->state) { |
238 | 20.6k | case H2O_CONN_STATE_IDLE: |
239 | 20.6k | h2o_linklist_insert(&conn->ctx->_conns.idle, &conn->_conns); |
240 | 20.6k | break; |
241 | 33.3k | case H2O_CONN_STATE_ACTIVE: |
242 | 33.3k | h2o_linklist_insert(&conn->ctx->_conns.active, &conn->_conns); |
243 | 33.3k | break; |
244 | 0 | case H2O_CONN_STATE_SHUTDOWN: |
245 | 0 | h2o_linklist_insert(&conn->ctx->_conns.shutdown, &conn->_conns); |
246 | 0 | break; |
247 | 54.0k | } |
248 | 54.0k | ++*get_connection_state_counter(conn->ctx, conn->state); |
249 | 54.0k | } |
250 | | |
251 | | h2o_conn_t *h2o_create_connection(size_t sz, h2o_context_t *ctx, h2o_hostconf_t **hosts, struct timeval connected_at, |
252 | | const h2o_conn_callbacks_t *callbacks) |
253 | 29.9k | { |
254 | 29.9k | h2o_conn_t *conn = (h2o_conn_t *)h2o_mem_alloc(sz); |
255 | | |
256 | 29.9k | conn->ctx = ctx; |
257 | 29.9k | conn->hosts = hosts; |
258 | 29.9k | conn->connected_at = connected_at; |
259 | | #ifdef H2O_NO_64BIT_ATOMICS |
260 | | pthread_mutex_lock(&h2o_conn_id_mutex); |
261 | | conn->id = ++h2o_connection_id; |
262 | | pthread_mutex_unlock(&h2o_conn_id_mutex); |
263 | | #else |
264 | 29.9k | conn->id = __sync_add_and_fetch(&h2o_connection_id, 1); |
265 | 29.9k | #endif |
266 | 29.9k | conn->callbacks = callbacks; |
267 | 29.9k | conn->_uuid.is_initialized = 0; |
268 | | |
269 | 29.9k | conn->state = H2O_CONN_STATE_ACTIVE; |
270 | 29.9k | conn->_conns = (h2o_linklist_t){}; |
271 | 29.9k | link_conn(conn); |
272 | | |
273 | 29.9k | return conn; |
274 | 29.9k | } |
275 | | |
276 | | void h2o_destroy_connection(h2o_conn_t *conn) |
277 | 29.9k | { |
278 | 29.9k | unlink_conn(conn); |
279 | 29.9k | free(conn); |
280 | 29.9k | } |
281 | | |
282 | | void h2o_conn_set_state(h2o_conn_t *conn, h2o_conn_state_t state) |
283 | 138k | { |
284 | 138k | if (conn->state != state) { |
285 | 24.1k | unlink_conn(conn); |
286 | 24.1k | conn->state = state; |
287 | 24.1k | link_conn(conn); |
288 | 24.1k | } |
289 | 138k | } |
290 | | |
291 | | int h2o_context_new_pipe(h2o_context_t *ctx, int fds[2]) |
292 | 0 | { |
293 | 0 | if (ctx->spare_pipes.count > 0) { |
294 | 0 | int *src = ctx->spare_pipes.pipes[--ctx->spare_pipes.count]; |
295 | 0 | fds[0] = src[0]; |
296 | 0 | fds[1] = src[1]; |
297 | 0 | return 1; |
298 | 0 | } |
299 | | |
300 | 0 | #ifdef __linux__ |
301 | 0 | return pipe2(fds, O_NONBLOCK | O_CLOEXEC) == 0; |
302 | | #else |
303 | | if (cloexec_pipe(fds) != 0) |
304 | | return 0; |
305 | | fcntl(fds[0], F_SETFL, O_NONBLOCK); |
306 | | fcntl(fds[1], F_SETFL, O_NONBLOCK); |
307 | | return 1; |
308 | | #endif |
309 | 0 | } |
310 | | |
311 | | static int empty_pipe(int fd) |
312 | 0 | { |
313 | 0 | ssize_t ret; |
314 | 0 | char buf[1024]; |
315 | |
316 | 0 | drain_more: |
317 | 0 | while ((ret = read(fd, buf, sizeof(buf))) == -1 && errno == EINTR) |
318 | 0 | ; |
319 | 0 | if (ret == 0) { |
320 | 0 | return 0; |
321 | 0 | } else if (ret == -1) { |
322 | 0 | if (errno == EAGAIN) |
323 | 0 | return 1; |
324 | 0 | return 0; |
325 | 0 | } else if (ret == sizeof(buf)) { |
326 | 0 | goto drain_more; |
327 | 0 | } |
328 | | |
329 | 0 | return 1; |
330 | 0 | } |
331 | | |
332 | | void h2o_context_return_spare_pipe(h2o_context_t *ctx, int fds[2]) |
333 | 0 | { |
334 | 0 | assert(fds[0] != -1); |
335 | 0 | assert(fds[1] != -1); |
336 | | |
337 | 0 | if (ctx->spare_pipes.count < ctx->globalconf->max_spare_pipes && empty_pipe(fds[0])) { |
338 | 0 | int *dst = ctx->spare_pipes.pipes[ctx->spare_pipes.count++]; |
339 | 0 | dst[0] = fds[0]; |
340 | 0 | dst[1] = fds[1]; |
341 | 0 | } else { |
342 | 0 | close(fds[0]); |
343 | 0 | close(fds[1]); |
344 | 0 | } |
345 | 0 | } |