/src/h2o/lib/core/context.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2014 DeNA Co., Ltd. |
3 | | * |
4 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | * of this software and associated documentation files (the "Software"), to |
6 | | * deal in the Software without restriction, including without limitation the |
7 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
8 | | * sell copies of the Software, and to permit persons to whom the Software is |
9 | | * furnished to do so, subject to the following conditions: |
10 | | * |
11 | | * The above copyright notice and this permission notice shall be included in |
12 | | * all copies or substantial portions of the Software. |
13 | | * |
14 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | | * IN THE SOFTWARE. |
21 | | */ |
22 | | #include <stddef.h> |
23 | | #include <stdlib.h> |
24 | | #include <sys/time.h> |
25 | | #include "h2o.h" |
26 | | #include "h2o/memcached.h" |
27 | | |
28 | | void h2o_context_init_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf) |
29 | 11 | { |
30 | | /* add pathconf to the inited list (or return if already inited) */ |
31 | 11 | size_t i; |
32 | 26 | for (i = 0; i != ctx->_pathconfs_inited.size; ++i) |
33 | 15 | if (ctx->_pathconfs_inited.entries[i] == pathconf) |
34 | 0 | return; |
35 | 11 | h2o_vector_reserve(NULL, &ctx->_pathconfs_inited, ctx->_pathconfs_inited.size + 1); |
36 | 11 | ctx->_pathconfs_inited.entries[ctx->_pathconfs_inited.size++] = pathconf; |
37 | | |
38 | 11 | #define DOIT(type, list) \ |
39 | 33 | do { \ |
40 | 33 | size_t i; \ |
41 | 41 | for (i = 0; i != pathconf->list.size; ++i) { \ |
42 | 8 | type *o = pathconf->list.entries[i]; \ |
43 | 8 | if (o->on_context_init != NULL) \ |
44 | 8 | o->on_context_init(o, ctx); \ |
45 | 8 | } \ |
46 | 33 | } while (0) |
47 | | |
48 | 11 | DOIT(h2o_handler_t, handlers); |
49 | 11 | DOIT(h2o_filter_t, _filters); |
50 | 11 | DOIT(h2o_logger_t, _loggers); |
51 | | |
52 | 11 | #undef DOIT |
53 | 11 | } |
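/*
 * Illustrative sketch, not from context.c: roughly how a handler module plugs into the
 * on_context_init/on_context_dispose hooks driven above. "struct example_handler" and the
 * function names are hypothetical; h2o_create_handler() and
 * h2o_context_set_handler_context()/h2o_context_get_handler_context() are the libh2o calls I
 * believe are used for per-context handler state, but treat the exact signatures as assumptions.
 */
struct example_handler {
    h2o_handler_t super; /* must be the first member; h2o_create_handler() returns this allocation */
    /* configuration shared by all contexts goes here */
};

static void example_on_context_init(h2o_handler_t *self, h2o_context_t *ctx)
{
    /* allocate state private to this event loop / context and stash it in the context's slot */
    void *per_ctx = h2o_mem_alloc(64);
    h2o_context_set_handler_context(ctx, self, per_ctx);
}

static void example_on_context_dispose(h2o_handler_t *self, h2o_context_t *ctx)
{
    free(h2o_context_get_handler_context(ctx, self));
}

static int example_on_req(h2o_handler_t *self, h2o_req_t *req)
{
    /* per-context state is recovered through the context that owns the request's connection */
    void *per_ctx = h2o_context_get_handler_context(req->conn->ctx, self);
    (void)per_ctx;
    return -1; /* decline; let the next handler on the path run */
}

void example_handler_register(h2o_pathconf_t *pathconf)
{
    struct example_handler *h = (struct example_handler *)h2o_create_handler(pathconf, sizeof(*h));
    h->super.on_context_init = example_on_context_init;
    h->super.on_context_dispose = example_on_context_dispose;
    h->super.on_req = example_on_req;
}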
54 | | |
55 | | void h2o_context_dispose_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf) |
56 | 0 | { |
57 | | /* nullify pathconf in the inited list (or return if already disposed) */ |
58 | 0 | size_t i; |
59 | 0 | for (i = 0; i != ctx->_pathconfs_inited.size; ++i) |
60 | 0 | if (ctx->_pathconfs_inited.entries[i] == pathconf) |
61 | 0 | break; |
62 | 0 | if (i == ctx->_pathconfs_inited.size) |
63 | 0 | return; |
64 | 0 | ctx->_pathconfs_inited.entries[i] = NULL; |
65 | |
66 | 0 | #define DOIT(type, list) \ |
67 | 0 | do { \ |
68 | 0 | size_t i; \ |
69 | 0 | for (i = 0; i != pathconf->list.size; ++i) { \ |
70 | 0 | type *o = pathconf->list.entries[i]; \ |
71 | 0 | if (o->on_context_dispose != NULL) \ |
72 | 0 | o->on_context_dispose(o, ctx); \ |
73 | 0 | } \ |
74 | 0 | } while (0) |
75 | |
76 | 0 | DOIT(h2o_handler_t, handlers); |
77 | 0 | DOIT(h2o_filter_t, _filters); |
78 | 0 | DOIT(h2o_logger_t, _loggers); |
79 | |
80 | 0 | #undef DOIT |
81 | 0 | } |
82 | | |
83 | | void h2o_context_init(h2o_context_t *ctx, h2o_loop_t *loop, h2o_globalconf_t *config) |
84 | 3 | { |
85 | 3 | size_t i, j; |
86 | | |
87 | 3 | assert(config->hosts[0] != NULL); |
88 | | |
89 | 0 | memset(ctx, 0, sizeof(*ctx)); |
90 | 3 | ctx->loop = loop; |
91 | 3 | ctx->globalconf = config; |
92 | 3 | ctx->queue = h2o_multithread_create_queue(loop); |
93 | 3 | h2o_multithread_register_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr, h2o_hostinfo_getaddr_receiver); |
94 | 3 | ctx->filecache = h2o_filecache_create(config->filecache.capacity); |
95 | | |
96 | 3 | h2o_linklist_init_anchor(&ctx->_conns.active); |
97 | 3 | h2o_linklist_init_anchor(&ctx->_conns.idle); |
98 | 3 | h2o_linklist_init_anchor(&ctx->_conns.shutdown); |
99 | 3 | ctx->proxy.client_ctx.loop = loop; |
100 | 3 | ctx->proxy.client_ctx.io_timeout = ctx->globalconf->proxy.io_timeout; |
101 | 3 | ctx->proxy.client_ctx.connect_timeout = ctx->globalconf->proxy.connect_timeout; |
102 | 3 | ctx->proxy.client_ctx.first_byte_timeout = ctx->globalconf->proxy.first_byte_timeout; |
103 | 3 | ctx->proxy.client_ctx.keepalive_timeout = ctx->globalconf->proxy.keepalive_timeout; |
104 | 3 | ctx->proxy.client_ctx.getaddr_receiver = &ctx->receivers.hostinfo_getaddr; |
105 | 3 | ctx->proxy.client_ctx.http2.latency_optimization = ctx->globalconf->http2.latency_optimization; |
106 | 3 | ctx->proxy.client_ctx.max_buffer_size = ctx->globalconf->proxy.max_buffer_size; |
107 | 3 | ctx->proxy.client_ctx.http2.max_concurrent_streams = ctx->globalconf->proxy.http2.max_concurrent_streams; |
108 | 3 | ctx->proxy.client_ctx.protocol_selector.ratio.http2 = ctx->globalconf->proxy.protocol_ratio.http2; |
109 | 3 | ctx->proxy.client_ctx.protocol_selector.ratio.http3 = ctx->globalconf->proxy.protocol_ratio.http3; |
110 | 3 | ctx->proxy.connpool.socketpool = &ctx->globalconf->proxy.global_socketpool; |
111 | 3 | h2o_linklist_init_anchor(&ctx->proxy.connpool.http2.conns); |
112 | | |
113 | 3 | ctx->_module_configs = h2o_mem_alloc(sizeof(*ctx->_module_configs) * config->_num_config_slots); |
114 | 3 | memset(ctx->_module_configs, 0, sizeof(*ctx->_module_configs) * config->_num_config_slots); |
115 | | |
116 | 3 | static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; |
117 | 3 | pthread_mutex_lock(&mutex); |
118 | | |
119 | 3 | h2o_socketpool_register_loop(&ctx->globalconf->proxy.global_socketpool, loop); |
120 | | |
121 | 6 | for (i = 0; config->hosts[i] != NULL; ++i) { |
122 | 3 | h2o_hostconf_t *hostconf = config->hosts[i]; |
123 | 11 | for (j = 0; j != hostconf->paths.size; ++j) { |
124 | 8 | h2o_pathconf_t *pathconf = hostconf->paths.entries[j]; |
125 | 8 | h2o_context_init_pathconf_context(ctx, pathconf); |
126 | 8 | } |
127 | 3 | h2o_context_init_pathconf_context(ctx, &hostconf->fallback_path); |
128 | 3 | } |
129 | | |
130 | 3 | pthread_mutex_unlock(&mutex); |
131 | 3 | } |
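/*
 * Illustrative sketch, not from context.c: the usual setup sequence that leads into
 * h2o_context_init() when embedding libh2o with the evloop (non-libuv) backend. The
 * h2o_evloop_create() call and the two-argument h2o_evloop_run() signature are stated from
 * memory; treat them as assumptions.
 */
static void example_run_server(void)
{
    static h2o_globalconf_t config;
    static h2o_context_t ctx;

    h2o_config_init(&config);
    h2o_hostconf_t *hostconf = h2o_config_register_host(&config, h2o_iovec_init(H2O_STRLIT("default")), 65535);
    h2o_pathconf_t *pathconf = h2o_config_register_path(hostconf, "/", 0);
    (void)pathconf; /* handlers are registered on the pathconf before any context is created */

    h2o_loop_t *loop = h2o_evloop_create();
    h2o_context_init(&ctx, loop, &config); /* walks every pathconf and fires its on_context_init hooks */

    /* listeners would be set up here; then the loop is driven until shutdown */
    while (h2o_evloop_run(loop, INT32_MAX) == 0)
        ;
}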
132 | | |
133 | | void h2o_context_dispose(h2o_context_t *ctx) |
134 | 0 | { |
135 | 0 | h2o_globalconf_t *config = ctx->globalconf; |
136 | 0 | size_t i, j; |
137 | |
138 | 0 | h2o_socketpool_unregister_loop(&ctx->globalconf->proxy.global_socketpool, ctx->loop); |
139 | |
140 | 0 | for (i = 0; config->hosts[i] != NULL; ++i) { |
141 | 0 | h2o_hostconf_t *hostconf = config->hosts[i]; |
142 | 0 | for (j = 0; j != hostconf->paths.size; ++j) { |
143 | 0 | h2o_pathconf_t *pathconf = hostconf->paths.entries[j]; |
144 | 0 | h2o_context_dispose_pathconf_context(ctx, pathconf); |
145 | 0 | } |
146 | 0 | h2o_context_dispose_pathconf_context(ctx, &hostconf->fallback_path); |
147 | 0 | } |
148 | 0 | free(ctx->_pathconfs_inited.entries); |
149 | 0 | free(ctx->_module_configs); |
150 | | /* what should we do here? assert(!h2o_linklist_is_empty(&ctx->http2._conns); */ |
151 | |
152 | 0 | h2o_filecache_destroy(ctx->filecache); |
153 | 0 | ctx->filecache = NULL; |
154 | | |
155 | | /* clear storage */ |
156 | 0 | for (i = 0; i != ctx->storage.size; ++i) { |
157 | 0 | h2o_context_storage_item_t *item = ctx->storage.entries + i; |
158 | 0 | if (item->dispose != NULL) { |
159 | 0 | item->dispose(item->data); |
160 | 0 | } |
161 | 0 | } |
162 | 0 | free(ctx->storage.entries); |
163 | | |
164 | | /* TODO assert that all the getaddrinfo threads are idle */ |
165 | 0 | h2o_multithread_unregister_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr); |
166 | 0 | h2o_multithread_destroy_queue(ctx->queue); |
167 | |
168 | 0 | if (ctx->_timestamp_cache.value != NULL) |
169 | 0 | h2o_mem_release_shared(ctx->_timestamp_cache.value); |
170 | 0 | } |
171 | | |
172 | | void h2o_context_request_shutdown(h2o_context_t *ctx) |
173 | 0 | { |
174 | 0 | ctx->shutdown_requested = 1; |
175 | |
176 | 0 | H2O_CONN_LIST_FOREACH(h2o_conn_t * conn, ({&ctx->_conns.active, &ctx->_conns.idle}), { |
177 | 0 | if (conn->callbacks->request_shutdown != NULL) { |
178 | 0 | conn->callbacks->request_shutdown(conn); |
179 | 0 | } |
180 | 0 | }); |
181 | 0 | } |
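/*
 * Illustrative sketch, not from context.c: a graceful-shutdown sequence built on the function
 * above. The num_conns member names mirror the counters manipulated later in this file, but the
 * exact struct layout and the h2o_evloop_run() signature are assumptions.
 */
static void example_graceful_shutdown(h2o_context_t *ctx, h2o_loop_t *loop)
{
    h2o_context_request_shutdown(ctx); /* asks every active and idle connection to start winding down */

    /* keep driving the loop until every connection has been destroyed, then tear the context down */
    while (ctx->_conns.num_conns.active + ctx->_conns.num_conns.idle + ctx->_conns.num_conns.shutdown != 0)
        h2o_evloop_run(loop, 100);

    h2o_context_dispose(ctx);
}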
182 | | |
183 | | void h2o_context_update_timestamp_string_cache(h2o_context_t *ctx) |
184 | 192 | { |
185 | 192 | struct tm gmt; |
186 | 192 | if (ctx->_timestamp_cache.value != NULL) |
187 | 189 | h2o_mem_release_shared(ctx->_timestamp_cache.value); |
188 | 192 | ctx->_timestamp_cache.value = h2o_mem_alloc_shared(NULL, sizeof(h2o_timestamp_string_t), NULL); |
189 | 192 | gmtime_r(&ctx->_timestamp_cache.tv_at.tv_sec, &gmt); |
190 | 192 | h2o_time2str_rfc1123(ctx->_timestamp_cache.value->rfc1123, &gmt); |
191 | 192 | h2o_time2str_log(ctx->_timestamp_cache.value->log, ctx->_timestamp_cache.tv_at.tv_sec); |
192 | 192 | } |
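/*
 * Annotation, not from context.c: as I understand it, the strings rebuilt above back
 * h2o_get_timestamp(), which refreshes this cache only when the cached second changes, so that
 * request handling can emit Date headers (rfc1123) and access-log timestamps (log) without
 * calling gmtime_r() for every request.
 */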
193 | | |
194 | | void h2o_context_close_idle_connections(h2o_context_t *ctx, size_t max_connections_to_close, uint64_t min_age) |
195 | 0 | { |
196 | 0 | if (max_connections_to_close <= 0) |
197 | 0 | return; |
198 | | |
199 | 0 | size_t closed = ctx->_conns.num_conns.shutdown; |
200 | |
201 | 0 | if (closed >= max_connections_to_close) |
202 | 0 | return; |
203 | | |
204 | 0 | H2O_CONN_LIST_FOREACH(h2o_conn_t * conn, ({&ctx->_conns.idle}), { |
205 | 0 | struct timeval now = h2o_gettimeofday(ctx->loop); |
206 | 0 | if (h2o_timeval_subtract(&conn->connected_at, &now) < (min_age * 1000)) |
207 | 0 | continue; |
208 | 0 | ctx->connection_stats.idle_closed++; |
209 | 0 | conn->callbacks->close_idle_connection(conn); |
210 | 0 | closed++; |
211 | 0 | if (closed == max_connections_to_close) |
212 | 0 | return; |
213 | 0 | }); |
214 | 0 | } |
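/*
 * Annotation, not from context.c: min_age appears to be expressed in milliseconds while
 * h2o_timeval_subtract() seems to report the connection age in microseconds, hence the "* 1000".
 * Connections already sitting on the shutdown list count toward the max_connections_to_close
 * budget before any idle connection is closed.
 */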
215 | | |
216 | | static size_t *get_connection_state_counter(h2o_context_t *ctx, h2o_conn_state_t state) |
217 | 105k | { |
218 | 105k | return ctx->_conns.num_conns.counters + (size_t)state; |
219 | 105k | } |
220 | | |
221 | | static void unlink_conn(h2o_conn_t *conn) |
222 | 52.5k | { |
223 | 52.5k | --*get_connection_state_counter(conn->ctx, conn->state); |
224 | 52.5k | h2o_linklist_unlink(&conn->_conns); |
225 | 52.5k | } |
226 | | |
227 | | static void link_conn(h2o_conn_t *conn) |
228 | 52.5k | { |
229 | 52.5k | switch (conn->state) { |
230 | 20.2k | case H2O_CONN_STATE_IDLE: |
231 | 20.2k | h2o_linklist_insert(&conn->ctx->_conns.idle, &conn->_conns); |
232 | 20.2k | break; |
233 | 32.3k | case H2O_CONN_STATE_ACTIVE: |
234 | 32.3k | h2o_linklist_insert(&conn->ctx->_conns.active, &conn->_conns); |
235 | 32.3k | break; |
236 | 0 | case H2O_CONN_STATE_SHUTDOWN: |
237 | 0 | h2o_linklist_insert(&conn->ctx->_conns.shutdown, &conn->_conns); |
238 | 0 | break; |
239 | 52.5k | } |
240 | 52.5k | ++*get_connection_state_counter(conn->ctx, conn->state); |
241 | 52.5k | } |
242 | | |
243 | | h2o_conn_t *h2o_create_connection(size_t sz, h2o_context_t *ctx, h2o_hostconf_t **hosts, struct timeval connected_at, |
244 | | const h2o_conn_callbacks_t *callbacks) |
245 | 29.8k | { |
246 | 29.8k | h2o_conn_t *conn = (h2o_conn_t *)h2o_mem_alloc(sz); |
247 | | |
248 | 29.8k | conn->ctx = ctx; |
249 | 29.8k | conn->hosts = hosts; |
250 | 29.8k | conn->connected_at = connected_at; |
251 | | #ifdef H2O_NO_64BIT_ATOMICS |
252 | | pthread_mutex_lock(&h2o_conn_id_mutex); |
253 | | conn->id = ++h2o_connection_id; |
254 | | pthread_mutex_unlock(&h2o_conn_id_mutex); |
255 | | #else |
256 | 29.8k | conn->id = __sync_add_and_fetch(&h2o_connection_id, 1); |
257 | 29.8k | #endif |
258 | 29.8k | conn->callbacks = callbacks; |
259 | 29.8k | conn->_uuid.is_initialized = 0; |
260 | | |
261 | 29.8k | conn->state = H2O_CONN_STATE_ACTIVE; |
262 | 29.8k | conn->_conns = (h2o_linklist_t){}; |
263 | 29.8k | link_conn(conn); |
264 | | |
265 | 29.8k | return conn; |
266 | 29.8k | } |
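/*
 * Illustrative sketch, not from context.c: protocol layers obtain their connection object through
 * h2o_create_connection(), embedding h2o_conn_t as the first member so the two pointers are
 * interchangeable. All names below are hypothetical.
 */
struct example_proto_conn {
    h2o_conn_t super; /* must come first; h2o_create_connection() returns the whole allocation */
    int proto_state;
};

static const h2o_conn_callbacks_t example_conn_callbacks = {
    NULL /* a real protocol fills in get_sockname, get_peername, close_idle_connection, ... */
};

static struct example_proto_conn *example_accept(h2o_context_t *ctx, h2o_hostconf_t **hosts)
{
    struct example_proto_conn *conn = (struct example_proto_conn *)h2o_create_connection(
        sizeof(*conn), ctx, hosts, h2o_gettimeofday(ctx->loop), &example_conn_callbacks);
    conn->proto_state = 0; /* members after `super` are left uninitialized by h2o_create_connection() */
    return conn;
}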
267 | | |
268 | | void h2o_destroy_connection(h2o_conn_t *conn) |
269 | 29.8k | { |
270 | 29.8k | unlink_conn(conn); |
271 | 29.8k | free(conn); |
272 | 29.8k | } |
273 | | |
274 | | void h2o_conn_set_state(h2o_conn_t *conn, h2o_conn_state_t state) |
275 | 133k | { |
276 | 133k | if (conn->state != state) { |
277 | 22.7k | unlink_conn(conn); |
278 | 22.7k | conn->state = state; |
279 | 22.7k | link_conn(conn); |
280 | 22.7k | } |
281 | 133k | } |
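/*
 * Annotation, not from context.c: every connection therefore sits on exactly one of the three
 * anchors (_conns.active / .idle / .shutdown), and num_conns.counters[] is kept in sync by
 * unlink_conn()/link_conn() on each transition. Protocol layers drive the transitions through
 * h2o_conn_set_state(): ACTIVE while a request is in flight, IDLE between requests, and SHUTDOWN
 * once a graceful shutdown has been honored.
 */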