/src/httpd/server/mpm/event/event.c
Line | Count | Source |
1 | | /* Licensed to the Apache Software Foundation (ASF) under one or more |
2 | | * contributor license agreements. See the NOTICE file distributed with |
3 | | * this work for additional information regarding copyright ownership. |
4 | | * The ASF licenses this file to You under the Apache License, Version 2.0 |
5 | | * (the "License"); you may not use this file except in compliance with |
6 | | * the License. You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | /** |
18 | | * This MPM tries to fix the 'keep alive problem' in HTTP. |
19 | | * |
20 | | * After a client completes the first request, the client can keep the |
21 | | * connection open to send more requests with the same socket. This can save |
22 | | * significant overhead in creating TCP connections. However, the major |
23 | | * disadvantage is that Apache traditionally keeps an entire child |
24 | | * process/thread waiting for data from the client. To solve this problem, |
25 | | * this MPM has a dedicated thread for handling both the listening sockets |
26 | | * and all sockets that are in a keep-alive state. |
27 | | * |
28 | | * The MPM assumes the underlying apr_pollset implementation is somewhat |
29 | | * thread-safe. Currently this is only compatible with KQueue and EPoll. This |
30 | | * enables the MPM to avoid extra high-level locking or having to wake up the |
31 | | * listener thread when a keep-alive socket needs to be sent to it. |
32 | | * |
33 | | * This MPM does not perform well on older platforms that do not have very good |
34 | | * threading, like Linux with a 2.4 kernel, but this does not matter, since we |
35 | | * require EPoll or KQueue. |
36 | | * |
37 | | * For FreeBSD, use 5.3. It is possible to run this MPM on FreeBSD 5.2.1, if |
38 | | * you use libkse (see `man libmap.conf`). |
39 | | * |
40 | | * For NetBSD, use at least 2.0. |
41 | | * |
42 | | * For Linux, you should use a 2.6 kernel, and make sure your glibc has epoll |
43 | | * support compiled in. |
44 | | * |
45 | | */ |
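/* Illustrative sketch (not part of this MPM): the design described above
 * boils down to one listener thread multiplexing the accept sockets and all
 * idle keep-alive sockets, handing ready descriptors to worker threads.
 * This sketch assumes Linux epoll used directly rather than apr_pollset,
 * and a hypothetical dispatch_to_worker() standing in for the worker queue.
 */
#include <sys/epoll.h>
#include <sys/socket.h>

extern void dispatch_to_worker(int fd); /* hypothetical worker hand-off */

static void listener_loop_sketch(int epfd, int listen_fd)
{
    struct epoll_event evs[64];
    int i, n;

    for (;;) {
        n = epoll_wait(epfd, evs, 64, -1);
        for (i = 0; i < n; i++) {
            int fd = evs[i].data.fd;
            if (fd == listen_fd) {
                /* New connection: accept and hand to a worker. */
                int csd = accept(listen_fd, NULL, NULL);
                if (csd >= 0)
                    dispatch_to_worker(csd);
            }
            else {
                /* Idle keep-alive socket became readable: stop polling it
                 * and hand it back to a worker (workers re-add sockets
                 * here when their connection goes idle again). */
                epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);
                dispatch_to_worker(fd);
            }
        }
    }
}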
46 | | |
47 | | #include "apr.h" |
48 | | #include "apr_portable.h" |
49 | | #include "apr_strings.h" |
50 | | #include "apr_file_io.h" |
51 | | #include "apr_thread_proc.h" |
52 | | #include "apr_signal.h" |
53 | | #include "apr_thread_mutex.h" |
54 | | #include "apr_poll.h" |
55 | | #include "apr_ring.h" |
56 | | #include "apr_queue.h" |
57 | | #include "apr_atomic.h" |
58 | | #define APR_WANT_STRFUNC |
59 | | #include "apr_want.h" |
60 | | #include "apr_version.h" |
61 | | |
62 | | #include <stdlib.h> |
63 | | |
64 | | #if APR_HAVE_UNISTD_H |
65 | | #include <unistd.h> |
66 | | #endif |
67 | | #if APR_HAVE_SYS_SOCKET_H |
68 | | #include <sys/socket.h> |
69 | | #endif |
70 | | #if APR_HAVE_SYS_WAIT_H |
71 | | #include <sys/wait.h> |
72 | | #endif |
73 | | #ifdef HAVE_SYS_PROCESSOR_H |
74 | | #include <sys/processor.h> /* for bindprocessor() */ |
75 | | #endif |
76 | | |
77 | | #if !APR_HAS_THREADS |
78 | | #error The Event MPM requires APR threads, but they are unavailable. |
79 | | #endif |
80 | | |
81 | | #include "ap_config.h" |
82 | | #include "httpd.h" |
83 | | #include "http_main.h" |
84 | | #include "http_log.h" |
85 | | #include "http_config.h" /* for read_config */ |
86 | | #include "http_core.h" /* for get_remote_host */ |
87 | | #include "http_connection.h" |
88 | | #include "http_protocol.h" |
89 | | #include "ap_mpm.h" |
90 | | #include "mpm_common.h" |
91 | | #include "ap_listen.h" |
92 | | #include "scoreboard.h" |
93 | | #include "mpm_fdqueue.h" |
94 | | #include "mpm_default.h" |
95 | | #include "http_vhost.h" |
96 | | #include "unixd.h" |
97 | | #include "apr_skiplist.h" |
98 | | #include "util_time.h" |
99 | | |
100 | | #include <signal.h> |
101 | | #include <limits.h> /* for INT_MAX */ |
102 | | |
103 | | |
104 | | #if HAVE_SERF |
105 | | #include "mod_serf.h" |
106 | | #include "serf.h" |
107 | | #endif |
108 | | |
109 | | /* Limit on the total number of server processes --- clients will be locked |
110 | | * out if more servers than this are needed. It is intended solely to keep |
111 | | * the server from crashing when things get out of hand. |
112 | | * |
113 | | * We keep a hard maximum number of servers, for two reasons --- first off, |
114 | | * in case something goes seriously wrong, we want to stop the fork bomb |
115 | | * short of actually crashing the machine we're running on by filling some |
116 | | * kernel table. Secondly, it keeps the size of the scoreboard file small |
117 | | * enough that we can read the whole thing without worrying too much about |
118 | | * the overhead. |
119 | | */ |
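/* Illustrative arithmetic for the scoreboard-size argument above (the real
 * per-slot worker_score layout differs): with ServerLimit 16 and
 * ThreadLimit 64 there are 16 * 64 = 1024 worker slots; even at a few
 * hundred bytes per slot the scoreboard stays in the hundreds of
 * kilobytes, cheap to scan on every maintenance cycle.
 */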
120 | | #ifndef DEFAULT_SERVER_LIMIT |
121 | 0 | #define DEFAULT_SERVER_LIMIT 16 |
122 | | #endif |
123 | | |
124 | | /* Admin can't tune ServerLimit beyond MAX_SERVER_LIMIT. We want |
125 | | * some sort of compile-time limit to help catch typos. |
126 | | */ |
127 | | #ifndef MAX_SERVER_LIMIT |
128 | 0 | #define MAX_SERVER_LIMIT 20000 |
129 | | #endif |
130 | | |
131 | | /* Limit on the threads per process. Clients will be locked out if more than |
132 | | * this are needed. |
133 | | * |
134 | | * We keep this for one reason: it keeps the size of the scoreboard file small |
135 | | * enough that we can read the whole thing without worrying too much about |
136 | | * the overhead. |
137 | | */ |
138 | | #ifndef DEFAULT_THREAD_LIMIT |
139 | 0 | #define DEFAULT_THREAD_LIMIT 64 |
140 | | #endif |
141 | | |
142 | | /* Admin can't tune ThreadLimit beyond MAX_THREAD_LIMIT. We want |
143 | | * some sort of compile-time limit to help catch typos. |
144 | | */ |
145 | | #ifndef MAX_THREAD_LIMIT |
146 | 0 | #define MAX_THREAD_LIMIT 100000 |
147 | | #endif |
148 | | |
149 | 0 | #define MPM_CHILD_PID(i) (ap_scoreboard_image->parent[i].pid) |
150 | | |
151 | | #if !APR_VERSION_AT_LEAST(1,4,0) |
152 | | #define apr_time_from_msec(x) (x * 1000) |
153 | | #endif |
154 | | |
155 | | #ifndef MAX_SECS_TO_LINGER |
156 | | #define MAX_SECS_TO_LINGER 30 |
157 | | #endif |
158 | | #define SECONDS_TO_LINGER 2 |
159 | | |
160 | | /* |
161 | | * Actual definitions of config globals |
162 | | */ |
163 | | |
164 | | #ifndef DEFAULT_WORKER_FACTOR |
165 | | #define DEFAULT_WORKER_FACTOR 2 |
166 | | #endif |
167 | 0 | #define WORKER_FACTOR_SCALE 16 /* scale factor to allow fractional values */ |
168 | | static unsigned int worker_factor = DEFAULT_WORKER_FACTOR * WORKER_FACTOR_SCALE; |
169 | | /* AsyncRequestWorkerFactor * 16 */ |
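/* Worked example of the fixed-point scaling above (illustrative helper,
 * not part of the MPM): a fractional AsyncRequestWorkerFactor is stored
 * multiplied by WORKER_FACTOR_SCALE, and consumers divide it back out.
 */
#include <assert.h>
static void worker_factor_example(void)
{
    double directive = 1.5; /* i.e. "AsyncRequestWorkerFactor 1.5" */
    unsigned int wf = (unsigned int)(directive * WORKER_FACTOR_SCALE); /* 24 */
    assert(wf / WORKER_FACTOR_SCALE == 1); /* integer part */
    assert(wf % WORKER_FACTOR_SCALE == 8); /* .5 == 8/16 fractional part */
}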
170 | | |
171 | | static int threads_per_child = 0; /* ThreadsPerChild */ |
172 | | static int ap_daemons_to_start = 0; /* StartServers */ |
173 | | static int min_spare_threads = 0; /* MinSpareThreads */ |
174 | | static int max_spare_threads = 0; /* MaxSpareThreads */ |
175 | | static int active_daemons_limit = 0; /* MaxRequestWorkers / ThreadsPerChild */ |
176 | | static int max_workers = 0; /* MaxRequestWorkers */ |
177 | | static int server_limit = 0; /* ServerLimit */ |
178 | | static int thread_limit = 0; /* ThreadLimit */ |
179 | | static int had_healthy_child = 0; |
180 | | static volatile int dying = 0; |
181 | | static volatile int workers_may_exit = 0; |
182 | | static volatile int start_thread_may_exit = 0; |
183 | | static volatile int listener_may_exit = 0; |
184 | | static int listener_is_wakeable = 0; /* Pollset supports APR_POLLSET_WAKEABLE */ |
185 | | static int num_listensocks = 0; |
186 | | static apr_int32_t conns_this_child; /* MaxConnectionsPerChild, only |
187 | | accessed in the listener thread */ |
188 | | static apr_uint32_t connection_count = 0; /* Number of open connections */ |
189 | | static apr_uint32_t lingering_count = 0; /* Number of connections in lingering close */ |
190 | | static apr_uint32_t suspended_count = 0; /* Number of suspended connections */ |
191 | | static apr_uint32_t clogged_count = 0; /* Number of threads processing ssl conns */ |
192 | | static apr_uint32_t threads_shutdown = 0; /* Number of threads that have shutdown |
193 | | early during graceful termination */ |
194 | | static int resource_shortage = 0; |
195 | | static fd_queue_t *worker_queue; |
196 | | static fd_queue_info_t *worker_queue_info; |
197 | | |
198 | | static apr_thread_mutex_t *timeout_mutex; |
199 | | |
200 | | module AP_MODULE_DECLARE_DATA mpm_event_module; |
201 | | |
202 | | /* forward declare */ |
203 | | struct event_srv_cfg_s; |
204 | | typedef struct event_srv_cfg_s event_srv_cfg; |
205 | | |
206 | | static apr_pollfd_t *listener_pollfd; |
207 | | |
208 | | /* |
209 | | * The pollset for sockets that are in any of the timeout queues. Currently |
210 | | * we use the timeout_mutex to make sure that connections are added/removed |
211 | | * atomically to/from both event_pollset and a timeout queue. Otherwise |
212 | | * some confusion can happen under high load if timeout queues and pollset |
213 | | * get out of sync. |
214 | | * XXX: It should be possible to make the lock unnecessary in many or even all |
215 | | * XXX: cases. |
216 | | */ |
217 | | static apr_pollset_t *event_pollset; |
218 | | |
219 | | typedef struct event_conn_state_t event_conn_state_t; |
220 | | |
221 | | /* |
222 | | * The chain of connections to be shutdown by a worker thread (deferred), |
223 | | * linked list updated atomically. |
224 | | */ |
225 | | static event_conn_state_t *volatile defer_linger_chain; |
226 | | |
227 | | struct event_conn_state_t { |
228 | | /** APR_RING of expiration timeouts */ |
229 | | APR_RING_ENTRY(event_conn_state_t) timeout_list; |
230 | | /** the time when the entry was queued */ |
231 | | apr_time_t queue_timestamp; |
232 | | /** connection record this struct refers to */ |
233 | | conn_rec *c; |
234 | | /** request record (if any) this struct refers to */ |
235 | | request_rec *r; |
236 | | /** server config this struct refers to */ |
237 | | event_srv_cfg *sc; |
238 | | /** scoreboard handle for the conn_rec */ |
239 | | ap_sb_handle_t *sbh; |
240 | | /** is the current conn_rec suspended? (disassociated with |
241 | | * a particular MPM thread; for suspend_/resume_connection |
242 | | * hooks) |
243 | | */ |
244 | | int suspended; |
245 | | /** memory pool to allocate from */ |
246 | | apr_pool_t *p; |
247 | | /** bucket allocator */ |
248 | | apr_bucket_alloc_t *bucket_alloc; |
249 | | /** poll file descriptor information */ |
250 | | apr_pollfd_t pfd; |
251 | | /** public parts of the connection state */ |
252 | | conn_state_t pub; |
253 | | /** chaining in defer_linger_chain */ |
254 | | struct event_conn_state_t *chain; |
255 | | /** Is lingering close from defer_lingering_close()? */ |
256 | | int deferred_linger; |
257 | | }; |
258 | | |
259 | | APR_RING_HEAD(timeout_head_t, event_conn_state_t); |
260 | | |
261 | | struct timeout_queue { |
262 | | struct timeout_head_t head; |
263 | | apr_interval_time_t timeout; |
264 | | apr_uint32_t count; /* for this queue */ |
265 | | apr_uint32_t *total; /* for all chained/related queues */ |
266 | | struct timeout_queue *next; /* chaining */ |
267 | | }; |
268 | | /* |
269 | | * Several timeout queues that use different timeouts, so that we always can |
270 | | * simply append to the end. |
271 | | * write_completion_q uses vhost's TimeOut |
272 | | * keepalive_q uses vhost's KeepAliveTimeOut |
273 | | * linger_q uses MAX_SECS_TO_LINGER |
274 | | * short_linger_q uses SECONDS_TO_LINGER |
275 | | */ |
276 | | static struct timeout_queue *write_completion_q, |
277 | | *keepalive_q, |
278 | | *linger_q, |
279 | | *short_linger_q; |
280 | | static volatile apr_time_t queues_next_expiry; |
281 | | |
282 | | /* Prevent extra poll/wakeup calls for timeouts close in the future (queues |
283 | | * have the granularity of a second anyway). |
284 | | * XXX: Wouldn't 0.5s (instead of 0.1s) be "enough"? |
285 | | */ |
286 | 0 | #define TIMEOUT_FUDGE_FACTOR apr_time_from_msec(100) |
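/* Worked example of the fudge factor above: if the listener is already
 * scheduled to wake at t = 10.00s and a new entry would expire at
 * t = 9.95s, the wakeup is NOT rescheduled (10.00 is not greater than
 * 9.95 + 0.1), so a redundant apr_pollset_wakeup() is avoided for a
 * difference of only 50 ms.
 */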
287 | | |
288 | | /* |
289 | | * Macros for accessing struct timeout_queue. |
290 | | * For TO_QUEUE_APPEND and TO_QUEUE_REMOVE, timeout_mutex must be held. |
291 | | */ |
292 | | static void TO_QUEUE_APPEND(struct timeout_queue *q, event_conn_state_t *el) |
293 | 0 | { |
294 | 0 | apr_time_t elem_expiry; |
295 | 0 | apr_time_t next_expiry; |
296 | |
297 | 0 | APR_RING_INSERT_TAIL(&q->head, el, event_conn_state_t, timeout_list); |
298 | 0 | ++*q->total; |
299 | 0 | ++q->count; |
300 | | |
301 | | /* Cheaply update the global queues_next_expiry with the one of the |
302 | | * first entry of this queue (oldest) if it expires before. |
303 | | */ |
304 | 0 | el = APR_RING_FIRST(&q->head); |
305 | 0 | elem_expiry = el->queue_timestamp + q->timeout; |
306 | 0 | next_expiry = queues_next_expiry; |
307 | 0 | if (!next_expiry || next_expiry > elem_expiry + TIMEOUT_FUDGE_FACTOR) { |
308 | 0 | queues_next_expiry = elem_expiry; |
309 | | /* Unblock the poll()ing listener for it to update its timeout. */ |
310 | 0 | if (listener_is_wakeable) { |
311 | 0 | apr_pollset_wakeup(event_pollset); |
312 | 0 | } |
313 | 0 | } |
314 | 0 | } |
315 | | |
316 | | static void TO_QUEUE_REMOVE(struct timeout_queue *q, event_conn_state_t *el) |
317 | 0 | { |
318 | 0 | APR_RING_REMOVE(el, timeout_list); |
319 | 0 | APR_RING_ELEM_INIT(el, timeout_list); |
320 | 0 | --*q->total; |
321 | 0 | --q->count; |
322 | 0 | } |
323 | | |
324 | | static struct timeout_queue *TO_QUEUE_MAKE(apr_pool_t *p, apr_time_t t, |
325 | | struct timeout_queue *ref) |
326 | 0 | { |
327 | 0 | struct timeout_queue *q; |
328 | | |
329 | 0 | q = apr_pcalloc(p, sizeof *q); |
330 | 0 | APR_RING_INIT(&q->head, event_conn_state_t, timeout_list); |
331 | 0 | q->total = (ref) ? ref->total : apr_pcalloc(p, sizeof *q->total); |
332 | 0 | q->timeout = t; |
333 | |
334 | 0 | return q; |
335 | 0 | } |
336 | | |
337 | | #define TO_QUEUE_ELEM_INIT(el) \ |
338 | 0 | APR_RING_ELEM_INIT((el), timeout_list) |
339 | | |
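/* Illustrative sketch (hypothetical helper, not part of the MPM) of why
 * append-only, single-timeout queues suffice: entries are stamped on
 * insert, so expiry (queue_timestamp + q->timeout) is non-decreasing from
 * head to tail and only the head ever needs checking.  As with the other
 * TO_QUEUE_* helpers, timeout_mutex must be held.
 */
static void expire_head_entries_sketch(struct timeout_queue *q, apr_time_t now,
                                       void (*expired)(event_conn_state_t *))
{
    while (!APR_RING_EMPTY(&q->head, event_conn_state_t, timeout_list)) {
        event_conn_state_t *first = APR_RING_FIRST(&q->head);
        if (first->queue_timestamp + q->timeout > now) {
            break; /* head not expired, so nothing behind it is either */
        }
        TO_QUEUE_REMOVE(q, first);
        expired(first);
    }
}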
340 | | #if HAVE_SERF |
341 | | typedef struct { |
342 | | apr_pollset_t *pollset; |
343 | | apr_pool_t *pool; |
344 | | } s_baton_t; |
345 | | |
346 | | static serf_context_t *g_serf; |
347 | | #endif |
348 | | |
349 | | /* The structure used to pass unique initialization info to each thread */ |
350 | | typedef struct |
351 | | { |
352 | | int pslot; /* process slot */ |
353 | | int tslot; /* worker slot of the thread */ |
354 | | } proc_info; |
355 | | |
356 | | /* Structure used to pass information to the thread responsible for |
357 | | * creating the rest of the threads. |
358 | | */ |
359 | | typedef struct |
360 | | { |
361 | | apr_thread_t **threads; |
362 | | apr_thread_t *listener; |
363 | | int child_num_arg; |
364 | | apr_threadattr_t *threadattr; |
365 | | } thread_starter; |
366 | | |
367 | | typedef enum |
368 | | { |
369 | | PT_CSD, |
370 | | PT_ACCEPT |
371 | | #if HAVE_SERF |
372 | | , PT_SERF |
373 | | #endif |
374 | | , PT_USER |
375 | | } poll_type_e; |
376 | | |
377 | | typedef struct |
378 | | { |
379 | | poll_type_e type; |
380 | | void *baton; |
381 | | } listener_poll_type; |
382 | | |
383 | | typedef struct socket_callback_baton |
384 | | { |
385 | | ap_mpm_callback_fn_t *cbfunc; |
386 | | void *user_baton; |
387 | | apr_array_header_t *pfds; |
388 | | timer_event_t *cancel_event; /* If a timeout was requested, a pointer to the timer event */ |
389 | | struct socket_callback_baton *next; |
390 | | unsigned int signaled :1; |
391 | | } socket_callback_baton_t; |
392 | | |
393 | | typedef struct event_child_bucket { |
394 | | ap_pod_t *pod; |
395 | | ap_listen_rec *listeners; |
396 | | } event_child_bucket; |
397 | | static event_child_bucket *my_bucket; /* Current child bucket */ |
398 | | |
399 | | /* data retained by event across load/unload of the module |
400 | | * allocated on the first call to the pre-config hook; retrieved on |
401 | | * subsequent calls to the pre-config hook |
402 | | */ |
403 | | typedef struct event_retained_data { |
404 | | ap_unixd_mpm_retained_data *mpm; |
405 | | |
406 | | apr_pool_t *gen_pool; /* generation pool (children start->stop lifetime) */ |
407 | | event_child_bucket *buckets; /* children buckets (reset per generation) */ |
408 | | |
409 | | int first_server_limit; |
410 | | int first_thread_limit; |
411 | | int sick_child_detected; |
412 | | int maxclients_reported; |
413 | | int near_maxclients_reported; |
414 | | /* |
415 | | * The max child slot ever assigned, preserved across restarts. Necessary |
416 | | * to deal with MaxRequestWorkers changes across AP_SIG_GRACEFUL restarts. |
417 | | * We use this value to optimize routines that have to scan the entire |
418 | | * scoreboard. |
419 | | */ |
420 | | int max_daemon_used; |
421 | | |
422 | | /* |
423 | | * All running workers, active and shutting down, including those that |
424 | | * may be left from before a graceful restart. |
425 | | * Not kept up-to-date when shutdown is pending. |
426 | | */ |
427 | | int total_daemons; |
428 | | /* |
429 | | * Workers that are still active, i.e. not shutting down gracefully. |
430 | | */ |
431 | | int active_daemons; |
432 | | /* |
433 | | * idle_spawn_rate is the number of children that will be spawned on the |
434 | | * next maintenance cycle if there aren't enough idle servers. It is |
435 | | * maintained per listeners bucket, doubled up to MAX_SPAWN_RATE, and |
436 | | * reset only when a cycle goes by without the need to spawn. |
437 | | */ |
438 | | int *idle_spawn_rate; |
439 | | int hold_off_on_exponential_spawning; |
440 | | } event_retained_data; |
441 | | static event_retained_data *retained; |
442 | | |
443 | | #ifndef MAX_SPAWN_RATE |
444 | 0 | #define MAX_SPAWN_RATE 32 |
445 | | #endif |
446 | | static int max_spawn_rate_per_bucket = MAX_SPAWN_RATE / 1; |
447 | | |
448 | | struct event_srv_cfg_s { |
449 | | struct timeout_queue *wc_q, |
450 | | *ka_q; |
451 | | }; |
452 | | |
453 | 0 | #define ID_FROM_CHILD_THREAD(c, t) ((c * thread_limit) + t) |
454 | | |
455 | | /* The event MPM respects a couple of runtime flags that can aid |
456 | | * in debugging. Setting the -DNO_DETACH flag will prevent the root process |
457 | | * from detaching from its controlling terminal. Additionally, setting |
458 | | * the -DONE_PROCESS flag (which implies -DNO_DETACH) will get you the |
459 | | * child_main loop running in the process which originally started up. |
460 | | * This gives you a pretty nice debugging environment. (You'll get a SIGHUP |
461 | | * early in standalone_main; just continue through. This is the server |
462 | | * trying to kill off any child processes which it might have lying |
463 | | * around --- Apache doesn't keep track of their pids, it just sends |
464 | | * SIGHUP to the process group, ignoring it in the root process. |
465 | | * Continue through and you'll be fine.). |
466 | | */ |
467 | | |
468 | | static int one_process = 0; |
469 | | |
470 | | #ifdef DEBUG_SIGSTOP |
471 | | int raise_sigstop_flags; |
472 | | #endif |
473 | | |
474 | | static apr_pool_t *pconf; /* Pool for config stuff */ |
475 | | static apr_pool_t *pchild; /* Pool for httpd child stuff */ |
476 | | static apr_pool_t *pruntime; /* Pool for MPM threads stuff */ |
477 | | |
478 | | static pid_t ap_my_pid; /* Linux getpid() doesn't work except in main |
479 | | thread. Use this instead */ |
480 | | static pid_t parent_pid; |
481 | | static apr_os_thread_t *listener_os_thread; |
482 | | |
483 | | static int ap_child_slot; /* Current child process slot in scoreboard */ |
484 | | |
485 | | /* The LISTENER_SIGNAL signal will be sent from the main thread to the |
486 | | * listener thread to wake it up for graceful termination (what a child |
487 | | * process from an old generation does when the admin does "apachectl |
488 | | * graceful"). This signal will be blocked in all threads of a child |
489 | | * process except for the listener thread. |
490 | | */ |
491 | 0 | #define LISTENER_SIGNAL SIGHUP |
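/* Sketch of the signal routing described above (assumes POSIX threads; the
 * file's own unblock_signal() below implements the unblocking half): the
 * child blocks LISTENER_SIGNAL before spawning threads so they all inherit
 * the blocked mask, then only the listener unblocks it, guaranteeing that
 * kill()/pthread_kill() with this signal reaches the listener thread.
 */
#include <pthread.h>
static void block_listener_signal_sketch(void)
{
    sigset_t sig_mask;

    sigemptyset(&sig_mask);
    sigaddset(&sig_mask, LISTENER_SIGNAL);
    /* Called before pthread_create(); new threads inherit this mask. */
    pthread_sigmask(SIG_BLOCK, &sig_mask, NULL);
}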
492 | | |
493 | | /* An array of the socket descriptors in use by each worker thread, used |
494 | | * to perform a non-graceful (forced) shutdown of the server. |
495 | | */ |
496 | | static apr_socket_t **worker_sockets; |
497 | | |
498 | | static volatile apr_uint32_t listensocks_disabled; |
499 | | |
500 | | static void disable_listensocks(void) |
501 | 0 | { |
502 | 0 | int i; |
503 | 0 | if (apr_atomic_cas32(&listensocks_disabled, 1, 0) != 0) { |
504 | 0 | return; |
505 | 0 | } |
506 | 0 | if (event_pollset) { |
507 | 0 | for (i = 0; i < num_listensocks; i++) { |
508 | 0 | apr_pollset_remove(event_pollset, &listener_pollfd[i]); |
509 | 0 | } |
510 | 0 | } |
511 | 0 | ap_scoreboard_image->parent[ap_child_slot].not_accepting = 1; |
512 | 0 | } |
513 | | |
514 | | static void enable_listensocks(void) |
515 | 0 | { |
516 | 0 | int i; |
517 | 0 | if (listener_may_exit |
518 | 0 | || apr_atomic_cas32(&listensocks_disabled, 0, 1) != 1) { |
519 | 0 | return; |
520 | 0 | } |
521 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00457) |
522 | 0 | "Accepting new connections again: " |
523 | 0 | "%u active conns (%u lingering/%u clogged/%u suspended), " |
524 | 0 | "%u idle workers", |
525 | 0 | apr_atomic_read32(&connection_count), |
526 | 0 | apr_atomic_read32(&lingering_count), |
527 | 0 | apr_atomic_read32(&clogged_count), |
528 | 0 | apr_atomic_read32(&suspended_count), |
529 | 0 | ap_queue_info_num_idlers(worker_queue_info)); |
530 | 0 | for (i = 0; i < num_listensocks; i++) |
531 | 0 | apr_pollset_add(event_pollset, &listener_pollfd[i]); |
532 | | /* |
533 | | * XXX: This is not yet optimal. If many workers suddenly become available, |
534 | | * XXX: the parent may kill some processes off too soon. |
535 | | */ |
536 | 0 | ap_scoreboard_image->parent[ap_child_slot].not_accepting = 0; |
537 | 0 | } |
538 | | |
539 | | static APR_INLINE apr_uint32_t listeners_disabled(void) |
540 | 0 | { |
541 | 0 | return apr_atomic_read32(&listensocks_disabled); |
542 | 0 | } |
543 | | |
544 | | static APR_INLINE int connections_above_limit(int *busy) |
545 | 0 | { |
546 | 0 | apr_uint32_t i_count = ap_queue_info_num_idlers(worker_queue_info); |
547 | 0 | if (i_count > 0) { |
548 | 0 | apr_uint32_t c_count = apr_atomic_read32(&connection_count); |
549 | 0 | apr_uint32_t l_count = apr_atomic_read32(&lingering_count); |
550 | 0 | if (c_count <= l_count |
551 | | /* Off by 'listeners_disabled()' to avoid flip flop */ |
552 | 0 | || c_count - l_count < (apr_uint32_t)threads_per_child + |
553 | 0 | (i_count - listeners_disabled()) * |
554 | 0 | (worker_factor / WORKER_FACTOR_SCALE)) { |
555 | 0 | return 0; |
556 | 0 | } |
557 | 0 | } |
558 | 0 | else if (busy) { |
559 | 0 | *busy = 1; |
560 | 0 | } |
561 | 0 | return 1; |
562 | 0 | } |
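/* Worked instance of the admission test above (illustrative numbers, not
 * defaults): with threads_per_child = 25, AsyncRequestWorkerFactor 2
 * (worker_factor = 32), 5 idle workers and listeners enabled, connections
 * are below the limit while
 *     c_count - l_count < 25 + 5 * (32 / 16) = 35
 * i.e. up to 34 non-lingering connections may be open before the listening
 * sockets are disabled.
 */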
563 | | |
564 | | static APR_INLINE int should_enable_listensocks(void) |
565 | 0 | { |
566 | 0 | return !dying && listeners_disabled() && !connections_above_limit(NULL); |
567 | 0 | } |
568 | | |
569 | | static void close_socket_nonblocking_(apr_socket_t *csd, |
570 | | const char *from, int line) |
571 | 0 | { |
572 | 0 | apr_status_t rv; |
573 | 0 | apr_os_sock_t fd = -1; |
574 | | |
575 | | /* close_worker_sockets() may have closed it already */ |
576 | 0 | rv = apr_os_sock_get(&fd, csd); |
577 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE8, 0, ap_server_conf, |
578 | 0 | "closing socket %i/%pp from %s:%i", (int)fd, csd, from, line); |
579 | 0 | if (rv == APR_SUCCESS && fd == -1) { |
580 | 0 | return; |
581 | 0 | } |
582 | | |
583 | 0 | apr_socket_timeout_set(csd, 0); |
584 | 0 | rv = apr_socket_close(csd); |
585 | 0 | if (rv != APR_SUCCESS) { |
586 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(00468) |
587 | 0 | "error closing socket"); |
588 | 0 | AP_DEBUG_ASSERT(0); |
589 | 0 | } |
590 | 0 | } |
591 | | #define close_socket_nonblocking(csd) \ |
592 | 0 | close_socket_nonblocking_(csd, __FUNCTION__, __LINE__) |
593 | | |
594 | | static void close_worker_sockets(void) |
595 | 0 | { |
596 | 0 | int i; |
597 | 0 | for (i = 0; i < threads_per_child; i++) { |
598 | 0 | apr_socket_t *csd = worker_sockets[i]; |
599 | 0 | if (csd) { |
600 | 0 | worker_sockets[i] = NULL; |
601 | 0 | close_socket_nonblocking(csd); |
602 | 0 | } |
603 | 0 | } |
604 | 0 | } |
605 | | |
606 | | static void wakeup_listener(void) |
607 | 0 | { |
608 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, |
609 | 0 | "wake up listener%s", listener_may_exit ? " again" : ""); |
610 | |
611 | 0 | listener_may_exit = 1; |
612 | 0 | disable_listensocks(); |
613 | | |
614 | | /* Unblock the listener if it's poll()ing */ |
615 | 0 | if (event_pollset && listener_is_wakeable) { |
616 | 0 | apr_pollset_wakeup(event_pollset); |
617 | 0 | } |
618 | | |
619 | | /* unblock the listener if it's waiting for a worker */ |
620 | 0 | if (worker_queue_info) { |
621 | 0 | ap_queue_info_term(worker_queue_info); |
622 | 0 | } |
623 | |
624 | 0 | if (!listener_os_thread) { |
625 | | /* XXX there is an obscure path that this doesn't handle perfectly: |
626 | | * right after listener thread is created but before |
627 | | * listener_os_thread is set, the first worker thread hits an |
628 | | * error and starts graceful termination |
629 | | */ |
630 | 0 | return; |
631 | 0 | } |
632 | | /* |
633 | | * we should just be able to "kill(ap_my_pid, LISTENER_SIGNAL)" on all |
634 | | * platforms and wake up the listener thread since it is the only thread |
635 | | * with SIGHUP unblocked, but that doesn't work on Linux |
636 | | */ |
637 | 0 | #ifdef HAVE_PTHREAD_KILL |
638 | 0 | pthread_kill(*listener_os_thread, LISTENER_SIGNAL); |
639 | | #else |
640 | | kill(ap_my_pid, LISTENER_SIGNAL); |
641 | | #endif |
642 | 0 | } |
643 | | |
644 | 0 | #define ST_INIT 0 |
645 | 0 | #define ST_GRACEFUL 1 |
646 | 0 | #define ST_UNGRACEFUL 2 |
647 | | |
648 | | static int terminate_mode = ST_INIT; |
649 | | |
650 | | static void signal_threads(int mode) |
651 | 0 | { |
652 | 0 | if (terminate_mode >= mode) { |
653 | 0 | return; |
654 | 0 | } |
655 | 0 | terminate_mode = mode; |
656 | 0 | retained->mpm->mpm_state = AP_MPMQ_STOPPING; |
657 | | |
658 | | /* in case we weren't called from the listener thread, wake up the |
659 | | * listener thread |
660 | | */ |
661 | 0 | wakeup_listener(); |
662 | | |
663 | | /* for ungraceful termination, let the workers exit now; |
664 | | * for graceful termination, the listener thread will notify the |
665 | | * workers to exit once it has stopped accepting new connections |
666 | | */ |
667 | 0 | if (mode == ST_UNGRACEFUL) { |
668 | 0 | workers_may_exit = 1; |
669 | 0 | ap_queue_interrupt_all(worker_queue); |
670 | 0 | close_worker_sockets(); /* forcefully kill all current connections */ |
671 | 0 | } |
672 | |
673 | 0 | ap_run_child_stopping(pchild, mode == ST_GRACEFUL); |
674 | 0 | } |
675 | | |
676 | | static int event_query(int query_code, int *result, apr_status_t *rv) |
677 | 0 | { |
678 | 0 | *rv = APR_SUCCESS; |
679 | 0 | switch (query_code) { |
680 | 0 | case AP_MPMQ_MAX_DAEMON_USED: |
681 | 0 | *result = retained->max_daemon_used; |
682 | 0 | break; |
683 | 0 | case AP_MPMQ_IS_THREADED: |
684 | 0 | *result = AP_MPMQ_STATIC; |
685 | 0 | break; |
686 | 0 | case AP_MPMQ_IS_FORKED: |
687 | 0 | *result = AP_MPMQ_DYNAMIC; |
688 | 0 | break; |
689 | 0 | case AP_MPMQ_IS_ASYNC: |
690 | 0 | *result = 1; |
691 | 0 | break; |
692 | 0 | case AP_MPMQ_HAS_SERF: |
693 | 0 | *result = 1; |
694 | 0 | break; |
695 | 0 | case AP_MPMQ_HARD_LIMIT_DAEMONS: |
696 | 0 | *result = server_limit; |
697 | 0 | break; |
698 | 0 | case AP_MPMQ_HARD_LIMIT_THREADS: |
699 | 0 | *result = thread_limit; |
700 | 0 | break; |
701 | 0 | case AP_MPMQ_MAX_THREADS: |
702 | 0 | *result = threads_per_child; |
703 | 0 | break; |
704 | 0 | case AP_MPMQ_MIN_SPARE_DAEMONS: |
705 | 0 | *result = 0; |
706 | 0 | break; |
707 | 0 | case AP_MPMQ_MIN_SPARE_THREADS: |
708 | 0 | *result = min_spare_threads; |
709 | 0 | break; |
710 | 0 | case AP_MPMQ_MAX_SPARE_DAEMONS: |
711 | 0 | *result = 0; |
712 | 0 | break; |
713 | 0 | case AP_MPMQ_MAX_SPARE_THREADS: |
714 | 0 | *result = max_spare_threads; |
715 | 0 | break; |
716 | 0 | case AP_MPMQ_MAX_REQUESTS_DAEMON: |
717 | 0 | *result = ap_max_requests_per_child; |
718 | 0 | break; |
719 | 0 | case AP_MPMQ_MAX_DAEMONS: |
720 | 0 | *result = active_daemons_limit; |
721 | 0 | break; |
722 | 0 | case AP_MPMQ_MPM_STATE: |
723 | 0 | *result = retained->mpm->mpm_state; |
724 | 0 | break; |
725 | 0 | case AP_MPMQ_GENERATION: |
726 | 0 | *result = retained->mpm->my_generation; |
727 | 0 | break; |
728 | 0 | case AP_MPMQ_CAN_SUSPEND: |
729 | 0 | *result = 1; |
730 | 0 | break; |
731 | 0 | case AP_MPMQ_CAN_POLL: |
732 | 0 | *result = 1; |
733 | 0 | break; |
734 | 0 | default: |
735 | 0 | *rv = APR_ENOTIMPL; |
736 | 0 | break; |
737 | 0 | } |
738 | 0 | return OK; |
739 | 0 | } |
740 | | |
741 | | static void event_note_child_stopped(int slot, pid_t pid, ap_generation_t gen) |
742 | 0 | { |
743 | 0 | if (slot != -1) { /* child had a scoreboard slot? */ |
744 | 0 | process_score *ps = &ap_scoreboard_image->parent[slot]; |
745 | 0 | int i; |
746 | |
747 | 0 | pid = ps->pid; |
748 | 0 | gen = ps->generation; |
749 | 0 | for (i = 0; i < threads_per_child; i++) { |
750 | 0 | ap_update_child_status_from_indexes(slot, i, SERVER_DEAD, NULL); |
751 | 0 | } |
752 | 0 | ap_run_child_status(ap_server_conf, pid, gen, slot, MPM_CHILD_EXITED); |
753 | 0 | if (ps->quiescing != 2) { /* vs perform_idle_server_maintenance() */ |
754 | 0 | retained->active_daemons--; |
755 | 0 | } |
756 | 0 | retained->total_daemons--; |
757 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, |
758 | 0 | "Child %d stopped: pid %d, gen %d, " |
759 | 0 | "active %d/%d, total %d/%d/%d, quiescing %d", |
760 | 0 | slot, (int)pid, (int)gen, |
761 | 0 | retained->active_daemons, active_daemons_limit, |
762 | 0 | retained->total_daemons, retained->max_daemon_used, |
763 | 0 | server_limit, ps->quiescing); |
764 | 0 | ps->not_accepting = 0; |
765 | 0 | ps->quiescing = 0; |
766 | 0 | ps->pid = 0; |
767 | 0 | } |
768 | 0 | else { |
769 | 0 | ap_run_child_status(ap_server_conf, pid, gen, -1, MPM_CHILD_EXITED); |
770 | 0 | } |
771 | 0 | } |
772 | | |
773 | | static void event_note_child_started(int slot, pid_t pid) |
774 | 0 | { |
775 | 0 | ap_generation_t gen = retained->mpm->my_generation; |
776 | |
777 | 0 | retained->total_daemons++; |
778 | 0 | retained->active_daemons++; |
779 | 0 | ap_scoreboard_image->parent[slot].pid = pid; |
780 | 0 | ap_scoreboard_image->parent[slot].generation = gen; |
781 | 0 | ap_run_child_status(ap_server_conf, pid, gen, slot, MPM_CHILD_STARTED); |
782 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, |
783 | 0 | "Child %d started: pid %d, gen %d, " |
784 | 0 | "active %d/%d, total %d/%d/%d", |
785 | 0 | slot, (int)pid, (int)gen, |
786 | 0 | retained->active_daemons, active_daemons_limit, |
787 | 0 | retained->total_daemons, retained->max_daemon_used, |
788 | 0 | server_limit); |
789 | 0 | } |
790 | | |
791 | | static const char *event_get_name(void) |
792 | 0 | { |
793 | 0 | return "event"; |
794 | 0 | } |
795 | | |
796 | | /* a clean exit from a child with proper cleanup */ |
797 | | static void clean_child_exit(int code) __attribute__ ((noreturn)); |
798 | | static void clean_child_exit(int code) |
799 | 0 | { |
800 | 0 | retained->mpm->mpm_state = AP_MPMQ_STOPPING; |
801 | 0 | if (terminate_mode == ST_INIT) { |
802 | 0 | ap_run_child_stopping(pchild, 0); |
803 | 0 | } |
804 | |
805 | 0 | if (pchild) { |
806 | 0 | ap_run_child_stopped(pchild, terminate_mode == ST_GRACEFUL); |
807 | 0 | apr_pool_destroy(pchild); |
808 | 0 | } |
809 | |
810 | 0 | if (one_process) { |
811 | 0 | event_note_child_stopped(/* slot */ 0, 0, 0); |
812 | 0 | } |
813 | |
814 | 0 | exit(code); |
815 | 0 | } |
816 | | |
817 | | static void just_die(int sig) |
818 | 0 | { |
819 | 0 | clean_child_exit(0); |
820 | 0 | } |
821 | | |
822 | | /***************************************************************** |
823 | | * Connection structures and accounting... |
824 | | */ |
825 | | |
826 | | static int child_fatal; |
827 | | |
828 | | static apr_status_t decrement_connection_count(void *cs_) |
829 | 0 | { |
830 | 0 | int is_last_connection; |
831 | 0 | event_conn_state_t *cs = cs_; |
832 | 0 | ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, cs->c, |
833 | 0 | "cleanup connection from state %i", (int)cs->pub.state); |
834 | 0 | switch (cs->pub.state) { |
835 | 0 | case CONN_STATE_LINGER: |
836 | 0 | case CONN_STATE_LINGER_NORMAL: |
837 | 0 | case CONN_STATE_LINGER_SHORT: |
838 | 0 | apr_atomic_dec32(&lingering_count); |
839 | 0 | break; |
840 | 0 | case CONN_STATE_SUSPENDED: |
841 | 0 | apr_atomic_dec32(&suspended_count); |
842 | 0 | break; |
843 | 0 | default: |
844 | 0 | break; |
845 | 0 | } |
846 | | /* Unblock the listener if it's waiting for connection_count = 0, |
847 | | * or if the listening sockets were disabled due to limits and can |
848 | | * now accept new connections. |
849 | | */ |
850 | 0 | is_last_connection = !apr_atomic_dec32(&connection_count); |
851 | 0 | if (listener_is_wakeable |
852 | 0 | && ((is_last_connection && listener_may_exit) |
853 | 0 | || should_enable_listensocks())) { |
854 | 0 | apr_pollset_wakeup(event_pollset); |
855 | 0 | } |
856 | 0 | if (dying) { |
857 | | /* Help worker_thread_should_exit_early() */ |
858 | 0 | ap_queue_interrupt_one(worker_queue); |
859 | 0 | } |
860 | 0 | return APR_SUCCESS; |
861 | 0 | } |
862 | | |
863 | | static void notify_suspend(event_conn_state_t *cs) |
864 | 0 | { |
865 | 0 | ap_run_suspend_connection(cs->c, cs->r); |
866 | 0 | cs->c->sbh = NULL; |
867 | 0 | cs->suspended = 1; |
868 | 0 | } |
869 | | |
870 | | static void notify_resume(event_conn_state_t *cs, int cleanup) |
871 | 0 | { |
872 | 0 | cs->suspended = 0; |
873 | 0 | cs->c->sbh = cleanup ? NULL : cs->sbh; |
874 | 0 | ap_run_resume_connection(cs->c, cs->r); |
875 | 0 | } |
876 | | |
877 | | /* |
878 | | * Defer flush and close of the connection by adding it to defer_linger_chain, |
879 | | * for a worker to grab it and do the job (which may block). |
880 | | * Pre-condition: nonblocking, can be called from anywhere provided cs is not |
881 | | * in any timeout queue or in the pollset. |
882 | | */ |
883 | | static int defer_lingering_close(event_conn_state_t *cs) |
884 | 0 | { |
885 | 0 | ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, cs->c, |
886 | 0 | "deferring close from state %i", (int)cs->pub.state); |
887 | | |
888 | | /* The connection is not shutdown() yet strictly speaking, but it's not |
889 | | * in any queue nor handled by a worker either (will be very soon), so |
890 | | * to account for it somewhere we bump lingering_count now (and set |
891 | | * deferred_linger for process_lingering_close() to know). |
892 | | */ |
893 | 0 | cs->pub.state = CONN_STATE_LINGER; |
894 | 0 | apr_atomic_inc32(&lingering_count); |
895 | 0 | cs->deferred_linger = 1; |
896 | 0 | for (;;) { |
897 | 0 | event_conn_state_t *chain = cs->chain = defer_linger_chain; |
898 | 0 | if (apr_atomic_casptr((void *)&defer_linger_chain, cs, |
899 | 0 | chain) != chain) { |
900 | | /* Race lost, try again */ |
901 | 0 | continue; |
902 | 0 | } |
903 | 0 | return 1; |
904 | 0 | } |
905 | 0 | } |
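/* Sketch of the consumer side of defer_linger_chain (the worker loop that
 * actually drains it lives outside this section): the whole chain can be
 * detached with one atomic exchange and then walked privately.  Assumes
 * APR's apr_atomic_xchgptr(); illustrative only.
 */
static void process_lingering_close(event_conn_state_t *cs); /* defined below */
static void drain_defer_linger_chain_sketch(void)
{
    event_conn_state_t *cs = apr_atomic_xchgptr((void *)&defer_linger_chain,
                                                NULL);
    while (cs) {
        event_conn_state_t *next = cs->chain;
        cs->chain = NULL;
        process_lingering_close(cs); /* flush and close, possibly blocking */
        cs = next;
    }
}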
906 | | |
907 | | /* Close the connection and release its resources (ptrans), either because an |
908 | | * unrecoverable error occured (queues or pollset add/remove) or more usually |
909 | | * if lingering close timed out. |
910 | | * Pre-condition: nonblocking, can be called from anywhere provided cs is not |
911 | | * in any timeout queue or in the pollset. |
912 | | */ |
913 | | static void close_connection(event_conn_state_t *cs) |
914 | 0 | { |
915 | 0 | ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, cs->c, |
916 | 0 | "closing connection from state %i", (int)cs->pub.state); |
917 | |
918 | 0 | close_socket_nonblocking(cs->pfd.desc.s); |
919 | 0 | ap_queue_info_push_pool(worker_queue_info, cs->p); |
920 | 0 | } |
921 | | |
922 | | /* Shutdown the connection in case of timeout, error or resources shortage. |
923 | | * This starts short lingering close if not already there, or directly closes |
924 | | * the connection otherwise. |
925 | | * Pre-condition: nonblocking, can be called from anywhere provided cs is not |
926 | | * in any timeout queue or in the pollset. |
927 | | */ |
928 | | static int shutdown_connection(event_conn_state_t *cs) |
929 | 0 | { |
930 | 0 | if (cs->pub.state < CONN_STATE_LINGER) { |
931 | 0 | apr_table_setn(cs->c->notes, "short-lingering-close", "1"); |
932 | 0 | defer_lingering_close(cs); |
933 | 0 | } |
934 | 0 | else { |
935 | 0 | close_connection(cs); |
936 | 0 | } |
937 | 0 | return 1; |
938 | 0 | } |
939 | | |
940 | | /* |
941 | | * This runs before any non-MPM cleanup code on the connection; |
942 | | * if the connection is currently suspended as far as modules |
943 | | * know, provide notification of resumption. |
944 | | */ |
945 | | static apr_status_t ptrans_pre_cleanup(void *dummy) |
946 | 0 | { |
947 | 0 | event_conn_state_t *cs = dummy; |
948 | |
949 | 0 | if (cs->suspended) { |
950 | 0 | notify_resume(cs, 1); |
951 | 0 | } |
952 | 0 | return APR_SUCCESS; |
953 | 0 | } |
954 | | |
955 | | /* |
956 | | * event_pre_read_request() and event_request_cleanup() track the |
957 | | * current r for a given connection. |
958 | | */ |
959 | | static apr_status_t event_request_cleanup(void *dummy) |
960 | 0 | { |
961 | 0 | conn_rec *c = dummy; |
962 | 0 | event_conn_state_t *cs = ap_get_module_config(c->conn_config, |
963 | 0 | &mpm_event_module); |
964 | |
965 | 0 | cs->r = NULL; |
966 | 0 | return APR_SUCCESS; |
967 | 0 | } |
968 | | |
969 | | static void event_pre_read_request(request_rec *r, conn_rec *c) |
970 | 0 | { |
971 | 0 | event_conn_state_t *cs = ap_get_module_config(c->conn_config, |
972 | 0 | &mpm_event_module); |
973 | |
974 | 0 | cs->r = r; |
975 | 0 | cs->sc = ap_get_module_config(ap_server_conf->module_config, |
976 | 0 | &mpm_event_module); |
977 | 0 | apr_pool_cleanup_register(r->pool, c, event_request_cleanup, |
978 | 0 | apr_pool_cleanup_null); |
979 | 0 | } |
980 | | |
981 | | /* |
982 | | * event_post_read_request() tracks the current server config for a |
983 | | * given request. |
984 | | */ |
985 | | static int event_post_read_request(request_rec *r) |
986 | 0 | { |
987 | 0 | conn_rec *c = r->connection; |
988 | 0 | event_conn_state_t *cs = ap_get_module_config(c->conn_config, |
989 | 0 | &mpm_event_module); |
990 | | |
991 | | /* To preserve legacy behaviour (consistent with other MPMs), use |
992 | | * the keepalive timeout from the base server (first on this IP:port) |
993 | | * when none is explicitly configured on this server. |
994 | | */ |
995 | 0 | if (r->server->keep_alive_timeout_set) { |
996 | 0 | cs->sc = ap_get_module_config(r->server->module_config, |
997 | 0 | &mpm_event_module); |
998 | 0 | } |
999 | 0 | else { |
1000 | 0 | cs->sc = ap_get_module_config(c->base_server->module_config, |
1001 | 0 | &mpm_event_module); |
1002 | 0 | } |
1003 | 0 | return OK; |
1004 | 0 | } |
1005 | | |
1006 | | /* Forward declare */ |
1007 | | static void process_lingering_close(event_conn_state_t *cs); |
1008 | | |
1009 | | static void update_reqevents_from_sense(event_conn_state_t *cs, int sense) |
1010 | 0 | { |
1011 | 0 | if (sense < 0) { |
1012 | 0 | sense = cs->pub.sense; |
1013 | 0 | } |
1014 | 0 | if (sense == CONN_SENSE_WANT_READ) { |
1015 | 0 | cs->pfd.reqevents = APR_POLLIN | APR_POLLHUP; |
1016 | 0 | } |
1017 | 0 | else { |
1018 | 0 | cs->pfd.reqevents = APR_POLLOUT; |
1019 | 0 | } |
1020 | | /* POLLERR is usually a returned event only, but some pollset |
1021 | | * backends may require it in reqevents to do the right thing, |
1022 | | * so it shouldn't hurt (ignored otherwise). |
1023 | | */ |
1024 | 0 | cs->pfd.reqevents |= APR_POLLERR; |
1025 | | |
1026 | | /* Reset to default for the next round */ |
1027 | 0 | cs->pub.sense = CONN_SENSE_DEFAULT; |
1028 | 0 | } |
1029 | | |
1030 | | /* |
1031 | | * process one connection in the worker |
1032 | | */ |
1033 | | static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock, |
1034 | | event_conn_state_t * cs, int my_child_num, |
1035 | | int my_thread_num) |
1036 | 0 | { |
1037 | 0 | conn_rec *c; |
1038 | 0 | long conn_id = ID_FROM_CHILD_THREAD(my_child_num, my_thread_num); |
1039 | 0 | int clogging = 0, from_wc_q = 0; |
1040 | 0 | apr_status_t rv; |
1041 | 0 | int rc = OK; |
1042 | |
1043 | 0 | if (cs == NULL) { /* This is a new connection */ |
1044 | 0 | listener_poll_type *pt = apr_pcalloc(p, sizeof(*pt)); |
1045 | 0 | cs = apr_pcalloc(p, sizeof(event_conn_state_t)); |
1046 | 0 | cs->bucket_alloc = apr_bucket_alloc_create(p); |
1047 | 0 | ap_create_sb_handle(&cs->sbh, p, my_child_num, my_thread_num); |
1048 | 0 | c = ap_run_create_connection(p, ap_server_conf, sock, |
1049 | 0 | conn_id, cs->sbh, cs->bucket_alloc); |
1050 | 0 | if (!c) { |
1051 | 0 | ap_queue_info_push_pool(worker_queue_info, p); |
1052 | 0 | return; |
1053 | 0 | } |
1054 | 0 | apr_atomic_inc32(&connection_count); |
1055 | 0 | apr_pool_cleanup_register(c->pool, cs, decrement_connection_count, |
1056 | 0 | apr_pool_cleanup_null); |
1057 | 0 | ap_set_module_config(c->conn_config, &mpm_event_module, cs); |
1058 | 0 | c->current_thread = thd; |
1059 | 0 | c->cs = &cs->pub; |
1060 | 0 | cs->c = c; |
1061 | 0 | cs->p = p; |
1062 | 0 | cs->sc = ap_get_module_config(ap_server_conf->module_config, |
1063 | 0 | &mpm_event_module); |
1064 | 0 | cs->pfd.desc_type = APR_POLL_SOCKET; |
1065 | 0 | cs->pfd.desc.s = sock; |
1066 | 0 | update_reqevents_from_sense(cs, CONN_SENSE_WANT_READ); |
1067 | 0 | pt->type = PT_CSD; |
1068 | 0 | pt->baton = cs; |
1069 | 0 | cs->pfd.client_data = pt; |
1070 | 0 | apr_pool_pre_cleanup_register(p, cs, ptrans_pre_cleanup); |
1071 | 0 | TO_QUEUE_ELEM_INIT(cs); |
1072 | |
1073 | 0 | ap_update_vhost_given_ip(c); |
1074 | |
1075 | 0 | rc = ap_pre_connection(c, sock); |
1076 | 0 | if (rc != OK && rc != DONE) { |
1077 | 0 | ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(00469) |
1078 | 0 | "process_socket: connection aborted"); |
1079 | 0 | } |
1080 | | |
1081 | | /** |
1082 | | * XXX If the platform does not have a usable way of bundling |
1083 | | * accept() with a socket readability check, like Win32, |
1084 | | * and there are measurable delays before the |
1085 | | * socket is readable due to the first data packet arriving, |
1086 | | * it might be better to create the cs on the listener thread |
1087 | | * with the state set to CONN_STATE_CHECK_REQUEST_LINE_READABLE |
1088 | | * |
1089 | | * FreeBSD users will want to enable the HTTP accept filter |
1090 | | * module in their kernel for the highest performance |
1091 | | * When the accept filter is active, sockets are kept in the |
1092 | | * kernel until an HTTP request is received. |
1093 | | */ |
1094 | 0 | cs->pub.state = CONN_STATE_READ_REQUEST_LINE; |
1095 | |
1096 | 0 | cs->pub.sense = CONN_SENSE_DEFAULT; |
1097 | 0 | rc = OK; |
1098 | 0 | } |
1099 | 0 | else { |
1100 | 0 | c = cs->c; |
1101 | 0 | ap_update_sb_handle(cs->sbh, my_child_num, my_thread_num); |
1102 | 0 | notify_resume(cs, 0); |
1103 | 0 | c->current_thread = thd; |
1104 | | /* Subsequent request on a conn, and thread number is part of ID */ |
1105 | 0 | c->id = conn_id; |
1106 | 0 | } |
1107 | | |
1108 | 0 | if (c->aborted) { |
1109 | | /* do lingering close below */ |
1110 | 0 | cs->pub.state = CONN_STATE_LINGER; |
1111 | 0 | } |
1112 | 0 | else if (cs->pub.state >= CONN_STATE_LINGER) { |
1113 | | /* fall through */ |
1114 | 0 | } |
1115 | 0 | else { |
1116 | 0 | if (cs->pub.state == CONN_STATE_READ_REQUEST_LINE |
1117 | | /* If we have an input filter which 'clogs' the input stream, |
1118 | | * like mod_ssl used to, lets just do the normal read from input |
1119 | | * filters, like the Worker MPM does. Filters that need to write |
1120 | | * where they would otherwise read, or read where they would |
1121 | | * otherwise write, should set the sense appropriately. |
1122 | | */ |
1123 | 0 | || c->clogging_input_filters) { |
1124 | 0 | read_request: |
1125 | 0 | clogging = c->clogging_input_filters; |
1126 | 0 | if (clogging) { |
1127 | 0 | apr_atomic_inc32(&clogged_count); |
1128 | 0 | } |
1129 | 0 | rc = ap_run_process_connection(c); |
1130 | 0 | if (clogging) { |
1131 | 0 | apr_atomic_dec32(&clogged_count); |
1132 | 0 | } |
1133 | 0 | if (cs->pub.state > CONN_STATE_LINGER) { |
1134 | 0 | cs->pub.state = CONN_STATE_LINGER; |
1135 | 0 | } |
1136 | 0 | if (rc == DONE) { |
1137 | 0 | rc = OK; |
1138 | 0 | } |
1139 | 0 | } |
1140 | 0 | else if (cs->pub.state == CONN_STATE_WRITE_COMPLETION) { |
1141 | 0 | from_wc_q = 1; |
1142 | 0 | } |
1143 | 0 | } |
1144 | | /* |
1145 | | * The process_connection hooks above should set the connection state |
1146 | | * appropriately upon return, for event MPM to either: |
1147 | | * - do lingering close (CONN_STATE_LINGER), |
1148 | | * - wait for readability of the next request with respect to the keepalive |
1149 | | * timeout (state CONN_STATE_CHECK_REQUEST_LINE_READABLE), |
1150 | | * - wait for read/write-ability of the underlying socket with respect to |
1151 | | * its timeout by setting c->clogging_input_filters to 1 and the sense |
1152 | | * to CONN_SENSE_WANT_READ/WRITE (state CONN_STATE_WRITE_COMPLETION), |
1153 | | * - keep flushing the output filters stack in nonblocking mode, and then |
1154 | | * if required wait for read/write-ability of the underlying socket with |
1155 | | * respect to its own timeout (state CONN_STATE_WRITE_COMPLETION); since |
1156 | | * completion at some point may require reads (e.g. SSL_ERROR_WANT_READ), |
1157 | | * an output filter can also set the sense to CONN_SENSE_WANT_READ at any |
1158 | | * time for event MPM to do the right thing, |
1159 | | * - suspend the connection (SUSPENDED) such that it now interacts with |
1160 | | * the MPM through suspend/resume_connection() hooks, and/or registered |
1161 | | * poll callbacks (PT_USER), and/or registered timed callbacks triggered |
1162 | | * by timer events. |
1163 | | * If a process_connection hook returns an error or no hook sets the state |
1164 | | * to one of the above expected value, we forcibly close the connection w/ |
1165 | | * CONN_STATE_LINGER. This covers the cases where no process_connection |
1166 | | * hook executes (DECLINED), or one returns OK w/o touching the state (i.e. |
1167 | | * CONN_STATE_READ_REQUEST_LINE remains after the call) which can happen |
1168 | | * with third-party modules not updated to work specifically with event MPM |
1169 | | * while this was expected to do lingering close unconditionally with |
1170 | | * worker or prefork MPMs for instance. |
1171 | | */ |
1172 | 0 | if (rc != OK || (cs->pub.state >= CONN_STATE_NUM) |
1173 | 0 | || (cs->pub.state < CONN_STATE_LINGER |
1174 | 0 | && cs->pub.state != CONN_STATE_WRITE_COMPLETION |
1175 | 0 | && cs->pub.state != CONN_STATE_CHECK_REQUEST_LINE_READABLE |
1176 | 0 | && cs->pub.state != CONN_STATE_SUSPENDED)) { |
1177 | 0 | ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10111) |
1178 | 0 | "process_socket: connection processing %s: closing", |
1179 | 0 | rc ? apr_psprintf(c->pool, "returned error %i", rc) |
1180 | 0 | : apr_psprintf(c->pool, "unexpected state %i", |
1181 | 0 | (int)cs->pub.state)); |
1182 | 0 | cs->pub.state = CONN_STATE_LINGER; |
1183 | 0 | } |
1184 | |
1185 | 0 | if (cs->pub.state == CONN_STATE_WRITE_COMPLETION) { |
1186 | 0 | int pending = DECLINED; |
1187 | |
1188 | 0 | ap_update_child_status(cs->sbh, SERVER_BUSY_WRITE, NULL); |
1189 | |
1190 | 0 | if (from_wc_q) { |
1191 | 0 | from_wc_q = 0; /* one shot */ |
1192 | 0 | pending = ap_run_output_pending(c); |
1193 | 0 | } |
1194 | 0 | else if (ap_filter_should_yield(c->output_filters)) { |
1195 | 0 | pending = OK; |
1196 | 0 | } |
1197 | 0 | if (pending == OK || (pending == DECLINED && |
1198 | 0 | cs->pub.sense == CONN_SENSE_WANT_READ)) { |
1199 | | /* Still in CONN_STATE_WRITE_COMPLETION: |
1200 | | * Set a read/write timeout for this connection, and let the |
1201 | | * event thread poll for read/writeability. |
1202 | | */ |
1203 | 0 | cs->queue_timestamp = apr_time_now(); |
1204 | 0 | notify_suspend(cs); |
1205 | |
1206 | 0 | update_reqevents_from_sense(cs, -1); |
1207 | 0 | apr_thread_mutex_lock(timeout_mutex); |
1208 | 0 | TO_QUEUE_APPEND(cs->sc->wc_q, cs); |
1209 | 0 | rv = apr_pollset_add(event_pollset, &cs->pfd); |
1210 | 0 | if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) { |
1211 | 0 | AP_DEBUG_ASSERT(0); |
1212 | 0 | TO_QUEUE_REMOVE(cs->sc->wc_q, cs); |
1213 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1214 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03465) |
1215 | 0 | "process_socket: apr_pollset_add failure for " |
1216 | 0 | "write completion"); |
1217 | 0 | close_connection(cs); |
1218 | 0 | signal_threads(ST_GRACEFUL); |
1219 | 0 | } |
1220 | 0 | else { |
1221 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1222 | 0 | } |
1223 | 0 | return; |
1224 | 0 | } |
1225 | 0 | if (pending != DECLINED |
1226 | 0 | || c->aborted |
1227 | 0 | || c->keepalive != AP_CONN_KEEPALIVE) { |
1228 | 0 | cs->pub.state = CONN_STATE_LINGER; |
1229 | 0 | } |
1230 | 0 | else if (ap_run_input_pending(c) == OK) { |
1231 | 0 | cs->pub.state = CONN_STATE_READ_REQUEST_LINE; |
1232 | 0 | goto read_request; |
1233 | 0 | } |
1234 | 0 | else if (!listener_may_exit) { |
1235 | 0 | cs->pub.state = CONN_STATE_CHECK_REQUEST_LINE_READABLE; |
1236 | 0 | } |
1237 | 0 | else { |
1238 | 0 | cs->pub.state = CONN_STATE_LINGER; |
1239 | 0 | } |
1240 | 0 | } |
1241 | | |
1242 | 0 | if (cs->pub.state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) { |
1243 | 0 | ap_update_child_status(cs->sbh, SERVER_BUSY_KEEPALIVE, NULL); |
1244 | | |
1245 | | /* It greatly simplifies the logic to use a single timeout value per q |
1246 | | * because the new element can just be added to the end of the list and |
1247 | | * it will stay sorted in expiration time sequence. If brand new |
1248 | | * sockets are sent to the event thread for a readability check, this |
1249 | | * will be a slight behavior change - they use the non-keepalive |
1250 | | * timeout today. With a normal client, the socket will be readable in |
1251 | | * a few milliseconds anyway. |
1252 | | */ |
1253 | 0 | cs->queue_timestamp = apr_time_now(); |
1254 | 0 | notify_suspend(cs); |
1255 | | |
1256 | | /* Add work to pollset. */ |
1257 | 0 | update_reqevents_from_sense(cs, CONN_SENSE_WANT_READ); |
1258 | 0 | apr_thread_mutex_lock(timeout_mutex); |
1259 | 0 | TO_QUEUE_APPEND(cs->sc->ka_q, cs); |
1260 | 0 | rv = apr_pollset_add(event_pollset, &cs->pfd); |
1261 | 0 | if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) { |
1262 | 0 | AP_DEBUG_ASSERT(0); |
1263 | 0 | TO_QUEUE_REMOVE(cs->sc->ka_q, cs); |
1264 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1265 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03093) |
1266 | 0 | "process_socket: apr_pollset_add failure for " |
1267 | 0 | "keep alive"); |
1268 | 0 | close_connection(cs); |
1269 | 0 | signal_threads(ST_GRACEFUL); |
1270 | 0 | } |
1271 | 0 | else { |
1272 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1273 | 0 | } |
1274 | 0 | return; |
1275 | 0 | } |
1276 | | |
1277 | 0 | if (cs->pub.state == CONN_STATE_SUSPENDED) { |
1278 | 0 | cs->c->suspended_baton = cs; |
1279 | 0 | apr_atomic_inc32(&suspended_count); |
1280 | 0 | notify_suspend(cs); |
1281 | 0 | return; |
1282 | 0 | } |
1283 | | |
1284 | | /* CONN_STATE_LINGER[_*] fall through process_lingering_close() */ |
1285 | 0 | if (cs->pub.state >= CONN_STATE_LINGER) { |
1286 | 0 | process_lingering_close(cs); |
1287 | 0 | return; |
1288 | 0 | } |
1289 | 0 | } |
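/* Summary of the dispatch performed above once the hooks return (each row
 * corresponds to a branch of process_socket()):
 *
 *   resulting state                          action taken
 *   ---------------------------------------  -------------------------------
 *   CONN_STATE_WRITE_COMPLETION (pending)    append to wc_q, add to pollset
 *   CONN_STATE_READ_REQUEST_LINE (pipeline)  loop back via read_request
 *   CONN_STATE_CHECK_REQUEST_LINE_READABLE   append to ka_q, add to pollset
 *   CONN_STATE_SUSPENDED                     bump suspended_count, suspend
 *   CONN_STATE_LINGER[_*] or error           process_lingering_close()
 */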
1290 | | |
1291 | | /* Put a SUSPENDED connection back into a queue. */ |
1292 | | static apr_status_t event_resume_suspended (conn_rec *c) |
1293 | 0 | { |
1294 | 0 | event_conn_state_t* cs = (event_conn_state_t*) c->suspended_baton; |
1295 | 0 | if (cs == NULL) { |
1296 | 0 | ap_log_cerror (APLOG_MARK, LOG_WARNING, 0, c, APLOGNO(02615) |
1297 | 0 | "event_resume_suspended: suspended_baton is NULL"); |
1298 | 0 | return APR_EGENERAL; |
1299 | 0 | } else if (!cs->suspended) { |
1300 | 0 | ap_log_cerror (APLOG_MARK, LOG_WARNING, 0, c, APLOGNO(02616) |
1301 | 0 | "event_resume_suspended: Thread isn't suspended"); |
1302 | 0 | return APR_EGENERAL; |
1303 | 0 | } |
1304 | 0 | apr_atomic_dec32(&suspended_count); |
1305 | 0 | c->suspended_baton = NULL; |
1306 | |
1307 | 0 | if (cs->pub.state < CONN_STATE_LINGER) { |
1308 | 0 | cs->queue_timestamp = apr_time_now(); |
1309 | 0 | cs->pub.state = CONN_STATE_WRITE_COMPLETION; |
1310 | 0 | notify_suspend(cs); |
1311 | |
1312 | 0 | update_reqevents_from_sense(cs, -1); |
1313 | 0 | apr_thread_mutex_lock(timeout_mutex); |
1314 | 0 | TO_QUEUE_APPEND(cs->sc->wc_q, cs); |
1315 | 0 | apr_pollset_add(event_pollset, &cs->pfd); |
1316 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1317 | 0 | } |
1318 | 0 | else { |
1319 | 0 | cs->pub.state = CONN_STATE_LINGER; |
1320 | 0 | process_lingering_close(cs); |
1321 | 0 | } |
1322 | |
1323 | 0 | return OK; |
1324 | 0 | } |
1325 | | |
1326 | | /* conns_this_child has gone to zero or below. See if the admin coded |
1327 | | "MaxConnectionsPerChild 0", and keep going in that case. Doing it this way |
1328 | | simplifies the hot path in worker_thread */ |
1329 | | static void check_infinite_requests(void) |
1330 | 0 | { |
1331 | 0 | if (ap_max_requests_per_child) { |
1332 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, |
1333 | 0 | "Stopping process due to MaxConnectionsPerChild"); |
1334 | 0 | signal_threads(ST_GRACEFUL); |
1335 | 0 | } |
1336 | 0 | else { |
1337 | | /* keep going */ |
1338 | 0 | conns_this_child = APR_INT32_MAX; |
1339 | 0 | } |
1340 | 0 | } |
1341 | | |
1342 | | static int close_listeners(int *closed) |
1343 | 0 | { |
1344 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf, |
1345 | 0 | "clos%s listeners (connection_count=%u)", |
1346 | 0 | *closed ? "ed" : "ing", apr_atomic_read32(&connection_count)); |
1347 | 0 | if (!*closed) { |
1348 | 0 | int i; |
1349 | |
1350 | 0 | ap_close_listeners_ex(my_bucket->listeners); |
1351 | 0 | *closed = 1; /* once */ |
1352 | |
1353 | 0 | dying = 1; |
1354 | 0 | ap_scoreboard_image->parent[ap_child_slot].quiescing = 1; |
1355 | 0 | for (i = 0; i < threads_per_child; ++i) { |
1356 | 0 | ap_update_child_status_from_indexes(ap_child_slot, i, |
1357 | 0 | SERVER_GRACEFUL, NULL); |
1358 | 0 | } |
1359 | | /* wake up the main thread */ |
1360 | 0 | kill(ap_my_pid, SIGTERM); |
1361 | |
1362 | 0 | ap_queue_info_free_idle_pools(worker_queue_info); |
1363 | 0 | ap_queue_interrupt_all(worker_queue); |
1364 | |
1365 | 0 | return 1; |
1366 | 0 | } |
1367 | 0 | return 0; |
1368 | 0 | } |
1369 | | |
1370 | | static void unblock_signal(int sig) |
1371 | 0 | { |
1372 | 0 | sigset_t sig_mask; |
1373 | |
1374 | 0 | sigemptyset(&sig_mask); |
1375 | 0 | sigaddset(&sig_mask, sig); |
1376 | | #if defined(SIGPROCMASK_SETS_THREAD_MASK) |
1377 | | sigprocmask(SIG_UNBLOCK, &sig_mask, NULL); |
1378 | | #else |
1379 | 0 | pthread_sigmask(SIG_UNBLOCK, &sig_mask, NULL); |
1380 | 0 | #endif |
1381 | 0 | } |
1382 | | |
1383 | | static void dummy_signal_handler(int sig) |
1384 | 0 | { |
1385 | | /* XXX If specifying SIG_IGN is guaranteed to unblock a syscall, |
1386 | | * then we don't need this goofy function. |
1387 | | */ |
1388 | 0 | } |
1389 | | |
1390 | | |
1391 | | #if HAVE_SERF |
1392 | | static apr_status_t s_socket_add(void *user_baton, |
1393 | | apr_pollfd_t *pfd, |
1394 | | void *serf_baton) |
1395 | | { |
1396 | | s_baton_t *s = (s_baton_t*)user_baton; |
1397 | | /* XXXXX: recycle listener_poll_types */ |
1398 | | listener_poll_type *pt = ap_malloc(sizeof(*pt)); |
1399 | | pt->type = PT_SERF; |
1400 | | pt->baton = serf_baton; |
1401 | | pfd->client_data = pt; |
1402 | | return apr_pollset_add(s->pollset, pfd); |
1403 | | } |
1404 | | |
1405 | | static apr_status_t s_socket_remove(void *user_baton, |
1406 | | apr_pollfd_t *pfd, |
1407 | | void *serf_baton) |
1408 | | { |
1409 | | s_baton_t *s = (s_baton_t*)user_baton; |
1410 | | listener_poll_type *pt = pfd->client_data; |
1411 | | free(pt); |
1412 | | return apr_pollset_remove(s->pollset, pfd); |
1413 | | } |
1414 | | #endif |
1415 | | |
1416 | | #if HAVE_SERF |
1417 | | static void init_serf(apr_pool_t *p) |
1418 | | { |
1419 | | s_baton_t *baton = NULL; |
1420 | | |
1421 | | baton = apr_pcalloc(p, sizeof(*baton)); |
1422 | | baton->pollset = event_pollset; |
1423 | | /* TODO: subpools, threads, reuse, etc. -- currently use malloc() inside :( */ |
1424 | | baton->pool = p; |
1425 | | |
1426 | | g_serf = serf_context_create_ex(baton, |
1427 | | s_socket_add, |
1428 | | s_socket_remove, p); |
1429 | | |
1430 | | ap_register_provider(p, "mpm_serf", |
1431 | | "instance", "0", g_serf); |
1432 | | } |
1433 | | #endif |
1434 | | |
1435 | | static apr_status_t push_timer2worker(timer_event_t* te) |
1436 | 0 | { |
1437 | 0 | return ap_queue_push_timer(worker_queue, te); |
1438 | 0 | } |
1439 | | |
1440 | | /* |
1441 | | * Pre-condition: cs is neither in event_pollset nor a timeout queue |
1442 | | * this function may only be called by the listener |
1443 | | */ |
1444 | | static apr_status_t push2worker(event_conn_state_t *cs, apr_socket_t *csd, |
1445 | | apr_pool_t *ptrans) |
1446 | 0 | { |
1447 | 0 | apr_status_t rc; |
1448 | |
1449 | 0 | if (cs) { |
1450 | 0 | csd = cs->pfd.desc.s; |
1451 | 0 | ptrans = cs->p; |
1452 | 0 | } |
1453 | 0 | rc = ap_queue_push_socket(worker_queue, csd, cs, ptrans); |
1454 | 0 | if (rc != APR_SUCCESS) { |
1455 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf, APLOGNO(00471) |
1456 | 0 | "push2worker: ap_queue_push_socket failed"); |
1457 | | /* trash the connection; we couldn't queue the connected |
1458 | | * socket to a worker |
1459 | | */ |
1460 | 0 | if (cs) { |
1461 | 0 | shutdown_connection(cs); |
1462 | 0 | } |
1463 | 0 | else { |
1464 | 0 | if (csd) { |
1465 | 0 | close_socket_nonblocking(csd); |
1466 | 0 | } |
1467 | 0 | if (ptrans) { |
1468 | 0 | ap_queue_info_push_pool(worker_queue_info, ptrans); |
1469 | 0 | } |
1470 | 0 | } |
1471 | 0 | signal_threads(ST_GRACEFUL); |
1472 | 0 | } |
1473 | |
1474 | 0 | return rc; |
1475 | 0 | } |
1476 | | |
1477 | | /* get_worker: |
1478 | | * If *have_idle_worker_p == 0, reserve a worker thread, and set |
1479 | | * *have_idle_worker_p = 1. |
1480 | | * If *have_idle_worker_p is already 1, will do nothing. |
1481 | | * If blocking == 1, block if all workers are currently busy. |
1482 | | * If no worker was available immediately, will set *all_busy to 1. |
1483 | | * XXX: If there are no workers, we should not block immediately but |
1484 | | * XXX: close all keep-alive connections first. |
1485 | | */ |
1486 | | static void get_worker(int *have_idle_worker_p, int blocking, int *all_busy) |
1487 | 0 | { |
1488 | 0 | apr_status_t rc; |
1489 | |
1490 | 0 | if (*have_idle_worker_p) { |
1491 | | /* already reserved a worker thread - must have hit a |
1492 | | * transient error on a previous pass |
1493 | | */ |
1494 | 0 | return; |
1495 | 0 | } |
1496 | | |
1497 | 0 | if (blocking) |
1498 | 0 | rc = ap_queue_info_wait_for_idler(worker_queue_info, all_busy); |
1499 | 0 | else |
1500 | 0 | rc = ap_queue_info_try_get_idler(worker_queue_info); |
1501 | |
1502 | 0 | if (rc == APR_SUCCESS || APR_STATUS_IS_EOF(rc)) { |
1503 | 0 | *have_idle_worker_p = 1; |
1504 | 0 | } |
1505 | 0 | else if (!blocking && rc == APR_EAGAIN) { |
1506 | 0 | *all_busy = 1; |
1507 | 0 | } |
1508 | 0 | else { |
1509 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(00472) |
1510 | 0 | "ap_queue_info_wait_for_idler failed. " |
1511 | 0 | "Attempting to shutdown process gracefully"); |
1512 | 0 | signal_threads(ST_GRACEFUL); |
1513 | 0 | } |
1514 | 0 | } |
1515 | | |
1516 | | /* Structures to reuse */ |
1517 | | static timer_event_t timer_free_ring; |
1518 | | |
1519 | | static apr_skiplist *timer_skiplist; |
1520 | | static volatile apr_time_t timers_next_expiry; |
1521 | | |
1522 | | /* Same goal as for TIMEOUT_FUDGE_FACTOR (avoid extra poll calls), but applied |
1523 | | * to timers. Since their timeouts are custom (user defined), we can't be too |
1524 | | * approximate here (hence using 0.01s). |
1525 | | */ |
1526 | 0 | #define EVENT_FUDGE_FACTOR apr_time_from_msec(10) |
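
In other words, a newly inserted timer only retargets the poll() deadline when it expires more than ~10 ms before the already-scheduled wakeup; within the fudge window the pending wakeup is reused as-is. A self-contained illustration of that check (plain C, hypothetical names):

    #include <stdio.h>

    #define FUDGE 10000L            /* 10 ms, in microseconds */

    static long next_expiry = 0;    /* 0 = nothing scheduled */
    static int  wakeups = 0;

    static void schedule(long when)
    {
        if (!next_expiry || next_expiry > when + FUDGE) {
            next_expiry = when;     /* meaningfully earlier: retarget */
            wakeups++;              /* ...at the cost of one wakeup */
        }                           /* else: close enough, skip it */
    }

    int main(void)
    {
        schedule(100000);           /* first timer: wakeup */
        schedule(95000);            /* only 5 ms earlier: no wakeup */
        schedule(50000);            /* much earlier: wakeup */
        printf("next=%ld wakeups=%d\n", next_expiry, wakeups); /* 50000 2 */
        return 0;
    }
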
1527 | | |
1528 | | /* The following compare function is used by apr_skiplist_insert() to keep the |
1529 | | * elements (timers) sorted and provide O(log n) complexity (this is also true |
1530 | | * for apr_skiplist_{find,remove}(), but those are not used in MPM event where |
1531 | | * inserted timers are not searched nor removed, but with apr_skiplist_pop() |
1532 | | * which does not use any compare function). It is meant to return 0 when a == b, |
1533 | | * <0 when a < b, and >0 when a > b. However apr_skiplist_insert() will not |
1534 | | * add duplicates (i.e. a == b), and apr_skiplist_add() is only available in |
1535 | | * APR 1.6, yet multiple timers could possibly be created in the same micro- |
1536 | | * second (duplicates with regard to apr_time_t); therefore we implement the |
1537 | | * compare function to return +1 instead of 0 when compared timers are equal, |
1538 | | * thus duplicates are still added after each other (in order of insertion). |
1539 | | */ |
1540 | | static int timer_comp(void *a, void *b) |
1541 | 0 | { |
1542 | 0 | apr_time_t t1 = (apr_time_t) ((timer_event_t *)a)->when; |
1543 | 0 | apr_time_t t2 = (apr_time_t) ((timer_event_t *)b)->when; |
1544 | 0 | AP_DEBUG_ASSERT(t1); |
1545 | 0 | AP_DEBUG_ASSERT(t2); |
1546 | 0 | return ((t1 < t2) ? -1 : 1); |
1547 | 0 | } |
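
Because timer_comp() maps equality to +1, an insert walks past every already-stored timer with the same timestamp, so duplicates land after one another in insertion order instead of being rejected. A standalone sketch of the effect, using a plain sorted list rather than the APR skiplist:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { long when; int id; struct node *next; };

    static int cmp(long a, long b) { return (a < b) ? -1 : 1; } /* never 0 */

    static void insert_sorted(struct node **head, long when, int id)
    {
        struct node *n = malloc(sizeof(*n)), **pp = head;
        n->when = when; n->id = id;
        while (*pp && cmp(when, (*pp)->when) > 0) /* ties look "greater" */
            pp = &(*pp)->next;
        n->next = *pp;
        *pp = n;
    }

    int main(void)
    {
        struct node *head = NULL, *n;
        insert_sorted(&head, 100, 1);
        insert_sorted(&head, 100, 2); /* same timestamp, inserted second */
        insert_sorted(&head, 50, 3);
        for (n = head; n; n = n->next)
            printf("when=%ld id=%d\n", n->when, n->id); /* 50/3 100/1 100/2 */
        return 0;
    }
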
1548 | | |
1549 | | static apr_thread_mutex_t *g_timer_skiplist_mtx; |
1550 | | |
1551 | | static timer_event_t * event_get_timer_event(apr_time_t t, |
1552 | | ap_mpm_callback_fn_t *cbfn, |
1553 | | void *baton, |
1554 | | int insert, |
1555 | | apr_array_header_t *pfds) |
1556 | 0 | { |
1557 | 0 | timer_event_t *te; |
1558 | 0 | apr_time_t now = (t < 0) ? 0 : apr_time_now(); |
1559 | | |
1560 | | /* oh yeah, and make locking smarter/fine grained. */ |
1561 | |
1562 | 0 | apr_thread_mutex_lock(g_timer_skiplist_mtx); |
1563 | |
1564 | 0 | if (!APR_RING_EMPTY(&timer_free_ring.link, timer_event_t, link)) { |
1565 | 0 | te = APR_RING_FIRST(&timer_free_ring.link); |
1566 | 0 | APR_RING_REMOVE(te, link); |
1567 | 0 | } |
1568 | 0 | else { |
1569 | 0 | te = apr_skiplist_alloc(timer_skiplist, sizeof(timer_event_t)); |
1570 | 0 | APR_RING_ELEM_INIT(te, link); |
1571 | 0 | } |
1572 | |
1573 | 0 | te->cbfunc = cbfn; |
1574 | 0 | te->baton = baton; |
1575 | 0 | te->canceled = 0; |
1576 | 0 | te->when = now + t; |
1577 | 0 | te->pfds = pfds; |
1578 | |
1579 | 0 | if (insert) { |
1580 | 0 | apr_time_t next_expiry; |
1581 | | |
1582 | | /* Okay, add sorted by when.. */ |
1583 | 0 | apr_skiplist_insert(timer_skiplist, te); |
1584 | | |
1585 | | /* Cheaply update the global timers_next_expiry with this event's |
1586 | | * if it expires before. |
1587 | | */ |
1588 | 0 | next_expiry = timers_next_expiry; |
1589 | 0 | if (!next_expiry || next_expiry > te->when + EVENT_FUDGE_FACTOR) { |
1590 | 0 | timers_next_expiry = te->when; |
1591 | | /* Unblock the poll()ing listener for it to update its timeout. */ |
1592 | 0 | if (listener_is_wakeable) { |
1593 | 0 | apr_pollset_wakeup(event_pollset); |
1594 | 0 | } |
1595 | 0 | } |
1596 | 0 | } |
1597 | 0 | apr_thread_mutex_unlock(g_timer_skiplist_mtx); |
1598 | |
1599 | 0 | return te; |
1600 | 0 | } |
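
Note the recycling here: expired events are pushed back onto timer_free_ring by the worker threads (see worker_thread below) and popped off again in preference to allocating, so steady-state timer traffic does not grow the skiplist's allocations. A minimal sketch of that free-list pattern (hypothetical types, not the APR ring macros):

    #include <stdlib.h>

    struct ev { struct ev *next; /* ...payload... */ };
    static struct ev *free_list = NULL;

    static struct ev *ev_get(void)
    {
        struct ev *e = free_list;
        if (e) free_list = e->next;        /* reuse a recycled element */
        else   e = calloc(1, sizeof(*e));  /* none spare: allocate */
        return e;
    }

    static void ev_put(struct ev *e)
    {
        e->next = free_list;               /* recycle for next ev_get() */
        free_list = e;
    }

    int main(void)
    {
        struct ev *a = ev_get();
        ev_put(a);
        return ev_get() == a ? 0 : 1;      /* recycled element comes back */
    }
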
1601 | | |
1602 | | static apr_status_t event_register_timed_callback_ex(apr_time_t t, |
1603 | | ap_mpm_callback_fn_t *cbfn, |
1604 | | void *baton, |
1605 | | apr_array_header_t *pfds) |
1606 | 0 | { |
1607 | 0 | event_get_timer_event(t, cbfn, baton, 1, pfds); |
1608 | 0 | return APR_SUCCESS; |
1609 | 0 | } |
1610 | | |
1611 | | static apr_status_t event_register_timed_callback(apr_time_t t, |
1612 | | ap_mpm_callback_fn_t *cbfn, |
1613 | | void *baton) |
1614 | 0 | { |
1615 | 0 | event_register_timed_callback_ex(t, cbfn, baton, NULL); |
1616 | 0 | return APR_SUCCESS; |
1617 | 0 | } |
1618 | | |
1619 | | static apr_status_t event_cleanup_poll_callback(void *data) |
1620 | 0 | { |
1621 | 0 | apr_status_t final_rc = APR_SUCCESS; |
1622 | 0 | apr_array_header_t *pfds = data; |
1623 | 0 | int i; |
1624 | |
1625 | 0 | for (i = 0; i < pfds->nelts; i++) { |
1626 | 0 | apr_pollfd_t *pfd = (apr_pollfd_t *)pfds->elts + i; |
1627 | 0 | if (pfd->client_data) { |
1628 | 0 | apr_status_t rc; |
1629 | 0 | rc = apr_pollset_remove(event_pollset, pfd); |
1630 | 0 | if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) { |
1631 | 0 | final_rc = rc; |
1632 | 0 | } |
1633 | 0 | pfd->client_data = NULL; |
1634 | 0 | } |
1635 | 0 | } |
1636 | |
1637 | 0 | return final_rc; |
1638 | 0 | } |
1639 | | |
1640 | | static apr_status_t event_register_poll_callback_ex(apr_pool_t *p, |
1641 | | const apr_array_header_t *pfds, |
1642 | | ap_mpm_callback_fn_t *cbfn, |
1643 | | ap_mpm_callback_fn_t *tofn, |
1644 | | void *baton, |
1645 | | apr_time_t timeout) |
1646 | 0 | { |
1647 | 0 | socket_callback_baton_t *scb = apr_pcalloc(p, sizeof(*scb)); |
1648 | 0 | listener_poll_type *pt = apr_palloc(p, sizeof(*pt)); |
1649 | 0 | apr_status_t rc, final_rc = APR_SUCCESS; |
1650 | 0 | int i; |
1651 | |
1652 | 0 | pt->type = PT_USER; |
1653 | 0 | pt->baton = scb; |
1654 | |
1655 | 0 | scb->cbfunc = cbfn; |
1656 | 0 | scb->user_baton = baton; |
1657 | 0 | scb->pfds = apr_array_copy(p, pfds); |
1658 | |
1659 | 0 | apr_pool_pre_cleanup_register(p, scb->pfds, event_cleanup_poll_callback); |
1660 | |
1661 | 0 | for (i = 0; i < scb->pfds->nelts; i++) { |
1662 | 0 | apr_pollfd_t *pfd = (apr_pollfd_t *)scb->pfds->elts + i; |
1663 | 0 | if (pfd->reqevents) { |
1664 | 0 | if (pfd->reqevents & APR_POLLIN) { |
1665 | 0 | pfd->reqevents |= APR_POLLHUP; |
1666 | 0 | } |
1667 | 0 | pfd->reqevents |= APR_POLLERR; |
1668 | 0 | pfd->client_data = pt; |
1669 | 0 | } |
1670 | 0 | else { |
1671 | 0 | pfd->client_data = NULL; |
1672 | 0 | } |
1673 | 0 | } |
1674 | |
1675 | 0 | if (timeout > 0) { |
1676 | | /* XXX: This cancel timer event can fire before the pollset is updated */ |
1677 | 0 | scb->cancel_event = event_get_timer_event(timeout, tofn, baton, 1, scb->pfds); |
1678 | 0 | } |
1679 | 0 | for (i = 0; i < scb->pfds->nelts; i++) { |
1680 | 0 | apr_pollfd_t *pfd = (apr_pollfd_t *)scb->pfds->elts + i; |
1681 | 0 | if (pfd->client_data) { |
1682 | 0 | rc = apr_pollset_add(event_pollset, pfd); |
1683 | 0 | if (rc != APR_SUCCESS) { |
1684 | 0 | final_rc = rc; |
1685 | 0 | } |
1686 | 0 | } |
1687 | 0 | } |
1688 | 0 | return final_rc; |
1689 | 0 | } |
1690 | | |
1691 | | static apr_status_t event_register_poll_callback(apr_pool_t *p, |
1692 | | const apr_array_header_t *pfds, |
1693 | | ap_mpm_callback_fn_t *cbfn, |
1694 | | void *baton) |
1695 | 0 | { |
1696 | 0 | return event_register_poll_callback_ex(p, |
1697 | 0 | pfds, |
1698 | 0 | cbfn, |
1699 | 0 | NULL, /* no timeout function */ |
1700 | 0 | baton, |
1701 | 0 | 0 /* no timeout */); |
1702 | 0 | } |
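
The contract these two entry points implement is: watch a caller-supplied set of pollfds through the listener's pollset and run cbfn once any of them becomes ready, or tofn if the timeout fires first. A synchronous, self-contained sketch of that contract using plain poll() (hypothetical names; the MPM does this asynchronously via the listener loop below):

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    typedef void (callback_fn)(void *baton);

    static void watch(struct pollfd *pfds, int n, int timeout_ms,
                      callback_fn *cbfn, callback_fn *tofn, void *baton)
    {
        int rc = poll(pfds, n, timeout_ms);
        if (rc > 0)
            cbfn(baton);            /* at least one fd is ready */
        else if (rc == 0 && tofn)
            tofn(baton);            /* nothing happened in time */
    }

    static void on_ready(void *baton)   { (void)baton; puts("readable"); }
    static void on_timeout(void *baton) { (void)baton; puts("timed out"); }

    int main(void)
    {
        int fds[2];
        struct pollfd pfd;
        if (pipe(fds) != 0)
            return 1;
        pfd.fd = fds[0];
        pfd.events = POLLIN;
        watch(&pfd, 1, 100, on_ready, on_timeout, NULL); /* "timed out" */
        (void)write(fds[1], "x", 1);
        watch(&pfd, 1, 100, on_ready, on_timeout, NULL); /* "readable" */
        return 0;
    }
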
1703 | | |
1704 | | /* |
1705 | | * Flush data and close our side of the connection, then drain incoming data. |
1706 | | * If the latter would block, put the connection in one of the linger timeout |
1707 | | * queues to be called back when ready, and repeat until it's closed by the peer. |
1708 | | * Only to be called in the worker thread; since it's in the immediate call |
1709 | | * stack, we can afford a comfortable buffer size to consume data quickly. |
1710 | | * Pre-condition: cs is not in any timeout queue and not in the pollset, |
1711 | | * timeout_mutex is not locked |
1712 | | */ |
1713 | | #define LINGERING_BUF_SIZE (32 * 1024) |
1714 | | static void process_lingering_close(event_conn_state_t *cs) |
1715 | 0 | { |
1716 | 0 | apr_socket_t *csd = ap_get_conn_socket(cs->c); |
1717 | 0 | char dummybuf[LINGERING_BUF_SIZE]; |
1718 | 0 | apr_size_t nbytes; |
1719 | 0 | apr_status_t rv; |
1720 | 0 | struct timeout_queue *q; |
1721 | |
1722 | 0 | ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, cs->c, |
1723 | 0 | "lingering close from state %i", (int)cs->pub.state); |
1724 | 0 | AP_DEBUG_ASSERT(cs->pub.state >= CONN_STATE_LINGER); |
1725 | |
1726 | 0 | if (cs->pub.state == CONN_STATE_LINGER) { |
1727 | | /* defer_lingering_close() may have bumped lingering_count already */ |
1728 | 0 | if (!cs->deferred_linger) { |
1729 | 0 | apr_atomic_inc32(&lingering_count); |
1730 | 0 | } |
1731 | |
1732 | 0 | apr_socket_timeout_set(csd, apr_time_from_sec(SECONDS_TO_LINGER)); |
1733 | 0 | if (ap_start_lingering_close(cs->c)) { |
1734 | 0 | notify_suspend(cs); |
1735 | 0 | close_connection(cs); |
1736 | 0 | return; |
1737 | 0 | } |
1738 | | |
1739 | 0 | cs->queue_timestamp = apr_time_now(); |
1740 | | /* Clear APR_INCOMPLETE_READ if it was ever set, we'll do the poll() |
1741 | | * at the listener only from now, if needed. |
1742 | | */ |
1743 | 0 | apr_socket_opt_set(csd, APR_INCOMPLETE_READ, 0); |
1744 | | /* |
1745 | | * If some module requested a shortened waiting period, only wait for |
1746 | | * 2s (SECONDS_TO_LINGER). This is useful for mitigating certain |
1747 | | * DoS attacks. |
1748 | | */ |
1749 | 0 | if (apr_table_get(cs->c->notes, "short-lingering-close")) { |
1750 | 0 | cs->pub.state = CONN_STATE_LINGER_SHORT; |
1751 | 0 | } |
1752 | 0 | else { |
1753 | 0 | cs->pub.state = CONN_STATE_LINGER_NORMAL; |
1754 | 0 | } |
1755 | 0 | notify_suspend(cs); |
1756 | 0 | } |
1757 | | |
1758 | 0 | apr_socket_timeout_set(csd, 0); |
1759 | 0 | do { |
1760 | 0 | nbytes = sizeof(dummybuf); |
1761 | 0 | rv = apr_socket_recv(csd, dummybuf, &nbytes); |
1762 | 0 | } while (rv == APR_SUCCESS); |
1763 | |
1764 | 0 | if (!APR_STATUS_IS_EAGAIN(rv)) { |
1765 | 0 | close_connection(cs); |
1766 | 0 | return; |
1767 | 0 | } |
1768 | | |
1769 | | /* (Re)queue the connection to come back when readable */ |
1770 | 0 | update_reqevents_from_sense(cs, CONN_SENSE_WANT_READ); |
1771 | 0 | q = (cs->pub.state == CONN_STATE_LINGER_SHORT) ? short_linger_q : linger_q; |
1772 | 0 | apr_thread_mutex_lock(timeout_mutex); |
1773 | 0 | TO_QUEUE_APPEND(q, cs); |
1774 | 0 | rv = apr_pollset_add(event_pollset, &cs->pfd); |
1775 | 0 | if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) { |
1776 | 0 | AP_DEBUG_ASSERT(0); |
1777 | 0 | TO_QUEUE_REMOVE(q, cs); |
1778 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1779 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03092) |
1780 | 0 | "process_lingering_close: apr_pollset_add failure"); |
1781 | 0 | close_connection(cs); |
1782 | 0 | signal_threads(ST_GRACEFUL); |
1783 | 0 | return; |
1784 | 0 | } |
1785 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1786 | 0 | } |
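
What the function performs is the classic lingering-close sequence: stop sending, then keep swallowing the peer's in-flight data so the tail of the response isn't lost to a RST, and finish once the peer closes, or hand the socket back to the listener when a read would block. A POSIX-only sketch of the sequence, independent of APR and httpd:

    #include <errno.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Returns 0 when fully closed, 1 when the caller should poll for
     * readability and call in again (the would-block case). */
    static int lingering_close(int fd)
    {
        char buf[32 * 1024];
        ssize_t n;

        shutdown(fd, SHUT_WR);          /* our side is done writing */
        for (;;) {
            n = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
            if (n > 0)
                continue;               /* discard and keep draining */
            if (n == 0 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
                close(fd);              /* peer finished, or hard error */
                return 0;
            }
            return 1;                   /* would block: poll and retry */
        }
    }

    int main(void)
    {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
            return 1;
        send(sv[1], "late data", 9, 0); /* peer still has bytes in flight */
        close(sv[1]);                   /* peer closes */
        return lingering_close(sv[0]);  /* drains them, then closes: 0 */
    }
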
1787 | | |
1788 | | /* call 'func' for all elements of 'q' that have expired by 'expiry' (zero means all). |
1789 | | * Pre-condition: timeout_mutex must already be locked |
1790 | | * Post-condition: timeout_mutex will be locked again |
1791 | | */ |
1792 | | static void process_timeout_queue(struct timeout_queue *q, apr_time_t expiry, |
1793 | | int (*func)(event_conn_state_t *)) |
1794 | 0 | { |
1795 | 0 | apr_uint32_t total = 0, count; |
1796 | 0 | event_conn_state_t *first, *cs, *last; |
1797 | 0 | struct event_conn_state_t trash; |
1798 | 0 | struct timeout_queue *qp; |
1799 | 0 | apr_status_t rv; |
1800 | |
1801 | 0 | if (!*q->total) { |
1802 | 0 | return; |
1803 | 0 | } |
1804 | | |
1805 | 0 | APR_RING_INIT(&trash.timeout_list, event_conn_state_t, timeout_list); |
1806 | 0 | for (qp = q; qp; qp = qp->next) { |
1807 | 0 | count = 0; |
1808 | 0 | cs = first = last = APR_RING_FIRST(&qp->head); |
1809 | 0 | while (cs != APR_RING_SENTINEL(&qp->head, event_conn_state_t, |
1810 | 0 | timeout_list)) { |
1811 | | /* Trash the entry if: |
1812 | | * - no expiry was given (zero means all), or |
1813 | | * - it expired (according to the queue timeout), or |
1814 | | * - the system clock skewed in the past: no entry should be |
1815 | | * registered above the given expiry (~now) + the queue |
1816 | | * timeout, we won't keep any here (eg. for centuries). |
1817 | | * |
1818 | | * Otherwise stop, no following entry will match thanks to the |
1819 | | * single timeout per queue (entries are added to the end!). |
1820 | | * This allows maintenance in O(1). |
1821 | | */ |
1822 | 0 | if (expiry && cs->queue_timestamp + qp->timeout > expiry |
1823 | 0 | && cs->queue_timestamp < expiry + qp->timeout) { |
1824 | | /* Since this is the next expiring entry of this queue, update |
1825 | | * the global queues_next_expiry if it's later than this one. |
1826 | | */ |
1827 | 0 | apr_time_t elem_expiry = cs->queue_timestamp + qp->timeout; |
1828 | 0 | apr_time_t next_expiry = queues_next_expiry; |
1829 | 0 | if (!next_expiry |
1830 | 0 | || next_expiry > elem_expiry + TIMEOUT_FUDGE_FACTOR) { |
1831 | 0 | queues_next_expiry = elem_expiry; |
1832 | 0 | } |
1833 | 0 | break; |
1834 | 0 | } |
1835 | | |
1836 | 0 | last = cs; |
1837 | 0 | rv = apr_pollset_remove(event_pollset, &cs->pfd); |
1838 | 0 | if (rv != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rv)) { |
1839 | 0 | AP_DEBUG_ASSERT(0); |
1840 | 0 | ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, cs->c, APLOGNO(00473) |
1841 | 0 | "apr_pollset_remove failed"); |
1842 | 0 | } |
1843 | 0 | cs = APR_RING_NEXT(cs, timeout_list); |
1844 | 0 | count++; |
1845 | 0 | } |
1846 | 0 | if (!count) |
1847 | 0 | continue; |
1848 | | |
1849 | 0 | APR_RING_UNSPLICE(first, last, timeout_list); |
1850 | 0 | APR_RING_SPLICE_TAIL(&trash.timeout_list, first, last, event_conn_state_t, |
1851 | 0 | timeout_list); |
1852 | 0 | AP_DEBUG_ASSERT(*q->total >= count && qp->count >= count); |
1853 | 0 | *q->total -= count; |
1854 | 0 | qp->count -= count; |
1855 | 0 | total += count; |
1856 | 0 | } |
1857 | 0 | if (!total) |
1858 | 0 | return; |
1859 | | |
1860 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1861 | 0 | first = APR_RING_FIRST(&trash.timeout_list); |
1862 | 0 | do { |
1863 | 0 | cs = APR_RING_NEXT(first, timeout_list); |
1864 | 0 | TO_QUEUE_ELEM_INIT(first); |
1865 | 0 | func(first); |
1866 | 0 | first = cs; |
1867 | 0 | } while (--total); |
1868 | 0 | apr_thread_mutex_lock(timeout_mutex); |
1869 | 0 | } |
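
The O(1) claim in the comment above rests on two invariants: each queue has one fixed timeout, and entries are only ever appended. Together they keep every queue ordered by expiry, so maintenance pops from the front and stops at the first entry that has not yet timed out. A minimal sketch of that property (hypothetical types):

    #include <stdio.h>

    #define QUEUE_TIMEOUT 5  /* a single timeout for the whole queue */

    struct entry { long enqueued_at; struct entry *next; };

    static void expire(struct entry **head, long now,
                       void (*func)(struct entry *))
    {
        while (*head && (*head)->enqueued_at + QUEUE_TIMEOUT <= now) {
            struct entry *e = *head;
            *head = e->next;            /* pop the oldest entry */
            func(e);
        }                               /* first survivor: all done */
    }

    static void report(struct entry *e)
    {
        printf("expired entry enqueued at %ld\n", e->enqueued_at);
    }

    int main(void)
    {
        struct entry c = { 8, NULL }, b = { 4, &c }, a = { 1, &b };
        struct entry *q = &a;           /* appended in order: 1, 4, 8 */
        expire(&q, 10, report);         /* expires 1 and 4, stops at 8 */
        return 0;
    }
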
1870 | | |
1871 | | static void process_keepalive_queue(apr_time_t expiry) |
1872 | 0 | { |
1873 | | /* If all workers are busy, we kill older keep-alive connections so |
1874 | | * that they may connect to another process. |
1875 | | */ |
1876 | 0 | if (!expiry && *keepalive_q->total) { |
1877 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, |
1878 | 0 | "All workers are busy or dying, will shutdown %u " |
1879 | 0 | "keep-alive connections", *keepalive_q->total); |
1880 | 0 | } |
1881 | 0 | process_timeout_queue(keepalive_q, expiry, shutdown_connection); |
1882 | 0 | } |
1883 | | |
1884 | | static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) |
1885 | 0 | { |
1886 | 0 | apr_status_t rc; |
1887 | 0 | proc_info *ti = dummy; |
1888 | 0 | int process_slot = ti->pslot; |
1889 | 0 | struct process_score *ps = ap_get_scoreboard_process(process_slot); |
1890 | 0 | int closed = 0; |
1891 | 0 | int have_idle_worker = 0; |
1892 | 0 | apr_time_t last_log; |
1893 | |
1894 | 0 | last_log = apr_time_now(); |
1895 | 0 | free(ti); |
1896 | |
1897 | | #if HAVE_SERF |
1898 | | init_serf(apr_thread_pool_get(thd)); |
1899 | | #endif |
1900 | | |
1901 | | /* Unblock the signal used to wake this thread up, and set a handler for |
1902 | | * it. |
1903 | | */ |
1904 | 0 | apr_signal(LISTENER_SIGNAL, dummy_signal_handler); |
1905 | 0 | unblock_signal(LISTENER_SIGNAL); |
1906 | |
1907 | 0 | for (;;) { |
1908 | 0 | timer_event_t *te; |
1909 | 0 | const apr_pollfd_t *out_pfd; |
1910 | 0 | apr_int32_t num = 0; |
1911 | 0 | apr_interval_time_t timeout; |
1912 | 0 | socket_callback_baton_t *user_chain; |
1913 | 0 | apr_time_t now, expiry = -1; |
1914 | 0 | int workers_were_busy = 0; |
1915 | |
1916 | 0 | if (conns_this_child <= 0) |
1917 | 0 | check_infinite_requests(); |
1918 | |
1919 | 0 | if (listener_may_exit) { |
1920 | 0 | int first_close = close_listeners(&closed); |
1921 | |
1922 | 0 | if (terminate_mode == ST_UNGRACEFUL |
1923 | 0 | || apr_atomic_read32(&connection_count) == 0) |
1924 | 0 | break; |
1925 | | |
1926 | | /* Don't wait in poll() for the first close (i.e. dying now), we |
1927 | | * want to maintain the queues and schedule defer_linger_chain ASAP |
1928 | | * to kill kept-alive connections and shut down the workers and child |
1929 | | * faster. |
1930 | | */ |
1931 | 0 | if (first_close) { |
1932 | 0 | goto do_maintenance; /* with expiry == -1 */ |
1933 | 0 | } |
1934 | 0 | } |
1935 | | |
1936 | 0 | if (APLOGtrace6(ap_server_conf)) { |
1937 | 0 | now = apr_time_now(); |
1938 | | /* trace log status every second */ |
1939 | 0 | if (now - last_log > apr_time_from_sec(1)) { |
1940 | 0 | last_log = now; |
1941 | 0 | apr_thread_mutex_lock(timeout_mutex); |
1942 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf, |
1943 | 0 | "connections: %u (clogged: %u write-completion: %d " |
1944 | 0 | "keep-alive: %d lingering: %d suspended: %u)", |
1945 | 0 | apr_atomic_read32(&connection_count), |
1946 | 0 | apr_atomic_read32(&clogged_count), |
1947 | 0 | apr_atomic_read32(write_completion_q->total), |
1948 | 0 | apr_atomic_read32(keepalive_q->total), |
1949 | 0 | apr_atomic_read32(&lingering_count), |
1950 | 0 | apr_atomic_read32(&suspended_count)); |
1951 | 0 | if (dying) { |
1952 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf, |
1953 | 0 | "%u/%u workers shutdown", |
1954 | 0 | apr_atomic_read32(&threads_shutdown), |
1955 | 0 | threads_per_child); |
1956 | 0 | } |
1957 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
1958 | 0 | } |
1959 | 0 | } |
1960 | |
1961 | | #if HAVE_SERF |
1962 | | rc = serf_context_prerun(g_serf); |
1963 | | if (rc != APR_SUCCESS) { |
1964 | | /* TODO: what should we do here? ugh. */ |
1965 | | } |
1966 | | #endif |
1967 | | |
1968 | | /* Start with an infinite poll() timeout and update it according to |
1969 | | * the next expiring timer or queue entry. If there are none, either |
1970 | | * the listener is wakeable and it can poll() indefinitely until a wake |
1971 | | * up occurs, otherwise periodic checks (maintenance, shutdown, ...) |
1972 | | * must be performed. |
1973 | | */ |
1974 | 0 | now = apr_time_now(); |
1975 | 0 | timeout = -1; |
1976 | | |
1977 | | /* Push expired timers to a worker, the first remaining one determines |
1978 | | * the maximum time to poll() below, if any. |
1979 | | */ |
1980 | 0 | expiry = timers_next_expiry; |
1981 | 0 | if (expiry && expiry < now) { |
1982 | 0 | apr_thread_mutex_lock(g_timer_skiplist_mtx); |
1983 | 0 | while ((te = apr_skiplist_peek(timer_skiplist))) { |
1984 | 0 | if (te->when > now) { |
1985 | 0 | timers_next_expiry = te->when; |
1986 | 0 | timeout = te->when - now; |
1987 | 0 | break; |
1988 | 0 | } |
1989 | 0 | apr_skiplist_pop(timer_skiplist, NULL); |
1990 | 0 | if (!te->canceled) { |
1991 | 0 | if (te->pfds) { |
1992 | | /* remove all sockets from the pollset */ |
1993 | 0 | apr_pool_cleanup_run(te->pfds->pool, te->pfds, |
1994 | 0 | event_cleanup_poll_callback); |
1995 | 0 | } |
1996 | 0 | push_timer2worker(te); |
1997 | 0 | } |
1998 | 0 | else { |
1999 | 0 | APR_RING_INSERT_TAIL(&timer_free_ring.link, te, |
2000 | 0 | timer_event_t, link); |
2001 | 0 | } |
2002 | 0 | } |
2003 | 0 | if (!te) { |
2004 | 0 | timers_next_expiry = 0; |
2005 | 0 | } |
2006 | 0 | apr_thread_mutex_unlock(g_timer_skiplist_mtx); |
2007 | 0 | } |
2008 | | |
2009 | | /* Same for queues, use their next expiry, if any. */ |
2010 | 0 | expiry = queues_next_expiry; |
2011 | 0 | if (expiry |
2012 | 0 | && (timeout < 0 |
2013 | 0 | || expiry <= now |
2014 | 0 | || timeout > expiry - now)) { |
2015 | 0 | timeout = expiry > now ? expiry - now : 0; |
2016 | 0 | } |
2017 | | |
2018 | | /* When non-wakeable, don't wait more than 100 ms, in any case. */ |
2019 | 0 | #define NON_WAKEABLE_POLL_TIMEOUT apr_time_from_msec(100) |
2020 | 0 | if (!listener_is_wakeable |
2021 | 0 | && (timeout < 0 |
2022 | 0 | || timeout > NON_WAKEABLE_POLL_TIMEOUT)) { |
2023 | 0 | timeout = NON_WAKEABLE_POLL_TIMEOUT; |
2024 | 0 | } |
2025 | 0 | else if (timeout > 0) { |
2026 | | /* apr_pollset_poll() might round down the timeout to milliseconds, |
2027 | | * let's forcibly round up here to never return before the timeout. |
2028 | | */ |
2029 | 0 | timeout = apr_time_from_msec( |
2030 | 0 | apr_time_as_msec(timeout + apr_time_from_msec(1) - 1) |
2031 | 0 | ); |
2032 | 0 | } |
2033 | |
2034 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, ap_server_conf, |
2035 | 0 | "polling with timeout=%" APR_TIME_T_FMT |
2036 | 0 | " queues_timeout=%" APR_TIME_T_FMT |
2037 | 0 | " timers_timeout=%" APR_TIME_T_FMT, |
2038 | 0 | timeout, queues_next_expiry - now, |
2039 | 0 | timers_next_expiry - now); |
2040 | |
2041 | 0 | rc = apr_pollset_poll(event_pollset, timeout, &num, &out_pfd); |
2042 | 0 | if (rc != APR_SUCCESS) { |
2043 | 0 | if (!APR_STATUS_IS_EINTR(rc) && !APR_STATUS_IS_TIMEUP(rc)) { |
2044 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf, |
2045 | 0 | APLOGNO(03267) |
2046 | 0 | "apr_pollset_poll failed. Attempting to " |
2047 | 0 | "shutdown process gracefully"); |
2048 | 0 | signal_threads(ST_GRACEFUL); |
2049 | 0 | } |
2050 | 0 | num = 0; |
2051 | 0 | } |
2052 | |
2053 | 0 | if (APLOGtrace7(ap_server_conf)) { |
2054 | 0 | now = apr_time_now(); |
2055 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE7, rc, ap_server_conf, |
2056 | 0 | "polled with num=%u exit=%d/%d conns=%d" |
2057 | 0 | " queues_timeout=%" APR_TIME_T_FMT |
2058 | 0 | " timers_timeout=%" APR_TIME_T_FMT, |
2059 | 0 | num, listener_may_exit, dying, |
2060 | 0 | apr_atomic_read32(&connection_count), |
2061 | 0 | queues_next_expiry - now, timers_next_expiry - now); |
2062 | 0 | } |
2063 | | |
2064 | | /* XXX possible optimization: stash the current time for use as |
2065 | | * r->request_time for new requests or queues maintenance |
2066 | | */ |
2067 | |
2068 | 0 | for (user_chain = NULL; num; --num, ++out_pfd) { |
2069 | 0 | listener_poll_type *pt = (listener_poll_type *) out_pfd->client_data; |
2070 | 0 | if (pt->type == PT_CSD) { |
2071 | | /* one of the sockets is readable */ |
2072 | 0 | event_conn_state_t *cs = (event_conn_state_t *) pt->baton; |
2073 | 0 | struct timeout_queue *remove_from_q = NULL; |
2074 | | /* don't wait for a worker for a keepalive request or |
2075 | | * lingering close processing. */ |
2076 | 0 | int blocking = 0; |
2077 | |
2078 | 0 | switch (cs->pub.state) { |
2079 | 0 | case CONN_STATE_WRITE_COMPLETION: |
2080 | 0 | remove_from_q = cs->sc->wc_q; |
2081 | 0 | blocking = 1; |
2082 | 0 | break; |
2083 | | |
2084 | 0 | case CONN_STATE_CHECK_REQUEST_LINE_READABLE: |
2085 | 0 | cs->pub.state = CONN_STATE_READ_REQUEST_LINE; |
2086 | 0 | remove_from_q = cs->sc->ka_q; |
2087 | 0 | break; |
2088 | | |
2089 | 0 | case CONN_STATE_LINGER_NORMAL: |
2090 | 0 | remove_from_q = linger_q; |
2091 | 0 | break; |
2092 | | |
2093 | 0 | case CONN_STATE_LINGER_SHORT: |
2094 | 0 | remove_from_q = short_linger_q; |
2095 | 0 | break; |
2096 | | |
2097 | 0 | default: |
2098 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rc, |
2099 | 0 | ap_server_conf, APLOGNO(03096) |
2100 | 0 | "event_loop: unexpected state %d", |
2101 | 0 | cs->pub.state); |
2102 | 0 | ap_assert(0); |
2103 | 0 | } |
2104 | | |
2105 | 0 | if (remove_from_q) { |
2106 | 0 | apr_thread_mutex_lock(timeout_mutex); |
2107 | 0 | TO_QUEUE_REMOVE(remove_from_q, cs); |
2108 | 0 | rc = apr_pollset_remove(event_pollset, &cs->pfd); |
2109 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
2110 | | /* |
2111 | | * Some of the pollset backends, like KQueue or Epoll |
2112 | | * automagically remove the FD if the socket is closed, |
2113 | | * therefore, we can accept _SUCCESS or _NOTFOUND, |
2114 | | * and we still want to keep going |
2115 | | */ |
2116 | 0 | if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) { |
2117 | 0 | AP_DEBUG_ASSERT(0); |
2118 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, |
2119 | 0 | APLOGNO(03094) "pollset remove failed"); |
2120 | 0 | close_connection(cs); |
2121 | 0 | signal_threads(ST_GRACEFUL); |
2122 | 0 | break; |
2123 | 0 | } |
2124 | | |
2125 | | /* If we don't get a worker immediately (nonblocking), we |
2126 | | * close the connection; the client can re-connect to a |
2127 | | * different process for keepalive, and for lingering close |
2128 | | * the connection will be shut down anyway, so the choice is to favor |
2129 | | * incoming/alive connections. |
2130 | | */ |
2131 | 0 | get_worker(&have_idle_worker, blocking, |
2132 | 0 | &workers_were_busy); |
2133 | 0 | if (!have_idle_worker) { |
2134 | 0 | shutdown_connection(cs); |
2135 | 0 | } |
2136 | 0 | else if (push2worker(cs, NULL, NULL) == APR_SUCCESS) { |
2137 | 0 | have_idle_worker = 0; |
2138 | 0 | } |
2139 | 0 | } |
2140 | 0 | } |
2141 | 0 | else if (pt->type == PT_ACCEPT && !listeners_disabled()) { |
2142 | | /* A Listener Socket is ready for an accept() */ |
2143 | 0 | if (workers_were_busy) { |
2144 | 0 | disable_listensocks(); |
2145 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, |
2146 | 0 | APLOGNO(03268) |
2147 | 0 | "All workers busy, not accepting new conns " |
2148 | 0 | "in this process"); |
2149 | 0 | } |
2150 | 0 | else if (connections_above_limit(&workers_were_busy)) { |
2151 | 0 | disable_listensocks(); |
2152 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, |
2153 | 0 | APLOGNO(03269) |
2154 | 0 | "Too many open connections (%u), " |
2155 | 0 | "not accepting new conns in this process", |
2156 | 0 | apr_atomic_read32(&connection_count)); |
2157 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, |
2158 | 0 | "Idle workers: %u", |
2159 | 0 | ap_queue_info_num_idlers(worker_queue_info)); |
2160 | 0 | } |
2161 | 0 | else if (!listener_may_exit) { |
2162 | 0 | void *csd = NULL; |
2163 | 0 | ap_listen_rec *lr = (ap_listen_rec *) pt->baton; |
2164 | 0 | apr_pool_t *ptrans; /* Pool for per-transaction stuff */ |
2165 | 0 | ap_queue_info_pop_pool(worker_queue_info, &ptrans); |
2166 | |
2167 | 0 | if (ptrans == NULL) { |
2168 | | /* create a new transaction pool for each accepted socket */ |
2169 | 0 | apr_allocator_t *allocator = NULL; |
2170 | |
2171 | 0 | rc = apr_allocator_create(&allocator); |
2172 | 0 | if (rc == APR_SUCCESS) { |
2173 | 0 | apr_allocator_max_free_set(allocator, |
2174 | 0 | ap_max_mem_free); |
2175 | 0 | rc = apr_pool_create_ex(&ptrans, pconf, NULL, |
2176 | 0 | allocator); |
2177 | 0 | if (rc == APR_SUCCESS) { |
2178 | 0 | apr_pool_tag(ptrans, "transaction"); |
2179 | 0 | apr_allocator_owner_set(allocator, ptrans); |
2180 | 0 | } |
2181 | 0 | } |
2182 | 0 | if (rc != APR_SUCCESS) { |
2183 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rc, |
2184 | 0 | ap_server_conf, APLOGNO(03097) |
2185 | 0 | "Failed to create transaction pool"); |
2186 | 0 | if (allocator) { |
2187 | 0 | apr_allocator_destroy(allocator); |
2188 | 0 | } |
2189 | 0 | resource_shortage = 1; |
2190 | 0 | signal_threads(ST_GRACEFUL); |
2191 | 0 | continue; |
2192 | 0 | } |
2193 | 0 | } |
2194 | | |
2195 | 0 | get_worker(&have_idle_worker, 1, &workers_were_busy); |
2196 | 0 | rc = lr->accept_func(&csd, lr, ptrans); |
2197 | | |
2198 | | /* later we trash rc and rely on csd to indicate |
2199 | | * success/failure |
2200 | | */ |
2201 | 0 | AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd); |
2202 | |
2203 | 0 | if (rc == APR_EGENERAL) { |
2204 | | /* E[NM]FILE, ENOMEM, etc */ |
2205 | 0 | resource_shortage = 1; |
2206 | 0 | signal_threads(ST_GRACEFUL); |
2207 | 0 | } |
2208 | 0 | else if (ap_accept_error_is_nonfatal(rc)) { |
2209 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, rc, ap_server_conf, |
2210 | 0 | "accept() on client socket failed"); |
2211 | 0 | } |
2212 | |
2213 | 0 | if (csd != NULL) { |
2214 | 0 | conns_this_child--; |
2215 | 0 | if (push2worker(NULL, csd, ptrans) == APR_SUCCESS) { |
2216 | 0 | have_idle_worker = 0; |
2217 | 0 | } |
2218 | 0 | } |
2219 | 0 | else { |
2220 | 0 | ap_queue_info_push_pool(worker_queue_info, ptrans); |
2221 | 0 | } |
2222 | 0 | } |
2223 | 0 | } /* if:else on pt->type */ |
2224 | | #if HAVE_SERF |
2225 | | else if (pt->type == PT_SERF) { |
2226 | | /* send socket to serf. */ |
2227 | | /* XXXX: this doesn't require get_worker() */ |
2228 | | serf_event_trigger(g_serf, pt->baton, out_pfd); |
2229 | | } |
2230 | | |
2231 | | #endif |
2232 | 0 | else if (pt->type == PT_USER) { |
2233 | 0 | socket_callback_baton_t *baton = pt->baton; |
2234 | 0 | if (baton->cancel_event) { |
2235 | 0 | baton->cancel_event->canceled = 1; |
2236 | 0 | } |
2237 | | |
2238 | | /* We only signal once per N sockets with this baton, |
2239 | | * and after this loop to avoid any race/lifetime issue |
2240 | | * with the user callback being called while we handle |
2241 | | * the same baton multiple times here. |
2242 | | */ |
2243 | 0 | if (!baton->signaled) { |
2244 | 0 | baton->signaled = 1; |
2245 | 0 | baton->next = user_chain; |
2246 | 0 | user_chain = baton; |
2247 | 0 | } |
2248 | 0 | } |
2249 | 0 | } /* for processing poll */ |
2250 | | |
2251 | | /* Time to handle user callbacks chained above */ |
2252 | 0 | while (user_chain) { |
2253 | 0 | socket_callback_baton_t *baton = user_chain; |
2254 | 0 | user_chain = user_chain->next; |
2255 | 0 | baton->next = NULL; |
2256 | | |
2257 | | /* remove all sockets from the pollset */ |
2258 | 0 | apr_pool_cleanup_run(baton->pfds->pool, baton->pfds, |
2259 | 0 | event_cleanup_poll_callback); |
2260 | | |
2261 | | /* masquerade as a timer event that is firing */ |
2262 | 0 | te = event_get_timer_event(-1 /* fake timer */, |
2263 | 0 | baton->cbfunc, |
2264 | 0 | baton->user_baton, |
2265 | 0 | 0, /* don't insert it */ |
2266 | 0 | NULL /* no associated socket callback */); |
2267 | 0 | push_timer2worker(te); |
2268 | 0 | } |
2269 | | |
2270 | | /* We process the timeout queues here only when the global |
2271 | | * queues_next_expiry is passed. This happens accurately since |
2272 | | * adding to the queues (in workers) can only decrease this expiry, |
2273 | | * while latest ones are only taken into account here (in listener) |
2274 | | * during queues' processing, with the lock held. This works both |
2275 | | * with and without wake-ability. |
2276 | | */ |
2277 | 0 | expiry = queues_next_expiry; |
2278 | 0 | do_maintenance: |
2279 | 0 | if (expiry && expiry < (now = apr_time_now())) { |
2280 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, ap_server_conf, |
2281 | 0 | "queues maintenance with timeout=%" APR_TIME_T_FMT, |
2282 | 0 | expiry > 0 ? expiry - now : -1); |
2283 | 0 | apr_thread_mutex_lock(timeout_mutex); |
2284 | | |
2285 | | /* Steps below will recompute this. */ |
2286 | 0 | queues_next_expiry = 0; |
2287 | | |
2288 | | /* Step 1: keepalive timeouts */ |
2289 | 0 | if (workers_were_busy || dying) { |
2290 | 0 | process_keepalive_queue(0); /* kill'em all \m/ */ |
2291 | 0 | } |
2292 | 0 | else { |
2293 | 0 | process_keepalive_queue(now); |
2294 | 0 | } |
2295 | | /* Step 2: write completion timeouts */ |
2296 | 0 | process_timeout_queue(write_completion_q, now, |
2297 | 0 | defer_lingering_close); |
2298 | | /* Step 3: (normal) lingering close completion timeouts */ |
2299 | 0 | if (dying && linger_q->timeout > short_linger_q->timeout) { |
2300 | | /* Dying, force short timeout for normal lingering close */ |
2301 | 0 | linger_q->timeout = short_linger_q->timeout; |
2302 | 0 | } |
2303 | 0 | process_timeout_queue(linger_q, now, shutdown_connection); |
2304 | | /* Step 4: (short) lingering close completion timeouts */ |
2305 | 0 | process_timeout_queue(short_linger_q, now, shutdown_connection); |
2306 | |
2307 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
2308 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, ap_server_conf, |
2309 | 0 | "queues maintained with timeout=%" APR_TIME_T_FMT, |
2310 | 0 | queues_next_expiry > now ? queues_next_expiry - now |
2311 | 0 | : -1); |
2312 | |
2313 | 0 | ps->keep_alive = apr_atomic_read32(keepalive_q->total); |
2314 | 0 | ps->write_completion = apr_atomic_read32(write_completion_q->total); |
2315 | 0 | ps->connections = apr_atomic_read32(&connection_count); |
2316 | 0 | ps->suspended = apr_atomic_read32(&suspended_count); |
2317 | 0 | ps->lingering_close = apr_atomic_read32(&lingering_count); |
2318 | 0 | } |
2319 | 0 | else if ((workers_were_busy || dying) |
2320 | 0 | && apr_atomic_read32(keepalive_q->total)) { |
2321 | 0 | apr_thread_mutex_lock(timeout_mutex); |
2322 | 0 | process_keepalive_queue(0); /* kill'em all \m/ */ |
2323 | 0 | apr_thread_mutex_unlock(timeout_mutex); |
2324 | 0 | ps->keep_alive = 0; |
2325 | 0 | } |
2326 | | |
2327 | | /* If there are some lingering closes to defer (to a worker), schedule |
2328 | | * them now. We might wakeup a worker spuriously if another one empties |
2329 | | * defer_linger_chain in the meantime, but there also may be no active |
2330 | | * or all busy workers for an undefined time. In any case a deferred |
2331 | | * lingering close can't starve if we do that here since the chain is |
2332 | | * filled only above in the listener and it's emptied only in the |
2333 | | * worker(s); thus a NULL here means it will stay so while the listener |
2334 | | * waits (possibly indefinitely) in poll(). |
2335 | | */ |
2336 | 0 | if (defer_linger_chain) { |
2337 | 0 | get_worker(&have_idle_worker, 0, &workers_were_busy); |
2338 | 0 | if (have_idle_worker |
2339 | 0 | && defer_linger_chain /* re-test */ |
2340 | 0 | && push2worker(NULL, NULL, NULL) == APR_SUCCESS) { |
2341 | 0 | have_idle_worker = 0; |
2342 | 0 | } |
2343 | 0 | } |
2344 | |
2345 | 0 | if (!workers_were_busy && should_enable_listensocks()) { |
2346 | 0 | enable_listensocks(); |
2347 | 0 | } |
2348 | 0 | } /* listener main loop */ |
2349 | | |
2350 | 0 | ap_queue_term(worker_queue); |
2351 | |
2352 | 0 | apr_thread_exit(thd, APR_SUCCESS); |
2353 | 0 | return NULL; |
2354 | 0 | } |
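
One detail of the loop worth spelling out is the timeout rounding just before apr_pollset_poll(): the pollset backends count in whole milliseconds, so the microsecond deadline is rounded up with the usual (x + d - 1) / d ceiling idiom to guarantee the poll never returns before the intended time. A worked example in plain C:

    #include <stdio.h>

    static long usec_to_msec_ceil(long us)
    {
        return (us + 1000 - 1) / 1000;  /* 1 ms = 1000 us, rounded up */
    }

    int main(void)
    {
        printf("%ld\n", usec_to_msec_ceil(1));    /* 1 (not 0) */
        printf("%ld\n", usec_to_msec_ceil(1500)); /* 2 (not 1) */
        printf("%ld\n", usec_to_msec_ceil(2000)); /* 2 (exact) */
        return 0;
    }
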
2355 | | |
2356 | | /* |
2357 | | * During graceful shutdown, if there are more running worker threads than |
2358 | | * open connections, exit one worker thread. |
2359 | | * |
2360 | | * return 1 if thread should exit, 0 if it should continue running. |
2361 | | */ |
2362 | | static int worker_thread_should_exit_early(void) |
2363 | 0 | { |
2364 | 0 | for (;;) { |
2365 | 0 | apr_uint32_t conns = apr_atomic_read32(&connection_count); |
2366 | 0 | apr_uint32_t dead = apr_atomic_read32(&threads_shutdown); |
2367 | 0 | apr_uint32_t newdead; |
2368 | |
2369 | 0 | AP_DEBUG_ASSERT(dead <= threads_per_child); |
2370 | 0 | if (conns >= threads_per_child - dead) |
2371 | 0 | return 0; |
2372 | | |
2373 | 0 | newdead = dead + 1; |
2374 | 0 | if (apr_atomic_cas32(&threads_shutdown, newdead, dead) == dead) { |
2375 | | /* |
2376 | | * No other thread has exited in the mean time, safe to exit |
2377 | | * this one. |
2378 | | */ |
2379 | 0 | return 1; |
2380 | 0 | } |
2381 | 0 | } |
2382 | 0 | } |
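
The loop above is the standard lock-free reservation pattern: read the shared counter, then bump it with a compare-and-swap that only succeeds if no other thread changed it in between; on interference, re-read and try again. A self-contained C11 sketch of the same pattern (not the APR atomics API):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint threads_down;

    static int try_reserve_exit(unsigned limit)
    {
        unsigned dead = atomic_load(&threads_down);
        while (dead < limit) {
            /* succeeds iff threads_down still equals dead; on failure
             * dead is refreshed with the current value and we retry */
            if (atomic_compare_exchange_weak(&threads_down, &dead, dead + 1))
                return 1;  /* reservation won: this thread may exit */
        }
        return 0;          /* limit already reached (or raced to it) */
    }

    int main(void)
    {
        printf("%d %d %d\n", try_reserve_exit(2), try_reserve_exit(2),
               try_reserve_exit(2));  /* prints "1 1 0" */
        return 0;
    }
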
2383 | | |
2384 | | /* XXX For ungraceful termination/restart, we definitely don't want to |
2385 | | * wait for active connections to finish but we may want to wait |
2386 | | * for idle workers to get out of the queue code and release mutexes, |
2387 | | * since those mutexes are cleaned up pretty soon and some systems |
2388 | | * may not react favorably (i.e., segfault) if operations are attempted |
2389 | | * on cleaned-up mutexes. |
2390 | | */ |
2391 | | static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy) |
2392 | 0 | { |
2393 | 0 | proc_info *ti = dummy; |
2394 | 0 | int process_slot = ti->pslot; |
2395 | 0 | int thread_slot = ti->tslot; |
2396 | 0 | apr_status_t rv; |
2397 | 0 | int is_idle = 0; |
2398 | |
2399 | 0 | free(ti); |
2400 | |
2401 | 0 | ap_scoreboard_image->servers[process_slot][thread_slot].pid = ap_my_pid; |
2402 | 0 | ap_scoreboard_image->servers[process_slot][thread_slot].tid = apr_os_thread_current(); |
2403 | 0 | ap_scoreboard_image->servers[process_slot][thread_slot].generation = retained->mpm->my_generation; |
2404 | 0 | ap_update_child_status_from_indexes(process_slot, thread_slot, |
2405 | 0 | SERVER_STARTING, NULL); |
2406 | |
2407 | 0 | for (;;) { |
2408 | 0 | apr_socket_t *csd = NULL; |
2409 | 0 | event_conn_state_t *cs; |
2410 | 0 | timer_event_t *te = NULL; |
2411 | 0 | apr_pool_t *ptrans; /* Pool for per-transaction stuff */ |
2412 | |
2413 | 0 | if (!is_idle) { |
2414 | 0 | rv = ap_queue_info_set_idle(worker_queue_info, NULL); |
2415 | 0 | if (rv != APR_SUCCESS) { |
2416 | 0 | ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, |
2417 | 0 | APLOGNO(03270) |
2418 | 0 | "ap_queue_info_set_idle failed. Attempting to " |
2419 | 0 | "shutdown process gracefully."); |
2420 | 0 | signal_threads(ST_GRACEFUL); |
2421 | 0 | break; |
2422 | 0 | } |
2423 | | /* A new idler may have changed connections_above_limit(), |
2424 | | * let the listener know and decide. |
2425 | | */ |
2426 | 0 | if (listener_is_wakeable && should_enable_listensocks()) { |
2427 | 0 | apr_pollset_wakeup(event_pollset); |
2428 | 0 | } |
2429 | 0 | is_idle = 1; |
2430 | 0 | } |
2431 | | |
2432 | 0 | ap_update_child_status_from_indexes(process_slot, thread_slot, |
2433 | 0 | dying ? SERVER_GRACEFUL |
2434 | 0 | : SERVER_READY, NULL); |
2435 | 0 | worker_pop: |
2436 | 0 | if (workers_may_exit) { |
2437 | 0 | break; |
2438 | 0 | } |
2439 | 0 | if (dying && worker_thread_should_exit_early()) { |
2440 | 0 | break; |
2441 | 0 | } |
2442 | | |
2443 | 0 | rv = ap_queue_pop_something(worker_queue, &csd, (void **)&cs, |
2444 | 0 | &ptrans, &te); |
2445 | |
2446 | 0 | if (rv != APR_SUCCESS) { |
2447 | | /* We get APR_EOF during a graceful shutdown once all the |
2448 | | * connections accepted by this server process have been handled. |
2449 | | */ |
2450 | 0 | if (APR_STATUS_IS_EOF(rv)) { |
2451 | 0 | break; |
2452 | 0 | } |
2453 | | /* We get APR_EINTR whenever ap_queue_pop_*() has been interrupted |
2454 | | * from an explicit call to ap_queue_interrupt_all(). This allows |
2455 | | * us to unblock threads stuck in ap_queue_pop_*() when a shutdown |
2456 | | * is pending. |
2457 | | * |
2458 | | * If workers_may_exit is set and this is ungraceful termination/ |
2459 | | * restart, we are bound to get an error on some systems (e.g., |
2460 | | * AIX, which sanity-checks mutex operations) since the queue |
2461 | | * may have already been cleaned up. Don't log the "error" if |
2462 | | * workers_may_exit is set. |
2463 | | */ |
2464 | 0 | else if (APR_STATUS_IS_EINTR(rv)) { |
2465 | 0 | goto worker_pop; |
2466 | 0 | } |
2467 | | /* We got some other error. */ |
2468 | 0 | else if (!workers_may_exit) { |
2469 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, |
2470 | 0 | APLOGNO(03099) "ap_queue_pop_socket failed"); |
2471 | 0 | } |
2472 | 0 | continue; |
2473 | 0 | } |
2474 | 0 | if (te != NULL) { |
2475 | 0 | te->cbfunc(te->baton); |
2476 | 0 | { |
2477 | 0 | apr_thread_mutex_lock(g_timer_skiplist_mtx); |
2478 | 0 | APR_RING_INSERT_TAIL(&timer_free_ring.link, te, timer_event_t, link); |
2479 | 0 | apr_thread_mutex_unlock(g_timer_skiplist_mtx); |
2480 | 0 | } |
2481 | 0 | } |
2482 | 0 | else { |
2483 | 0 | is_idle = 0; |
2484 | 0 | if (csd != NULL) { |
2485 | 0 | worker_sockets[thread_slot] = csd; |
2486 | 0 | process_socket(thd, ptrans, csd, cs, process_slot, thread_slot); |
2487 | 0 | worker_sockets[thread_slot] = NULL; |
2488 | 0 | } |
2489 | 0 | } |
2490 | | |
2491 | | /* If there are deferred lingering closes, handle them now. */ |
2492 | 0 | while (!workers_may_exit) { |
2493 | 0 | cs = defer_linger_chain; |
2494 | 0 | if (!cs) { |
2495 | 0 | break; |
2496 | 0 | } |
2497 | 0 | if (apr_atomic_casptr((void *)&defer_linger_chain, cs->chain, |
2498 | 0 | cs) != cs) { |
2499 | | /* Race lost, try again */ |
2500 | 0 | continue; |
2501 | 0 | } |
2502 | 0 | cs->chain = NULL; |
2503 | 0 | AP_DEBUG_ASSERT(cs->pub.state == CONN_STATE_LINGER); |
2504 | |
2505 | 0 | worker_sockets[thread_slot] = csd = cs->pfd.desc.s; |
2506 | 0 | process_socket(thd, cs->p, csd, cs, process_slot, thread_slot); |
2507 | 0 | worker_sockets[thread_slot] = NULL; |
2508 | 0 | } |
2509 | 0 | } |
2510 | | |
2511 | 0 | ap_update_child_status_from_indexes(process_slot, thread_slot, |
2512 | 0 | dying ? SERVER_DEAD |
2513 | 0 | : SERVER_GRACEFUL, NULL); |
2514 | |
2515 | 0 | apr_thread_exit(thd, APR_SUCCESS); |
2516 | 0 | return NULL; |
2517 | 0 | } |
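
The defer_linger_chain handling near the end of the loop is a lock-free stack pop: apr_atomic_casptr() swings the shared head from the node just read to its successor, and the worker simply retries when another thread wins the race. A C11 sketch of that pop (hypothetical types; ABA hazards are ignored, much as the real chain relies on being filled by the listener and drained by workers):

    #include <stdatomic.h>
    #include <stddef.h>

    struct conn { struct conn *chain; };
    static _Atomic(struct conn *) defer_head;

    static struct conn *pop_deferred(void)
    {
        struct conn *cs = atomic_load(&defer_head);
        while (cs &&
               !atomic_compare_exchange_weak(&defer_head, &cs, cs->chain)) {
            /* lost the race: cs now holds the new head, try again */
        }
        if (cs)
            cs->chain = NULL;  /* detached, owned by this thread now */
        return cs;
    }

    int main(void)
    {
        struct conn a = { NULL }, b = { &a };
        atomic_store(&defer_head, &b);
        return (pop_deferred() == &b && pop_deferred() == &a &&
                pop_deferred() == NULL) ? 0 : 1;
    }
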
2518 | | |
2519 | | static int check_signal(int signum) |
2520 | 0 | { |
2521 | 0 | switch (signum) { |
2522 | 0 | case SIGTERM: |
2523 | 0 | case SIGINT: |
2524 | 0 | return 1; |
2525 | 0 | } |
2526 | 0 | return 0; |
2527 | 0 | } |
2528 | | |
2529 | | static void create_listener_thread(thread_starter * ts) |
2530 | 0 | { |
2531 | 0 | int my_child_num = ts->child_num_arg; |
2532 | 0 | apr_threadattr_t *thread_attr = ts->threadattr; |
2533 | 0 | proc_info *my_info; |
2534 | 0 | apr_status_t rv; |
2535 | |
2536 | 0 | my_info = (proc_info *) ap_malloc(sizeof(proc_info)); |
2537 | 0 | my_info->pslot = my_child_num; |
2538 | 0 | my_info->tslot = -1; /* listener thread doesn't have a thread slot */ |
2539 | 0 | rv = ap_thread_create(&ts->listener, thread_attr, listener_thread, |
2540 | 0 | my_info, pruntime); |
2541 | 0 | if (rv != APR_SUCCESS) { |
2542 | 0 | ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00474) |
2543 | 0 | "ap_thread_create: unable to create listener thread"); |
2544 | | /* let the parent decide how bad this really is */ |
2545 | 0 | clean_child_exit(APEXIT_CHILDSICK); |
2546 | 0 | } |
2547 | 0 | apr_os_thread_get(&listener_os_thread, ts->listener); |
2548 | 0 | } |
2549 | | |
2550 | | static void setup_threads_runtime(void) |
2551 | 0 | { |
2552 | 0 | apr_status_t rv; |
2553 | 0 | ap_listen_rec *lr; |
2554 | 0 | apr_pool_t *pskip = NULL; |
2555 | 0 | int max_recycled_pools = -1, i; |
2556 | 0 | const int good_methods[] = { APR_POLLSET_KQUEUE, |
2557 | 0 | APR_POLLSET_PORT, |
2558 | 0 | APR_POLLSET_EPOLL }; |
2559 | | /* XXX: K-A or lingering close connection included in the async factor */ |
2560 | 0 | const apr_uint32_t async_factor = worker_factor / WORKER_FACTOR_SCALE; |
2561 | 0 | const apr_uint32_t pollset_size = (apr_uint32_t)num_listensocks + |
2562 | 0 | (apr_uint32_t)threads_per_child * |
2563 | 0 | (async_factor > 2 ? async_factor : 2); |
2564 | 0 | int pollset_flags; |
2565 | | |
2566 | | /* Event's skiplist operations will happen concurrently with other modules' |
2567 | | * runtime so they need their own pool for allocations, and its lifetime |
2568 | | * should be at least the one of the connections (ptrans). Thus pskip is |
2569 | | * created as a subpool of pconf like/before ptrans (before so that it's |
2570 | | * destroyed after). In forked mode pconf is never destroyed so we are good |
2571 | | * anyway, but in ONE_PROCESS mode this ensures that the skiplist works |
2572 | | * from connection/ptrans cleanups (even after pchild is destroyed). |
2573 | | */ |
2574 | 0 | apr_pool_create(&pskip, pconf); |
2575 | 0 | apr_pool_tag(pskip, "mpm_skiplist"); |
2576 | 0 | apr_thread_mutex_create(&g_timer_skiplist_mtx, APR_THREAD_MUTEX_DEFAULT, pskip); |
2577 | 0 | APR_RING_INIT(&timer_free_ring.link, timer_event_t, link); |
2578 | 0 | apr_skiplist_init(&timer_skiplist, pskip); |
2579 | 0 | apr_skiplist_set_compare(timer_skiplist, timer_comp, timer_comp); |
2580 | | |
2581 | | /* All threads (listener, workers) and synchronization objects (queues, |
2582 | | * pollset, mutexes...) created here should have at least the lifetime of |
2583 | | * the connections they handle (i.e. ptrans). We can't use this thread's |
2584 | | * self pool because all these objects survive it, nor use pchild or pconf |
2585 | | * directly because this starter thread races with other modules' runtime, |
2586 | | * nor finally pchild (or subpool thereof) because it is killed explicitly |
2587 | | * before pconf (thus connections/ptrans can live longer, which matters in |
2588 | | * ONE_PROCESS mode). So this leaves us with a subpool of pconf, created |
2589 | | * before any ptrans hence destroyed after. |
2590 | | */ |
2591 | 0 | apr_pool_create(&pruntime, pconf); |
2592 | 0 | apr_pool_tag(pruntime, "mpm_runtime"); |
2593 | | |
2594 | | /* We must create the fd queues before we start up the listener |
2595 | | * and worker threads. */ |
2596 | 0 | rv = ap_queue_create(&worker_queue, threads_per_child, pruntime); |
2597 | 0 | if (rv != APR_SUCCESS) { |
2598 | 0 | ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03100) |
2599 | 0 | "ap_queue_create() failed"); |
2600 | 0 | clean_child_exit(APEXIT_CHILDFATAL); |
2601 | 0 | } |
2602 | | |
2603 | 0 | if (ap_max_mem_free != APR_ALLOCATOR_MAX_FREE_UNLIMITED) { |
2604 | | /* If we want to conserve memory, let's not keep an unlimited number of |
2605 | | * pools & allocators. |
2606 | | * XXX: This should probably be a separate config directive |
2607 | | */ |
2608 | 0 | max_recycled_pools = threads_per_child * 3 / 4 ; |
2609 | 0 | } |
2610 | 0 | rv = ap_queue_info_create(&worker_queue_info, pruntime, |
2611 | 0 | threads_per_child, max_recycled_pools); |
2612 | 0 | if (rv != APR_SUCCESS) { |
2613 | 0 | ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03101) |
2614 | 0 | "ap_queue_info_create() failed"); |
2615 | 0 | clean_child_exit(APEXIT_CHILDFATAL); |
2616 | 0 | } |
2617 | | |
2618 | | /* Create the timeout mutex and main pollset before the listener |
2619 | | * thread starts. |
2620 | | */ |
2621 | 0 | rv = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_DEFAULT, |
2622 | 0 | pruntime); |
2623 | 0 | if (rv != APR_SUCCESS) { |
2624 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03102) |
2625 | 0 | "creation of the timeout mutex failed."); |
2626 | 0 | clean_child_exit(APEXIT_CHILDFATAL); |
2627 | 0 | } |
2628 | | |
2629 | | /* Create the main pollset */ |
2630 | 0 | pollset_flags = APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY | |
2631 | 0 | APR_POLLSET_NODEFAULT | APR_POLLSET_WAKEABLE; |
2632 | 0 | for (i = 0; i < sizeof(good_methods) / sizeof(good_methods[0]); i++) { |
2633 | 0 | rv = apr_pollset_create_ex(&event_pollset, pollset_size, pruntime, |
2634 | 0 | pollset_flags, good_methods[i]); |
2635 | 0 | if (rv == APR_SUCCESS) { |
2636 | 0 | listener_is_wakeable = 1; |
2637 | 0 | break; |
2638 | 0 | } |
2639 | 0 | } |
2640 | 0 | if (rv != APR_SUCCESS) { |
2641 | 0 | pollset_flags &= ~APR_POLLSET_WAKEABLE; |
2642 | 0 | for (i = 0; i < sizeof(good_methods) / sizeof(good_methods[0]); i++) { |
2643 | 0 | rv = apr_pollset_create_ex(&event_pollset, pollset_size, pruntime, |
2644 | 0 | pollset_flags, good_methods[i]); |
2645 | 0 | if (rv == APR_SUCCESS) { |
2646 | 0 | break; |
2647 | 0 | } |
2648 | 0 | } |
2649 | 0 | } |
2650 | 0 | if (rv != APR_SUCCESS) { |
2651 | 0 | pollset_flags &= ~APR_POLLSET_NODEFAULT; |
2652 | 0 | rv = apr_pollset_create(&event_pollset, pollset_size, pruntime, |
2653 | 0 | pollset_flags); |
2654 | 0 | } |
2655 | 0 | if (rv != APR_SUCCESS) { |
2656 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03103) |
2657 | 0 | "apr_pollset_create with Thread Safety failed."); |
2658 | 0 | clean_child_exit(APEXIT_CHILDFATAL); |
2659 | 0 | } |
2660 | | |
2661 | | /* Add listeners to the main pollset */ |
2662 | 0 | listener_pollfd = apr_pcalloc(pruntime, num_listensocks * |
2663 | 0 | sizeof(apr_pollfd_t)); |
2664 | 0 | for (i = 0, lr = my_bucket->listeners; lr; lr = lr->next, i++) { |
2665 | 0 | apr_pollfd_t *pfd; |
2666 | 0 | listener_poll_type *pt; |
2667 | |
2668 | 0 | AP_DEBUG_ASSERT(i < num_listensocks); |
2669 | 0 | pfd = &listener_pollfd[i]; |
2670 | |
2671 | 0 | pfd->reqevents = APR_POLLIN | APR_POLLHUP | APR_POLLERR; |
2672 | 0 | #ifdef APR_POLLEXCL |
2673 | | /* If APR_POLLEXCL is available, use it to prevent the thundering |
2674 | | * herd issue. The listening sockets are potentially polled by all |
2675 | | * the children at the same time; when new connections arrive, this |
2676 | | * avoids waking them all up when most would only get EAGAIN |
2677 | | * on accept(). |
2678 | | */ |
2679 | 0 | pfd->reqevents |= APR_POLLEXCL; |
2680 | 0 | #endif |
2681 | 0 | pfd->desc_type = APR_POLL_SOCKET; |
2682 | 0 | pfd->desc.s = lr->sd; |
2683 | |
2684 | 0 | pt = apr_pcalloc(pruntime, sizeof(*pt)); |
2685 | 0 | pfd->client_data = pt; |
2686 | 0 | pt->type = PT_ACCEPT; |
2687 | 0 | pt->baton = lr; |
2688 | |
2689 | 0 | apr_socket_opt_set(pfd->desc.s, APR_SO_NONBLOCK, 1); |
2690 | 0 | apr_pollset_add(event_pollset, pfd); |
2691 | |
2692 | 0 | lr->accept_func = ap_unixd_accept; |
2693 | 0 | } |
2694 | |
2695 | 0 | worker_sockets = apr_pcalloc(pruntime, threads_per_child * |
2696 | 0 | sizeof(apr_socket_t *)); |
2697 | 0 | } |
2698 | | |
2699 | | /* XXX under some circumstances not understood, children can get stuck |
2700 | | * in start_threads forever trying to take over slots which will |
2701 | | * never be cleaned up; for now there is an APLOG_DEBUG message issued |
2702 | | * every so often when this condition occurs |
2703 | | */ |
2704 | | static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy) |
2705 | 0 | { |
2706 | 0 | thread_starter *ts = dummy; |
2707 | 0 | apr_thread_t **threads = ts->threads; |
2708 | 0 | apr_threadattr_t *thread_attr = ts->threadattr; |
2709 | 0 | int my_child_num = ts->child_num_arg; |
2710 | 0 | proc_info *my_info; |
2711 | 0 | apr_status_t rv; |
2712 | 0 | int threads_created = 0; |
2713 | 0 | int listener_started = 0; |
2714 | 0 | int prev_threads_created; |
2715 | 0 | int loops, i; |
2716 | |
|
2717 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02471) |
2718 | 0 | "start_threads: Using %s (%swakeable)", |
2719 | 0 | apr_pollset_method_name(event_pollset), |
2720 | 0 | listener_is_wakeable ? "" : "not "); |
2721 | |
2722 | 0 | loops = prev_threads_created = 0; |
2723 | 0 | while (1) { |
2724 | | /* threads_per_child does not include the listener thread */ |
2725 | 0 | for (i = 0; i < threads_per_child; i++) { |
2726 | 0 | int status = |
2727 | 0 | ap_scoreboard_image->servers[my_child_num][i].status; |
2728 | |
|
2729 | 0 | if (status != SERVER_DEAD) { |
2730 | 0 | continue; |
2731 | 0 | } |
2732 | | |
2733 | 0 | my_info = (proc_info *) ap_malloc(sizeof(proc_info)); |
2734 | 0 | my_info->pslot = my_child_num; |
2735 | 0 | my_info->tslot = i; |
2736 | | |
2737 | | /* We are creating threads right now */ |
2738 | 0 | ap_update_child_status_from_indexes(my_child_num, i, |
2739 | 0 | SERVER_STARTING, NULL); |
2740 | | /* We let each thread update its own scoreboard entry. This is |
2741 | | * done because it lets us deal with tid better. |
2742 | | */ |
2743 | 0 | rv = ap_thread_create(&threads[i], thread_attr, |
2744 | 0 | worker_thread, my_info, pruntime); |
2745 | 0 | if (rv != APR_SUCCESS) { |
2746 | 0 | ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, |
2747 | 0 | APLOGNO(03104) |
2748 | 0 | "ap_thread_create: unable to create worker thread"); |
2749 | | /* let the parent decide how bad this really is */ |
2750 | 0 | clean_child_exit(APEXIT_CHILDSICK); |
2751 | 0 | } |
2752 | 0 | threads_created++; |
2753 | 0 | } |
2754 | | |
2755 | | /* Start the listener only when there are workers available */ |
2756 | 0 | if (!listener_started && threads_created) { |
2757 | 0 | create_listener_thread(ts); |
2758 | 0 | listener_started = 1; |
2759 | 0 | } |
2760 | | |
2761 | |
2762 | 0 | if (start_thread_may_exit || threads_created == threads_per_child) { |
2763 | 0 | break; |
2764 | 0 | } |
2765 | | /* wait for previous generation to clean up an entry */ |
2766 | 0 | apr_sleep(apr_time_from_sec(1)); |
2767 | 0 | ++loops; |
2768 | 0 | if (loops % 120 == 0) { /* every couple of minutes */ |
2769 | 0 | if (prev_threads_created == threads_created) { |
2770 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, |
2771 | 0 | APLOGNO(03271) |
2772 | 0 | "child %" APR_PID_T_FMT " isn't taking over " |
2773 | 0 | "slots very quickly (%d of %d)", |
2774 | 0 | ap_my_pid, threads_created, |
2775 | 0 | threads_per_child); |
2776 | 0 | } |
2777 | 0 | prev_threads_created = threads_created; |
2778 | 0 | } |
2779 | 0 | } |
2780 | | |
2781 | | /* What state should this child_main process be listed as in the |
2782 | | * scoreboard...? |
2783 | | * ap_update_child_status_from_indexes(my_child_num, i, SERVER_STARTING, |
2784 | | * (request_rec *) NULL); |
2785 | | * |
2786 | | * This state should be listed separately in the scoreboard, in some kind |
2787 | | * of process_status, not mixed in with the worker threads' status. |
2788 | | * "life_status" is almost right, but it's in the worker's structure, and |
2789 | | * the name could be clearer. gla |
2790 | | */ |
2791 | 0 | apr_thread_exit(thd, APR_SUCCESS); |
2792 | 0 | return NULL; |
2793 | 0 | } |
2794 | | |
2795 | | static void join_workers(apr_thread_t * listener, apr_thread_t ** threads) |
2796 | 0 | { |
2797 | 0 | int i; |
2798 | 0 | apr_status_t rv, thread_rv; |
2799 | |
2800 | 0 | if (listener) { |
2801 | 0 | int iter; |
2802 | | |
2803 | | /* deal with a rare timing window which affects waking up the |
2804 | | * listener thread... if the signal sent to the listener thread |
2805 | | * is delivered between the time it verifies that the |
2806 | | * listener_may_exit flag is clear and the time it enters a |
2807 | | * blocking syscall, the signal didn't do any good... work around |
2808 | | * that by sleeping briefly and sending it again |
2809 | | */ |
2810 | |
2811 | 0 | iter = 0; |
2812 | 0 | while (!dying) { |
2813 | 0 | apr_sleep(apr_time_from_msec(500)); |
2814 | 0 | if (dying || ++iter > 10) { |
2815 | 0 | break; |
2816 | 0 | } |
2817 | | /* listener has not stopped accepting yet */ |
2818 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, |
2819 | 0 | "listener has not stopped accepting yet (%d iter)", iter); |
2820 | 0 | wakeup_listener(); |
2821 | 0 | } |
2822 | 0 | if (iter > 10) { |
2823 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00475) |
2824 | 0 | "the listener thread didn't stop accepting"); |
2825 | 0 | } |
2826 | 0 | else { |
2827 | 0 | rv = apr_thread_join(&thread_rv, listener); |
2828 | 0 | if (rv != APR_SUCCESS) { |
2829 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00476) |
2830 | 0 | "apr_thread_join: unable to join listener thread"); |
2831 | 0 | } |
2832 | 0 | } |
2833 | 0 | } |
2834 | |
2835 | 0 | for (i = 0; i < threads_per_child; i++) { |
2836 | 0 | if (threads[i]) { /* if we ever created this thread */ |
2837 | 0 | rv = apr_thread_join(&thread_rv, threads[i]); |
2838 | 0 | if (rv != APR_SUCCESS) { |
2839 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00477) |
2840 | 0 | "apr_thread_join: unable to join worker " |
2841 | 0 | "thread %d", i); |
2842 | 0 | } |
2843 | 0 | } |
2844 | 0 | } |
2845 | 0 | } |
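/* [Editorial sketch] The listener join above works around a lost-wakeup
 * window: a wakeup delivered after the listener checks its exit flag but
 * before it blocks in the pollset is simply lost, so the waker re-sends
 * every 500ms until it observes the listener dying (or gives up after
 * ~5s). The idiom in isolation; is_dying() and wake() are hypothetical
 * stand-ins for the dying flag and wakeup_listener():
 */
static int is_dying(void);
static void wake(void);

static int wake_until_dying(void)
{
    int iter = 0;
    while (!is_dying()) {
        apr_sleep(apr_time_from_msec(500)); /* give the last wakeup time to land */
        if (is_dying() || ++iter > 10) {
            break;
        }
        wake();                             /* the earlier wakeup may have been lost */
    }
    return iter <= 10;                      /* 0 = gave up waiting */
}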
2846 | | |
2847 | | static void join_start_thread(apr_thread_t * start_thread_id) |
2848 | 0 | { |
2849 | 0 | apr_status_t rv, thread_rv; |
2850 | |
2851 | 0 | start_thread_may_exit = 1; /* tell it to give up in case it is still |
2852 | | * trying to take over slots from a |
2853 | | * previous generation |
2854 | | */ |
2855 | 0 | rv = apr_thread_join(&thread_rv, start_thread_id); |
2856 | 0 | if (rv != APR_SUCCESS) { |
2857 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00478) |
2858 | 0 | "apr_thread_join: unable to join the start thread"); |
2859 | 0 | } |
2860 | 0 | } |
2861 | | |
2862 | | static void child_main(int child_num_arg, int child_bucket) |
2863 | 0 | { |
2864 | 0 | apr_thread_t **threads; |
2865 | 0 | apr_status_t rv; |
2866 | 0 | thread_starter *ts; |
2867 | 0 | apr_threadattr_t *thread_attr; |
2868 | 0 | apr_thread_t *start_thread_id; |
2869 | 0 | int i; |
2870 | | |
2871 | | /* for benefit of any hooks that run as this child initializes */ |
2872 | 0 | retained->mpm->mpm_state = AP_MPMQ_STARTING; |
2873 | |
2874 | 0 | ap_my_pid = getpid(); |
2875 | 0 | ap_child_slot = child_num_arg; |
2876 | 0 | ap_fatal_signal_child_setup(ap_server_conf); |
2877 | | |
2878 | | /* Get a sub context for global allocations in this child, so that |
2879 | | * we can have cleanups occur when the child exits. |
2880 | | */ |
2881 | 0 | apr_pool_create(&pchild, pconf); |
2882 | 0 | apr_pool_tag(pchild, "pchild"); |
2883 | |
2884 | 0 | #if AP_HAS_THREAD_LOCAL |
2885 | 0 | if (!one_process) { |
2886 | 0 | apr_thread_t *thd = NULL; |
2887 | 0 | if ((rv = ap_thread_main_create(&thd, pchild))) { |
2888 | 0 | ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(10377) |
2889 | 0 | "Couldn't initialize child main thread"); |
2890 | 0 | clean_child_exit(APEXIT_CHILDFATAL); |
2891 | 0 | } |
2892 | 0 | } |
2893 | 0 | #endif |
2894 | | |
2895 | | /* close unused listeners and pods */ |
2896 | 0 | for (i = 0; i < retained->mpm->num_buckets; i++) { |
2897 | 0 | if (i != child_bucket) { |
2898 | 0 | ap_close_listeners_ex(retained->buckets[i].listeners); |
2899 | 0 | ap_mpm_podx_close(retained->buckets[i].pod); |
2900 | 0 | } |
2901 | 0 | } |
2902 | | |
2903 | | /* stuff to do before we switch ids, while we still have permissions */ |
2904 | 0 | ap_reopen_scoreboard(pchild, NULL, 0); |
2905 | | |
2906 | | /* done with init critical section */ |
2907 | 0 | if (ap_run_drop_privileges(pchild, ap_server_conf)) { |
2908 | 0 | clean_child_exit(APEXIT_CHILDFATAL); |
2909 | 0 | } |
2910 | | |
2911 | | /* Just use the standard apr_setup_signal_thread to block all signals |
2912 | | * from being received. The child processes no longer use signals for |
2913 | | * any communication with the parent process. Let's also do this before |
2914 | | * child_init() hooks are called and possibly create threads that |
2915 | | * otherwise could "steal" (implicitly) MPM's signals. |
2916 | | */ |
2917 | 0 | rv = apr_setup_signal_thread(); |
2918 | 0 | if (rv != APR_SUCCESS) { |
2919 | 0 | ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(00479) |
2920 | 0 | "Couldn't initialize signal thread"); |
2921 | 0 | clean_child_exit(APEXIT_CHILDFATAL); |
2922 | 0 | } |
2923 | | |
2924 | | /* For rand() users (e.g. skiplist). */ |
2925 | 0 | srand((unsigned int)apr_time_now()); |
2926 | |
2927 | 0 | ap_run_child_init(pchild, ap_server_conf); |
2928 | |
2929 | 0 | if (ap_max_requests_per_child) { |
2930 | 0 | conns_this_child = ap_max_requests_per_child; |
2931 | 0 | } |
2932 | 0 | else { |
2933 | | /* a configured value of zero means unlimited */ |
2934 | 0 | conns_this_child = APR_INT32_MAX; |
2935 | 0 | } |
2936 | | |
2937 | | /* Setup threads */ |
2938 | | |
2939 | | /* Globals used by signal_threads(), so they must be initialized first */ |
2940 | 0 | setup_threads_runtime(); |
2941 | | |
2942 | | /* clear the storage; we may not create all our threads immediately, |
2943 | | * and we want a 0 entry to indicate a thread which was not created |
2944 | | */ |
2945 | 0 | threads = ap_calloc(threads_per_child, sizeof(apr_thread_t *)); |
2946 | 0 | ts = apr_palloc(pchild, sizeof(*ts)); |
2947 | |
2948 | 0 | apr_threadattr_create(&thread_attr, pchild); |
2949 | | /* 0 means PTHREAD_CREATE_JOINABLE */ |
2950 | 0 | apr_threadattr_detach_set(thread_attr, 0); |
2951 | |
2952 | 0 | if (ap_thread_stacksize != 0) { |
2953 | 0 | rv = apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize); |
2954 | 0 | if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) { |
2955 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(02436) |
2956 | 0 | "WARNING: ThreadStackSize of %" APR_SIZE_T_FMT " is " |
2957 | 0 | "inappropriate, using default", |
2958 | 0 | ap_thread_stacksize); |
2959 | 0 | } |
2960 | 0 | } |
2961 | |
2962 | 0 | ts->threads = threads; |
2963 | 0 | ts->listener = NULL; |
2964 | 0 | ts->child_num_arg = child_num_arg; |
2965 | 0 | ts->threadattr = thread_attr; |
2966 | |
2967 | 0 | rv = ap_thread_create(&start_thread_id, thread_attr, start_threads, |
2968 | 0 | ts, pchild); |
2969 | 0 | if (rv != APR_SUCCESS) { |
2970 | 0 | ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00480) |
2971 | 0 | "ap_thread_create: unable to create worker thread"); |
2972 | | /* let the parent decide how bad this really is */ |
2973 | 0 | clean_child_exit(APEXIT_CHILDSICK); |
2974 | 0 | } |
2975 | | |
2976 | 0 | retained->mpm->mpm_state = AP_MPMQ_RUNNING; |
2977 | | |
2978 | | /* If we are only running in one_process mode, we will want to |
2979 | | * still handle signals. */ |
2980 | 0 | if (one_process) { |
2981 | | /* Block until we get a terminating signal. */ |
2982 | 0 | apr_signal_thread(check_signal); |
2983 | | /* make sure the start thread has finished; signal_threads() |
2984 | | * and join_workers() depend on that |
2985 | | */ |
2986 | | /* XXX join_start_thread() won't be awakened if one of our |
2987 | | * threads encounters a critical error and attempts to |
2988 | | * shutdown this child |
2989 | | */ |
2990 | 0 | join_start_thread(start_thread_id); |
2991 | | |
2992 | | /* helps us terminate a little more quickly than the dispatch of the |
2993 | | * signal thread; beats the Pipe of Death and the browsers |
2994 | | */ |
2995 | 0 | signal_threads(ST_UNGRACEFUL); |
2996 | | |
2997 | | /* A terminating signal was received. Now join each of the |
2998 | | * workers to clean them up. |
2999 | | * If a worker has already exited, the join frees |
3000 | | * its resources and returns immediately. |
3001 | | * If a worker hasn't exited, this blocks until |
3002 | | * it has (then cleans up). |
3003 | | */ |
3004 | 0 | join_workers(ts->listener, threads); |
3005 | 0 | } |
3006 | 0 | else { /* !one_process */ |
3007 | | /* remove SIGTERM from the set of blocked signals... if one of |
3008 | | * the other threads in the process needs to take us down |
3009 | | * (e.g., for MaxConnectionsPerChild) it will send us SIGTERM |
3010 | | */ |
3011 | 0 | apr_signal(SIGTERM, dummy_signal_handler); |
3012 | 0 | unblock_signal(SIGTERM); |
3013 | | /* Watch for any messages from the parent over the POD */ |
3014 | 0 | while (1) { |
3015 | 0 | rv = ap_mpm_podx_check(my_bucket->pod); |
3016 | 0 | if (rv == AP_MPM_PODX_NORESTART) { |
3017 | | /* see if termination was triggered while we slept */ |
3018 | 0 | switch (terminate_mode) { |
3019 | 0 | case ST_GRACEFUL: |
3020 | 0 | rv = AP_MPM_PODX_GRACEFUL; |
3021 | 0 | break; |
3022 | 0 | case ST_UNGRACEFUL: |
3023 | 0 | rv = AP_MPM_PODX_RESTART; |
3024 | 0 | break; |
3025 | 0 | } |
3026 | 0 | } |
3027 | 0 | if (rv == AP_MPM_PODX_GRACEFUL || rv == AP_MPM_PODX_RESTART) { |
3028 | | /* make sure the start thread has finished; |
3029 | | * signal_threads() and join_workers depend on that |
3030 | | */ |
3031 | 0 | join_start_thread(start_thread_id); |
3032 | 0 | signal_threads(rv == AP_MPM_PODX_GRACEFUL |
3033 | 0 | ? ST_GRACEFUL : ST_UNGRACEFUL); |
3034 | 0 | break; |
3035 | 0 | } |
3036 | 0 | } |
3037 | | |
3038 | | /* A terminating signal was received. Now join each of the |
3039 | | * workers to clean them up. |
3040 | | * If a worker has already exited, the join frees |
3041 | | * its resources and returns immediately. |
3042 | | * If a worker hasn't exited, this blocks until |
3043 | | * it has (then cleans up). |
3044 | | */ |
3045 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, |
3046 | 0 | "%s termination received, joining workers", |
3047 | 0 | rv == AP_MPM_PODX_GRACEFUL ? "graceful" : "ungraceful"); |
3048 | 0 | join_workers(ts->listener, threads); |
3049 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, |
3050 | 0 | "%s termination, workers joined, exiting", |
3051 | 0 | rv == AP_MPM_PODX_GRACEFUL ? "graceful" : "ungraceful"); |
3052 | 0 | } |
3053 | | |
3054 | 0 | free(threads); |
3055 | |
3056 | 0 | clean_child_exit(resource_shortage ? APEXIT_CHILDSICK : 0); |
3057 | 0 | } |
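/* [Editorial sketch] The !one_process branch of child_main() is a "pipe of
 * death" consumer: the child blocks reading a pipe the parent writes to,
 * and the byte received selects graceful vs. immediate termination. A
 * minimal POSIX model of that protocol (a plain pipe; the real code uses
 * the httpd ap_mpm_podx_* API):
 */
#include <unistd.h>

enum pod_msg { POD_GRACEFUL = 'G', POD_RESTART = 'R' };

static int pod_wait(int pod_read_fd)
{
    char c;
    /* blocks until the parent writes a termination byte */
    if (read(pod_read_fd, &c, 1) == 1 && c == POD_GRACEFUL) {
        return 0; /* finish in-flight requests, then exit */
    }
    return 1;     /* exit as soon as possible */
}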
3058 | | |
3059 | | static int make_child(server_rec * s, int slot, int bucket) |
3060 | 0 | { |
3061 | 0 | int pid; |
3062 | |
3063 | 0 | if (slot + 1 > retained->max_daemon_used) { |
3064 | 0 | retained->max_daemon_used = slot + 1; |
3065 | 0 | } |
3066 | |
3067 | 0 | if (ap_scoreboard_image->parent[slot].pid != 0) { |
3068 | | /* XXX replace with assert or remove ? */ |
3069 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(03455) |
3070 | 0 | "BUG: Scoreboard slot %d should be empty but is " |
3071 | 0 | "in use by pid %" APR_PID_T_FMT, |
3072 | 0 | slot, ap_scoreboard_image->parent[slot].pid); |
3073 | 0 | return -1; |
3074 | 0 | } |
3075 | | |
3076 | 0 | if (one_process) { |
3077 | 0 | my_bucket = &retained->buckets[0]; |
3078 | |
3079 | 0 | event_note_child_started(slot, getpid()); |
3080 | 0 | child_main(slot, 0); |
3081 | | /* NOTREACHED */ |
3082 | 0 | ap_assert(0); |
3083 | 0 | return -1; |
3084 | 0 | } |
3085 | | |
3086 | 0 | if ((pid = fork()) == -1) { |
3087 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, APLOGNO(00481) |
3088 | 0 | "fork: Unable to fork new process"); |
3089 | | |
3090 | | /* fork didn't succeed. There's no need to touch the scoreboard; |
3091 | | * if we were trying to replace a failed child process, then |
3092 | | * server_main_loop() marked its workers SERVER_DEAD, and if |
3093 | | * we were trying to replace a child process that exited normally, |
3094 | | * its worker_thread()s left SERVER_DEAD or SERVER_GRACEFUL behind. |
3095 | | */ |
3096 | | |
3097 | | /* In case system resources are maxed out, we don't want |
3098 | | Apache running away with the CPU trying to fork over and |
3099 | | over and over again. */ |
3100 | 0 | apr_sleep(apr_time_from_sec(10)); |
3101 | |
3102 | 0 | return -1; |
3103 | 0 | } |
3104 | | |
3105 | 0 | if (!pid) { |
3106 | 0 | #if AP_HAS_THREAD_LOCAL |
3107 | 0 | ap_thread_current_after_fork(); |
3108 | 0 | #endif |
3109 | |
3110 | 0 | my_bucket = &retained->buckets[bucket]; |
3111 | |
3112 | | #ifdef HAVE_BINDPROCESSOR |
3113 | | /* By default, AIX binds to a single processor. This bit unbinds |
3114 | | * children which will then bind to another CPU. |
3115 | | */ |
3116 | | int status = bindprocessor(BINDPROCESS, (int) getpid(), |
3117 | | PROCESSOR_CLASS_ANY); |
3118 | | if (status != OK) |
3119 | | ap_log_error(APLOG_MARK, APLOG_DEBUG, errno, |
3120 | | ap_server_conf, APLOGNO(00482) |
3121 | | "processor unbind failed"); |
3122 | | #endif |
3123 | 0 | RAISE_SIGSTOP(MAKE_CHILD); |
3124 | |
3125 | 0 | apr_signal(SIGTERM, just_die); |
3126 | 0 | child_main(slot, bucket); |
3127 | | /* NOTREACHED */ |
3128 | 0 | ap_assert(0); |
3129 | 0 | return -1; |
3130 | 0 | } |
3131 | | |
3132 | 0 | event_note_child_started(slot, pid); |
3133 | 0 | return 0; |
3134 | 0 | } |
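/* [Editorial sketch] make_child() does not retry a failed fork() itself:
 * it sleeps 10 seconds and returns -1 so that a later maintenance cycle
 * can try again, keeping a resource-starved system from being hammered
 * with fork() attempts. The pattern reduced to its core; child_fn is a
 * hypothetical placeholder for child_main():
 */
#include <sys/types.h>
#include <unistd.h>

static pid_t spawn_with_backoff(void (*child_fn)(void))
{
    pid_t pid = fork();
    if (pid == -1) {
        sleep(10);  /* back off so a saturated system can recover */
        return -1;  /* caller may retry on a later maintenance cycle */
    }
    if (pid == 0) {
        child_fn(); /* child: never returns */
        _exit(0);
    }
    return pid;     /* parent: record the new child */
}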
3135 | | |
3136 | | /* start up a bunch of children */ |
3137 | | static void startup_children(int number_to_start) |
3138 | 0 | { |
3139 | 0 | int i; |
3140 | |
3141 | 0 | for (i = 0; number_to_start && i < server_limit; ++i) { |
3142 | 0 | if (ap_scoreboard_image->parent[i].pid != 0) { |
3143 | 0 | continue; |
3144 | 0 | } |
3145 | 0 | if (make_child(ap_server_conf, i, i % retained->mpm->num_buckets) < 0) { |
3146 | 0 | break; |
3147 | 0 | } |
3148 | 0 | --number_to_start; |
3149 | 0 | } |
3150 | 0 | } |
3151 | | |
3152 | | static void perform_idle_server_maintenance(int child_bucket, |
3153 | | int *max_daemon_used) |
3154 | 0 | { |
3155 | 0 | int num_buckets = retained->mpm->num_buckets; |
3156 | 0 | int idle_thread_count = 0; |
3157 | 0 | process_score *ps; |
3158 | 0 | int free_length = 0; |
3159 | 0 | int free_slots[MAX_SPAWN_RATE]; |
3160 | 0 | int last_non_dead = -1; |
3161 | 0 | int active_thread_count = 0; |
3162 | 0 | int i, j; |
3163 | |
3164 | 0 | for (i = 0; i < server_limit; ++i) { |
3165 | 0 | if (num_buckets > 1 && (i % num_buckets) != child_bucket) { |
3166 | | /* We only care about child_bucket in this call */ |
3167 | 0 | continue; |
3168 | 0 | } |
3169 | 0 | if (i >= retained->max_daemon_used && |
3170 | 0 | free_length == retained->idle_spawn_rate[child_bucket]) { |
3171 | | /* short cut if all active processes have been examined and |
3172 | | * enough empty scoreboard slots have been found |
3173 | | */ |
3174 | 0 | break; |
3175 | 0 | } |
3176 | | |
3177 | 0 | ps = &ap_scoreboard_image->parent[i]; |
3178 | 0 | if (ps->pid != 0) { |
3179 | 0 | int child_threads_active = 0; |
3180 | 0 | if (ps->quiescing == 1) { |
3181 | 0 | ps->quiescing = 2; |
3182 | 0 | retained->active_daemons--; |
3183 | 0 | ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, |
3184 | 0 | "Child %d quiescing: pid %d, gen %d, " |
3185 | 0 | "active %d/%d, total %d/%d/%d", |
3186 | 0 | i, (int)ps->pid, (int)ps->generation, |
3187 | 0 | retained->active_daemons, active_daemons_limit, |
3188 | 0 | retained->total_daemons, retained->max_daemon_used, |
3189 | 0 | server_limit); |
3190 | 0 | } |
3191 | 0 | for (j = 0; j < threads_per_child; j++) { |
3192 | 0 | int status = ap_scoreboard_image->servers[i][j].status; |
3193 | | |
3194 | | /* We consider a starting server as idle because we started it |
3195 | | * at least a cycle ago, and if it still hasn't finished starting |
3196 | | * then we're just going to swamp things worse by forking more. |
3197 | | * So we hopefully won't need to fork more if we count it. |
3198 | | * This depends on the ordering of SERVER_READY and SERVER_STARTING. |
3199 | | */ |
3200 | 0 | if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting |
3201 | 0 | && ps->generation == retained->mpm->my_generation) { |
3202 | 0 | ++idle_thread_count; |
3203 | 0 | } |
3204 | 0 | if (status >= SERVER_READY && status < SERVER_GRACEFUL) { |
3205 | 0 | ++child_threads_active; |
3206 | 0 | } |
3207 | 0 | } |
3208 | 0 | active_thread_count += child_threads_active; |
3209 | 0 | if (child_threads_active == threads_per_child) { |
3210 | 0 | had_healthy_child = 1; |
3211 | 0 | } |
3212 | 0 | last_non_dead = i; |
3213 | 0 | } |
3214 | 0 | else if (free_length < retained->idle_spawn_rate[child_bucket]) { |
3215 | 0 | free_slots[free_length++] = i; |
3216 | 0 | } |
3217 | 0 | } |
3218 | 0 | if (*max_daemon_used < last_non_dead + 1) { |
3219 | 0 | *max_daemon_used = last_non_dead + 1; |
3220 | 0 | } |
3221 | |
3222 | 0 | if (retained->sick_child_detected) { |
3223 | 0 | if (had_healthy_child) { |
3224 | | /* Assume this is a transient error, even though it may not be. Leave |
3225 | | * the server up in case it is able to serve some requests or the |
3226 | | * problem resolves itself. |
3227 | | */ |
3228 | 0 | retained->sick_child_detected = 0; |
3229 | 0 | } |
3230 | 0 | else if (child_bucket < num_buckets - 1) { |
3231 | | /* check for had_healthy_child up to the last child bucket */ |
3232 | 0 | return; |
3233 | 0 | } |
3234 | 0 | else { |
3235 | | /* looks like a basket case, as no child ever fully initialized; give up. |
3236 | | */ |
3237 | 0 | retained->mpm->shutdown_pending = 1; |
3238 | 0 | child_fatal = 1; |
3239 | 0 | ap_log_error(APLOG_MARK, APLOG_ALERT, 0, |
3240 | 0 | ap_server_conf, APLOGNO(02324) |
3241 | 0 | "A resource shortage or other unrecoverable failure " |
3242 | 0 | "was encountered before any child process initialized " |
3243 | 0 | "successfully... httpd is exiting!"); |
3244 | | /* the child already logged the failure details */ |
3245 | 0 | return; |
3246 | 0 | } |
3247 | 0 | } |
3248 | | |
3249 | 0 | AP_DEBUG_ASSERT(retained->active_daemons <= retained->total_daemons |
3250 | 0 | && retained->total_daemons <= retained->max_daemon_used |
3251 | 0 | && retained->max_daemon_used <= server_limit); |
3252 | |
3253 | 0 | if (idle_thread_count > max_spare_threads / num_buckets) { |
3254 | | /* |
3255 | | * Child processes that we ask to shut down won't die immediately |
3256 | | * but may stay around for a long time when they finish their |
3257 | | * requests. If the server load changes many times, many such |
3258 | | * gracefully finishing processes may accumulate, filling up the |
3259 | | * scoreboard. To avoid running out of scoreboard entries, we |
3260 | | * don't shut down more processes if there are stopping ones |
3261 | | * already (i.e. active_daemons != total_daemons) and not enough |
3262 | | * slack space in the scoreboard for a graceful restart. |
3263 | | * |
3264 | | * XXX It would be nice if we could |
3265 | | * XXX - kill processes without keepalive connections first |
3266 | | * XXX - tell children to stop accepting new connections, and |
3267 | | * XXX depending on server load, later be able to resurrect them |
3268 | | * or kill them |
3269 | | */ |
3270 | 0 | int do_kill = (retained->active_daemons == retained->total_daemons |
3271 | 0 | || (server_limit - retained->total_daemons > |
3272 | 0 | active_daemons_limit)); |
3273 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, ap_server_conf, |
3274 | 0 | "%shutting down one child: " |
3275 | 0 | "active %d/%d, total %d/%d/%d, " |
3276 | 0 | "idle threads %d, max workers %d", |
3277 | 0 | (do_kill) ? "S" : "Not s", |
3278 | 0 | retained->active_daemons, active_daemons_limit, |
3279 | 0 | retained->total_daemons, retained->max_daemon_used, |
3280 | 0 | server_limit, idle_thread_count, max_workers); |
3281 | 0 | if (do_kill) { |
3282 | 0 | ap_mpm_podx_signal(retained->buckets[child_bucket].pod, |
3283 | 0 | AP_MPM_PODX_GRACEFUL); |
3284 | 0 | } |
3285 | 0 | else { |
3286 | | /* Wait for dying daemon(s) to exit */ |
3287 | 0 | } |
3288 | 0 | retained->idle_spawn_rate[child_bucket] = 1; |
3289 | 0 | } |
3290 | 0 | else if (idle_thread_count < min_spare_threads / num_buckets) { |
3291 | 0 | if (active_thread_count >= max_workers / num_buckets) { |
3292 | 0 | if (0 == idle_thread_count) { |
3293 | 0 | if (!retained->maxclients_reported) { |
3294 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00484) |
3295 | 0 | "server reached MaxRequestWorkers setting, " |
3296 | 0 | "consider raising the MaxRequestWorkers " |
3297 | 0 | "setting"); |
3298 | 0 | retained->maxclients_reported = 1; |
3299 | 0 | } |
3300 | 0 | } |
3301 | 0 | else { |
3302 | 0 | if (!retained->near_maxclients_reported) { |
3303 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(10159) |
3304 | 0 | "server is within MinSpareThreads of " |
3305 | 0 | "MaxRequestWorkers, consider raising the " |
3306 | 0 | "MaxRequestWorkers setting"); |
3307 | 0 | retained->near_maxclients_reported = 1; |
3308 | 0 | } |
3309 | 0 | } |
3310 | 0 | retained->idle_spawn_rate[child_bucket] = 1; |
3311 | 0 | } |
3312 | 0 | else if (free_length == 0) { /* scoreboard is full, can't fork */ |
3313 | 0 | ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(03490) |
3314 | 0 | "scoreboard is full, not at MaxRequestWorkers. " |
3315 | 0 | "Increase ServerLimit."); |
3316 | 0 | retained->idle_spawn_rate[child_bucket] = 1; |
3317 | 0 | } |
3318 | 0 | else { |
3319 | 0 | if (free_length > retained->idle_spawn_rate[child_bucket]) { |
3320 | 0 | free_length = retained->idle_spawn_rate[child_bucket]; |
3321 | 0 | } |
3322 | 0 | if (free_length + retained->active_daemons > active_daemons_limit) { |
3323 | 0 | if (retained->active_daemons < active_daemons_limit) { |
3324 | 0 | free_length = active_daemons_limit - retained->active_daemons; |
3325 | 0 | } |
3326 | 0 | else { |
3327 | 0 | ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, |
3328 | 0 | "server is at active daemons limit, spawning " |
3329 | 0 | "of %d children cancelled: active %d/%d, " |
3330 | 0 | "total %d/%d/%d, rate %d", free_length, |
3331 | 0 | retained->active_daemons, active_daemons_limit, |
3332 | 0 | retained->total_daemons, retained->max_daemon_used, |
3333 | 0 | server_limit, retained->idle_spawn_rate[child_bucket]); |
3334 | | /* reset the spawning rate and prevent its growth below */ |
3335 | 0 | retained->idle_spawn_rate[child_bucket] = 1; |
3336 | 0 | ++retained->hold_off_on_exponential_spawning; |
3337 | 0 | free_length = 0; |
3338 | 0 | } |
3339 | 0 | } |
3340 | 0 | if (retained->idle_spawn_rate[child_bucket] >= 8) { |
3341 | 0 | ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00486) |
3342 | 0 | "server seems busy (you may need " |
3343 | 0 | "to increase StartServers, ThreadsPerChild " |
3344 | 0 | "or Min/MaxSpareThreads), " |
3345 | 0 | "spawning %d children, there are around %d idle " |
3346 | 0 | "threads, %d active children, and %d children " |
3347 | 0 | "that are shutting down", free_length, |
3348 | 0 | idle_thread_count, retained->active_daemons, |
3349 | 0 | retained->total_daemons); |
3350 | 0 | } |
3351 | 0 | for (i = 0; i < free_length; ++i) { |
3352 | 0 | int slot = free_slots[i]; |
3353 | 0 | if (make_child(ap_server_conf, slot, child_bucket) < 0) { |
3354 | 0 | continue; |
3355 | 0 | } |
3356 | 0 | if (*max_daemon_used < slot + 1) { |
3357 | 0 | *max_daemon_used = slot + 1; |
3358 | 0 | } |
3359 | 0 | } |
3360 | | /* the next time around we want to spawn twice as many if this |
3361 | | * wasn't good enough, but not if we've just done a graceful |
3362 | | */ |
3363 | 0 | if (retained->hold_off_on_exponential_spawning) { |
3364 | 0 | --retained->hold_off_on_exponential_spawning; |
3365 | 0 | } |
3366 | 0 | else if (retained->idle_spawn_rate[child_bucket] |
3367 | 0 | < max_spawn_rate_per_bucket) { |
3368 | 0 | int new_rate = retained->idle_spawn_rate[child_bucket] * 2; |
3369 | 0 | if (new_rate > max_spawn_rate_per_bucket) { |
3370 | 0 | new_rate = max_spawn_rate_per_bucket; |
3371 | 0 | } |
3372 | 0 | retained->idle_spawn_rate[child_bucket] = new_rate; |
3373 | 0 | } |
3374 | 0 | } |
3375 | 0 | } |
3376 | 0 | else { |
3377 | 0 | retained->idle_spawn_rate[child_bucket] = 1; |
3378 | 0 | } |
3379 | 0 | } |
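/* [Editorial sketch] The spawn-rate policy above, extracted: the per-bucket
 * rate doubles on every maintenance cycle that still had to fork, is capped
 * at max_spawn_rate_per_bucket, pauses growth while the post-graceful
 * hold-off counter drains, and resets to 1 whenever spare threads suffice.
 * A stand-alone model of just that arithmetic:
 */
static int next_spawn_rate(int rate, int cap, int starved, int hold_off)
{
    if (!starved) {
        return 1;    /* enough idle threads: relax to the minimum */
    }
    if (hold_off) {
        return rate; /* recent graceful restart: don't grow yet */
    }
    return (rate * 2 > cap) ? cap : rate * 2;
}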
3380 | | |
3381 | | static void server_main_loop(int remaining_children_to_start) |
3382 | 0 | { |
3383 | 0 | int num_buckets = retained->mpm->num_buckets; |
3384 | 0 | int max_daemon_used = 0; |
3385 | 0 | int successive_kills = 0; |
3386 | 0 | int child_slot; |
3387 | 0 | apr_exit_why_e exitwhy; |
3388 | 0 | int status, processed_status; |
3389 | 0 | apr_proc_t pid; |
3390 | 0 | int i; |
3391 | |
3392 | 0 | while (!retained->mpm->restart_pending && !retained->mpm->shutdown_pending) { |
3393 | 0 | ap_wait_or_timeout(&exitwhy, &status, &pid, pconf, ap_server_conf); |
3394 | |
3395 | 0 | if (pid.pid != -1) { |
3396 | 0 | processed_status = ap_process_child_status(&pid, exitwhy, status); |
3397 | 0 | child_slot = ap_find_child_by_pid(&pid); |
3398 | 0 | if (processed_status == APEXIT_CHILDFATAL) { |
3399 | | /* fix race condition found in PR 39311 |
3400 | | * A child created at the same time as a graceful restart happens |
3401 | | * can find the lock missing and create a fatal error. |
3402 | | * It is not fatal for the last generation to be in this state. |
3403 | | */ |
3404 | 0 | if (child_slot < 0 |
3405 | 0 | || ap_get_scoreboard_process(child_slot)->generation |
3406 | 0 | == retained->mpm->my_generation) { |
3407 | 0 | retained->mpm->shutdown_pending = 1; |
3408 | 0 | child_fatal = 1; |
3409 | | /* |
3410 | | * total_daemons counting will be off now, but as we |
3411 | | * are shutting down, that is not an issue anymore. |
3412 | | */ |
3413 | 0 | return; |
3414 | 0 | } |
3415 | 0 | else { |
3416 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(00487) |
3417 | 0 | "Ignoring fatal error in child of previous " |
3418 | 0 | "generation (pid %ld).", |
3419 | 0 | (long)pid.pid); |
3420 | 0 | retained->sick_child_detected = 1; |
3421 | 0 | } |
3422 | 0 | } |
3423 | 0 | else if (processed_status == APEXIT_CHILDSICK) { |
3424 | | /* tell perform_idle_server_maintenance to check into this |
3425 | | * on the next timer pop |
3426 | | */ |
3427 | 0 | retained->sick_child_detected = 1; |
3428 | 0 | } |
3429 | | /* non-fatal death... note that it's gone in the scoreboard. */ |
3430 | 0 | if (child_slot >= 0) { |
3431 | 0 | event_note_child_stopped(child_slot, 0, 0); |
3432 | |
3433 | 0 | if (processed_status == APEXIT_CHILDSICK) { |
3434 | | /* resource shortage, minimize the fork rate */ |
3435 | 0 | retained->idle_spawn_rate[child_slot % num_buckets] = 1; |
3436 | 0 | } |
3437 | 0 | else if (remaining_children_to_start) { |
3438 | | /* we're still doing a 1-for-1 replacement of dead |
3439 | | * children with new children |
3440 | | */ |
3441 | 0 | make_child(ap_server_conf, child_slot, |
3442 | 0 | child_slot % num_buckets); |
3443 | 0 | --remaining_children_to_start; |
3444 | 0 | } |
3445 | 0 | } |
3446 | 0 | #if APR_HAS_OTHER_CHILD |
3447 | 0 | else if (apr_proc_other_child_alert(&pid, APR_OC_REASON_DEATH, |
3448 | 0 | status) == 0) { |
3449 | | /* handled */ |
3450 | 0 | } |
3451 | 0 | #endif |
3452 | 0 | else if (retained->mpm->was_graceful) { |
3453 | | /* Great, we've probably just lost a slot in the |
3454 | | * scoreboard. Somehow we don't know about this child. |
3455 | | */ |
3456 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, |
3457 | 0 | ap_server_conf, APLOGNO(00488) |
3458 | 0 | "long lost child came home! (pid %ld)", |
3459 | 0 | (long) pid.pid); |
3460 | 0 | } |
3461 | | /* Don't perform idle maintenance when a child dies, |
3462 | | * only do it when there's a timeout. Remember only a |
3463 | | * finite number of children can die, and it's pretty |
3464 | | * pathological for a lot to die suddenly. If a child is |
3465 | | * killed by a signal (faulting) we want to restart it ASAP |
3466 | | * though, up to 3 successive faults or we stop this until |
3467 | | * a timeout happens again (to avoid the flood of fork()ed |
3468 | | * processes that keep being killed early). |
3469 | | */ |
3470 | 0 | if (child_slot < 0 || !APR_PROC_CHECK_SIGNALED(exitwhy)) { |
3471 | 0 | continue; |
3472 | 0 | } |
3473 | 0 | if (++successive_kills >= 3) { |
3474 | 0 | if (successive_kills % 10 == 3) { |
3475 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, |
3476 | 0 | ap_server_conf, APLOGNO(10392) |
3477 | 0 | "children are killed successively!"); |
3478 | 0 | } |
3479 | 0 | continue; |
3480 | 0 | } |
3481 | 0 | ++remaining_children_to_start; |
3482 | 0 | } |
3483 | 0 | else { |
3484 | 0 | successive_kills = 0; |
3485 | 0 | } |
3486 | | |
3487 | 0 | if (remaining_children_to_start) { |
3488 | | /* we hit a 1 second timeout in which none of the previous |
3489 | | * generation of children needed to be reaped... so assume |
3490 | | * they're all done, and pick up the slack if any is left. |
3491 | | */ |
3492 | 0 | startup_children(remaining_children_to_start); |
3493 | 0 | remaining_children_to_start = 0; |
3494 | | /* In any event we really shouldn't do the code below because |
3495 | | * few of the servers we just started are in the IDLE state |
3496 | | * yet, so we'd mistakenly create an extra server. |
3497 | | */ |
3498 | 0 | continue; |
3499 | 0 | } |
3500 | | |
3501 | 0 | max_daemon_used = 0; |
3502 | 0 | for (i = 0; i < num_buckets; i++) { |
3503 | 0 | perform_idle_server_maintenance(i, &max_daemon_used); |
3504 | 0 | } |
3505 | 0 | retained->max_daemon_used = max_daemon_used; |
3506 | 0 | } |
3507 | 0 | } |
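/* [Editorial sketch] The crash-restart throttle above: a child that exits
 * on a signal (i.e. faulted) is replaced immediately, but only up to 3
 * consecutive times; past that, replacement waits for the next 1-second
 * timeout, and any non-signaled exit resets the streak. Reduced to a
 * predicate:
 */
static int replace_crashed_child_now(int *successive_kills, int signaled)
{
    if (!signaled) {
        *successive_kills = 0;      /* normal exit breaks the streak */
        return 0;
    }
    return ++*successive_kills < 3; /* past 3, wait for a timeout */
}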
3508 | | |
3509 | | static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s) |
3510 | 0 | { |
3511 | 0 | ap_listen_rec **listen_buckets = NULL; |
3512 | 0 | int num_buckets = retained->mpm->num_buckets; |
3513 | 0 | int remaining_children_to_start; |
3514 | 0 | apr_status_t rv; |
3515 | 0 | int i; |
3516 | |
3517 | 0 | ap_log_pid(pconf, ap_pid_fname); |
3518 | | |
3519 | | /* On first startup create gen_pool to satisfy the lifetime of the |
3520 | | * parent's PODs and listeners; on restart stop the children from the |
3521 | | * previous generation and clear gen_pool for the next one. |
3522 | | */ |
3523 | 0 | if (!retained->gen_pool) { |
3524 | 0 | apr_pool_create(&retained->gen_pool, ap_pglobal); |
3525 | 0 | } |
3526 | 0 | else { |
3527 | 0 | if (retained->mpm->was_graceful) { |
3528 | | /* wake up the children...time to die. But we'll have more soon */ |
3529 | 0 | for (i = 0; i < num_buckets; i++) { |
3530 | 0 | ap_mpm_podx_killpg(retained->buckets[i].pod, |
3531 | 0 | active_daemons_limit, AP_MPM_PODX_GRACEFUL); |
3532 | 0 | } |
3533 | 0 | } |
3534 | 0 | else { |
3535 | | /* Kill 'em all. Since the child acts the same on the parent's SIGTERM |
3536 | | * and a SIGHUP, we may as well use the same signal, because some user |
3537 | | * pthreads are stealing signals from us left and right. |
3538 | | */ |
3539 | 0 | for (i = 0; i < num_buckets; i++) { |
3540 | 0 | ap_mpm_podx_killpg(retained->buckets[i].pod, |
3541 | 0 | active_daemons_limit, AP_MPM_PODX_RESTART); |
3542 | 0 | } |
3543 | 0 | ap_reclaim_child_processes(1, /* Start with SIGTERM */ |
3544 | 0 | event_note_child_stopped); |
3545 | 0 | } |
3546 | 0 | apr_pool_clear(retained->gen_pool); |
3547 | 0 | retained->buckets = NULL; |
3548 | | |
3549 | | /* advance to the next generation */ |
3550 | | /* XXX: we really need to make sure this new generation number isn't in |
3551 | | * use by any of the previous children. |
3552 | | */ |
3553 | 0 | ++retained->mpm->my_generation; |
3554 | 0 | } |
3555 | | |
3556 | | /* On graceful restart, preserve the scoreboard and the listeners buckets. |
3557 | | * When ungraceful, clear the scoreboard and set num_buckets to zero to let |
3558 | | * ap_duplicate_listeners() below determine how many are needed/configured. |
3559 | | */ |
3560 | 0 | if (!retained->mpm->was_graceful) { |
3561 | 0 | if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) { |
3562 | 0 | retained->mpm->mpm_state = AP_MPMQ_STOPPING; |
3563 | 0 | return !OK; |
3564 | 0 | } |
3565 | 0 | num_buckets = (one_process) ? 1 : 0; /* one_process => one bucket */ |
3566 | 0 | retained->mpm->num_buckets = 0; /* reset idle_spawn_rate below */ |
3567 | 0 | } |
3568 | | |
3569 | | /* Now on for the new generation. */ |
3570 | 0 | ap_scoreboard_image->global->running_generation = retained->mpm->my_generation; |
3571 | 0 | ap_unixd_mpm_set_signals(pconf, one_process); |
3572 | |
3573 | 0 | if ((rv = ap_duplicate_listeners(retained->gen_pool, ap_server_conf, |
3574 | 0 | &listen_buckets, &num_buckets))) { |
3575 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, |
3576 | 0 | ap_server_conf, APLOGNO(03273) |
3577 | 0 | "could not duplicate listeners"); |
3578 | 0 | return !OK; |
3579 | 0 | } |
3580 | | |
3581 | 0 | retained->buckets = apr_pcalloc(retained->gen_pool, |
3582 | 0 | num_buckets * sizeof(event_child_bucket)); |
3583 | 0 | for (i = 0; i < num_buckets; i++) { |
3584 | 0 | if (!one_process /* no POD in one_process mode */ |
3585 | 0 | && (rv = ap_mpm_podx_open(retained->gen_pool, |
3586 | 0 | &retained->buckets[i].pod))) { |
3587 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, |
3588 | 0 | ap_server_conf, APLOGNO(03274) |
3589 | 0 | "could not open pipe-of-death"); |
3590 | 0 | return !OK; |
3591 | 0 | } |
3592 | 0 | retained->buckets[i].listeners = listen_buckets[i]; |
3593 | 0 | } |
3594 | | |
3595 | 0 | if (retained->mpm->max_buckets < num_buckets) { |
3596 | 0 | int new_max, *new_ptr; |
3597 | 0 | new_max = retained->mpm->max_buckets * 2; |
3598 | 0 | if (new_max < num_buckets) { |
3599 | 0 | new_max = num_buckets; |
3600 | 0 | } |
3601 | 0 | new_ptr = (int *)apr_palloc(ap_pglobal, new_max * sizeof(int)); |
3602 | 0 | if (retained->mpm->num_buckets) /* idle_spawn_rate NULL at startup */ |
3603 | 0 | memcpy(new_ptr, retained->idle_spawn_rate, |
3604 | 0 | retained->mpm->num_buckets * sizeof(int)); |
3605 | 0 | retained->idle_spawn_rate = new_ptr; |
3606 | 0 | retained->mpm->max_buckets = new_max; |
3607 | 0 | } |
3608 | 0 | if (retained->mpm->num_buckets < num_buckets) { |
3609 | 0 | int rate_max = 1; |
3610 | | /* If new buckets are added, set their idle spawn rate to |
3611 | | * the highest so far, so that they get filled as quickly |
3612 | | * as the existing ones. |
3613 | | */ |
3614 | 0 | for (i = 0; i < retained->mpm->num_buckets; i++) { |
3615 | 0 | if (rate_max < retained->idle_spawn_rate[i]) { |
3616 | 0 | rate_max = retained->idle_spawn_rate[i]; |
3617 | 0 | } |
3618 | 0 | } |
3619 | 0 | for (/* up to date i */; i < num_buckets; i++) { |
3620 | 0 | retained->idle_spawn_rate[i] = rate_max; |
3621 | 0 | } |
3622 | 0 | } |
3623 | 0 | retained->mpm->num_buckets = num_buckets; |
3624 | | |
3625 | | /* Don't thrash since num_buckets depends on the |
3626 | | * system and the number of online CPU cores... |
3627 | | */ |
3628 | 0 | if (active_daemons_limit < num_buckets) |
3629 | 0 | active_daemons_limit = num_buckets; |
3630 | 0 | if (ap_daemons_to_start < num_buckets) |
3631 | 0 | ap_daemons_to_start = num_buckets; |
3632 | | /* We want to create as many children at a time as there are buckets, |
3633 | | * so as to accept connections optimally (evenly distributed across buckets). |
3634 | | * Thus min_spare_threads should maintain at least num_buckets children, |
3635 | | * and max_spare_threads should allow num_buckets more children without |
3636 | | * triggering reaping immediately (a margin of num_buckets idle threads, one per bucket). |
3637 | | */ |
3638 | 0 | if (min_spare_threads < threads_per_child * (num_buckets - 1) + num_buckets) |
3639 | 0 | min_spare_threads = threads_per_child * (num_buckets - 1) + num_buckets; |
3640 | 0 | if (max_spare_threads < min_spare_threads + (threads_per_child + 1) * num_buckets) |
3641 | 0 | max_spare_threads = min_spare_threads + (threads_per_child + 1) * num_buckets; |
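/* [Editorial worked example, illustrative values only] With
 * ThreadsPerChild = 25 and num_buckets = 2, the floors above come out to:
 *   min_spare_threads >= 25 * (2 - 1) + 2  = 27
 *   max_spare_threads >= 27 + (25 + 1) * 2 = 79
 * i.e. nearly one bucket's worth of threads may sit idle before any child
 * is reaped, and one extra child per bucket fits inside the spare margin.
 */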
3642 | |
3643 | 0 | max_spawn_rate_per_bucket = (MAX_SPAWN_RATE + num_buckets - 1) / num_buckets; |
3644 | 0 | if (max_spawn_rate_per_bucket < 1) { |
3645 | 0 | max_spawn_rate_per_bucket = 1; |
3646 | 0 | } |
3647 | | |
3648 | | /* If we're doing a graceful_restart then we're going to see a lot |
3649 | | * of children exiting immediately when we get into the main loop |
3650 | | * below (because we just sent them AP_SIG_GRACEFUL). This happens pretty |
3651 | | * rapidly... and for each one that exits we may start a new one, until |
3652 | | * there are at least min_spare_threads idle threads, counting across |
3653 | | * all children. But we may be permitted to start more children than |
3654 | | * that, so we'll just keep track of how many we're |
3655 | | * supposed to start up without the 1 second penalty between each fork. |
3656 | | */ |
3657 | 0 | remaining_children_to_start = ap_daemons_to_start; |
3658 | 0 | if (remaining_children_to_start > active_daemons_limit) { |
3659 | 0 | remaining_children_to_start = active_daemons_limit; |
3660 | 0 | } |
3661 | 0 | if (!retained->mpm->was_graceful) { |
3662 | 0 | startup_children(remaining_children_to_start); |
3663 | 0 | remaining_children_to_start = 0; |
3664 | 0 | } |
3665 | 0 | else { |
3666 | | /* give the system some time to recover before kicking into |
3667 | | * exponential mode */ |
3668 | 0 | retained->hold_off_on_exponential_spawning = 10; |
3669 | 0 | } |
3670 | |
3671 | 0 | ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00489) |
3672 | 0 | "%s configured -- resuming normal operations", |
3673 | 0 | ap_get_server_description()); |
3674 | 0 | ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00490) |
3675 | 0 | "Server built: %s", ap_get_server_built()); |
3676 | 0 | ap_log_command_line(plog, s); |
3677 | 0 | ap_log_mpm_common(s); |
3678 | |
3679 | 0 | retained->mpm->mpm_state = AP_MPMQ_RUNNING; |
3680 | |
3681 | 0 | server_main_loop(remaining_children_to_start); |
3682 | 0 | retained->mpm->mpm_state = AP_MPMQ_STOPPING; |
3683 | |
3684 | 0 | if (retained->mpm->shutdown_pending && retained->mpm->is_ungraceful) { |
3685 | | /* Time to shut down: |
3686 | | * Kill child processes, tell them to call child_exit, etc... |
3687 | | */ |
3688 | 0 | for (i = 0; i < num_buckets; i++) { |
3689 | 0 | ap_mpm_podx_killpg(retained->buckets[i].pod, |
3690 | 0 | active_daemons_limit, AP_MPM_PODX_RESTART); |
3691 | 0 | } |
3692 | 0 | ap_reclaim_child_processes(1, /* Start with SIGTERM */ |
3693 | 0 | event_note_child_stopped); |
3694 | |
3695 | 0 | if (!child_fatal) { |
3696 | | /* cleanup pid file on normal shutdown */ |
3697 | 0 | ap_remove_pid(pconf, ap_pid_fname); |
3698 | 0 | ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, |
3699 | 0 | ap_server_conf, APLOGNO(00491) "caught SIGTERM, shutting down"); |
3700 | 0 | } |
3701 | |
3702 | 0 | return DONE; |
3703 | 0 | } |
3704 | | |
3705 | 0 | if (retained->mpm->shutdown_pending) { |
3706 | | /* Time to gracefully shut down: |
3707 | | * Kill child processes, tell them to call child_exit, etc... |
3708 | | */ |
3709 | 0 | int active_children; |
3710 | 0 | int index; |
3711 | 0 | apr_time_t cutoff = 0; |
3712 | | |
3713 | | /* Close our listeners, and then ask our children to do same */ |
3714 | 0 | ap_close_listeners(); |
3715 | 0 | for (i = 0; i < num_buckets; i++) { |
3716 | 0 | ap_mpm_podx_killpg(retained->buckets[i].pod, |
3717 | 0 | active_daemons_limit, AP_MPM_PODX_GRACEFUL); |
3718 | 0 | } |
3719 | 0 | ap_relieve_child_processes(event_note_child_stopped); |
3720 | |
3721 | 0 | if (!child_fatal) { |
3722 | | /* cleanup pid file on normal shutdown */ |
3723 | 0 | ap_remove_pid(pconf, ap_pid_fname); |
3724 | 0 | ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00492) |
3725 | 0 | "caught " AP_SIG_GRACEFUL_STOP_STRING |
3726 | 0 | ", shutting down gracefully"); |
3727 | 0 | } |
3728 | |
3729 | 0 | if (ap_graceful_shutdown_timeout) { |
3730 | 0 | cutoff = apr_time_now() + |
3731 | 0 | apr_time_from_sec(ap_graceful_shutdown_timeout); |
3732 | 0 | } |
3733 | | |
3734 | | /* Don't really exit until each child has finished */ |
3735 | 0 | retained->mpm->shutdown_pending = 0; |
3736 | 0 | do { |
3737 | | /* Pause for a second */ |
3738 | 0 | apr_sleep(apr_time_from_sec(1)); |
3739 | | |
3740 | | /* Relieve any children which have now exited */ |
3741 | 0 | ap_relieve_child_processes(event_note_child_stopped); |
3742 | |
3743 | 0 | active_children = 0; |
3744 | 0 | for (index = 0; index < retained->max_daemon_used; ++index) { |
3745 | 0 | if (ap_mpm_safe_kill(MPM_CHILD_PID(index), 0) == APR_SUCCESS) { |
3746 | 0 | active_children = 1; |
3747 | | /* Having just one child is enough to stay around */ |
3748 | 0 | break; |
3749 | 0 | } |
3750 | 0 | } |
3751 | 0 | } while (!retained->mpm->shutdown_pending && active_children && |
3752 | 0 | (!ap_graceful_shutdown_timeout || apr_time_now() < cutoff)); |
3753 | | |
3754 | | /* We might be here because we received SIGTERM, either |
3755 | | * way, try and make sure that all of our processes are |
3756 | | * really dead. |
3757 | | */ |
3758 | 0 | for (i = 0; i < num_buckets; i++) { |
3759 | 0 | ap_mpm_podx_killpg(retained->buckets[i].pod, |
3760 | 0 | active_daemons_limit, AP_MPM_PODX_RESTART); |
3761 | 0 | } |
3762 | 0 | ap_reclaim_child_processes(1, event_note_child_stopped); |
3763 | |
3764 | 0 | return DONE; |
3765 | 0 | } |
3766 | | |
3767 | | /* we've been told to restart */ |
3768 | 0 | if (one_process) { |
3769 | | /* not worth thinking about */ |
3770 | 0 | return DONE; |
3771 | 0 | } |
3772 | | |
3773 | 0 | if (!retained->mpm->is_ungraceful) { |
3774 | 0 | ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00493) |
3775 | 0 | "%s received. Doing graceful restart", |
3776 | 0 | AP_SIG_GRACEFUL_STRING); |
3777 | 0 | } |
3778 | 0 | else { |
3779 | 0 | ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00494) |
3780 | 0 | "SIGHUP received. Attempting to restart"); |
3781 | 0 | } |
3782 | 0 | return OK; |
3783 | 0 | } |
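/* [Editorial sketch] The graceful-shutdown wait inside event_run() is a
 * bounded poll: once per second, reap whatever has exited, probe the
 * remaining pids with a 0-signal, and stop when no child answers, a new
 * shutdown request arrives, or the GracefulShutdownTimeout cutoff passes.
 * The shape of that loop; reap_exited_children(), any_child_alive() and
 * shutdown_requested() are hypothetical helpers:
 */
static void reap_exited_children(void);
static int any_child_alive(void);   /* kill(pid, 0) probe per child */
static int shutdown_requested(void);

static void graceful_wait_sketch(apr_time_t cutoff)
{
    do {
        apr_sleep(apr_time_from_sec(1));
        reap_exited_children();
    } while (!shutdown_requested()
             && any_child_alive()
             && (!cutoff || apr_time_now() < cutoff));
}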
3784 | | |
3785 | | static void setup_slave_conn(conn_rec *c, void *csd) |
3786 | 0 | { |
3787 | 0 | event_conn_state_t *mcs; |
3788 | 0 | event_conn_state_t *cs; |
3789 | | |
3790 | 0 | mcs = ap_get_module_config(c->master->conn_config, &mpm_event_module); |
3791 | | |
3792 | 0 | cs = apr_pcalloc(c->pool, sizeof(*cs)); |
3793 | 0 | cs->c = c; |
3794 | 0 | cs->r = NULL; |
3795 | 0 | cs->sc = mcs->sc; |
3796 | 0 | cs->suspended = 0; |
3797 | 0 | cs->p = c->pool; |
3798 | 0 | cs->bucket_alloc = c->bucket_alloc; |
3799 | 0 | cs->pfd = mcs->pfd; |
3800 | 0 | cs->pub = mcs->pub; |
3801 | 0 | cs->pub.state = CONN_STATE_READ_REQUEST_LINE; |
3802 | 0 | cs->pub.sense = CONN_SENSE_DEFAULT; |
3803 | | |
3804 | 0 | c->cs = &(cs->pub); |
3805 | 0 | ap_set_module_config(c->conn_config, &mpm_event_module, cs); |
3806 | 0 | } |
3807 | | |
3808 | | static int event_pre_connection(conn_rec *c, void *csd) |
3809 | 0 | { |
3810 | 0 | if (c->master && (!c->cs || c->cs == c->master->cs)) { |
3811 | 0 | setup_slave_conn(c, csd); |
3812 | 0 | } |
3813 | 0 | return OK; |
3814 | 0 | } |
3815 | | |
3816 | | static int event_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, |
3817 | | const char *protocol) |
3818 | 0 | { |
3819 | 0 | if (!r && s) { |
3820 | | /* connection based switching of protocol, set the correct server |
3821 | | * configuration, so that timeouts, keepalives and such are used |
3822 | | * for the server that the connection was switched on. |
3823 | | * Normally, we set this on post_read_request, but on a protocol |
3824 | | * other than http/1.1, this might never happen. |
3825 | | */ |
3826 | 0 | event_conn_state_t *cs; |
3827 | | |
3828 | 0 | cs = ap_get_module_config(c->conn_config, &mpm_event_module); |
3829 | 0 | cs->sc = ap_get_module_config(s->module_config, &mpm_event_module); |
3830 | 0 | } |
3831 | 0 | return DECLINED; |
3832 | 0 | } |
3833 | | |
3834 | | /* This really should be a post_config hook, but the error log is already |
3835 | | * redirected by that point, so we need to do this in the open_logs phase. |
3836 | | */ |
3837 | | static int event_open_logs(apr_pool_t * p, apr_pool_t * plog, |
3838 | | apr_pool_t * ptemp, server_rec * s) |
3839 | 0 | { |
3840 | 0 | int startup = 0; |
3841 | 0 | int level_flags = 0; |
3842 | |
3843 | 0 | pconf = p; |
3844 | | |
3845 | | /* the reverse of pre_config, we want this only the first time around */ |
3846 | 0 | if (retained->mpm->module_loads == 1) { |
3847 | 0 | startup = 1; |
3848 | 0 | level_flags |= APLOG_STARTUP; |
3849 | 0 | } |
3850 | |
3851 | 0 | if ((num_listensocks = ap_setup_listeners(ap_server_conf)) < 1) { |
3852 | 0 | ap_log_error(APLOG_MARK, APLOG_ALERT | level_flags, 0, |
3853 | 0 | (startup ? NULL : s), APLOGNO(03272) |
3854 | 0 | "no listening sockets available, shutting down"); |
3855 | 0 | return !OK; |
3856 | 0 | } |
3857 | | |
3858 | 0 | return OK; |
3859 | 0 | } |
3860 | | |
3861 | | static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog, |
3862 | | apr_pool_t * ptemp) |
3863 | 0 | { |
3864 | 0 | int no_detach, debug, foreground; |
3865 | 0 | apr_status_t rv; |
3866 | 0 | const char *userdata_key = "mpm_event_module"; |
3867 | 0 | int test_atomics = 0; |
3868 | |
3869 | 0 | debug = ap_exists_config_define("DEBUG"); |
3870 | |
3871 | 0 | if (debug) { |
3872 | 0 | foreground = one_process = 1; |
3873 | 0 | no_detach = 0; |
3874 | 0 | } |
3875 | 0 | else { |
3876 | 0 | one_process = ap_exists_config_define("ONE_PROCESS"); |
3877 | 0 | no_detach = ap_exists_config_define("NO_DETACH"); |
3878 | 0 | foreground = ap_exists_config_define("FOREGROUND"); |
3879 | 0 | } |
3880 | |
3881 | 0 | retained = ap_retained_data_get(userdata_key); |
3882 | 0 | if (!retained) { |
3883 | 0 | retained = ap_retained_data_create(userdata_key, sizeof(*retained)); |
3884 | 0 | retained->mpm = ap_unixd_mpm_get_retained_data(); |
3885 | 0 | retained->mpm->baton = retained; |
3886 | 0 | if (retained->mpm->module_loads) { |
3887 | 0 | test_atomics = 1; |
3888 | 0 | } |
3889 | 0 | } |
3890 | 0 | else if (retained->mpm->baton != retained) { |
3891 | | /* If the MPM changes on restart, be ungraceful */ |
3892 | 0 | retained->mpm->baton = retained; |
3893 | 0 | retained->mpm->was_graceful = 0; |
3894 | 0 | } |
3895 | 0 | retained->mpm->mpm_state = AP_MPMQ_STARTING; |
3896 | 0 | ++retained->mpm->module_loads; |
3897 | | |
3898 | | /* test once that the atomics fdqueue relies on operate correctly */ |
3899 | 0 | if (test_atomics || retained->mpm->module_loads == 2) { |
3900 | 0 | static apr_uint32_t foo1, foo2; |
3901 | |
3902 | 0 | apr_atomic_set32(&foo1, 100); |
3903 | 0 | foo2 = apr_atomic_add32(&foo1, -10); |
3904 | 0 | if (foo2 != 100 || foo1 != 90) { |
3905 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL, APLOGNO(02405) |
3906 | 0 | "atomics not working as expected - add32 of negative number"); |
3907 | 0 | return HTTP_INTERNAL_SERVER_ERROR; |
3908 | 0 | } |
3909 | 0 | } |
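/* [Editorial note] The sanity check above leans on two properties of
 * apr_atomic_add32(): it returns the value *before* the addition, and a
 * negative delta passed through apr_uint32_t must wrap correctly -- hence
 * the expected pair foo2 == 100, foo1 == 90. The same contract in
 * miniature:
 */
static int atomics_contract_holds(void)
{
    static apr_uint32_t v;
    apr_uint32_t old;
    apr_atomic_set32(&v, 100);
    old = apr_atomic_add32(&v, -10);  /* returns the old value: 100 */
    return old == 100 && apr_atomic_read32(&v) == 90;
}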
3910 | | |
3911 | | /* sigh, want this only the second time around */ |
3912 | 0 | if (retained->mpm->module_loads == 2) { |
3913 | 0 | rv = apr_pollset_create(&event_pollset, 1, plog, |
3914 | 0 | APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY); |
3915 | 0 | if (rv != APR_SUCCESS) { |
3916 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00495) |
3917 | 0 | "Couldn't create a Thread Safe Pollset. " |
3918 | 0 | "Is it supported on your platform? " |
3919 | 0 | "Also check system or user limits!"); |
3920 | 0 | return HTTP_INTERNAL_SERVER_ERROR; |
3921 | 0 | } |
3922 | 0 | apr_pollset_destroy(event_pollset); |
3923 | |
3924 | 0 | if (!one_process && !foreground) { |
3925 | | /* before we detach, set up crash handlers to log to the error log */ |
3926 | 0 | ap_fatal_signal_setup(ap_server_conf, pconf); |
3927 | 0 | rv = apr_proc_detach(no_detach ? APR_PROC_DETACH_FOREGROUND |
3928 | 0 | : APR_PROC_DETACH_DAEMONIZE); |
3929 | 0 | if (rv != APR_SUCCESS) { |
3930 | 0 | ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00496) |
3931 | 0 | "apr_proc_detach failed"); |
3932 | 0 | return HTTP_INTERNAL_SERVER_ERROR; |
3933 | 0 | } |
3934 | 0 | } |
3935 | 0 | } |
3936 | | |
3937 | 0 | parent_pid = ap_my_pid = getpid(); |
3938 | |
3939 | 0 | ap_listen_pre_config(); |
3940 | 0 | ap_daemons_to_start = DEFAULT_START_DAEMON; |
3941 | 0 | min_spare_threads = DEFAULT_MIN_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD; |
3942 | 0 | max_spare_threads = DEFAULT_MAX_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD; |
3943 | 0 | server_limit = DEFAULT_SERVER_LIMIT; |
3944 | 0 | thread_limit = DEFAULT_THREAD_LIMIT; |
3945 | 0 | active_daemons_limit = server_limit; |
3946 | 0 | threads_per_child = DEFAULT_THREADS_PER_CHILD; |
3947 | 0 | max_workers = active_daemons_limit * threads_per_child; |
3948 | 0 | defer_linger_chain = NULL; |
3949 | 0 | had_healthy_child = 0; |
3950 | 0 | ap_extended_status = 0; |
3951 | |
3952 | 0 | event_pollset = NULL; |
3953 | 0 | worker_queue_info = NULL; |
3954 | 0 | listener_os_thread = NULL; |
3955 | 0 | listensocks_disabled = 0; |
3956 | 0 | listener_is_wakeable = 0; |
3957 | |
3958 | 0 | return OK; |
3959 | 0 | } |
3960 | | |
3961 | | static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog, |
3962 | | apr_pool_t *ptemp, server_rec *s) |
3963 | 0 | { |
3964 | 0 | struct { |
3965 | 0 | struct timeout_queue *tail, *q; |
3966 | 0 | apr_hash_t *hash; |
3967 | 0 | } wc, ka; |
3968 | | |
3969 | | /* Not needed in pre_config stage */ |
3970 | 0 | if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) { |
3971 | 0 | return OK; |
3972 | 0 | } |
3973 | | |
3974 | 0 | wc.tail = ka.tail = NULL; |
3975 | 0 | wc.hash = apr_hash_make(ptemp); |
3976 | 0 | ka.hash = apr_hash_make(ptemp); |
3977 | |
|
3978 | 0 | linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(MAX_SECS_TO_LINGER), |
3979 | 0 | NULL); |
3980 | 0 | short_linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(SECONDS_TO_LINGER), |
3981 | 0 | NULL); |
3982 | |
3983 | 0 | for (; s; s = s->next) { |
3984 | 0 | event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc); |
3985 | |
3986 | 0 | ap_set_module_config(s->module_config, &mpm_event_module, sc); |
3987 | 0 | if (!wc.tail) { |
3988 | | /* The main server uses the global queues */ |
3989 | 0 | wc.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL); |
3990 | 0 | apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q); |
3991 | 0 | wc.tail = write_completion_q = wc.q; |
3992 | |
3993 | 0 | ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, NULL); |
3994 | 0 | apr_hash_set(ka.hash, &s->keep_alive_timeout, |
3995 | 0 | sizeof s->keep_alive_timeout, ka.q); |
3996 | 0 | ka.tail = keepalive_q = ka.q; |
3997 | 0 | } |
3998 | 0 | else { |
3999 | | /* The vhosts use any existing queue with the same timeout, |
4000 | | * or their own queue(s) if there is none */ |
4001 | 0 | wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout); |
4002 | 0 | if (!wc.q) { |
4003 | 0 | wc.q = TO_QUEUE_MAKE(pconf, s->timeout, wc.tail); |
4004 | 0 | apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q); |
4005 | 0 | wc.tail = wc.tail->next = wc.q; |
4006 | 0 | } |
4007 | |
4008 | 0 | ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout, |
4009 | 0 | sizeof s->keep_alive_timeout); |
4010 | 0 | if (!ka.q) { |
4011 | 0 | ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, ka.tail); |
4012 | 0 | apr_hash_set(ka.hash, &s->keep_alive_timeout, |
4013 | 0 | sizeof s->keep_alive_timeout, ka.q); |
4014 | 0 | ka.tail = ka.tail->next = ka.q; |
4015 | 0 | } |
4016 | 0 | } |
4017 | 0 | sc->wc_q = wc.q; |
4018 | 0 | sc->ka_q = ka.q; |
4019 | 0 | } |
4020 | |
4021 | 0 | return OK; |
4022 | 0 | } |
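/* [Editorial sketch] event_post_config() deduplicates timeout queues by
 * keying an apr_hash_t on the timeout value's bytes (apr_hash_get/set with
 * sizeof the apr_interval_time_t), so vhosts sharing a Timeout or
 * KeepAliveTimeout share one queue. The lookup idiom in isolation; note
 * that apr_hash does not copy keys, so the timeout pointer must outlive
 * the hash (in the code above it points into server_rec). make_queue() is
 * a hypothetical stand-in for TO_QUEUE_MAKE():
 */
static struct timeout_queue *make_queue(apr_pool_t *p, apr_interval_time_t t);

static struct timeout_queue *get_or_make_queue(apr_hash_t *by_timeout,
                                               apr_pool_t *p,
                                               apr_interval_time_t *timeout)
{
    struct timeout_queue *q = apr_hash_get(by_timeout, timeout,
                                           sizeof *timeout);
    if (!q) {
        q = make_queue(p, *timeout);
        apr_hash_set(by_timeout, timeout, sizeof *timeout, q);
    }
    return q;
}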
4023 | | |
4024 | | static int event_check_config(apr_pool_t *p, apr_pool_t *plog, |
4025 | | apr_pool_t *ptemp, server_rec *s) |
4026 | 0 | { |
4027 | 0 | int startup = 0; |
4028 | | |
4029 | | /* the reverse of pre_config, we want this only the first time around */ |
4030 | 0 | if (retained->mpm->module_loads == 1) { |
4031 | 0 | startup = 1; |
4032 | 0 | } |
4033 | |
4034 | 0 | if (server_limit > MAX_SERVER_LIMIT) { |
4035 | 0 | if (startup) { |
4036 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00497) |
4037 | 0 | "WARNING: ServerLimit of %d exceeds compile-time " |
4038 | 0 | "limit of %d servers, decreasing to %d.", |
4039 | 0 | server_limit, MAX_SERVER_LIMIT, MAX_SERVER_LIMIT); |
4040 | 0 | } else { |
4041 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00498) |
4042 | 0 | "ServerLimit of %d exceeds compile-time limit " |
4043 | 0 | "of %d, decreasing to match", |
4044 | 0 | server_limit, MAX_SERVER_LIMIT); |
4045 | 0 | } |
4046 | 0 | server_limit = MAX_SERVER_LIMIT; |
4047 | 0 | } |
4048 | 0 | else if (server_limit < 1) { |
4049 | 0 | if (startup) { |
4050 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00499) |
4051 | 0 | "WARNING: ServerLimit of %d not allowed, " |
4052 | 0 | "increasing to 1.", server_limit); |
4053 | 0 | } else { |
4054 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00500) |
4055 | 0 | "ServerLimit of %d not allowed, increasing to 1", |
4056 | 0 | server_limit); |
4057 | 0 | } |
4058 | 0 | server_limit = 1; |
4059 | 0 | } |
4060 | | |
4061 | | /* you cannot change ServerLimit across a restart; ignore |
4062 | | * any such attempts |
4063 | | */ |
4064 | 0 | if (!retained->first_server_limit) { |
4065 | 0 | retained->first_server_limit = server_limit; |
4066 | 0 | } |
4067 | 0 | else if (server_limit != retained->first_server_limit) { |
4068 | | /* don't need a startup console version here */ |
4069 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00501) |
4070 | 0 | "changing ServerLimit to %d from original value of %d " |
4071 | 0 | "not allowed during restart", |
4072 | 0 | server_limit, retained->first_server_limit); |
4073 | 0 | server_limit = retained->first_server_limit; |
4074 | 0 | } |
4075 | |
4076 | 0 | if (thread_limit > MAX_THREAD_LIMIT) { |
4077 | 0 | if (startup) { |
4078 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00502) |
4079 | 0 | "WARNING: ThreadLimit of %d exceeds compile-time " |
4080 | 0 | "limit of %d threads, decreasing to %d.", |
4081 | 0 | thread_limit, MAX_THREAD_LIMIT, MAX_THREAD_LIMIT); |
4082 | 0 | } else { |
4083 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00503) |
4084 | 0 | "ThreadLimit of %d exceeds compile-time limit " |
4085 | 0 | "of %d, decreasing to match", |
4086 | 0 | thread_limit, MAX_THREAD_LIMIT); |
4087 | 0 | } |
4088 | 0 | thread_limit = MAX_THREAD_LIMIT; |
4089 | 0 | } |
4090 | 0 | else if (thread_limit < 1) { |
4091 | 0 | if (startup) { |
4092 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00504) |
4093 | 0 | "WARNING: ThreadLimit of %d not allowed, " |
4094 | 0 | "increasing to 1.", thread_limit); |
4095 | 0 | } else { |
4096 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00505) |
4097 | 0 | "ThreadLimit of %d not allowed, increasing to 1", |
4098 | 0 | thread_limit); |
4099 | 0 | } |
4100 | 0 | thread_limit = 1; |
4101 | 0 | } |
4102 | | |
4103 | | /* you cannot change ThreadLimit across a restart; ignore |
4104 | | * any such attempts |
4105 | | */ |
4106 | 0 | if (!retained->first_thread_limit) { |
4107 | 0 | retained->first_thread_limit = thread_limit; |
4108 | 0 | } |
4109 | 0 | else if (thread_limit != retained->first_thread_limit) { |
4110 | | /* don't need a startup console version here */ |
4111 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00506) |
4112 | 0 | "changing ThreadLimit to %d from original value of %d " |
4113 | 0 | "not allowed during restart", |
4114 | 0 | thread_limit, retained->first_thread_limit); |
4115 | 0 | thread_limit = retained->first_thread_limit; |
4116 | 0 | } |
4117 | |
4118 | 0 | if (threads_per_child > thread_limit) { |
4119 | 0 | if (startup) { |
4120 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00507) |
4121 | 0 | "WARNING: ThreadsPerChild of %d exceeds ThreadLimit " |
4122 | 0 | "of %d threads, decreasing to %d. " |
4123 | 0 | "To increase, please see the ThreadLimit directive.", |
4124 | 0 | threads_per_child, thread_limit, thread_limit); |
4125 | 0 | } else { |
4126 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00508) |
4127 | 0 | "ThreadsPerChild of %d exceeds ThreadLimit " |
4128 | 0 | "of %d, decreasing to match", |
4129 | 0 | threads_per_child, thread_limit); |
4130 | 0 | } |
4131 | 0 | threads_per_child = thread_limit; |
4132 | 0 | } |
4133 | 0 | else if (threads_per_child < 1) { |
4134 | 0 | if (startup) { |
4135 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00509) |
4136 | 0 | "WARNING: ThreadsPerChild of %d not allowed, " |
4137 | 0 | "increasing to 1.", threads_per_child); |
4138 | 0 | } else { |
4139 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00510) |
4140 | 0 | "ThreadsPerChild of %d not allowed, increasing to 1", |
4141 | 0 | threads_per_child); |
4142 | 0 | } |
4143 | 0 | threads_per_child = 1; |
4144 | 0 | } |
4145 | |
4146 | 0 | if (max_workers < threads_per_child) { |
4147 | 0 | if (startup) { |
4148 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00511) |
4149 | 0 | "WARNING: MaxRequestWorkers of %d is less than " |
4150 | 0 | "ThreadsPerChild of %d, increasing to %d. " |
4151 | 0 | "MaxRequestWorkers must be at least as large " |
4152 | 0 | "as the number of threads in a single server.", |
4153 | 0 | max_workers, threads_per_child, threads_per_child); |
4154 | 0 | } else { |
4155 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00512) |
4156 | 0 | "MaxRequestWorkers of %d is less than ThreadsPerChild " |
4157 | 0 | "of %d, increasing to match", |
4158 | 0 | max_workers, threads_per_child); |
4159 | 0 | } |
4160 | 0 | max_workers = threads_per_child; |
4161 | 0 | } |
4162 | |
4163 | 0 | active_daemons_limit = max_workers / threads_per_child; |
4164 | |
4165 | 0 | if (max_workers % threads_per_child) { |
4166 | 0 | int tmp_max_workers = active_daemons_limit * threads_per_child; |
4167 | |
4168 | 0 | if (startup) { |
4169 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00513) |
4170 | 0 | "WARNING: MaxRequestWorkers of %d is not an integer " |
4171 | 0 | "multiple of ThreadsPerChild of %d, decreasing to nearest " |
4172 | 0 | "multiple %d, for a maximum of %d servers.", |
4173 | 0 | max_workers, threads_per_child, tmp_max_workers, |
4174 | 0 | active_daemons_limit); |
4175 | 0 | } else { |
4176 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00514) |
4177 | 0 | "MaxRequestWorkers of %d is not an integer multiple " |
4178 | 0 | "of ThreadsPerChild of %d, decreasing to nearest " |
4179 | 0 | "multiple %d", max_workers, threads_per_child, |
4180 | 0 | tmp_max_workers); |
4181 | 0 | } |
4182 | 0 | max_workers = tmp_max_workers; |
4183 | 0 | } |
4184 | |
4185 | 0 | if (active_daemons_limit > server_limit) { |
4186 | 0 | if (startup) { |
4187 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00515) |
4188 | 0 | "WARNING: MaxRequestWorkers of %d would require %d servers " |
4189 | 0 | "and would exceed ServerLimit of %d, decreasing to %d. " |
4190 | 0 | "To increase, please see the ServerLimit directive.", |
4191 | 0 | max_workers, active_daemons_limit, server_limit, |
4192 | 0 | server_limit * threads_per_child); |
4193 | 0 | } else { |
4194 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00516) |
4195 | 0 | "MaxRequestWorkers of %d would require %d servers and " |
4196 | 0 | "exceed ServerLimit of %d, decreasing to %d", |
4197 | 0 | max_workers, active_daemons_limit, server_limit, |
4198 | 0 | server_limit * threads_per_child); |
4199 | 0 | } |
4200 | 0 | active_daemons_limit = server_limit; |
4201 | 0 | } |
4202 | | |
4203 | | /* ap_daemons_to_start > active_daemons_limit checked in ap_mpm_run() */ |
4204 | 0 | if (ap_daemons_to_start < 1) { |
4205 | 0 | if (startup) { |
4206 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00517) |
4207 | 0 | "WARNING: StartServers of %d not allowed, " |
4208 | 0 | "increasing to 1.", ap_daemons_to_start); |
4209 | 0 | } else { |
4210 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00518) |
4211 | 0 | "StartServers of %d not allowed, increasing to 1", |
4212 | 0 | ap_daemons_to_start); |
4213 | 0 | } |
4214 | 0 | ap_daemons_to_start = 1; |
4215 | 0 | } |
4216 | |
4217 | 0 | if (min_spare_threads < 1) { |
4218 | 0 | if (startup) { |
4219 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00519) |
4220 | 0 | "WARNING: MinSpareThreads of %d not allowed, " |
4221 | 0 | "increasing to 1 to avoid almost certain server " |
4222 | 0 | "failure. Please read the documentation.", |
4223 | 0 | min_spare_threads); |
4224 | 0 | } else { |
4225 | 0 | ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00520) |
4226 | 0 | "MinSpareThreads of %d not allowed, increasing to 1", |
4227 | 0 | min_spare_threads); |
4228 | 0 | } |
4229 | 0 | min_spare_threads = 1; |
4230 | 0 | } |
4231 | | |
4232 | | /* max_spare_threads < min_spare_threads + threads_per_child |
4233 | | * checked in ap_mpm_run() |
4234 | | */ |
4235 | |
4236 | 0 | return OK; |
4237 | 0 | } |
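/*
 * A minimal standalone sketch of the reconciliation arithmetic above,
 * using hypothetical directive values; not part of event.c.  With
 * ThreadsPerChild 25 and MaxRequestWorkers 410, the worker count is
 * first rounded down to the nearest multiple (400), yielding at most
 * 400 / 25 = 16 children, which is then capped at ServerLimit.
 */
#include <stdio.h>

int main(void)
{
    int threads_per_child = 25;   /* ThreadsPerChild */
    int max_workers = 410;        /* MaxRequestWorkers, not a multiple */
    int server_limit = 16;        /* ServerLimit */

    int active_daemons_limit = max_workers / threads_per_child;

    /* round down to a multiple of ThreadsPerChild, as the
     * APLOGNO(00513)/(00514) branch above does */
    if (max_workers % threads_per_child)
        max_workers = active_daemons_limit * threads_per_child;

    /* cap the child count at ServerLimit, as the
     * APLOGNO(00515)/(00516) branch above does */
    if (active_daemons_limit > server_limit)
        active_daemons_limit = server_limit;

    printf("children=%d workers=%d\n", active_daemons_limit, max_workers);
    return 0;  /* prints: children=16 workers=400 */
}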
4238 | | |
4239 | | static void event_hooks(apr_pool_t * p) |
4240 | 0 | { |
4241 | | /* Our open_logs hook function must run before the core's, or stderr |
4242 | | * will be redirected to a file, and the messages won't print to the |
4243 | | * console. |
4244 | | */ |
4245 | 0 | static const char *const aszSucc[] = { "core.c", NULL }; |
4246 | 0 | one_process = 0; |
4247 | 0 | ap_force_set_tz(p); |
4248 | |
4249 | 0 | ap_hook_open_logs(event_open_logs, NULL, aszSucc, APR_HOOK_REALLY_FIRST); |
4250 | | /* we need to set the MPM state before other pre-config hooks use MPM query |
4251 | | * to retrieve it, so register as REALLY_FIRST |
4252 | | */ |
4253 | 0 | ap_hook_pre_config(event_pre_config, NULL, NULL, APR_HOOK_REALLY_FIRST); |
4254 | 0 | ap_hook_post_config(event_post_config, NULL, NULL, APR_HOOK_MIDDLE); |
4255 | 0 | ap_hook_check_config(event_check_config, NULL, NULL, APR_HOOK_MIDDLE); |
4256 | 0 | ap_hook_mpm(event_run, NULL, NULL, APR_HOOK_MIDDLE); |
4257 | 0 | ap_hook_mpm_query(event_query, NULL, NULL, APR_HOOK_MIDDLE); |
4258 | 0 | ap_hook_mpm_register_timed_callback(event_register_timed_callback, NULL, NULL, |
4259 | 0 | APR_HOOK_MIDDLE); |
4260 | 0 | ap_hook_mpm_register_poll_callback(event_register_poll_callback, |
4261 | 0 | NULL, NULL, APR_HOOK_MIDDLE); |
4262 | 0 | ap_hook_mpm_register_poll_callback_timeout(event_register_poll_callback_ex, |
4263 | 0 | NULL, NULL, APR_HOOK_MIDDLE); |
4264 | 0 | ap_hook_pre_read_request(event_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE); |
4265 | 0 | ap_hook_post_read_request(event_post_read_request, NULL, NULL, APR_HOOK_MIDDLE); |
4266 | 0 | ap_hook_mpm_get_name(event_get_name, NULL, NULL, APR_HOOK_MIDDLE); |
4267 | 0 | ap_hook_mpm_resume_suspended(event_resume_suspended, NULL, NULL, APR_HOOK_MIDDLE); |
4268 | |
4269 | 0 | ap_hook_pre_connection(event_pre_connection, NULL, NULL, APR_HOOK_REALLY_FIRST); |
4270 | 0 | ap_hook_protocol_switch(event_protocol_switch, NULL, NULL, APR_HOOK_REALLY_FIRST); |
4271 | 0 | } |
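/*
 * In the APR hook registration calls above, the aszPre/aszSucc string
 * arrays give relative ordering among hooks in the same nOrder bucket:
 * modules named in aszPre run before this hook, and modules named in
 * aszSucc run after it.  aszSucc = {"core.c", NULL} therefore forces
 * event_open_logs to run before core.c's open_logs hook, keeping stderr
 * attached to the console.  A hypothetical module registering an
 * open_logs hook the same way might look like this sketch
 * (my_open_logs and my_register_hooks are assumed names, not part of
 * event.c):
 */
#include "httpd.h"
#include "http_config.h"

static int my_open_logs(apr_pool_t *pconf, apr_pool_t *plog,
                        apr_pool_t *ptemp, server_rec *s)
{
    /* run before core.c redirects stderr */
    return OK;
}

static void my_register_hooks(apr_pool_t *p)
{
    static const char *const succ[] = { "core.c", NULL };
    ap_hook_open_logs(my_open_logs, NULL, succ, APR_HOOK_REALLY_FIRST);
}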
4272 | | |
4273 | | static const char *set_daemons_to_start(cmd_parms *cmd, void *dummy, |
4274 | | const char *arg) |
4275 | 0 | { |
4276 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4277 | 0 | if (err != NULL) { |
4278 | 0 | return err; |
4279 | 0 | } |
4280 | | |
4281 | 0 | ap_daemons_to_start = atoi(arg); |
4282 | 0 | return NULL; |
4283 | 0 | } |
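/*
 * Like the other TAKE1 handlers below, this parses with atoi() and does
 * no range checking of its own: a malformed value such as "abc" parses
 * to 0 and is only caught later, when event_check_config() clamps
 * StartServers to a minimum of 1.
 */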
4284 | | |
4285 | | static const char *set_min_spare_threads(cmd_parms * cmd, void *dummy, |
4286 | | const char *arg) |
4287 | 0 | { |
4288 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4289 | 0 | if (err != NULL) { |
4290 | 0 | return err; |
4291 | 0 | } |
4292 | | |
4293 | 0 | min_spare_threads = atoi(arg); |
4294 | 0 | return NULL; |
4295 | 0 | } |
4296 | | |
4297 | | static const char *set_max_spare_threads(cmd_parms * cmd, void *dummy, |
4298 | | const char *arg) |
4299 | 0 | { |
4300 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4301 | 0 | if (err != NULL) { |
4302 | 0 | return err; |
4303 | 0 | } |
4304 | | |
4305 | 0 | max_spare_threads = atoi(arg); |
4306 | 0 | return NULL; |
4307 | 0 | } |
4308 | | |
4309 | | static const char *set_max_workers(cmd_parms * cmd, void *dummy, |
4310 | | const char *arg) |
4311 | 0 | { |
4312 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4313 | 0 | if (err != NULL) { |
4314 | 0 | return err; |
4315 | 0 | } |
4316 | 0 | if (!strcasecmp(cmd->cmd->name, "MaxClients")) { |
4317 | 0 | ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(00521) |
4318 | 0 | "MaxClients is deprecated, use MaxRequestWorkers " |
4319 | 0 | "instead."); |
4320 | 0 | } |
4321 | 0 | max_workers = atoi(arg); |
4322 | 0 | return NULL; |
4323 | 0 | } |
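/*
 * One handler serves both spellings: cmd->cmd->name holds the directive
 * name as written in the configuration, so the deprecation notice
 * APLOGNO(00521) fires only when the old MaxClients form is used.
 */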
4324 | | |
4325 | | static const char *set_threads_per_child(cmd_parms * cmd, void *dummy, |
4326 | | const char *arg) |
4327 | 0 | { |
4328 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4329 | 0 | if (err != NULL) { |
4330 | 0 | return err; |
4331 | 0 | } |
4332 | | |
4333 | 0 | threads_per_child = atoi(arg); |
4334 | 0 | return NULL; |
4335 | 0 | } |
4336 | | static const char *set_server_limit(cmd_parms *cmd, void *dummy, const char *arg)
4337 | 0 | { |
4338 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4339 | 0 | if (err != NULL) { |
4340 | 0 | return err; |
4341 | 0 | } |
4342 | | |
4343 | 0 | server_limit = atoi(arg); |
4344 | 0 | return NULL; |
4345 | 0 | } |
4346 | | |
4347 | | static const char *set_thread_limit(cmd_parms * cmd, void *dummy, |
4348 | | const char *arg) |
4349 | 0 | { |
4350 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4351 | 0 | if (err != NULL) { |
4352 | 0 | return err; |
4353 | 0 | } |
4354 | | |
4355 | 0 | thread_limit = atoi(arg); |
4356 | 0 | return NULL; |
4357 | 0 | } |
4358 | | |
4359 | | static const char *set_worker_factor(cmd_parms * cmd, void *dummy, |
4360 | | const char *arg) |
4361 | 0 | { |
4362 | 0 | double val; |
4363 | 0 | char *endptr; |
4364 | 0 | const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); |
4365 | 0 | if (err != NULL) { |
4366 | 0 | return err; |
4367 | 0 | } |
4368 | | |
4369 | 0 | val = strtod(arg, &endptr); |
4370 | 0 | if (*endptr) |
4371 | 0 | return "error parsing value"; |
4372 | | |
4373 | 0 | if (val <= 0) |
4374 | 0 | return "AsyncRequestWorkerFactor argument must be a positive number"; |
4375 | | |
4376 | 0 | worker_factor = val * WORKER_FACTOR_SCALE; |
4377 | 0 | if (worker_factor < WORKER_FACTOR_SCALE) { |
4378 | 0 | worker_factor = WORKER_FACTOR_SCALE; |
4379 | 0 | } |
4380 | 0 | return NULL; |
4381 | 0 | } |
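/*
 * The directive value is stored scaled by WORKER_FACTOR_SCALE so that
 * fractional factors (e.g. 1.5) survive integer arithmetic, with a
 * floor of 1.0.  Per the mod_mpm_event documentation, the factor bounds
 * how many concurrent connections a child will accept:
 *
 *   connections <= ThreadsPerChild
 *                  + (AsyncRequestWorkerFactor * number of idle workers)
 *
 * A sketch of the scaling, assuming WORKER_FACTOR_SCALE is 16 (the real
 * macro is defined earlier in this file); scale_factor is a
 * hypothetical helper mirroring the arithmetic above:
 */
#include <stdio.h>

#define WORKER_FACTOR_SCALE 16  /* assumed value for illustration */

static unsigned int scale_factor(double val)
{
    unsigned int wf = (unsigned int)(val * WORKER_FACTOR_SCALE);
    if (wf < WORKER_FACTOR_SCALE) {
        wf = WORKER_FACTOR_SCALE;  /* never below a factor of 1.0 */
    }
    return wf;
}

int main(void)
{
    printf("%u %u %u\n",
           scale_factor(2.0),    /* 32 */
           scale_factor(1.5),    /* 24 */
           scale_factor(0.5));   /* clamped to 16 */
    return 0;
}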
4382 | | |
4383 | | |
4384 | | static const command_rec event_cmds[] = { |
4385 | | LISTEN_COMMANDS, |
4386 | | AP_INIT_TAKE1("StartServers", set_daemons_to_start, NULL, RSRC_CONF, |
4387 | | "Number of child processes launched at server startup"), |
4388 | | AP_INIT_TAKE1("ServerLimit", set_server_limit, NULL, RSRC_CONF, |
4389 | | "Maximum number of child processes for this run of Apache"), |
4390 | | AP_INIT_TAKE1("MinSpareThreads", set_min_spare_threads, NULL, RSRC_CONF, |
4391 | | "Minimum number of idle threads, to handle request spikes"), |
4392 | | AP_INIT_TAKE1("MaxSpareThreads", set_max_spare_threads, NULL, RSRC_CONF, |
4393 | | "Maximum number of idle threads"), |
4394 | | AP_INIT_TAKE1("MaxClients", set_max_workers, NULL, RSRC_CONF, |
4395 | | "Deprecated name of MaxRequestWorkers"), |
4396 | | AP_INIT_TAKE1("MaxRequestWorkers", set_max_workers, NULL, RSRC_CONF, |
4397 | | "Maximum number of threads alive at the same time"), |
4398 | | AP_INIT_TAKE1("ThreadsPerChild", set_threads_per_child, NULL, RSRC_CONF, |
4399 | | "Number of threads each child creates"), |
4400 | | AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF, |
4401 | | "Maximum number of worker threads per child process for this " |
4402 | | "run of Apache - Upper limit for ThreadsPerChild"), |
4403 | | AP_INIT_TAKE1("AsyncRequestWorkerFactor", set_worker_factor, NULL, RSRC_CONF, |
4404 | | "How many additional connects will be accepted per idle " |
4405 | | "worker thread"), |
4406 | | AP_GRACEFUL_SHUTDOWN_TIMEOUT_COMMAND, |
4407 | | {NULL} |
4408 | | }; |
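/*
 * The table above maps configuration directives to their handlers.  A
 * hypothetical httpd.conf fragment exercising them might read (values
 * illustrative, not defaults):
 *
 *   StartServers              3
 *   ServerLimit              16
 *   ThreadLimit              64
 *   ThreadsPerChild          25
 *   MaxRequestWorkers       400
 *   MinSpareThreads          25
 *   MaxSpareThreads          75
 *   AsyncRequestWorkerFactor  2
 *
 * event_check_config() accepts this set unchanged: 400 is an exact
 * multiple of 25, requiring 16 children, which equals ServerLimit.
 */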
4409 | | |
4410 | | AP_DECLARE_MODULE(mpm_event) = { |
4411 | | MPM20_MODULE_STUFF, |
4412 | | NULL, /* hook to run before apache parses args */ |
4413 | | NULL, /* create per-directory config structure */ |
4414 | | NULL, /* merge per-directory config structures */ |
4415 | | NULL, /* create per-server config structure */ |
4416 | | NULL, /* merge per-server config structures */ |
4417 | | event_cmds, /* command apr_table_t */ |
4418 | | event_hooks /* register_hooks */ |
4419 | | }; |
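/*
 * AP_DECLARE_MODULE expands to the exported module record that httpd's
 * LoadModule machinery resolves.  When built as a DSO, this MPM is
 * typically enabled with a line such as (conventional path; adjust to
 * the installation):
 *
 *   LoadModule mpm_event_module modules/mod_mpm_event.so
 */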