/src/ntp-dev/libntp/ntp_worker.c
/*
 * ntp_worker.c
 */
#include <config.h>
#include "ntp_workimpl.h"

#ifdef WORKER

#include <stdio.h>
#include <ctype.h>
#include <signal.h>

#include "iosignal.h"
#include "ntp_stdlib.h"
#include "ntp_malloc.h"
#include "ntp_syslog.h"
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_assert.h"
#include "ntp_unixtime.h"
#include "intreswork.h"


#define CHILD_MAX_IDLE	(3 * 60)	/* seconds, idle worker limit */

blocking_child **	blocking_children;
size_t			blocking_children_alloc;
int			worker_per_query;	/* boolean */
int			intres_req_pending;
volatile u_int		blocking_child_ready_seen;
volatile u_int		blocking_child_ready_done;


#ifndef HAVE_IO_COMPLETION_PORT
/*
 * pipe_socketpair()
 *
 * Provides an AF_UNIX socketpair on systems which have them, otherwise
 * a pair of unidirectional pipes.
 */
int
pipe_socketpair(
	int	caller_fds[2],
	int *	is_pipe
	)
{
	int	rc;
	int	fds[2];
	int	called_pipe;

#ifdef HAVE_SOCKETPAIR
	rc = socketpair(AF_UNIX, SOCK_STREAM, 0, &fds[0]);
#else
	rc = -1;
#endif

	if (-1 == rc) {
		rc = pipe(&fds[0]);
		called_pipe = TRUE;
	} else {
		called_pipe = FALSE;
	}

	if (-1 == rc)
		return rc;

	caller_fds[0] = fds[0];
	caller_fds[1] = fds[1];
	if (is_pipe != NULL)
		*is_pipe = called_pipe;

	return 0;
}
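
/*
 * Illustrative use (a sketch, not part of the original source).  With
 * the pipe fallback the two descriptors are unidirectional; with a
 * real socketpair both are bidirectional:
 *
 *	int	fds[2];
 *	int	is_pipe;
 *
 *	if (0 != pipe_socketpair(fds, &is_pipe))
 *		msyslog(LOG_ERR, "pipe_socketpair: %m");
 *	else if (is_pipe)
 *		... fds[0] is the read end, fds[1] the write end ...
 *	else
 *		... both fds are read/write sockets ...
 */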


/*
 * close_all_except()
 *
 * Close all file descriptors except the given keep_fd.
 */
void
close_all_except(
	int	keep_fd
	)
{
	int fd;

	for (fd = 0; fd < keep_fd; fd++)
		close(fd);

	close_all_beyond(keep_fd);
}


/*
 * close_all_beyond()
 *
 * Close all file descriptors after the given keep_fd, which is the
 * highest fd to keep open.
 */
void
close_all_beyond(
	int	keep_fd
	)
{
# ifdef HAVE_CLOSEFROM
	closefrom(keep_fd + 1);
# elif defined(F_CLOSEM)
	/*
	 * From 'Writing Reliable AIX Daemons,' SG24-4946-00,
	 * by Eric Agar (saves us from doing 32767 system
	 * calls)
	 */
	if (fcntl(keep_fd + 1, F_CLOSEM, 0) == -1)
		msyslog(LOG_ERR, "F_CLOSEM(%d): %m", keep_fd + 1);
# else	/* !HAVE_CLOSEFROM && !F_CLOSEM follows */
	int fd;
	int max_fd;

	max_fd = GETDTABLESIZE();
	for (fd = keep_fd + 1; fd < max_fd; fd++)
		close(fd);
# endif	/* !HAVE_CLOSEFROM && !F_CLOSEM */
}
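
/*
 * Usage note (illustrative, not from the original source): the two
 * routines differ in whether keep_fd itself survives:
 *
 *	close_all_except(fd);	closes every descriptor except fd
 *	close_all_beyond(fd);	closes every descriptor above fd,
 *				leaving 0..fd open
 */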
#endif	/* HAVE_IO_COMPLETION_PORT */


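/*
 * available_blocking_child_slot()
 *
 * Returns the index of the first free or reusable entry in
 * blocking_children[], growing the array by four slots when all are
 * occupied.
 */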
u_int
available_blocking_child_slot(void)
{
	const size_t	each = sizeof(blocking_children[0]);
	u_int		slot;
	size_t		prev_alloc;
	size_t		new_alloc;
	size_t		prev_octets;
	size_t		octets;

	for (slot = 0; slot < blocking_children_alloc; slot++) {
		if (NULL == blocking_children[slot])
			return slot;
		if (blocking_children[slot]->reusable) {
			blocking_children[slot]->reusable = FALSE;
			return slot;
		}
	}

	prev_alloc = blocking_children_alloc;
	prev_octets = prev_alloc * each;
	new_alloc = blocking_children_alloc + 4;
	octets = new_alloc * each;
	blocking_children = erealloc_zero(blocking_children, octets,
					  prev_octets);
	blocking_children_alloc = new_alloc;

	/* assume we'll never have enough workers to overflow u_int */
	return (u_int)prev_alloc;
}


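/*
 * queue_blocking_request()
 *
 * Builds a request header and hands the request to a worker child,
 * allocating the child record on first use.  With worker_per_query
 * each request gets its own child; otherwise a single resolver child
 * (intres_slot) is reused for all requests.
 */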
int
queue_blocking_request(
	blocking_work_req	rtype,
	void *			req,
	size_t			reqsize,
	blocking_work_callback	done_func,
	void *			context
	)
{
	static u_int		intres_slot = UINT_MAX;
	u_int			child_slot;
	blocking_child *	c;
	blocking_pipe_header	req_hdr;

	req_hdr.octets = sizeof(req_hdr) + reqsize;
	req_hdr.magic_sig = BLOCKING_REQ_MAGIC;
	req_hdr.rtype = rtype;
	req_hdr.done_func = done_func;
	req_hdr.context = context;

	child_slot = UINT_MAX;
	if (worker_per_query || UINT_MAX == intres_slot ||
	    blocking_children[intres_slot]->reusable)
		child_slot = available_blocking_child_slot();
	if (!worker_per_query) {
		if (UINT_MAX == intres_slot)
			intres_slot = child_slot;
		else
			child_slot = intres_slot;
		if (0 == intres_req_pending)
			intres_timeout_req(0);
	}
	intres_req_pending++;
	INSIST(UINT_MAX != child_slot);
	c = blocking_children[child_slot];
	if (NULL == c) {
		c = emalloc_zero(sizeof(*c));
#ifdef WORK_FORK
		c->req_read_pipe = -1;
		c->req_write_pipe = -1;
#endif
#ifdef WORK_PIPE
		c->resp_read_pipe = -1;
		c->resp_write_pipe = -1;
#endif
		blocking_children[child_slot] = c;
	}
	req_hdr.child_idx = child_slot;

	return send_blocking_req_internal(c, &req_hdr, req);
}
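
/*
 * Illustrative use (a sketch, not part of the original source; my_req,
 * my_done, and my_context are hypothetical).  The callback signature
 * matches the dispatch in process_blocking_resp() below:
 *
 *	static void
 *	my_done(blocking_work_req rtype, void *context, size_t respsize,
 *		void *resp)
 *	{
 *		... consume the respsize octets at resp ...
 *	}
 *
 *	queue_blocking_request(BLOCKING_GETADDRINFO, &my_req,
 *			       sizeof(my_req), &my_done, my_context);
 */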


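/*
 * queue_blocking_response()
 *
 * Called on the worker side: fills in the response header from the
 * matching request and sends the response back to the parent.
 */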
int
queue_blocking_response(
	blocking_child *		c,
	blocking_pipe_header *		resp,
	size_t				respsize,
	const blocking_pipe_header *	req
	)
{
	resp->octets = respsize;
	resp->magic_sig = BLOCKING_RESP_MAGIC;
	resp->rtype = req->rtype;
	resp->context = req->context;
	resp->done_func = req->done_func;

	return send_blocking_resp_internal(c, resp);
}


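/*
 * process_blocking_resp()
 *
 * Runs in the parent: consumes a completed response from a worker
 * child and dispatches it to the callback recorded in the original
 * request header.
 */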
void
process_blocking_resp(
	blocking_child *	c
	)
{
	blocking_pipe_header *	resp;
	void *			data;

	/*
	 * On Windows send_blocking_resp_internal() may signal the
	 * blocking_response_ready event multiple times while we're
	 * processing a response, so always consume all available
	 * responses before returning to test the event again.
	 */
#ifdef WORK_THREAD
	do {
#endif
		resp = receive_blocking_resp_internal(c);
		if (NULL != resp) {
			DEBUG_REQUIRE(BLOCKING_RESP_MAGIC ==
				      resp->magic_sig);
			data = (char *)resp + sizeof(*resp);
			intres_req_pending--;
			(*resp->done_func)(resp->rtype, resp->context,
					   resp->octets - sizeof(*resp),
					   data);
			free(resp);
		}
#ifdef WORK_THREAD
	} while (NULL != resp);
#endif
	if (!worker_per_query && 0 == intres_req_pending)
		intres_timeout_req(CHILD_MAX_IDLE);
	else if (worker_per_query)
		req_child_exit(c);
}

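/*
 * harvest_blocking_responses()
 *
 * Polls the ready-seen/ready-done generation counters, which worker
 * threads advance asynchronously, and processes responses from each
 * child whose counters disagree.
 */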
void
harvest_blocking_responses(void)
{
	size_t		idx;
	blocking_child *cp;
	u_int		scseen, scdone;

	scseen = blocking_child_ready_seen;
	scdone = blocking_child_ready_done;
	if (scdone != scseen) {
		blocking_child_ready_done = scseen;
		for (idx = 0; idx < blocking_children_alloc; idx++) {
			cp = blocking_children[idx];
			if (NULL == cp)
				continue;
			scseen = cp->resp_ready_seen;
			scdone = cp->resp_ready_done;
			if (scdone != scseen) {
				cp->resp_ready_done = scseen;
				process_blocking_resp(cp);
			}
		}
	}
}


/*
 * blocking_child_common runs as a forked child or a thread
 */
int
blocking_child_common(
	blocking_child *c
	)
{
	int say_bye;
	blocking_pipe_header *req;

	say_bye = FALSE;
	while (!say_bye) {
		req = receive_blocking_req_internal(c);
		if (NULL == req) {
			say_bye = TRUE;
			continue;
		}

		DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == req->magic_sig);

		switch (req->rtype) {
		case BLOCKING_GETADDRINFO:
			if (blocking_getaddrinfo(c, req))
				say_bye = TRUE;
			break;

		case BLOCKING_GETNAMEINFO:
			if (blocking_getnameinfo(c, req))
				say_bye = TRUE;
			break;

		default:
			msyslog(LOG_ERR, "unknown req %d to blocking worker", req->rtype);
			say_bye = TRUE;
		}

		free(req);
	}

	return 0;
}


/*
 * worker_idle_timer_fired()
 *
 * The parent starts this timer when the last pending response has been
 * received from the child, making it idle, and clears the timer when a
 * request is dispatched to the child.  Once the timer expires, the
 * child is sent packing.
 *
 * This is called when worker_idle_timer is nonzero and less than or
 * equal to current_time.
 */
void
worker_idle_timer_fired(void)
{
	u_int			idx;
	blocking_child *	c;

	DEBUG_REQUIRE(0 == intres_req_pending);

	intres_timeout_req(0);
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		req_child_exit(c);
	}
}


#else	/* !WORKER follows */
int ntp_worker_nonempty_compilation_unit;
#endif