/src/httpd/srclib/apr/locks/unix/proc_mutex.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Licensed to the Apache Software Foundation (ASF) under one or more |
2 | | * contributor license agreements. See the NOTICE file distributed with |
3 | | * this work for additional information regarding copyright ownership. |
4 | | * The ASF licenses this file to You under the Apache License, Version 2.0 |
5 | | * (the "License"); you may not use this file except in compliance with |
6 | | * the License. You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include "apr.h" |
18 | | #include "apr_strings.h" |
19 | | #include "apr_arch_proc_mutex.h" |
20 | | #include "apr_arch_file_io.h" /* for apr_mkstemp() */ |
21 | | #include "apr_md5.h" /* for apr_md5() */ |
22 | | #include "apr_atomic.h" |
23 | | |
24 | | APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex) |
25 | 0 | { |
26 | 0 | apr_status_t rv = apr_proc_mutex_cleanup(mutex); |
27 | 0 | if (rv == APR_SUCCESS) { |
28 | 0 | apr_pool_cleanup_kill(mutex->pool, mutex, apr_proc_mutex_cleanup); |
29 | 0 | } |
30 | 0 | return rv; |
31 | 0 | } |
32 | | |
33 | | #if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || \ |
34 | | APR_HAS_SYSVSEM_SERIALIZE |
/* No-op child_init method: mechanisms that need no per-child
 * re-initialization (posixsem, fcntl, sysvsem) plug this into their
 * method table.  Always succeeds. */
static apr_status_t proc_mutex_no_child_init(apr_proc_mutex_t **mutex,
                                             apr_pool_t *cont,
                                             const char *fname)
{
    return APR_SUCCESS;
}
41 | | #endif |
42 | | |
43 | | #if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_PROC_PTHREAD_SERIALIZE |
/* Stub perms_set method for mechanisms whose underlying object has no
 * ownership/permission controls that APR can adjust (posixsem,
 * proc-pthread).  Always reports APR_ENOTIMPL. */
static apr_status_t proc_mutex_no_perms_set(apr_proc_mutex_t *mutex,
                                            apr_fileperms_t perms,
                                            apr_uid_t uid,
                                            apr_gid_t gid)
{
    return APR_ENOTIMPL;
}
51 | | #endif |
52 | | |
53 | | #if APR_HAS_FCNTL_SERIALIZE \ |
54 | | || APR_HAS_FLOCK_SERIALIZE \ |
55 | | || (APR_HAS_SYSVSEM_SERIALIZE \ |
56 | | && !defined(HAVE_SEMTIMEDOP)) \ |
57 | | || (APR_HAS_POSIXSEM_SERIALIZE \ |
58 | | && !defined(HAVE_SEM_TIMEDWAIT)) \ |
59 | | || (APR_HAS_PROC_PTHREAD_SERIALIZE \ |
60 | | && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) \ |
61 | | && !defined(HAVE_PTHREAD_CONDATTR_SETPSHARED)) |
62 | | static apr_status_t proc_mutex_spinsleep_timedacquire(apr_proc_mutex_t *mutex, |
63 | | apr_interval_time_t timeout) |
64 | 0 | { |
65 | 0 | #define SLEEP_TIME apr_time_from_msec(10) |
66 | 0 | apr_status_t rv; |
67 | 0 | for (;;) { |
68 | 0 | rv = apr_proc_mutex_trylock(mutex); |
69 | 0 | if (!APR_STATUS_IS_EBUSY(rv)) { |
70 | 0 | if (rv == APR_SUCCESS) { |
71 | 0 | mutex->curr_locked = 1; |
72 | 0 | } |
73 | 0 | break; |
74 | 0 | } |
75 | 0 | if (timeout <= 0) { |
76 | 0 | rv = APR_TIMEUP; |
77 | 0 | break; |
78 | 0 | } |
79 | 0 | if (timeout > SLEEP_TIME) { |
80 | 0 | apr_sleep(SLEEP_TIME); |
81 | 0 | timeout -= SLEEP_TIME; |
82 | 0 | } |
83 | 0 | else { |
84 | 0 | apr_sleep(timeout); |
85 | 0 | timeout = 0; |
86 | 0 | } |
87 | 0 | } |
88 | 0 | return rv; |
89 | 0 | } |
90 | | #endif |
91 | | |
92 | | #if APR_HAS_POSIXSEM_SERIALIZE |
93 | | |
94 | | #ifndef SEM_FAILED |
95 | | #define SEM_FAILED (-1) |
96 | | #endif |
97 | | |
98 | | static apr_status_t proc_mutex_posix_cleanup(void *mutex_) |
99 | 0 | { |
100 | 0 | apr_proc_mutex_t *mutex = mutex_; |
101 | |
|
102 | 0 | if (sem_close(mutex->os.psem_interproc) < 0) { |
103 | 0 | return errno; |
104 | 0 | } |
105 | | |
106 | 0 | return APR_SUCCESS; |
107 | 0 | } |
108 | | |
/* Create an unlocked (value 1) POSIX named semaphore for this mutex.
 * The name is derived from an MD5 of fname (or from the current time when
 * fname is NULL), truncated to fit portable sem name limits; on
 * ENAMETOOLONG we retry once with an even shorter name.  The semaphore is
 * sem_unlink()ed immediately so it disappears once all handles close. */
static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex,
                                            const char *fname)
{
#define APR_POSIXSEM_NAME_MAX 30
#define APR_POSIXSEM_NAME_MIN 13
    sem_t *psem;
    char semname[APR_MD5_DIGESTSIZE * 2 + 2];

    /*
     * This bogusness is to follow what appears to be the
     * lowest common denominator in Posix semaphore naming:
     *    - start with '/'
     *    - be at most 14 chars
     *    - be unique and not match anything on the filesystem
     *
     * Because of this, we use fname to generate an md5 hex checksum
     * and use that as the name of the semaphore. If no filename was
     * given, we create one based on the time. We tuck the name
     * away, since it might be useful for debugging.
     *
     * To make this as robust as possible, we initially try something
     * larger (and hopefully more unique) and gracefully fail down to the
     * LCD above.
     *
     * NOTE: Darwin (Mac OS X) seems to be the most restrictive
     * implementation. Versions previous to Darwin 6.2 had the 14
     * char limit, but later rev's allow up to 31 characters.
     *
     */
    if (fname) {
        unsigned char digest[APR_MD5_DIGESTSIZE];   /* note dependency on semname here */
        const char *hex = "0123456789abcdef";
        char *p = semname;
        int i;
        apr_md5(digest, fname, strlen(fname));
        *p++ = '/';     /* must start with /, right? */
        /* Hex-encode the 16-byte digest -> 32 chars, then truncate. */
        for (i = 0; i < sizeof(digest); i++) {
            *p++ = hex[digest[i] >> 4];
            *p++ = hex[digest[i] & 0xF];
        }
        semname[APR_POSIXSEM_NAME_MAX] = '\0';
    } else {
        apr_time_t now;
        unsigned long sec;
        unsigned long usec;
        now = apr_time_now();
        sec = apr_time_sec(now);
        usec = apr_time_usec(now);
        apr_snprintf(semname, sizeof(semname), "/ApR.%lxZ%lx", sec, usec);
    }
    /* O_EXCL: the name must not already exist; retry across EINTR. */
    do {
        psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
    } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
    if (psem == (sem_t *)SEM_FAILED) {
        if (errno == ENAMETOOLONG) {
            /* Oh well, good try */
            semname[APR_POSIXSEM_NAME_MIN] = '\0';
        } else {
            return errno;
        }
        /* Second attempt with the shorter, LCD-compliant name. */
        do {
            psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
        } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
    }

    if (psem == (sem_t *)SEM_FAILED) {
        return errno;
    }
    /* Ahhh. The joys of Posix sems. Predelete it... */
    sem_unlink(semname);
    new_mutex->os.psem_interproc = psem;
    new_mutex->fname = apr_pstrdup(new_mutex->pool, semname);
    apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex,
                              apr_proc_mutex_cleanup,
                              apr_pool_cleanup_null);
    return APR_SUCCESS;
}
186 | | |
187 | | static apr_status_t proc_mutex_posix_acquire(apr_proc_mutex_t *mutex) |
188 | 0 | { |
189 | 0 | int rc; |
190 | |
|
191 | 0 | do { |
192 | 0 | rc = sem_wait(mutex->os.psem_interproc); |
193 | 0 | } while (rc < 0 && errno == EINTR); |
194 | 0 | if (rc < 0) { |
195 | 0 | return errno; |
196 | 0 | } |
197 | 0 | mutex->curr_locked = 1; |
198 | 0 | return APR_SUCCESS; |
199 | 0 | } |
200 | | |
201 | | static apr_status_t proc_mutex_posix_tryacquire(apr_proc_mutex_t *mutex) |
202 | 0 | { |
203 | 0 | int rc; |
204 | |
|
205 | 0 | do { |
206 | 0 | rc = sem_trywait(mutex->os.psem_interproc); |
207 | 0 | } while (rc < 0 && errno == EINTR); |
208 | 0 | if (rc < 0) { |
209 | 0 | if (errno == EAGAIN) { |
210 | 0 | return APR_EBUSY; |
211 | 0 | } |
212 | 0 | return errno; |
213 | 0 | } |
214 | 0 | mutex->curr_locked = 1; |
215 | 0 | return APR_SUCCESS; |
216 | 0 | } |
217 | | |
218 | | #if defined(HAVE_SEM_TIMEDWAIT) |
/* Timed acquire using sem_timedwait().  A non-positive timeout degrades
 * to a single try (mapping EBUSY to APR_TIMEUP); otherwise the relative
 * timeout is converted to an absolute CLOCK_REALTIME deadline as
 * sem_timedwait() requires.  EINTR restarts the wait against the same
 * deadline, so interruptions do not extend the timeout. */
static apr_status_t proc_mutex_posix_timedacquire(apr_proc_mutex_t *mutex,
                                                  apr_interval_time_t timeout)
{
    if (timeout <= 0) {
        apr_status_t rv = proc_mutex_posix_tryacquire(mutex);
        return (rv == APR_EBUSY) ? APR_TIMEUP : rv;
    }
    else {
        int rc;
        struct timespec abstime;

        /* Convert relative timeout to an absolute deadline. */
        timeout += apr_time_now();
        abstime.tv_sec = apr_time_sec(timeout);
        abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */

        do {
            rc = sem_timedwait(mutex->os.psem_interproc, &abstime);
        } while (rc < 0 && errno == EINTR);
        if (rc < 0) {
            if (errno == ETIMEDOUT) {
                return APR_TIMEUP;
            }
            return errno;
        }
    }
    mutex->curr_locked = 1;
    return APR_SUCCESS;
}
247 | | #endif |
248 | | |
249 | | static apr_status_t proc_mutex_posix_release(apr_proc_mutex_t *mutex) |
250 | 0 | { |
251 | 0 | mutex->curr_locked = 0; |
252 | 0 | if (sem_post(mutex->os.psem_interproc) < 0) { |
253 | | /* any failure is probably fatal, so no big deal to leave |
254 | | * ->curr_locked at 0. */ |
255 | 0 | return errno; |
256 | 0 | } |
257 | 0 | return APR_SUCCESS; |
258 | 0 | } |
259 | | |
/* Method table for the POSIX named-semaphore mechanism. */
static const apr_proc_mutex_unix_lock_methods_t mutex_posixsem_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(POSIXSEM_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_posix_create,
    proc_mutex_posix_acquire,
    proc_mutex_posix_tryacquire,
#if defined(HAVE_SEM_TIMEDWAIT)
    proc_mutex_posix_timedacquire,
#else
    /* no native timed wait: poll with trylock + sleep */
    proc_mutex_spinsleep_timedacquire,
#endif
    proc_mutex_posix_release,
    proc_mutex_posix_cleanup,
    proc_mutex_no_child_init,
    proc_mutex_no_perms_set,
    APR_LOCK_POSIXSEM,
    "posixsem"
};
282 | | |
283 | | #endif /* Posix sem implementation */ |
284 | | |
285 | | #if APR_HAS_SYSVSEM_SERIALIZE |
286 | | |
/* Pre-built semop() argument blocks: lock (P), non-blocking lock, and
 * unlock (V) on semaphore 0.  All use SEM_UNDO so the kernel releases
 * the lock automatically if the holding process dies. */
static struct sembuf proc_mutex_op_on;
static struct sembuf proc_mutex_op_try;
static struct sembuf proc_mutex_op_off;

/* One-time initialization of the semop operation blocks above. */
static void proc_mutex_sysv_setup(void)
{
    proc_mutex_op_on.sem_num = 0;
    proc_mutex_op_on.sem_op = -1;          /* P: decrement == acquire */
    proc_mutex_op_on.sem_flg = SEM_UNDO;
    proc_mutex_op_try.sem_num = 0;
    proc_mutex_op_try.sem_op = -1;
    proc_mutex_op_try.sem_flg = SEM_UNDO | IPC_NOWAIT; /* fail with EAGAIN */
    proc_mutex_op_off.sem_num = 0;
    proc_mutex_op_off.sem_op = 1;          /* V: increment == release */
    proc_mutex_op_off.sem_flg = SEM_UNDO;
}
303 | | |
304 | | static apr_status_t proc_mutex_sysv_cleanup(void *mutex_) |
305 | 0 | { |
306 | 0 | apr_proc_mutex_t *mutex=mutex_; |
307 | 0 | union semun ick; |
308 | |
|
309 | 0 | if (mutex->os.crossproc != -1) { |
310 | 0 | ick.val = 0; |
311 | 0 | semctl(mutex->os.crossproc, 0, IPC_RMID, ick); |
312 | 0 | } |
313 | 0 | return APR_SUCCESS; |
314 | 0 | } |
315 | | |
/* Create a private (unnamed) SysV semaphore set containing one semaphore
 * initialized to 1 (unlocked).  fname is unused by this mechanism.  On
 * any failure the partial state is cleaned up and errno is returned. */
static apr_status_t proc_mutex_sysv_create(apr_proc_mutex_t *new_mutex,
                                           const char *fname)
{
    union semun ick;
    apr_status_t rv;

    new_mutex->os.crossproc = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    if (new_mutex->os.crossproc == -1) {
        rv = errno;
        proc_mutex_sysv_cleanup(new_mutex);
        return rv;
    }
    /* Semaphore value 1 == available/unlocked. */
    ick.val = 1;
    if (semctl(new_mutex->os.crossproc, 0, SETVAL, ick) < 0) {
        rv = errno;
        /* cleanup removes the set, then mark the id invalid */
        proc_mutex_sysv_cleanup(new_mutex);
        new_mutex->os.crossproc = -1;
        return rv;
    }
    new_mutex->curr_locked = 0;
    apr_pool_cleanup_register(new_mutex->pool,
                              (void *)new_mutex, apr_proc_mutex_cleanup,
                              apr_pool_cleanup_null);
    return APR_SUCCESS;
}
341 | | |
342 | | static apr_status_t proc_mutex_sysv_acquire(apr_proc_mutex_t *mutex) |
343 | 0 | { |
344 | 0 | int rc; |
345 | |
|
346 | 0 | do { |
347 | 0 | rc = semop(mutex->os.crossproc, &proc_mutex_op_on, 1); |
348 | 0 | } while (rc < 0 && errno == EINTR); |
349 | 0 | if (rc < 0) { |
350 | 0 | return errno; |
351 | 0 | } |
352 | 0 | mutex->curr_locked = 1; |
353 | 0 | return APR_SUCCESS; |
354 | 0 | } |
355 | | |
356 | | static apr_status_t proc_mutex_sysv_tryacquire(apr_proc_mutex_t *mutex) |
357 | 0 | { |
358 | 0 | int rc; |
359 | |
|
360 | 0 | do { |
361 | 0 | rc = semop(mutex->os.crossproc, &proc_mutex_op_try, 1); |
362 | 0 | } while (rc < 0 && errno == EINTR); |
363 | 0 | if (rc < 0) { |
364 | 0 | if (errno == EAGAIN) { |
365 | 0 | return APR_EBUSY; |
366 | 0 | } |
367 | 0 | return errno; |
368 | 0 | } |
369 | 0 | mutex->curr_locked = 1; |
370 | 0 | return APR_SUCCESS; |
371 | 0 | } |
372 | | |
373 | | #if defined(HAVE_SEMTIMEDOP) |
/* Timed acquire using semtimedop(), which takes a RELATIVE timeout
 * (unlike sem_timedwait/pthread_mutex_timedlock, which take absolute
 * deadlines).  A non-positive timeout degrades to a single try.
 * NOTE(review): EINTR restarts the semop with the same relative timeout,
 * so repeated signals can extend the total wait beyond `timeout`. */
static apr_status_t proc_mutex_sysv_timedacquire(apr_proc_mutex_t *mutex,
                                                 apr_interval_time_t timeout)
{
    if (timeout <= 0) {
        apr_status_t rv = proc_mutex_sysv_tryacquire(mutex);
        return (rv == APR_EBUSY) ? APR_TIMEUP : rv;
    }
    else {
        int rc;
        struct timespec reltime;

        reltime.tv_sec = apr_time_sec(timeout);
        reltime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */

        do {
            rc = semtimedop(mutex->os.crossproc, &proc_mutex_op_on, 1,
                            &reltime);
        } while (rc < 0 && errno == EINTR);
        if (rc < 0) {
            if (errno == EAGAIN || errno == ETIMEDOUT) {
                return APR_TIMEUP;
            }
            return errno;
        }
    }
    mutex->curr_locked = 1;
    return APR_SUCCESS;
}
402 | | #endif |
403 | | |
404 | | static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex) |
405 | 0 | { |
406 | 0 | int rc; |
407 | |
|
408 | 0 | mutex->curr_locked = 0; |
409 | 0 | do { |
410 | 0 | rc = semop(mutex->os.crossproc, &proc_mutex_op_off, 1); |
411 | 0 | } while (rc < 0 && errno == EINTR); |
412 | 0 | if (rc < 0) { |
413 | 0 | return errno; |
414 | 0 | } |
415 | 0 | return APR_SUCCESS; |
416 | 0 | } |
417 | | |
/* Change the owner, group, and mode of the semaphore set via IPC_SET.
 * NOTE(review): only sem_perm.{uid,gid,mode} are assigned; the rest of
 * `buf` is uninitialized — presumably IPC_SET reads only sem_perm, but
 * an initial IPC_STAT would make that explicit.  Verify against the
 * platform semctl() specification. */
static apr_status_t proc_mutex_sysv_perms_set(apr_proc_mutex_t *mutex,
                                              apr_fileperms_t perms,
                                              apr_uid_t uid,
                                              apr_gid_t gid)
{

    union semun ick;
    struct semid_ds buf;
    buf.sem_perm.uid = uid;
    buf.sem_perm.gid = gid;
    buf.sem_perm.mode = apr_unix_perms2mode(perms);
    ick.buf = &buf;
    if (semctl(mutex->os.crossproc, 0, IPC_SET, ick) < 0) {
        return errno;
    }
    return APR_SUCCESS;
}
435 | | |
/* Method table for the SysV semaphore mechanism. */
static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(SYSVSEM_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_sysv_create,
    proc_mutex_sysv_acquire,
    proc_mutex_sysv_tryacquire,
#if defined(HAVE_SEMTIMEDOP)
    proc_mutex_sysv_timedacquire,
#else
    /* no native timed wait: poll with trylock + sleep */
    proc_mutex_spinsleep_timedacquire,
#endif
    proc_mutex_sysv_release,
    proc_mutex_sysv_cleanup,
    proc_mutex_no_child_init,
    proc_mutex_sysv_perms_set,
    APR_LOCK_SYSVSEM,
    "sysvsem"
};
458 | | |
459 | | #endif /* SysV sem implementation */ |
460 | | |
461 | | #if APR_HAS_PROC_PTHREAD_SERIALIZE |
462 | | |
463 | | #ifndef APR_USE_PROC_PTHREAD_MUTEX_COND |
464 | | #if defined(HAVE_PTHREAD_CONDATTR_SETPSHARED) \ |
465 | | && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) |
466 | | #define APR_USE_PROC_PTHREAD_MUTEX_COND 1 |
467 | | #else |
468 | | #define APR_USE_PROC_PTHREAD_MUTEX_COND 0 |
469 | | #endif |
470 | | #endif |
471 | | |
472 | | /* The mmap()ed pthread_interproc is the native pthread_mutex_t followed |
473 | | * by a refcounter to track children using it. We want to avoid calling |
474 | | * pthread_mutex_destroy() on the shared mutex area while it is in use by |
475 | | * another process, because this may mark the shared pthread_mutex_t as |
476 | | * invalid for everyone, including forked children (unlike "sysvsem" for |
477 | | * example), causing unexpected errors or deadlocks (PR 49504). So the |
478 | | * last process (parent or child) referencing the mutex will effectively |
479 | | * destroy it. |
480 | | */ |
/* Layout of the mmap()ed shared region; the accessor macros below read
 * each field through the apr_proc_mutex_t's pthread_interproc pointer. */
typedef struct {
#define proc_pthread_cast(m) \
    ((proc_pthread_mutex_t *)(m)->os.pthread_interproc)
    /* the process-shared native mutex itself */
    pthread_mutex_t mutex;
#define proc_pthread_mutex(m) \
    (proc_pthread_cast(m)->mutex)
#if APR_USE_PROC_PTHREAD_MUTEX_COND
    /* condition variable used when timed locking must be emulated */
    pthread_cond_t cond;
#define proc_pthread_mutex_cond(m) \
    (proc_pthread_cast(m)->cond)
    /* -1: not a cond-style mutex; 0: unlocked; 1: locked */
    apr_int32_t cond_locked;
#define proc_pthread_mutex_cond_locked(m) \
    (proc_pthread_cast(m)->cond_locked)
    /* number of waiters blocked on `cond` (guarded by `mutex`) */
    apr_uint32_t cond_num_waiters;
#define proc_pthread_mutex_cond_num_waiters(m) \
    (proc_pthread_cast(m)->cond_num_waiters)
#define proc_pthread_mutex_is_cond(m) \
    ((m)->pthread_refcounting && proc_pthread_mutex_cond_locked(m) != -1)
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
    /* processes referencing this region; last one destroys the mutex */
    apr_uint32_t refcount;
#define proc_pthread_mutex_refcount(m) \
    (proc_pthread_cast(m)->refcount)
} proc_pthread_mutex_t;
504 | | |
505 | | |
506 | | static APR_INLINE int proc_pthread_mutex_inc(apr_proc_mutex_t *mutex) |
507 | 0 | { |
508 | 0 | if (mutex->pthread_refcounting) { |
509 | 0 | apr_atomic_inc32(&proc_pthread_mutex_refcount(mutex)); |
510 | 0 | return 1; |
511 | 0 | } |
512 | 0 | return 0; |
513 | 0 | } |
514 | | |
515 | | static APR_INLINE int proc_pthread_mutex_dec(apr_proc_mutex_t *mutex) |
516 | 0 | { |
517 | 0 | if (mutex->pthread_refcounting) { |
518 | 0 | return apr_atomic_dec32(&proc_pthread_mutex_refcount(mutex)); |
519 | 0 | } |
520 | 0 | return 0; |
521 | 0 | } |
522 | | |
/* Drop this process's reference to the shared mutex region, releasing
 * any lock it still holds first.  Only the LAST reference destroys the
 * shared pthread mutex (and cond), per the PR 49504 note above: an early
 * pthread_mutex_destroy() would invalidate the mutex for every other
 * process sharing it. */
static apr_status_t proc_pthread_mutex_unref(void *mutex_)
{
    apr_proc_mutex_t *mutex=mutex_;
    apr_status_t rv;

#if APR_USE_PROC_PTHREAD_MUTEX_COND
    if (proc_pthread_mutex_is_cond(mutex)) {
        /* cond-style: the flag in shared memory is the real lock state */
        mutex->curr_locked = 0;
    }
    else
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
    if (mutex->curr_locked == 1) {
        /* still held by us: unlock before (possibly) destroying */
        if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
    }
    if (!proc_pthread_mutex_dec(mutex)) {
        /* we were the last reference: tear down the shared objects */
#if APR_USE_PROC_PTHREAD_MUTEX_COND
        if (proc_pthread_mutex_is_cond(mutex) &&
            (rv = pthread_cond_destroy(&proc_pthread_mutex_cond(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */

        if ((rv = pthread_mutex_destroy(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
    }
    return APR_SUCCESS;
}
562 | | |
563 | | static apr_status_t proc_mutex_pthread_cleanup(void *mutex_) |
564 | 0 | { |
565 | 0 | apr_proc_mutex_t *mutex=mutex_; |
566 | 0 | apr_status_t rv; |
567 | | |
568 | | /* curr_locked is set to -1 until the mutex has been created */ |
569 | 0 | if (mutex->curr_locked != -1) { |
570 | 0 | if ((rv = proc_pthread_mutex_unref(mutex))) { |
571 | 0 | return rv; |
572 | 0 | } |
573 | 0 | } |
574 | 0 | if (munmap(mutex->os.pthread_interproc, sizeof(proc_pthread_mutex_t))) { |
575 | 0 | return errno; |
576 | 0 | } |
577 | 0 | return APR_SUCCESS; |
578 | 0 | } |
579 | | |
/* Create a process-shared pthread mutex in an anonymous shared mapping
 * (mmap of /dev/zero).  The mutex is made PTHREAD_PROCESS_SHARED and,
 * where available, robust with priority inheritance, so a dead holder
 * does not wedge other processes.  fname is unused by this mechanism.
 * On any failure the mapping (and, once created, the mutex) is torn
 * down via proc_mutex_pthread_cleanup(). */
static apr_status_t proc_mutex_pthread_create(apr_proc_mutex_t *new_mutex,
                                              const char *fname)
{
    apr_status_t rv;
    int fd;
    pthread_mutexattr_t mattr;

    fd = open("/dev/zero", O_RDWR);
    if (fd < 0) {
        return errno;
    }

    new_mutex->os.pthread_interproc = mmap(NULL, sizeof(proc_pthread_mutex_t),
                                           PROT_READ | PROT_WRITE, MAP_SHARED,
                                           fd, 0);
    if (new_mutex->os.pthread_interproc == MAP_FAILED) {
        new_mutex->os.pthread_interproc = NULL;
        rv = errno;
        close(fd);
        return rv;
    }
    /* the mapping persists after the fd is closed */
    close(fd);

    new_mutex->pthread_refcounting = 1;
    new_mutex->curr_locked = -1; /* until the mutex has been created */
#if APR_USE_PROC_PTHREAD_MUTEX_COND
    proc_pthread_mutex_cond_locked(new_mutex) = -1;
#endif

    if ((rv = pthread_mutexattr_init(&mattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        return rv;
    }
    if ((rv = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }

#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
    rv = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
#else
    rv = pthread_mutexattr_setrobust_np(&mattr, PTHREAD_MUTEX_ROBUST_NP);
#endif
    if (rv) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }
    /* NOTE(review): setprotocol is only attempted when robust mutexes
     * are available — confirm that coupling is intentional. */
    if ((rv = pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }
#endif /* HAVE_PTHREAD_MUTEX_ROBUST[_NP] */

#if defined(APR_THREAD_DEBUG)
    /* ignore errors. */
    if ((rv = pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_ERRORCHECK))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }
#endif

    if ((rv = pthread_mutex_init(&proc_pthread_mutex(new_mutex), &mattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }

    proc_pthread_mutex_refcount(new_mutex) = 1; /* first/parent reference */
    new_mutex->curr_locked = 0; /* mutex created now */

    if ((rv = pthread_mutexattr_destroy(&mattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        return rv;
    }

    apr_pool_cleanup_register(new_mutex->pool,
                              (void *)new_mutex,
                              apr_proc_mutex_cleanup,
                              apr_pool_cleanup_null);
    return APR_SUCCESS;
}
687 | | |
688 | | static apr_status_t proc_mutex_pthread_child_init(apr_proc_mutex_t **mutex, |
689 | | apr_pool_t *pool, |
690 | | const char *fname) |
691 | 0 | { |
692 | 0 | (*mutex)->curr_locked = 0; |
693 | 0 | if (proc_pthread_mutex_inc(*mutex)) { |
694 | 0 | apr_pool_cleanup_register(pool, *mutex, proc_pthread_mutex_unref, |
695 | 0 | apr_pool_cleanup_null); |
696 | 0 | } |
697 | 0 | return APR_SUCCESS; |
698 | 0 | } |
699 | | |
/* Core acquire: timeout < 0 blocks forever, timeout == 0 is a single
 * try (APR_TIMEUP on contention), timeout > 0 waits at most that long.
 * Two strategies: the cond-emulation path (when the platform lacks
 * pthread_mutex_timedlock but has process-shared condvars) tracks lock
 * state in shared memory under the native mutex; otherwise the native
 * lock/trylock/timedlock calls are used directly.  Robust-mutex support
 * recovers an EOWNERDEAD mutex with pthread_mutex_consistent(). */
static apr_status_t proc_mutex_pthread_acquire_ex(apr_proc_mutex_t *mutex,
                                                  apr_interval_time_t timeout)
{
    apr_status_t rv;

#if APR_USE_PROC_PTHREAD_MUTEX_COND
    if (proc_pthread_mutex_is_cond(mutex)) {
        if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
            /* Okay, our owner died. Let's try to make it consistent again. */
            if (rv == EOWNERDEAD) {
                proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
                pthread_mutex_consistent(&proc_pthread_mutex(mutex));
#else
                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
#endif
            }
            else
#endif
            return rv;
        }

        /* native mutex held: inspect/modify the shared lock flag */
        if (!proc_pthread_mutex_cond_locked(mutex)) {
            rv = APR_SUCCESS;
        }
        else if (!timeout) {
            rv = APR_TIMEUP;
        }
        else {
            struct timespec abstime;

            if (timeout > 0) {
                /* convert relative timeout to an absolute deadline */
                timeout += apr_time_now();
                abstime.tv_sec = apr_time_sec(timeout);
                abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
            }

            proc_pthread_mutex_cond_num_waiters(mutex)++;
            do {
                if (timeout < 0) {
                    rv = pthread_cond_wait(&proc_pthread_mutex_cond(mutex),
                                           &proc_pthread_mutex(mutex));
                    if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                        rv = errno;
#endif
                        break;
                    }
                }
                else {
                    rv = pthread_cond_timedwait(&proc_pthread_mutex_cond(mutex),
                                                &proc_pthread_mutex(mutex),
                                                &abstime);
                    if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                        rv = errno;
#endif
                        if (rv == ETIMEDOUT) {
                            rv = APR_TIMEUP;
                        }
                        break;
                    }
                }
            } while (proc_pthread_mutex_cond_locked(mutex));
            proc_pthread_mutex_cond_num_waiters(mutex)--;
        }
        if (rv != APR_SUCCESS) {
            pthread_mutex_unlock(&proc_pthread_mutex(mutex));
            return rv;
        }

        proc_pthread_mutex_cond_locked(mutex) = 1;

        rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex));
        if (rv) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
    }
    else
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
    {
        if (timeout < 0) {
            rv = pthread_mutex_lock(&proc_pthread_mutex(mutex));
            if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                rv = errno;
#endif
            }
        }
        else if (!timeout) {
            rv = pthread_mutex_trylock(&proc_pthread_mutex(mutex));
            if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                rv = errno;
#endif
                if (rv == EBUSY) {
                    return APR_TIMEUP;
                }
            }
        }
        else
#if defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)
        {
            struct timespec abstime;

            /* convert relative timeout to an absolute deadline */
            timeout += apr_time_now();
            abstime.tv_sec = apr_time_sec(timeout);
            abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */

            rv = pthread_mutex_timedlock(&proc_pthread_mutex(mutex), &abstime);
            if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                rv = errno;
#endif
                if (rv == ETIMEDOUT) {
                    return APR_TIMEUP;
                }
            }
        }
        if (rv) {
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
            /* Okay, our owner died. Let's try to make it consistent again. */
            if (rv == EOWNERDEAD) {
                proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
                pthread_mutex_consistent(&proc_pthread_mutex(mutex));
#else
                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
#endif
            }
            else
#endif
            return rv;
        }
#else /* !HAVE_PTHREAD_MUTEX_TIMEDLOCK */
        /* no native timed lock: fall back to trylock/sleep polling */
        return proc_mutex_spinsleep_timedacquire(mutex, timeout);
#endif
    }

    mutex->curr_locked = 1;
    return APR_SUCCESS;
}
849 | | |
/* Blocking acquire: a negative timeout means wait indefinitely. */
static apr_status_t proc_mutex_pthread_acquire(apr_proc_mutex_t *mutex)
{
    return proc_mutex_pthread_acquire_ex(mutex, -1);
}
854 | | |
855 | | static apr_status_t proc_mutex_pthread_tryacquire(apr_proc_mutex_t *mutex) |
856 | 0 | { |
857 | 0 | apr_status_t rv = proc_mutex_pthread_acquire_ex(mutex, 0); |
858 | 0 | return (rv == APR_TIMEUP) ? APR_EBUSY : rv; |
859 | 0 | } |
860 | | |
861 | | static apr_status_t proc_mutex_pthread_timedacquire(apr_proc_mutex_t *mutex, |
862 | | apr_interval_time_t timeout) |
863 | 0 | { |
864 | 0 | return proc_mutex_pthread_acquire_ex(mutex, (timeout <= 0) ? 0 : timeout); |
865 | 0 | } |
866 | | |
/* Release the mutex.  On the cond-emulation path the shared lock flag
 * is cleared under the native mutex and one waiter (if any) is
 * signalled; releasing an unlocked cond-mutex yields APR_EINVAL.  In
 * all cases the native mutex is unlocked at the end. */
static apr_status_t proc_mutex_pthread_release(apr_proc_mutex_t *mutex)
{
    apr_status_t rv;

#if APR_USE_PROC_PTHREAD_MUTEX_COND
    if (proc_pthread_mutex_is_cond(mutex)) {
        if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
            /* Okay, our owner died. Let's try to make it consistent again. */
            if (rv == EOWNERDEAD) {
                proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
                pthread_mutex_consistent(&proc_pthread_mutex(mutex));
#else
                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
#endif
            }
            else
#endif
            return rv;
        }

        if (!proc_pthread_mutex_cond_locked(mutex)) {
            /* releasing a lock we do not hold */
            rv = APR_EINVAL;
        }
        else if (!proc_pthread_mutex_cond_num_waiters(mutex)) {
            /* nobody waiting: just clear the flag below */
            rv = APR_SUCCESS;
        }
        else {
            /* wake exactly one waiter */
            rv = pthread_cond_signal(&proc_pthread_mutex_cond(mutex));
#ifdef HAVE_ZOS_PTHREADS
            if (rv) {
                rv = errno;
            }
#endif
        }
        if (rv != APR_SUCCESS) {
            pthread_mutex_unlock(&proc_pthread_mutex(mutex));
            return rv;
        }

        proc_pthread_mutex_cond_locked(mutex) = 0;
    }
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */

    mutex->curr_locked = 0;
    if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        return rv;
    }

    return APR_SUCCESS;
}
925 | | |
/* Method table for the process-shared pthread mutex mechanism (native
 * timed locking; also usable for global locks). */
static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_methods =
{
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
    proc_mutex_pthread_create,
    proc_mutex_pthread_acquire,
    proc_mutex_pthread_tryacquire,
    proc_mutex_pthread_timedacquire,
    proc_mutex_pthread_release,
    proc_mutex_pthread_cleanup,
    proc_mutex_pthread_child_init,
    proc_mutex_no_perms_set,
    APR_LOCK_PROC_PTHREAD,
    "pthread"
};
940 | | |
941 | | #if APR_USE_PROC_PTHREAD_MUTEX_COND |
942 | | static apr_status_t proc_mutex_pthread_cond_create(apr_proc_mutex_t *new_mutex, |
943 | | const char *fname) |
944 | | { |
945 | | apr_status_t rv; |
946 | | pthread_condattr_t cattr; |
947 | | |
948 | | rv = proc_mutex_pthread_create(new_mutex, fname); |
949 | | if (rv != APR_SUCCESS) { |
950 | | return rv; |
951 | | } |
952 | | |
953 | | if ((rv = pthread_condattr_init(&cattr))) { |
954 | | #ifdef HAVE_ZOS_PTHREADS |
955 | | rv = errno; |
956 | | #endif |
957 | | apr_pool_cleanup_run(new_mutex->pool, new_mutex, |
958 | | apr_proc_mutex_cleanup); |
959 | | return rv; |
960 | | } |
961 | | if ((rv = pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED))) { |
962 | | #ifdef HAVE_ZOS_PTHREADS |
963 | | rv = errno; |
964 | | #endif |
965 | | pthread_condattr_destroy(&cattr); |
966 | | apr_pool_cleanup_run(new_mutex->pool, new_mutex, |
967 | | apr_proc_mutex_cleanup); |
968 | | return rv; |
969 | | } |
970 | | if ((rv = pthread_cond_init(&proc_pthread_mutex_cond(new_mutex), |
971 | | &cattr))) { |
972 | | #ifdef HAVE_ZOS_PTHREADS |
973 | | rv = errno; |
974 | | #endif |
975 | | pthread_condattr_destroy(&cattr); |
976 | | apr_pool_cleanup_run(new_mutex->pool, new_mutex, |
977 | | apr_proc_mutex_cleanup); |
978 | | return rv; |
979 | | } |
980 | | pthread_condattr_destroy(&cattr); |
981 | | |
982 | | proc_pthread_mutex_cond_locked(new_mutex) = 0; |
983 | | proc_pthread_mutex_cond_num_waiters(new_mutex) = 0; |
984 | | |
985 | | return APR_SUCCESS; |
986 | | } |
987 | | |
/* Method table for the pthread mutex + condition variable variant;
 * selected for APR_LOCK_DEFAULT_TIMED when
 * APR_USE_PROC_PTHREAD_MUTEX_COND is set. */
static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_cond_methods =
{
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,   /* flags: lock serializes threads too */
    proc_mutex_pthread_cond_create,    /* only create differs from the base table */
    proc_mutex_pthread_acquire,
    proc_mutex_pthread_tryacquire,
    proc_mutex_pthread_timedacquire,
    proc_mutex_pthread_release,
    proc_mutex_pthread_cleanup,
    proc_mutex_pthread_child_init,
    proc_mutex_no_perms_set,           /* no lock file, so nothing to chown */
    APR_LOCK_PROC_PTHREAD,
    "pthread"
};
1002 | | #endif |
1003 | | |
1004 | | #endif |
1005 | | |
1006 | | #if APR_HAS_FCNTL_SERIALIZE |
1007 | | |
/* Lock templates filled in once by proc_mutex_fcntl_setup(). */
static struct flock proc_mutex_lock_it;    /* F_WRLCK over the whole file */
static struct flock proc_mutex_unlock_it;  /* F_UNLCK over the whole file */

static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *);
1012 | | |
/* One-time initialization of the fcntl() lock/unlock templates;
 * called from apr_proc_mutex_unix_setup_lock(). */
static void proc_mutex_fcntl_setup(void)
{
    proc_mutex_lock_it.l_whence = SEEK_SET;   /* offsets relative to file start */
    proc_mutex_lock_it.l_start = 0;           /* from byte 0 */
    proc_mutex_lock_it.l_len = 0;             /* until end of file */
    proc_mutex_lock_it.l_type = F_WRLCK;      /* set exclusive/write lock */
    proc_mutex_lock_it.l_pid = 0;             /* pid not actually interesting */
    proc_mutex_unlock_it.l_whence = SEEK_SET; /* offsets relative to file start */
    proc_mutex_unlock_it.l_start = 0;         /* from byte 0 */
    proc_mutex_unlock_it.l_len = 0;           /* until end of file */
    proc_mutex_unlock_it.l_type = F_UNLCK;    /* remove the lock */
    proc_mutex_unlock_it.l_pid = 0;           /* pid not actually interesting */
}
1026 | | |
1027 | | static apr_status_t proc_mutex_fcntl_cleanup(void *mutex_) |
1028 | 0 | { |
1029 | 0 | apr_status_t status = APR_SUCCESS; |
1030 | 0 | apr_proc_mutex_t *mutex=mutex_; |
1031 | |
|
1032 | 0 | if (mutex->curr_locked == 1) { |
1033 | 0 | status = proc_mutex_fcntl_release(mutex); |
1034 | 0 | if (status != APR_SUCCESS) |
1035 | 0 | return status; |
1036 | 0 | } |
1037 | | |
1038 | 0 | if (mutex->interproc) { |
1039 | 0 | status = apr_file_close(mutex->interproc); |
1040 | 0 | } |
1041 | 0 | if (!mutex->interproc_closing |
1042 | 0 | && mutex->os.crossproc != -1 |
1043 | 0 | && close(mutex->os.crossproc) == -1 |
1044 | 0 | && status == APR_SUCCESS) { |
1045 | 0 | status = errno; |
1046 | 0 | } |
1047 | 0 | return status; |
1048 | 0 | } |
1049 | | |
1050 | | static apr_status_t proc_mutex_fcntl_create(apr_proc_mutex_t *new_mutex, |
1051 | | const char *fname) |
1052 | 0 | { |
1053 | 0 | int rv; |
1054 | |
|
1055 | 0 | if (fname) { |
1056 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, fname); |
1057 | 0 | rv = apr_file_open(&new_mutex->interproc, new_mutex->fname, |
1058 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1059 | 0 | APR_FPROT_UREAD | APR_FPROT_UWRITE | APR_FPROT_GREAD | APR_FPROT_WREAD, |
1060 | 0 | new_mutex->pool); |
1061 | 0 | } |
1062 | 0 | else { |
1063 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX"); |
1064 | 0 | rv = apr_file_mktemp(&new_mutex->interproc, new_mutex->fname, |
1065 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1066 | 0 | new_mutex->pool); |
1067 | 0 | } |
1068 | |
|
1069 | 0 | if (rv != APR_SUCCESS) { |
1070 | 0 | return rv; |
1071 | 0 | } |
1072 | | |
1073 | 0 | new_mutex->os.crossproc = new_mutex->interproc->filedes; |
1074 | 0 | new_mutex->interproc_closing = 1; |
1075 | 0 | new_mutex->curr_locked = 0; |
1076 | 0 | unlink(new_mutex->fname); |
1077 | 0 | apr_pool_cleanup_register(new_mutex->pool, |
1078 | 0 | (void*)new_mutex, |
1079 | 0 | apr_proc_mutex_cleanup, |
1080 | 0 | apr_pool_cleanup_null); |
1081 | 0 | return APR_SUCCESS; |
1082 | 0 | } |
1083 | | |
1084 | | static apr_status_t proc_mutex_fcntl_acquire(apr_proc_mutex_t *mutex) |
1085 | 0 | { |
1086 | 0 | int rc; |
1087 | |
|
1088 | 0 | do { |
1089 | 0 | rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_lock_it); |
1090 | 0 | } while (rc < 0 && errno == EINTR); |
1091 | 0 | if (rc < 0) { |
1092 | 0 | return errno; |
1093 | 0 | } |
1094 | 0 | mutex->curr_locked=1; |
1095 | 0 | return APR_SUCCESS; |
1096 | 0 | } |
1097 | | |
1098 | | static apr_status_t proc_mutex_fcntl_tryacquire(apr_proc_mutex_t *mutex) |
1099 | 0 | { |
1100 | 0 | int rc; |
1101 | |
|
1102 | 0 | do { |
1103 | 0 | rc = fcntl(mutex->os.crossproc, F_SETLK, &proc_mutex_lock_it); |
1104 | 0 | } while (rc < 0 && errno == EINTR); |
1105 | 0 | if (rc < 0) { |
1106 | | #if FCNTL_TRYACQUIRE_EACCES |
1107 | | if (errno == EACCES) { |
1108 | | #else |
1109 | 0 | if (errno == EAGAIN) { |
1110 | 0 | #endif |
1111 | 0 | return APR_EBUSY; |
1112 | 0 | } |
1113 | 0 | return errno; |
1114 | 0 | } |
1115 | 0 | mutex->curr_locked = 1; |
1116 | 0 | return APR_SUCCESS; |
1117 | 0 | } |
1118 | | |
1119 | | static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *mutex) |
1120 | 0 | { |
1121 | 0 | int rc; |
1122 | |
|
1123 | 0 | mutex->curr_locked=0; |
1124 | 0 | do { |
1125 | 0 | rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_unlock_it); |
1126 | 0 | } while (rc < 0 && errno == EINTR); |
1127 | 0 | if (rc < 0) { |
1128 | 0 | return errno; |
1129 | 0 | } |
1130 | 0 | return APR_SUCCESS; |
1131 | 0 | } |
1132 | | |
1133 | | static apr_status_t proc_mutex_fcntl_perms_set(apr_proc_mutex_t *mutex, |
1134 | | apr_fileperms_t perms, |
1135 | | apr_uid_t uid, |
1136 | | apr_gid_t gid) |
1137 | 0 | { |
1138 | |
|
1139 | 0 | if (mutex->fname) { |
1140 | 0 | if (!(perms & APR_FPROT_GSETID)) |
1141 | 0 | gid = -1; |
1142 | 0 | if (fchown(mutex->os.crossproc, uid, gid) < 0) { |
1143 | 0 | return errno; |
1144 | 0 | } |
1145 | 0 | } |
1146 | 0 | return APR_SUCCESS; |
1147 | 0 | } |
1148 | | |
/* Method table for the fcntl(F_SETLK[W]) implementation. */
static const apr_proc_mutex_unix_lock_methods_t mutex_fcntl_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FCNTL_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_fcntl_create,
    proc_mutex_fcntl_acquire,
    proc_mutex_fcntl_tryacquire,
    proc_mutex_spinsleep_timedacquire, /* generic spin/sleep fallback for timed acquire */
    proc_mutex_fcntl_release,
    proc_mutex_fcntl_cleanup,
    proc_mutex_no_child_init,          /* fd is inherited across fork; nothing to do */
    proc_mutex_fcntl_perms_set,
    APR_LOCK_FCNTL,
    "fcntl"
};
1167 | | |
1168 | | #endif /* fcntl implementation */ |
1169 | | |
1170 | | #if APR_HAS_FLOCK_SERIALIZE |
1171 | | |
1172 | | static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *); |
1173 | | |
1174 | | static apr_status_t proc_mutex_flock_cleanup(void *mutex_) |
1175 | 0 | { |
1176 | 0 | apr_status_t status = APR_SUCCESS; |
1177 | 0 | apr_proc_mutex_t *mutex=mutex_; |
1178 | |
|
1179 | 0 | if (mutex->curr_locked == 1) { |
1180 | 0 | status = proc_mutex_flock_release(mutex); |
1181 | 0 | if (status != APR_SUCCESS) |
1182 | 0 | return status; |
1183 | 0 | } |
1184 | 0 | if (mutex->interproc) { /* if it was opened properly */ |
1185 | 0 | status = apr_file_close(mutex->interproc); |
1186 | 0 | } |
1187 | 0 | if (!mutex->interproc_closing |
1188 | 0 | && mutex->os.crossproc != -1 |
1189 | 0 | && close(mutex->os.crossproc) == -1 |
1190 | 0 | && status == APR_SUCCESS) { |
1191 | 0 | status = errno; |
1192 | 0 | } |
1193 | 0 | if (mutex->fname) { |
1194 | 0 | unlink(mutex->fname); |
1195 | 0 | } |
1196 | 0 | return status; |
1197 | 0 | } |
1198 | | |
1199 | | static apr_status_t proc_mutex_flock_create(apr_proc_mutex_t *new_mutex, |
1200 | | const char *fname) |
1201 | 0 | { |
1202 | 0 | int rv; |
1203 | |
|
1204 | 0 | if (fname) { |
1205 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, fname); |
1206 | 0 | rv = apr_file_open(&new_mutex->interproc, new_mutex->fname, |
1207 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1208 | 0 | APR_FPROT_UREAD | APR_FPROT_UWRITE, |
1209 | 0 | new_mutex->pool); |
1210 | 0 | } |
1211 | 0 | else { |
1212 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX"); |
1213 | 0 | rv = apr_file_mktemp(&new_mutex->interproc, new_mutex->fname, |
1214 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1215 | 0 | new_mutex->pool); |
1216 | 0 | } |
1217 | |
|
1218 | 0 | if (rv != APR_SUCCESS) { |
1219 | 0 | proc_mutex_flock_cleanup(new_mutex); |
1220 | 0 | return rv; |
1221 | 0 | } |
1222 | | |
1223 | 0 | new_mutex->os.crossproc = new_mutex->interproc->filedes; |
1224 | 0 | new_mutex->interproc_closing = 1; |
1225 | 0 | new_mutex->curr_locked = 0; |
1226 | 0 | apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex, |
1227 | 0 | apr_proc_mutex_cleanup, |
1228 | 0 | apr_pool_cleanup_null); |
1229 | 0 | return APR_SUCCESS; |
1230 | 0 | } |
1231 | | |
1232 | | static apr_status_t proc_mutex_flock_acquire(apr_proc_mutex_t *mutex) |
1233 | 0 | { |
1234 | 0 | int rc; |
1235 | |
|
1236 | 0 | do { |
1237 | 0 | rc = flock(mutex->os.crossproc, LOCK_EX); |
1238 | 0 | } while (rc < 0 && errno == EINTR); |
1239 | 0 | if (rc < 0) { |
1240 | 0 | return errno; |
1241 | 0 | } |
1242 | 0 | mutex->curr_locked = 1; |
1243 | 0 | return APR_SUCCESS; |
1244 | 0 | } |
1245 | | |
1246 | | static apr_status_t proc_mutex_flock_tryacquire(apr_proc_mutex_t *mutex) |
1247 | 0 | { |
1248 | 0 | int rc; |
1249 | |
|
1250 | 0 | do { |
1251 | 0 | rc = flock(mutex->os.crossproc, LOCK_EX | LOCK_NB); |
1252 | 0 | } while (rc < 0 && errno == EINTR); |
1253 | 0 | if (rc < 0) { |
1254 | 0 | if (errno == EWOULDBLOCK || errno == EAGAIN) { |
1255 | 0 | return APR_EBUSY; |
1256 | 0 | } |
1257 | 0 | return errno; |
1258 | 0 | } |
1259 | 0 | mutex->curr_locked = 1; |
1260 | 0 | return APR_SUCCESS; |
1261 | 0 | } |
1262 | | |
1263 | | static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *mutex) |
1264 | 0 | { |
1265 | 0 | int rc; |
1266 | |
|
1267 | 0 | mutex->curr_locked = 0; |
1268 | 0 | do { |
1269 | 0 | rc = flock(mutex->os.crossproc, LOCK_UN); |
1270 | 0 | } while (rc < 0 && errno == EINTR); |
1271 | 0 | if (rc < 0) { |
1272 | 0 | return errno; |
1273 | 0 | } |
1274 | 0 | return APR_SUCCESS; |
1275 | 0 | } |
1276 | | |
1277 | | static apr_status_t proc_mutex_flock_child_init(apr_proc_mutex_t **mutex, |
1278 | | apr_pool_t *pool, |
1279 | | const char *fname) |
1280 | 0 | { |
1281 | 0 | apr_proc_mutex_t *new_mutex; |
1282 | 0 | int rv; |
1283 | |
|
1284 | 0 | if (!fname) { |
1285 | 0 | fname = (*mutex)->fname; |
1286 | 0 | if (!fname) { |
1287 | 0 | return APR_SUCCESS; |
1288 | 0 | } |
1289 | 0 | } |
1290 | | |
1291 | 0 | new_mutex = (apr_proc_mutex_t *)apr_pmemdup(pool, *mutex, |
1292 | 0 | sizeof(apr_proc_mutex_t)); |
1293 | 0 | new_mutex->pool = pool; |
1294 | 0 | new_mutex->fname = apr_pstrdup(pool, fname); |
1295 | 0 | rv = apr_file_open(&new_mutex->interproc, new_mutex->fname, |
1296 | 0 | APR_FOPEN_WRITE, 0, new_mutex->pool); |
1297 | 0 | if (rv != APR_SUCCESS) { |
1298 | 0 | return rv; |
1299 | 0 | } |
1300 | 0 | new_mutex->os.crossproc = new_mutex->interproc->filedes; |
1301 | 0 | new_mutex->interproc_closing = 1; |
1302 | |
|
1303 | 0 | *mutex = new_mutex; |
1304 | 0 | return APR_SUCCESS; |
1305 | 0 | } |
1306 | | |
1307 | | static apr_status_t proc_mutex_flock_perms_set(apr_proc_mutex_t *mutex, |
1308 | | apr_fileperms_t perms, |
1309 | | apr_uid_t uid, |
1310 | | apr_gid_t gid) |
1311 | 0 | { |
1312 | |
|
1313 | 0 | if (mutex->fname) { |
1314 | 0 | if (!(perms & APR_FPROT_GSETID)) |
1315 | 0 | gid = -1; |
1316 | 0 | if (fchown(mutex->os.crossproc, uid, gid) < 0) { |
1317 | 0 | return errno; |
1318 | 0 | } |
1319 | 0 | } |
1320 | 0 | return APR_SUCCESS; |
1321 | 0 | } |
1322 | | |
/* Method table for the flock() implementation. */
static const apr_proc_mutex_unix_lock_methods_t mutex_flock_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FLOCK_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_flock_create,
    proc_mutex_flock_acquire,
    proc_mutex_flock_tryacquire,
    proc_mutex_spinsleep_timedacquire, /* generic spin/sleep fallback for timed acquire */
    proc_mutex_flock_release,
    proc_mutex_flock_cleanup,
    proc_mutex_flock_child_init,       /* children must re-open the lock file */
    proc_mutex_flock_perms_set,
    APR_LOCK_FLOCK,
    "flock"
};
1341 | | |
1342 | | #endif /* flock implementation */ |
1343 | | |
/* Global one-time initialization hook for the mechanisms that need
 * it (SysV semaphore op arrays and the fcntl lock templates). */
void apr_proc_mutex_unix_setup_lock(void)
{
    /* setup only needed for sysvsem and fcntl */
#if APR_HAS_SYSVSEM_SERIALIZE
    proc_mutex_sysv_setup();
#endif
#if APR_HAS_FCNTL_SERIALIZE
    proc_mutex_fcntl_setup();
#endif
}
1354 | | |
/*
 * Select the locking implementation for `mech` and store its method
 * table in new_mutex->meth.  When `ospmutex` is non-NULL (the
 * apr_os_proc_mutex_put*() path) the native handle it carries is
 * validated and copied into new_mutex->os; otherwise the os members
 * are only reset to their "empty" values.
 *
 * Returns APR_EINVAL for a missing native handle, APR_ENOTIMPL when
 * the requested mechanism is not compiled in, APR_SUCCESS otherwise.
 */
static apr_status_t proc_mutex_choose_method(apr_proc_mutex_t *new_mutex,
                                             apr_lockmech_e mech,
                                             apr_os_proc_mutex_t *ospmutex)
{
    /* Reset every native-handle member that exists in this build so
     * later code can tell "never initialized" from "in use". */
#if APR_HAS_PROC_PTHREAD_SERIALIZE
    new_mutex->os.pthread_interproc = NULL;
#endif
#if APR_HAS_POSIXSEM_SERIALIZE
    new_mutex->os.psem_interproc = NULL;
#endif
#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
    new_mutex->os.crossproc = -1;

#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
    new_mutex->interproc = NULL;
    new_mutex->interproc_closing = 0;
#endif
#endif

    switch (mech) {
    case APR_LOCK_FCNTL:
#if APR_HAS_FCNTL_SERIALIZE
        new_mutex->meth = &mutex_fcntl_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_FLOCK:
#if APR_HAS_FLOCK_SERIALIZE
        new_mutex->meth = &mutex_flock_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_SYSVSEM:
#if APR_HAS_SYSVSEM_SERIALIZE
        new_mutex->meth = &mutex_sysv_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_POSIXSEM:
#if APR_HAS_POSIXSEM_SERIALIZE
        new_mutex->meth = &mutex_posixsem_methods;
        if (ospmutex) {
            if (ospmutex->psem_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.psem_interproc = ospmutex->psem_interproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_PROC_PTHREAD:
#if APR_HAS_PROC_PTHREAD_SERIALIZE
        new_mutex->meth = &mutex_proc_pthread_methods;
        if (ospmutex) {
            if (ospmutex->pthread_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_DEFAULT_TIMED:
        /* Prefer a mechanism with a native timed acquire; when none
         * is compiled in, fall through to APR_LOCK_DEFAULT below. */
#if APR_HAS_PROC_PTHREAD_SERIALIZE \
        && (APR_USE_PROC_PTHREAD_MUTEX_COND \
            || defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)) \
        && defined(HAVE_PTHREAD_MUTEX_ROBUST)
#if APR_USE_PROC_PTHREAD_MUTEX_COND
        new_mutex->meth = &mutex_proc_pthread_cond_methods;
#else
        new_mutex->meth = &mutex_proc_pthread_methods;
#endif
        if (ospmutex) {
            if (ospmutex->pthread_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
        }
        break;
#elif APR_HAS_SYSVSEM_SERIALIZE && defined(HAVE_SEMTIMEDOP)
        new_mutex->meth = &mutex_sysv_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
        break;
#elif APR_HAS_POSIXSEM_SERIALIZE && defined(HAVE_SEM_TIMEDWAIT)
        new_mutex->meth = &mutex_posixsem_methods;
        if (ospmutex) {
            if (ospmutex->psem_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.psem_interproc = ospmutex->psem_interproc;
        }
        break;
#endif
        /* fall through */
    case APR_LOCK_DEFAULT:
#if APR_USE_FLOCK_SERIALIZE
        new_mutex->meth = &mutex_flock_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#elif APR_USE_SYSVSEM_SERIALIZE
        new_mutex->meth = &mutex_sysv_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#elif APR_USE_FCNTL_SERIALIZE
        new_mutex->meth = &mutex_fcntl_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#elif APR_USE_PROC_PTHREAD_SERIALIZE
        new_mutex->meth = &mutex_proc_pthread_methods;
        if (ospmutex) {
            if (ospmutex->pthread_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
        }
#elif APR_USE_POSIXSEM_SERIALIZE
        new_mutex->meth = &mutex_posixsem_methods;
        if (ospmutex) {
            if (ospmutex->psem_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.psem_interproc = ospmutex->psem_interproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    default:
        return APR_ENOTIMPL;
    }
    return APR_SUCCESS;
}
1527 | | |
1528 | | APR_DECLARE(const char *) apr_proc_mutex_defname(void) |
1529 | 0 | { |
1530 | 0 | apr_proc_mutex_t mutex; |
1531 | |
|
1532 | 0 | if (proc_mutex_choose_method(&mutex, APR_LOCK_DEFAULT, |
1533 | 0 | NULL) != APR_SUCCESS) { |
1534 | 0 | return "unknown"; |
1535 | 0 | } |
1536 | | |
1537 | 0 | return apr_proc_mutex_name(&mutex); |
1538 | 0 | } |
1539 | | |
1540 | | static apr_status_t proc_mutex_create(apr_proc_mutex_t *new_mutex, apr_lockmech_e mech, const char *fname) |
1541 | 0 | { |
1542 | 0 | apr_status_t rv; |
1543 | |
|
1544 | 0 | if ((rv = proc_mutex_choose_method(new_mutex, mech, |
1545 | 0 | NULL)) != APR_SUCCESS) { |
1546 | 0 | return rv; |
1547 | 0 | } |
1548 | | |
1549 | 0 | if ((rv = new_mutex->meth->create(new_mutex, fname)) != APR_SUCCESS) { |
1550 | 0 | return rv; |
1551 | 0 | } |
1552 | | |
1553 | 0 | return APR_SUCCESS; |
1554 | 0 | } |
1555 | | |
1556 | | APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex, |
1557 | | const char *fname, |
1558 | | apr_lockmech_e mech, |
1559 | | apr_pool_t *pool) |
1560 | 0 | { |
1561 | 0 | apr_proc_mutex_t *new_mutex; |
1562 | 0 | apr_status_t rv; |
1563 | |
|
1564 | 0 | new_mutex = apr_pcalloc(pool, sizeof(apr_proc_mutex_t)); |
1565 | 0 | new_mutex->pool = pool; |
1566 | |
|
1567 | 0 | if ((rv = proc_mutex_create(new_mutex, mech, fname)) != APR_SUCCESS) |
1568 | 0 | return rv; |
1569 | | |
1570 | 0 | *mutex = new_mutex; |
1571 | 0 | return APR_SUCCESS; |
1572 | 0 | } |
1573 | | |
/* Re-attach a mutex in a child process; dispatches to the
 * mechanism-specific child_init hook. */
APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
                                                    const char *fname,
                                                    apr_pool_t *pool)
{
    return (*mutex)->meth->child_init(mutex, pool, fname);
}
1580 | | |
/* Acquire the mutex, blocking until it is available. */
APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
{
    return mutex->meth->acquire(mutex);
}
1585 | | |
/* Try to acquire the mutex without blocking; the mechanism-specific
 * hook returns APR_EBUSY when the mutex is already held. */
APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
{
    return mutex->meth->tryacquire(mutex);
}
1590 | | |
/* Acquire the mutex, waiting at most `timeout`. */
APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
                                                   apr_interval_time_t timeout)
{
    return mutex->meth->timedacquire(mutex, timeout);
}
1596 | | |
/* Release the mutex. */
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
    return mutex->meth->release(mutex);
}
1601 | | |
/* Pool-cleanup entry point (hence the void* signature); dispatches
 * to the mechanism-specific cleanup. */
APR_DECLARE(apr_status_t) apr_proc_mutex_cleanup(void *mutex)
{
    return ((apr_proc_mutex_t *)mutex)->meth->cleanup(mutex);
}
1606 | | |
/* Report which locking mechanism backs this mutex. */
APR_DECLARE(apr_lockmech_e) apr_proc_mutex_mech(apr_proc_mutex_t *mutex)
{
    return mutex->meth->mech;
}
1611 | | |
/* Human-readable name of the backing mechanism (e.g. "flock"). */
APR_DECLARE(const char *) apr_proc_mutex_name(apr_proc_mutex_t *mutex)
{
    return mutex->meth->name;
}
1616 | | |
/* Return the lock file path for the mechanisms that really use one
 * (flock, fcntl); NULL for all others. */
APR_DECLARE(const char *) apr_proc_mutex_lockfile(apr_proc_mutex_t *mutex)
{
    /* POSIX sems use the fname field but don't use a file,
     * so be careful. */
#if APR_HAS_FLOCK_SERIALIZE
    if (mutex->meth == &mutex_flock_methods) {
        return mutex->fname;
    }
#endif
#if APR_HAS_FCNTL_SERIALIZE
    if (mutex->meth == &mutex_fcntl_methods) {
        return mutex->fname;
    }
#endif
    return NULL;
}
1633 | | |
/* apr_proc_mutex_perms_set() (signature expanded from the
 * APR_PERMS_SET_IMPLEMENT macro): forward to the mechanism's
 * perms_set hook. */
APR_PERMS_SET_IMPLEMENT(proc_mutex)
{
    apr_proc_mutex_t *mutex = (apr_proc_mutex_t *)theproc_mutex;
    return mutex->meth->perms_set(mutex, perms, uid, gid);
}
1639 | | |
1640 | | APR_POOL_IMPLEMENT_ACCESSOR(proc_mutex) |
1641 | | |
1642 | | /* Implement OS-specific accessors defined in apr_portable.h */ |
1643 | | |
1644 | | APR_DECLARE(apr_status_t) apr_os_proc_mutex_get_ex(apr_os_proc_mutex_t *ospmutex, |
1645 | | apr_proc_mutex_t *pmutex, |
1646 | | apr_lockmech_e *mech) |
1647 | 0 | { |
1648 | 0 | *ospmutex = pmutex->os; |
1649 | 0 | if (mech) { |
1650 | 0 | *mech = pmutex->meth->mech; |
1651 | 0 | } |
1652 | 0 | return APR_SUCCESS; |
1653 | 0 | } |
1654 | | |
/* Compatibility wrapper: fetch the native handle without reporting
 * the mechanism. */
APR_DECLARE(apr_status_t) apr_os_proc_mutex_get(apr_os_proc_mutex_t *ospmutex,
                                                apr_proc_mutex_t *pmutex)
{
    return apr_os_proc_mutex_get_ex(ospmutex, pmutex, NULL);
}
1660 | | |
/*
 * Wrap a native (OS-level) proc mutex handle in an apr_proc_mutex_t.
 * A wrapper is allocated from `pool` unless the caller supplies one
 * in *pmutex.  `mech` selects the method table; the handle in
 * `ospmutex` is validated by proc_mutex_choose_method().  When
 * `register_cleanup` is non-zero, apr_proc_mutex_cleanup is
 * registered on `pool`.
 */
APR_DECLARE(apr_status_t) apr_os_proc_mutex_put_ex(apr_proc_mutex_t **pmutex,
                                                   apr_os_proc_mutex_t *ospmutex,
                                                   apr_lockmech_e mech,
                                                   int register_cleanup,
                                                   apr_pool_t *pool)
{
    apr_status_t rv;
    if (pool == NULL) {
        return APR_ENOPOOL;
    }

    /* Allocate a wrapper only if the caller didn't pass one in. */
    if ((*pmutex) == NULL) {
        (*pmutex) = (apr_proc_mutex_t *)apr_pcalloc(pool,
                                                    sizeof(apr_proc_mutex_t));
        (*pmutex)->pool = pool;
    }
    rv = proc_mutex_choose_method(*pmutex, mech, ospmutex);
#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
    if (rv == APR_SUCCESS) {
        /* Wrap the raw descriptor in an apr_file_t for the
         * file-based mechanisms. */
        rv = apr_os_file_put(&(*pmutex)->interproc, &(*pmutex)->os.crossproc,
                             0, pool);
    }
#endif

    if (rv == APR_SUCCESS && register_cleanup) {
        apr_pool_cleanup_register(pool, *pmutex, apr_proc_mutex_cleanup,
                                  apr_pool_cleanup_null);
    }
    return rv;
}
1691 | | |
/* Compatibility wrapper: wrap a native handle using the default
 * mechanism and without registering a pool cleanup. */
APR_DECLARE(apr_status_t) apr_os_proc_mutex_put(apr_proc_mutex_t **pmutex,
                                                apr_os_proc_mutex_t *ospmutex,
                                                apr_pool_t *pool)
{
    return apr_os_proc_mutex_put_ex(pmutex, ospmutex, APR_LOCK_DEFAULT,
                                    0, pool);
}
1699 | | |