/src/httpd/srclib/apr/locks/unix/proc_mutex.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Licensed to the Apache Software Foundation (ASF) under one or more |
2 | | * contributor license agreements. See the NOTICE file distributed with |
3 | | * this work for additional information regarding copyright ownership. |
4 | | * The ASF licenses this file to You under the Apache License, Version 2.0 |
5 | | * (the "License"); you may not use this file except in compliance with |
6 | | * the License. You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include "apr.h" |
18 | | #include "apr_strings.h" |
19 | | #include "apr_arch_proc_mutex.h" |
20 | | #include "apr_arch_file_io.h" /* for apr_mkstemp() */ |
21 | | #include "apr_md5.h" /* for apr_md5() */ |
22 | | #include "apr_atomic.h" |
23 | | |
24 | | APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex) |
25 | 0 | { |
26 | 0 | apr_status_t rv = apr_proc_mutex_cleanup(mutex); |
27 | 0 | if (rv == APR_SUCCESS) { |
28 | 0 | apr_pool_cleanup_kill(mutex->pool, mutex, apr_proc_mutex_cleanup); |
29 | 0 | } |
30 | 0 | return rv; |
31 | 0 | } |
32 | | |
33 | | #if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || \ |
34 | | APR_HAS_SYSVSEM_SERIALIZE |
/* child_init() stub for mechanisms that need no per-child re-attachment
 * after fork(); always succeeds. */
static apr_status_t proc_mutex_no_child_init(apr_proc_mutex_t **mutex,
                                             apr_pool_t *cont,
                                             const char *fname)
{
    return APR_SUCCESS;
}
41 | | #endif |
42 | | |
43 | | #if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_PROC_PTHREAD_SERIALIZE |
/* perms_set() stub for mechanisms that have no settable ownership/mode
 * (e.g. anonymous POSIX sems, mmap'ed pthread mutexes); always reports
 * APR_ENOTIMPL. */
static apr_status_t proc_mutex_no_perms_set(apr_proc_mutex_t *mutex,
                                            apr_fileperms_t perms,
                                            apr_uid_t uid,
                                            apr_gid_t gid)
{
    return APR_ENOTIMPL;
}
51 | | #endif |
52 | | |
53 | | #if APR_HAS_FCNTL_SERIALIZE \ |
54 | | || APR_HAS_FLOCK_SERIALIZE \ |
55 | | || (APR_HAS_SYSVSEM_SERIALIZE \ |
56 | | && !defined(HAVE_SEMTIMEDOP)) \ |
57 | | || (APR_HAS_POSIXSEM_SERIALIZE \ |
58 | | && !defined(HAVE_SEM_TIMEDWAIT)) \ |
59 | | || (APR_HAS_PROC_PTHREAD_SERIALIZE \ |
60 | | && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) \ |
61 | | && !defined(HAVE_PTHREAD_CONDATTR_SETPSHARED)) |
62 | | static apr_status_t proc_mutex_spinsleep_timedacquire(apr_proc_mutex_t *mutex, |
63 | | apr_interval_time_t timeout) |
64 | 0 | { |
65 | 0 | #define SLEEP_TIME apr_time_from_msec(10) |
66 | 0 | apr_status_t rv; |
67 | 0 | for (;;) { |
68 | 0 | rv = apr_proc_mutex_trylock(mutex); |
69 | 0 | if (!APR_STATUS_IS_EBUSY(rv)) { |
70 | 0 | if (rv == APR_SUCCESS) { |
71 | 0 | mutex->curr_locked = 1; |
72 | 0 | } |
73 | 0 | break; |
74 | 0 | } |
75 | 0 | if (timeout <= 0) { |
76 | 0 | rv = APR_TIMEUP; |
77 | 0 | break; |
78 | 0 | } |
79 | 0 | if (timeout > SLEEP_TIME) { |
80 | 0 | apr_sleep(SLEEP_TIME); |
81 | 0 | timeout -= SLEEP_TIME; |
82 | 0 | } |
83 | 0 | else { |
84 | 0 | apr_sleep(timeout); |
85 | 0 | timeout = 0; |
86 | 0 | } |
87 | 0 | } |
88 | 0 | return rv; |
89 | 0 | } |
90 | | #endif |
91 | | |
92 | | #if APR_HAS_POSIXSEM_SERIALIZE |
93 | | |
#ifndef SEM_FAILED
/* Fallback for platforms whose <semaphore.h> does not define SEM_FAILED. */
#define SEM_FAILED (-1)
#endif
97 | | |
98 | | static apr_status_t proc_mutex_posix_cleanup(void *mutex_) |
99 | 0 | { |
100 | 0 | apr_proc_mutex_t *mutex = mutex_; |
101 | |
|
102 | 0 | if (sem_close(mutex->os.psem_interproc) < 0) { |
103 | 0 | return errno; |
104 | 0 | } |
105 | | |
106 | 0 | return APR_SUCCESS; |
107 | 0 | } |
108 | | |
/* Create the POSIX named semaphore backing new_mutex.  The name is an MD5
 * hex digest of fname (or time-based when fname is NULL); see the long
 * comment below for the portability constraints that drive this.  On
 * success the semaphore is immediately sem_unlink()ed (so no name can
 * leak) and a pool cleanup is registered to sem_close() it. */
static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex,
                                            const char *fname)
{
#define APR_POSIXSEM_NAME_MAX 30
#define APR_POSIXSEM_NAME_MIN 13
    sem_t *psem;
    char semname[APR_MD5_DIGESTSIZE * 2 + 2];

    /*
     * This bogusness is to follow what appears to be the
     * lowest common denominator in Posix semaphore naming:
     *   - start with '/'
     *   - be at most 14 chars
     *   - be unique and not match anything on the filesystem
     *
     * Because of this, we use fname to generate an md5 hex checksum
     * and use that as the name of the semaphore. If no filename was
     * given, we create one based on the time. We tuck the name
     * away, since it might be useful for debugging.
     *
     * To make this as robust as possible, we initially try something
     * larger (and hopefully more unique) and gracefully fail down to the
     * LCD above.
     *
     * NOTE: Darwin (Mac OS X) seems to be the most restrictive
     * implementation. Versions previous to Darwin 6.2 had the 14
     * char limit, but later rev's allow up to 31 characters.
     *
     */
    if (fname) {
        unsigned char digest[APR_MD5_DIGESTSIZE]; /* note dependency on semname here */
        const char *hex = "0123456789abcdef";
        char *p = semname;
        int i;
        apr_md5(digest, fname, strlen(fname));
        *p++ = '/'; /* must start with /, right? */
        for (i = 0; i < sizeof(digest); i++) {
            *p++ = hex[digest[i] >> 4];
            *p++ = hex[digest[i] & 0xF];
        }
        /* Truncate to the longest name we will attempt first. */
        semname[APR_POSIXSEM_NAME_MAX] = '\0';
    } else {
        apr_time_t now;
        unsigned long sec;
        unsigned long usec;
        now = apr_time_now();
        sec = apr_time_sec(now);
        usec = apr_time_usec(now);
        apr_snprintf(semname, sizeof(semname), "/ApR.%lxZ%lx", sec, usec);
    }
    /* Restart sem_open() if interrupted by a signal. */
    do {
        psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
    } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
    if (psem == (sem_t *)SEM_FAILED) {
        if (errno == ENAMETOOLONG) {
            /* Oh well, good try */
            semname[APR_POSIXSEM_NAME_MIN] = '\0';
        } else {
            return errno;
        }
        /* Second attempt with the lowest-common-denominator name length. */
        do {
            psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
        } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
    }

    if (psem == (sem_t *)SEM_FAILED) {
        return errno;
    }
    /* Ahhh. The joys of Posix sems. Predelete it... */
    sem_unlink(semname);
    new_mutex->os.psem_interproc = psem;
    new_mutex->fname = apr_pstrdup(new_mutex->pool, semname);
    apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex,
                              apr_proc_mutex_cleanup,
                              apr_pool_cleanup_null);
    return APR_SUCCESS;
}
186 | | |
187 | | static apr_status_t proc_mutex_posix_acquire(apr_proc_mutex_t *mutex) |
188 | 0 | { |
189 | 0 | int rc; |
190 | |
|
191 | 0 | do { |
192 | 0 | rc = sem_wait(mutex->os.psem_interproc); |
193 | 0 | } while (rc < 0 && errno == EINTR); |
194 | 0 | if (rc < 0) { |
195 | 0 | return errno; |
196 | 0 | } |
197 | 0 | mutex->curr_locked = 1; |
198 | 0 | return APR_SUCCESS; |
199 | 0 | } |
200 | | |
201 | | static apr_status_t proc_mutex_posix_tryacquire(apr_proc_mutex_t *mutex) |
202 | 0 | { |
203 | 0 | int rc; |
204 | |
|
205 | 0 | do { |
206 | 0 | rc = sem_trywait(mutex->os.psem_interproc); |
207 | 0 | } while (rc < 0 && errno == EINTR); |
208 | 0 | if (rc < 0) { |
209 | 0 | if (errno == EAGAIN) { |
210 | 0 | return APR_EBUSY; |
211 | 0 | } |
212 | 0 | return errno; |
213 | 0 | } |
214 | 0 | mutex->curr_locked = 1; |
215 | 0 | return APR_SUCCESS; |
216 | 0 | } |
217 | | |
218 | | #if defined(HAVE_SEM_TIMEDWAIT) |
219 | | static apr_status_t proc_mutex_posix_timedacquire(apr_proc_mutex_t *mutex, |
220 | | apr_interval_time_t timeout) |
221 | 0 | { |
222 | 0 | if (timeout <= 0) { |
223 | 0 | apr_status_t rv = proc_mutex_posix_tryacquire(mutex); |
224 | 0 | return (rv == APR_EBUSY) ? APR_TIMEUP : rv; |
225 | 0 | } |
226 | 0 | else { |
227 | 0 | int rc; |
228 | 0 | struct timespec abstime; |
229 | |
|
230 | 0 | timeout += apr_time_now(); |
231 | 0 | abstime.tv_sec = apr_time_sec(timeout); |
232 | 0 | abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */ |
233 | |
|
234 | 0 | do { |
235 | 0 | rc = sem_timedwait(mutex->os.psem_interproc, &abstime); |
236 | 0 | } while (rc < 0 && errno == EINTR); |
237 | 0 | if (rc < 0) { |
238 | 0 | if (errno == ETIMEDOUT) { |
239 | 0 | return APR_TIMEUP; |
240 | 0 | } |
241 | 0 | return errno; |
242 | 0 | } |
243 | 0 | } |
244 | 0 | mutex->curr_locked = 1; |
245 | 0 | return APR_SUCCESS; |
246 | 0 | } |
247 | | #endif |
248 | | |
249 | | static apr_status_t proc_mutex_posix_release(apr_proc_mutex_t *mutex) |
250 | 0 | { |
251 | 0 | mutex->curr_locked = 0; |
252 | 0 | if (sem_post(mutex->os.psem_interproc) < 0) { |
253 | | /* any failure is probably fatal, so no big deal to leave |
254 | | * ->curr_locked at 0. */ |
255 | 0 | return errno; |
256 | 0 | } |
257 | 0 | return APR_SUCCESS; |
258 | 0 | } |
259 | | |
/* Method table (vtable) for the "posixsem" mechanism: flags, then the
 * create/acquire/tryacquire/timedacquire/release/cleanup/child_init/
 * perms_set entry points, the mechanism id, and its display name.
 * timedacquire falls back to the spin-sleep emulation when the platform
 * lacks sem_timedwait(). */
static const apr_proc_mutex_unix_lock_methods_t mutex_posixsem_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(POSIXSEM_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_posix_create,
    proc_mutex_posix_acquire,
    proc_mutex_posix_tryacquire,
#if defined(HAVE_SEM_TIMEDWAIT)
    proc_mutex_posix_timedacquire,
#else
    proc_mutex_spinsleep_timedacquire,
#endif
    proc_mutex_posix_release,
    proc_mutex_posix_cleanup,
    proc_mutex_no_child_init,
    proc_mutex_no_perms_set,
    APR_LOCK_POSIXSEM,
    "posixsem"
};
282 | | |
283 | | #endif /* Posix sem implementation */ |
284 | | |
285 | | #if APR_HAS_SYSVSEM_SERIALIZE |
286 | | |
/* Pre-built semop() operation templates (lock, non-blocking lock, unlock),
 * filled in once by proc_mutex_sysv_setup(). */
static struct sembuf proc_mutex_op_on;
static struct sembuf proc_mutex_op_try;
static struct sembuf proc_mutex_op_off;
290 | | |
/* One-time initialization of the semop() templates.  All three carry
 * SEM_UNDO so the kernel releases the semaphore if the holder dies. */
static void proc_mutex_sysv_setup(void)
{
    /* "on": decrement (acquire), blocking */
    proc_mutex_op_on.sem_num = 0;
    proc_mutex_op_on.sem_op = -1;
    proc_mutex_op_on.sem_flg = SEM_UNDO;
    /* "try": decrement (acquire), non-blocking via IPC_NOWAIT */
    proc_mutex_op_try.sem_num = 0;
    proc_mutex_op_try.sem_op = -1;
    proc_mutex_op_try.sem_flg = SEM_UNDO | IPC_NOWAIT;
    /* "off": increment (release) */
    proc_mutex_op_off.sem_num = 0;
    proc_mutex_op_off.sem_op = 1;
    proc_mutex_op_off.sem_flg = SEM_UNDO;
}
303 | | |
304 | | static apr_status_t proc_mutex_sysv_cleanup(void *mutex_) |
305 | 0 | { |
306 | 0 | apr_proc_mutex_t *mutex=mutex_; |
307 | 0 | union semun ick; |
308 | |
|
309 | 0 | if (mutex->os.crossproc != -1) { |
310 | 0 | ick.val = 0; |
311 | 0 | semctl(mutex->os.crossproc, 0, IPC_RMID, ick); |
312 | 0 | } |
313 | 0 | return APR_SUCCESS; |
314 | 0 | } |
315 | | |
/* Create a private single-semaphore SysV set, initialize it to 1
 * (unlocked), and register the pool cleanup.  fname is unused: the set
 * is IPC_PRIVATE and identified only by its id in os.crossproc. */
static apr_status_t proc_mutex_sysv_create(apr_proc_mutex_t *new_mutex,
                                           const char *fname)
{
    union semun ick;
    apr_status_t rv;

    new_mutex->os.crossproc = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    if (new_mutex->os.crossproc == -1) {
        rv = errno;
        /* crossproc is -1 here, so this cleanup is a no-op; kept for symmetry */
        proc_mutex_sysv_cleanup(new_mutex);
        return rv;
    }
    ick.val = 1; /* initial count 1 == unlocked */
    if (semctl(new_mutex->os.crossproc, 0, SETVAL, ick) < 0) {
        rv = errno;
        /* remove the just-created set, then mark the handle invalid */
        proc_mutex_sysv_cleanup(new_mutex);
        new_mutex->os.crossproc = -1;
        return rv;
    }
    new_mutex->curr_locked = 0;
    apr_pool_cleanup_register(new_mutex->pool,
                              (void *)new_mutex, apr_proc_mutex_cleanup,
                              apr_pool_cleanup_null);
    return APR_SUCCESS;
}
341 | | |
342 | | static apr_status_t proc_mutex_sysv_acquire(apr_proc_mutex_t *mutex) |
343 | 0 | { |
344 | 0 | int rc; |
345 | |
|
346 | 0 | do { |
347 | 0 | rc = semop(mutex->os.crossproc, &proc_mutex_op_on, 1); |
348 | 0 | } while (rc < 0 && errno == EINTR); |
349 | 0 | if (rc < 0) { |
350 | 0 | return errno; |
351 | 0 | } |
352 | 0 | mutex->curr_locked = 1; |
353 | 0 | return APR_SUCCESS; |
354 | 0 | } |
355 | | |
356 | | static apr_status_t proc_mutex_sysv_tryacquire(apr_proc_mutex_t *mutex) |
357 | 0 | { |
358 | 0 | int rc; |
359 | |
|
360 | 0 | do { |
361 | 0 | rc = semop(mutex->os.crossproc, &proc_mutex_op_try, 1); |
362 | 0 | } while (rc < 0 && errno == EINTR); |
363 | 0 | if (rc < 0) { |
364 | 0 | if (errno == EAGAIN) { |
365 | 0 | return APR_EBUSY; |
366 | 0 | } |
367 | 0 | return errno; |
368 | 0 | } |
369 | 0 | mutex->curr_locked = 1; |
370 | 0 | return APR_SUCCESS; |
371 | 0 | } |
372 | | |
373 | | #if defined(HAVE_SEMTIMEDOP) |
374 | | static apr_status_t proc_mutex_sysv_timedacquire(apr_proc_mutex_t *mutex, |
375 | | apr_interval_time_t timeout) |
376 | 0 | { |
377 | 0 | if (timeout <= 0) { |
378 | 0 | apr_status_t rv = proc_mutex_sysv_tryacquire(mutex); |
379 | 0 | return (rv == APR_EBUSY) ? APR_TIMEUP : rv; |
380 | 0 | } |
381 | 0 | else { |
382 | 0 | int rc; |
383 | 0 | struct timespec reltime; |
384 | |
|
385 | 0 | reltime.tv_sec = apr_time_sec(timeout); |
386 | 0 | reltime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */ |
387 | |
|
388 | 0 | do { |
389 | 0 | rc = semtimedop(mutex->os.crossproc, &proc_mutex_op_on, 1, |
390 | 0 | &reltime); |
391 | 0 | } while (rc < 0 && errno == EINTR); |
392 | 0 | if (rc < 0) { |
393 | 0 | if (errno == EAGAIN) { |
394 | 0 | return APR_TIMEUP; |
395 | 0 | } |
396 | 0 | return errno; |
397 | 0 | } |
398 | 0 | } |
399 | 0 | mutex->curr_locked = 1; |
400 | 0 | return APR_SUCCESS; |
401 | 0 | } |
402 | | #endif |
403 | | |
404 | | static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex) |
405 | 0 | { |
406 | 0 | int rc; |
407 | |
|
408 | 0 | mutex->curr_locked = 0; |
409 | 0 | do { |
410 | 0 | rc = semop(mutex->os.crossproc, &proc_mutex_op_off, 1); |
411 | 0 | } while (rc < 0 && errno == EINTR); |
412 | 0 | if (rc < 0) { |
413 | 0 | return errno; |
414 | 0 | } |
415 | 0 | return APR_SUCCESS; |
416 | 0 | } |
417 | | |
/* Change owner/group/mode of the SysV semaphore set via semctl(IPC_SET).
 * NOTE(review): only sem_perm.{uid,gid,mode} of buf are initialized; the
 * rest of the semid_ds is left indeterminate.  IPC_SET is specified to
 * read only those three fields — presumably safe, but worth confirming on
 * exotic platforms. */
static apr_status_t proc_mutex_sysv_perms_set(apr_proc_mutex_t *mutex,
                                              apr_fileperms_t perms,
                                              apr_uid_t uid,
                                              apr_gid_t gid)
{

    union semun ick;
    struct semid_ds buf;
    buf.sem_perm.uid = uid;
    buf.sem_perm.gid = gid;
    buf.sem_perm.mode = apr_unix_perms2mode(perms);
    ick.buf = &buf;
    if (semctl(mutex->os.crossproc, 0, IPC_SET, ick) < 0) {
        return errno;
    }
    return APR_SUCCESS;
}
435 | | |
/* Method table (vtable) for the "sysvsem" mechanism; same slot layout as
 * mutex_posixsem_methods.  timedacquire falls back to the spin-sleep
 * emulation when semtimedop() is unavailable. */
static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(SYSVSEM_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_sysv_create,
    proc_mutex_sysv_acquire,
    proc_mutex_sysv_tryacquire,
#if defined(HAVE_SEMTIMEDOP)
    proc_mutex_sysv_timedacquire,
#else
    proc_mutex_spinsleep_timedacquire,
#endif
    proc_mutex_sysv_release,
    proc_mutex_sysv_cleanup,
    proc_mutex_no_child_init,
    proc_mutex_sysv_perms_set,
    APR_LOCK_SYSVSEM,
    "sysvsem"
};
458 | | |
459 | | #endif /* SysV sem implementation */ |
460 | | |
461 | | #if APR_HAS_PROC_PTHREAD_SERIALIZE |
462 | | |
#ifndef APR_USE_PROC_PTHREAD_MUTEX_COND
/* Use a shared condvar-based emulation for timed acquire when the
 * platform has process-shared condvars but lacks
 * pthread_mutex_timedlock().
 * NOTE(review): expanding `defined` from a macro used in #if is
 * undefined/implementation-specific per the C standard (some compilers
 * warn via -Wexpansion-to-defined) — presumably all supported compilers
 * handle it; verify if a new toolchain complains. */
#define APR_USE_PROC_PTHREAD_MUTEX_COND \
            (defined(HAVE_PTHREAD_CONDATTR_SETPSHARED) \
             && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK))
#endif
468 | | |
469 | | /* The mmap()ed pthread_interproc is the native pthread_mutex_t followed |
470 | | * by a refcounter to track children using it. We want to avoid calling |
471 | | * pthread_mutex_destroy() on the shared mutex area while it is in use by |
472 | | * another process, because this may mark the shared pthread_mutex_t as |
473 | | * invalid for everyone, including forked children (unlike "sysvsem" for |
474 | | * example), causing unexpected errors or deadlocks (PR 49504). So the |
475 | | * last process (parent or child) referencing the mutex will effectively |
476 | | * destroy it. |
477 | | */ |
/* Shared-memory layout for the "pthread" mechanism; one instance lives in
 * the mmap'ed region pointed to by os.pthread_interproc.  The accessor
 * macros below cast that pointer and select a field. */
typedef struct {
/* View the raw shared mapping as a proc_pthread_mutex_t. */
#define proc_pthread_cast(m) \
    ((proc_pthread_mutex_t *)(m)->os.pthread_interproc)
    pthread_mutex_t mutex;
/* The shared native mutex itself. */
#define proc_pthread_mutex(m) \
    (proc_pthread_cast(m)->mutex)
#if APR_USE_PROC_PTHREAD_MUTEX_COND
    pthread_cond_t cond;
/* Shared condvar used to emulate timed acquire. */
#define proc_pthread_mutex_cond(m) \
    (proc_pthread_cast(m)->cond)
    /* -1 until cond is initialized, then 0/1 == unlocked/locked */
    apr_int32_t cond_locked;
#define proc_pthread_mutex_cond_locked(m) \
    (proc_pthread_cast(m)->cond_locked)
    apr_uint32_t cond_num_waiters;
#define proc_pthread_mutex_cond_num_waiters(m) \
    (proc_pthread_cast(m)->cond_num_waiters)
/* True when this mutex uses the condvar emulation. */
#define proc_pthread_mutex_is_cond(m) \
    ((m)->pthread_refcounting && proc_pthread_mutex_cond_locked(m) != -1)
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
    /* processes (parent + children) still referencing the mapping;
     * see the comment above this struct for why this exists (PR 49504) */
    apr_uint32_t refcount;
#define proc_pthread_mutex_refcount(m) \
    (proc_pthread_cast(m)->refcount)
} proc_pthread_mutex_t;
501 | | |
502 | | |
503 | | static APR_INLINE int proc_pthread_mutex_inc(apr_proc_mutex_t *mutex) |
504 | 0 | { |
505 | 0 | if (mutex->pthread_refcounting) { |
506 | 0 | apr_atomic_inc32(&proc_pthread_mutex_refcount(mutex)); |
507 | 0 | return 1; |
508 | 0 | } |
509 | 0 | return 0; |
510 | 0 | } |
511 | | |
512 | | static APR_INLINE int proc_pthread_mutex_dec(apr_proc_mutex_t *mutex) |
513 | 0 | { |
514 | 0 | if (mutex->pthread_refcounting) { |
515 | 0 | return apr_atomic_dec32(&proc_pthread_mutex_refcount(mutex)); |
516 | 0 | } |
517 | 0 | return 0; |
518 | 0 | } |
519 | | |
/* Drop this process's reference on the shared pthread mutex.  If we still
 * hold the lock it is released first; the process that drops the LAST
 * reference destroys the native mutex (and condvar), implementing the
 * last-user-destroys policy described above the struct (PR 49504). */
static apr_status_t proc_pthread_mutex_unref(void *mutex_)
{
    apr_proc_mutex_t *mutex=mutex_;
    apr_status_t rv;

#if APR_USE_PROC_PTHREAD_MUTEX_COND
    if (proc_pthread_mutex_is_cond(mutex)) {
        /* condvar emulation: the native mutex is only held transiently,
         * so just clear our bookkeeping flag */
        mutex->curr_locked = 0;
    }
    else
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
    if (mutex->curr_locked == 1) {
        if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
    }
    /* zero result == we were the last reference: destroy shared objects */
    if (!proc_pthread_mutex_dec(mutex)) {
#if APR_USE_PROC_PTHREAD_MUTEX_COND
        if (proc_pthread_mutex_is_cond(mutex) &&
            (rv = pthread_cond_destroy(&proc_pthread_mutex_cond(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */

        if ((rv = pthread_mutex_destroy(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
    }
    return APR_SUCCESS;
}
559 | | |
/* Pool cleanup for a pthread-based mutex: release our reference (which
 * may destroy the shared objects) and unmap the shared region. */
static apr_status_t proc_mutex_pthread_cleanup(void *mutex_)
{
    apr_proc_mutex_t *mutex=mutex_;
    apr_status_t rv;

    /* curr_locked is set to -1 until the mutex has been created */
    if (mutex->curr_locked != -1) {
        if ((rv = proc_pthread_mutex_unref(mutex))) {
            return rv;
        }
    }
    if (munmap(mutex->os.pthread_interproc, sizeof(proc_pthread_mutex_t))) {
        return errno;
    }
    return APR_SUCCESS;
}
576 | | |
/* Create a process-shared pthread mutex in an anonymous shared mapping
 * (mmap of /dev/zero).  Configures PTHREAD_PROCESS_SHARED and, where
 * available, robust + priority-inheritance attributes.  Every failure
 * path unwinds via proc_mutex_pthread_cleanup() (safe before init thanks
 * to the curr_locked == -1 sentinel) and destroys the attr object.
 * fname is unused for this mechanism. */
static apr_status_t proc_mutex_pthread_create(apr_proc_mutex_t *new_mutex,
                                              const char *fname)
{
    apr_status_t rv;
    int fd;
    pthread_mutexattr_t mattr;

    fd = open("/dev/zero", O_RDWR);
    if (fd < 0) {
        return errno;
    }

    new_mutex->os.pthread_interproc = mmap(NULL, sizeof(proc_pthread_mutex_t),
                                           PROT_READ | PROT_WRITE, MAP_SHARED,
                                           fd, 0);
    if (new_mutex->os.pthread_interproc == MAP_FAILED) {
        new_mutex->os.pthread_interproc = NULL;
        rv = errno;
        close(fd);
        return rv;
    }
    /* mapping persists after close(); the fd is no longer needed */
    close(fd);

    new_mutex->pthread_refcounting = 1;
    new_mutex->curr_locked = -1; /* until the mutex has been created */
#if APR_USE_PROC_PTHREAD_MUTEX_COND
    proc_pthread_mutex_cond_locked(new_mutex) = -1;
#endif

    if ((rv = pthread_mutexattr_init(&mattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        return rv;
    }
    if ((rv = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }

#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
    rv = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
#else
    rv = pthread_mutexattr_setrobust_np(&mattr, PTHREAD_MUTEX_ROBUST_NP);
#endif
    if (rv) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }
    if ((rv = pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }
#endif /* HAVE_PTHREAD_MUTEX_ROBUST[_NP] */

    if ((rv = pthread_mutex_init(&proc_pthread_mutex(new_mutex), &mattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        pthread_mutexattr_destroy(&mattr);
        return rv;
    }

    proc_pthread_mutex_refcount(new_mutex) = 1; /* first/parent reference */
    new_mutex->curr_locked = 0; /* mutex created now */

    if ((rv = pthread_mutexattr_destroy(&mattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        proc_mutex_pthread_cleanup(new_mutex);
        return rv;
    }

    apr_pool_cleanup_register(new_mutex->pool,
                              (void *)new_mutex,
                              apr_proc_mutex_cleanup,
                              apr_pool_cleanup_null);
    return APR_SUCCESS;
}
672 | | |
673 | | static apr_status_t proc_mutex_pthread_child_init(apr_proc_mutex_t **mutex, |
674 | | apr_pool_t *pool, |
675 | | const char *fname) |
676 | 0 | { |
677 | 0 | (*mutex)->curr_locked = 0; |
678 | 0 | if (proc_pthread_mutex_inc(*mutex)) { |
679 | 0 | apr_pool_cleanup_register(pool, *mutex, proc_pthread_mutex_unref, |
680 | 0 | apr_pool_cleanup_null); |
681 | 0 | } |
682 | 0 | return APR_SUCCESS; |
683 | 0 | } |
684 | | |
/* Core acquire for the pthread mechanism.  timeout < 0 blocks forever,
 * timeout == 0 is a try (APR_TIMEUP when busy), timeout > 0 waits at most
 * that long.  Two compile-time strategies: a condvar emulation (when the
 * platform has shared condvars but no pthread_mutex_timedlock), or direct
 * lock/trylock/timedlock on the shared native mutex.  Robust-mutex
 * EOWNERDEAD recovery re-marks the mutex consistent and drops the dead
 * owner's reference before proceeding. */
static apr_status_t proc_mutex_pthread_acquire_ex(apr_proc_mutex_t *mutex,
                                                  apr_interval_time_t timeout)
{
    apr_status_t rv;

#if APR_USE_PROC_PTHREAD_MUTEX_COND
    if (proc_pthread_mutex_is_cond(mutex)) {
        /* Condvar emulation: the native mutex only guards the
         * cond_locked / cond_num_waiters bookkeeping. */
        if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
            /* Okay, our owner died. Let's try to make it consistent again. */
            if (rv == EOWNERDEAD) {
                proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
                pthread_mutex_consistent(&proc_pthread_mutex(mutex));
#else
                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
#endif
            }
            else
#endif
            return rv;
        }

        if (!proc_pthread_mutex_cond_locked(mutex)) {
            rv = APR_SUCCESS;
        }
        else if (!timeout) {
            rv = APR_TIMEUP;
        }
        else {
            struct timespec abstime;

            if (timeout > 0) {
                /* convert the relative timeout to an absolute deadline */
                timeout += apr_time_now();
                abstime.tv_sec = apr_time_sec(timeout);
                abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
            }

            proc_pthread_mutex_cond_num_waiters(mutex)++;
            do {
                if (timeout < 0) {
                    rv = pthread_cond_wait(&proc_pthread_mutex_cond(mutex),
                                           &proc_pthread_mutex(mutex));
                    if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                        rv = errno;
#endif
                        break;
                    }
                }
                else {
                    rv = pthread_cond_timedwait(&proc_pthread_mutex_cond(mutex),
                                                &proc_pthread_mutex(mutex),
                                                &abstime);
                    if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                        rv = errno;
#endif
                        if (rv == ETIMEDOUT) {
                            rv = APR_TIMEUP;
                        }
                        break;
                    }
                }
            } while (proc_pthread_mutex_cond_locked(mutex));
            proc_pthread_mutex_cond_num_waiters(mutex)--;
        }
        if (rv != APR_SUCCESS) {
            pthread_mutex_unlock(&proc_pthread_mutex(mutex));
            return rv;
        }

        /* we own the logical lock; release the bookkeeping mutex */
        proc_pthread_mutex_cond_locked(mutex) = 1;

        rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex));
        if (rv) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
            return rv;
        }
    }
    else
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
    {
        /* Direct path: operate on the shared native mutex itself. */
        if (timeout < 0) {
            rv = pthread_mutex_lock(&proc_pthread_mutex(mutex));
            if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                rv = errno;
#endif
            }
        }
        else if (!timeout) {
            rv = pthread_mutex_trylock(&proc_pthread_mutex(mutex));
            if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                rv = errno;
#endif
                if (rv == EBUSY) {
                    return APR_TIMEUP;
                }
            }
        }
        else
#if defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)
        {
            struct timespec abstime;

            timeout += apr_time_now();
            abstime.tv_sec = apr_time_sec(timeout);
            abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */

            rv = pthread_mutex_timedlock(&proc_pthread_mutex(mutex), &abstime);
            if (rv) {
#ifdef HAVE_ZOS_PTHREADS
                rv = errno;
#endif
                if (rv == ETIMEDOUT) {
                    return APR_TIMEUP;
                }
            }
        }
        if (rv) {
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
            /* Okay, our owner died. Let's try to make it consistent again. */
            if (rv == EOWNERDEAD) {
                proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
                pthread_mutex_consistent(&proc_pthread_mutex(mutex));
#else
                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
#endif
            }
            else
#endif
            return rv;
        }
#else /* !HAVE_PTHREAD_MUTEX_TIMEDLOCK */
        /* no native timed lock: fall back to polling with trylock */
        return proc_mutex_spinsleep_timedacquire(mutex, timeout);
#endif
    }

    mutex->curr_locked = 1;
    return APR_SUCCESS;
}
834 | | |
/* Blocking acquire: a negative timeout means "wait forever". */
static apr_status_t proc_mutex_pthread_acquire(apr_proc_mutex_t *mutex)
{
    return proc_mutex_pthread_acquire_ex(mutex, -1);
}
839 | | |
840 | | static apr_status_t proc_mutex_pthread_tryacquire(apr_proc_mutex_t *mutex) |
841 | 0 | { |
842 | 0 | apr_status_t rv = proc_mutex_pthread_acquire_ex(mutex, 0); |
843 | 0 | return (rv == APR_TIMEUP) ? APR_EBUSY : rv; |
844 | 0 | } |
845 | | |
846 | | static apr_status_t proc_mutex_pthread_timedacquire(apr_proc_mutex_t *mutex, |
847 | | apr_interval_time_t timeout) |
848 | 0 | { |
849 | 0 | return proc_mutex_pthread_acquire_ex(mutex, (timeout <= 0) ? 0 : timeout); |
850 | 0 | } |
851 | | |
/* Release the pthread-based mutex.  With the condvar emulation the
 * bookkeeping mutex is taken, a waiter (if any) is signalled, and the
 * logical lock flag cleared; in the direct path the shared native mutex
 * is simply unlocked.  EOWNERDEAD on the bookkeeping lock is recovered
 * the same way as in acquire. */
static apr_status_t proc_mutex_pthread_release(apr_proc_mutex_t *mutex)
{
    apr_status_t rv;

#if APR_USE_PROC_PTHREAD_MUTEX_COND
    if (proc_pthread_mutex_is_cond(mutex)) {
        if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
            rv = errno;
#endif
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
            /* Okay, our owner died. Let's try to make it consistent again. */
            if (rv == EOWNERDEAD) {
                proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
                pthread_mutex_consistent(&proc_pthread_mutex(mutex));
#else
                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
#endif
            }
            else
#endif
            return rv;
        }

        if (!proc_pthread_mutex_cond_locked(mutex)) {
            /* releasing a lock we don't logically hold */
            rv = APR_EINVAL;
        }
        else if (!proc_pthread_mutex_cond_num_waiters(mutex)) {
            rv = APR_SUCCESS;
        }
        else {
            /* wake one waiter blocked in acquire_ex() */
            rv = pthread_cond_signal(&proc_pthread_mutex_cond(mutex));
#ifdef HAVE_ZOS_PTHREADS
            if (rv) {
                rv = errno;
            }
#endif
        }
        if (rv != APR_SUCCESS) {
            pthread_mutex_unlock(&proc_pthread_mutex(mutex));
            return rv;
        }

        proc_pthread_mutex_cond_locked(mutex) = 0;
    }
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */

    mutex->curr_locked = 0;
    if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        return rv;
    }

    return APR_SUCCESS;
}
910 | | |
/* Method table (vtable) for the "pthread" mechanism (process-shared
 * pthread mutex in mmap'ed memory); always a global-capable mechanism. */
static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_methods =
{
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
    proc_mutex_pthread_create,
    proc_mutex_pthread_acquire,
    proc_mutex_pthread_tryacquire,
    proc_mutex_pthread_timedacquire,
    proc_mutex_pthread_release,
    proc_mutex_pthread_cleanup,
    proc_mutex_pthread_child_init,
    proc_mutex_no_perms_set,
    APR_LOCK_PROC_PTHREAD,
    "pthread"
};
925 | | |
926 | | #if APR_USE_PROC_PTHREAD_MUTEX_COND |
/* Create a pthread mutex plus the process-shared condvar used by the
 * timed-acquire emulation.  Builds on proc_mutex_pthread_create(); every
 * failure path destroys the condattr (once initialized) and runs the
 * registered pool cleanup to unwind the base mutex. */
static apr_status_t proc_mutex_pthread_cond_create(apr_proc_mutex_t *new_mutex,
                                                   const char *fname)
{
    apr_status_t rv;
    pthread_condattr_t cattr;

    rv = proc_mutex_pthread_create(new_mutex, fname);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    if ((rv = pthread_condattr_init(&cattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        apr_pool_cleanup_run(new_mutex->pool, new_mutex,
                             apr_proc_mutex_cleanup);
        return rv;
    }
    if ((rv = pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        pthread_condattr_destroy(&cattr);
        apr_pool_cleanup_run(new_mutex->pool, new_mutex,
                             apr_proc_mutex_cleanup);
        return rv;
    }
    if ((rv = pthread_cond_init(&proc_pthread_mutex_cond(new_mutex),
                                &cattr))) {
#ifdef HAVE_ZOS_PTHREADS
        rv = errno;
#endif
        pthread_condattr_destroy(&cattr);
        apr_pool_cleanup_run(new_mutex->pool, new_mutex,
                             apr_proc_mutex_cleanup);
        return rv;
    }
    pthread_condattr_destroy(&cattr);

    /* 0 (not -1) marks the condvar as initialized and unlocked */
    proc_pthread_mutex_cond_locked(new_mutex) = 0;
    proc_pthread_mutex_cond_num_waiters(new_mutex) = 0;

    return APR_SUCCESS;
}
972 | | |
/* Method table for pthread mutexes paired with a process-shared condvar;
 * identical to mutex_proc_pthread_methods except for the create hook. */
static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_cond_methods =
{
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
    proc_mutex_pthread_cond_create,
    proc_mutex_pthread_acquire,
    proc_mutex_pthread_tryacquire,
    proc_mutex_pthread_timedacquire,
    proc_mutex_pthread_release,
    proc_mutex_pthread_cleanup,
    proc_mutex_pthread_child_init,
    proc_mutex_no_perms_set,
    APR_LOCK_PROC_PTHREAD,
    "pthread"
};
987 | | #endif |
988 | | |
989 | | #endif |
990 | | |
991 | | #if APR_HAS_FCNTL_SERIALIZE |
992 | | |
/* Whole-file lock/unlock descriptions shared by every fcntl-based mutex;
 * populated once by proc_mutex_fcntl_setup(). */
static struct flock proc_mutex_lock_it;
static struct flock proc_mutex_unlock_it;

/* Forward declaration: cleanup may need to release a still-held lock. */
static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *);
997 | | |
998 | | static void proc_mutex_fcntl_setup(void) |
999 | 0 | { |
1000 | 0 | proc_mutex_lock_it.l_whence = SEEK_SET; /* from current point */ |
1001 | 0 | proc_mutex_lock_it.l_start = 0; /* -"- */ |
1002 | 0 | proc_mutex_lock_it.l_len = 0; /* until end of file */ |
1003 | 0 | proc_mutex_lock_it.l_type = F_WRLCK; /* set exclusive/write lock */ |
1004 | 0 | proc_mutex_lock_it.l_pid = 0; /* pid not actually interesting */ |
1005 | 0 | proc_mutex_unlock_it.l_whence = SEEK_SET; /* from current point */ |
1006 | 0 | proc_mutex_unlock_it.l_start = 0; /* -"- */ |
1007 | 0 | proc_mutex_unlock_it.l_len = 0; /* until end of file */ |
1008 | 0 | proc_mutex_unlock_it.l_type = F_UNLCK; /* set exclusive/write lock */ |
1009 | 0 | proc_mutex_unlock_it.l_pid = 0; /* pid not actually interesting */ |
1010 | 0 | } |
1011 | | |
1012 | | static apr_status_t proc_mutex_fcntl_cleanup(void *mutex_) |
1013 | 0 | { |
1014 | 0 | apr_status_t status = APR_SUCCESS; |
1015 | 0 | apr_proc_mutex_t *mutex=mutex_; |
1016 | |
|
1017 | 0 | if (mutex->curr_locked == 1) { |
1018 | 0 | status = proc_mutex_fcntl_release(mutex); |
1019 | 0 | if (status != APR_SUCCESS) |
1020 | 0 | return status; |
1021 | 0 | } |
1022 | | |
1023 | 0 | if (mutex->interproc) { |
1024 | 0 | status = apr_file_close(mutex->interproc); |
1025 | 0 | } |
1026 | 0 | if (!mutex->interproc_closing |
1027 | 0 | && mutex->os.crossproc != -1 |
1028 | 0 | && close(mutex->os.crossproc) == -1 |
1029 | 0 | && status == APR_SUCCESS) { |
1030 | 0 | status = errno; |
1031 | 0 | } |
1032 | 0 | return status; |
1033 | 0 | } |
1034 | | |
1035 | | static apr_status_t proc_mutex_fcntl_create(apr_proc_mutex_t *new_mutex, |
1036 | | const char *fname) |
1037 | 0 | { |
1038 | 0 | int rv; |
1039 | |
|
1040 | 0 | if (fname) { |
1041 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, fname); |
1042 | 0 | rv = apr_file_open(&new_mutex->interproc, new_mutex->fname, |
1043 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1044 | 0 | APR_FPROT_UREAD | APR_FPROT_UWRITE | APR_FPROT_GREAD | APR_FPROT_WREAD, |
1045 | 0 | new_mutex->pool); |
1046 | 0 | } |
1047 | 0 | else { |
1048 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX"); |
1049 | 0 | rv = apr_file_mktemp(&new_mutex->interproc, new_mutex->fname, |
1050 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1051 | 0 | new_mutex->pool); |
1052 | 0 | } |
1053 | |
|
1054 | 0 | if (rv != APR_SUCCESS) { |
1055 | 0 | return rv; |
1056 | 0 | } |
1057 | | |
1058 | 0 | new_mutex->os.crossproc = new_mutex->interproc->filedes; |
1059 | 0 | new_mutex->interproc_closing = 1; |
1060 | 0 | new_mutex->curr_locked = 0; |
1061 | 0 | unlink(new_mutex->fname); |
1062 | 0 | apr_pool_cleanup_register(new_mutex->pool, |
1063 | 0 | (void*)new_mutex, |
1064 | 0 | apr_proc_mutex_cleanup, |
1065 | 0 | apr_pool_cleanup_null); |
1066 | 0 | return APR_SUCCESS; |
1067 | 0 | } |
1068 | | |
1069 | | static apr_status_t proc_mutex_fcntl_acquire(apr_proc_mutex_t *mutex) |
1070 | 0 | { |
1071 | 0 | int rc; |
1072 | |
|
1073 | 0 | do { |
1074 | 0 | rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_lock_it); |
1075 | 0 | } while (rc < 0 && errno == EINTR); |
1076 | 0 | if (rc < 0) { |
1077 | 0 | return errno; |
1078 | 0 | } |
1079 | 0 | mutex->curr_locked=1; |
1080 | 0 | return APR_SUCCESS; |
1081 | 0 | } |
1082 | | |
1083 | | static apr_status_t proc_mutex_fcntl_tryacquire(apr_proc_mutex_t *mutex) |
1084 | 0 | { |
1085 | 0 | int rc; |
1086 | |
|
1087 | 0 | do { |
1088 | 0 | rc = fcntl(mutex->os.crossproc, F_SETLK, &proc_mutex_lock_it); |
1089 | 0 | } while (rc < 0 && errno == EINTR); |
1090 | 0 | if (rc < 0) { |
1091 | | #if FCNTL_TRYACQUIRE_EACCES |
1092 | | if (errno == EACCES) { |
1093 | | #else |
1094 | 0 | if (errno == EAGAIN) { |
1095 | 0 | #endif |
1096 | 0 | return APR_EBUSY; |
1097 | 0 | } |
1098 | 0 | return errno; |
1099 | 0 | } |
1100 | 0 | mutex->curr_locked = 1; |
1101 | 0 | return APR_SUCCESS; |
1102 | 0 | } |
1103 | | |
1104 | | static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *mutex) |
1105 | 0 | { |
1106 | 0 | int rc; |
1107 | |
|
1108 | 0 | mutex->curr_locked=0; |
1109 | 0 | do { |
1110 | 0 | rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_unlock_it); |
1111 | 0 | } while (rc < 0 && errno == EINTR); |
1112 | 0 | if (rc < 0) { |
1113 | 0 | return errno; |
1114 | 0 | } |
1115 | 0 | return APR_SUCCESS; |
1116 | 0 | } |
1117 | | |
1118 | | static apr_status_t proc_mutex_fcntl_perms_set(apr_proc_mutex_t *mutex, |
1119 | | apr_fileperms_t perms, |
1120 | | apr_uid_t uid, |
1121 | | apr_gid_t gid) |
1122 | 0 | { |
1123 | |
|
1124 | 0 | if (mutex->fname) { |
1125 | 0 | if (!(perms & APR_FPROT_GSETID)) |
1126 | 0 | gid = -1; |
1127 | 0 | if (fchown(mutex->os.crossproc, uid, gid) < 0) { |
1128 | 0 | return errno; |
1129 | 0 | } |
1130 | 0 | } |
1131 | 0 | return APR_SUCCESS; |
1132 | 0 | } |
1133 | | |
/* Method table for fcntl()-based locking.  Whether the lock also
 * serializes threads within one process is platform-dependent. */
static const apr_proc_mutex_unix_lock_methods_t mutex_fcntl_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FCNTL_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_fcntl_create,
    proc_mutex_fcntl_acquire,
    proc_mutex_fcntl_tryacquire,
    proc_mutex_spinsleep_timedacquire,  /* no native timed wait for fcntl */
    proc_mutex_fcntl_release,
    proc_mutex_fcntl_cleanup,
    proc_mutex_no_child_init,
    proc_mutex_fcntl_perms_set,
    APR_LOCK_FCNTL,
    "fcntl"
};
1152 | | |
1153 | | #endif /* fcntl implementation */ |
1154 | | |
1155 | | #if APR_HAS_FLOCK_SERIALIZE |
1156 | | |
/* Forward declaration: cleanup may need to release a still-held lock. */
static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *);
1158 | | |
1159 | | static apr_status_t proc_mutex_flock_cleanup(void *mutex_) |
1160 | 0 | { |
1161 | 0 | apr_status_t status = APR_SUCCESS; |
1162 | 0 | apr_proc_mutex_t *mutex=mutex_; |
1163 | |
|
1164 | 0 | if (mutex->curr_locked == 1) { |
1165 | 0 | status = proc_mutex_flock_release(mutex); |
1166 | 0 | if (status != APR_SUCCESS) |
1167 | 0 | return status; |
1168 | 0 | } |
1169 | 0 | if (mutex->interproc) { /* if it was opened properly */ |
1170 | 0 | status = apr_file_close(mutex->interproc); |
1171 | 0 | } |
1172 | 0 | if (!mutex->interproc_closing |
1173 | 0 | && mutex->os.crossproc != -1 |
1174 | 0 | && close(mutex->os.crossproc) == -1 |
1175 | 0 | && status == APR_SUCCESS) { |
1176 | 0 | status = errno; |
1177 | 0 | } |
1178 | 0 | if (mutex->fname) { |
1179 | 0 | unlink(mutex->fname); |
1180 | 0 | } |
1181 | 0 | return status; |
1182 | 0 | } |
1183 | | |
1184 | | static apr_status_t proc_mutex_flock_create(apr_proc_mutex_t *new_mutex, |
1185 | | const char *fname) |
1186 | 0 | { |
1187 | 0 | int rv; |
1188 | |
|
1189 | 0 | if (fname) { |
1190 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, fname); |
1191 | 0 | rv = apr_file_open(&new_mutex->interproc, new_mutex->fname, |
1192 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1193 | 0 | APR_FPROT_UREAD | APR_FPROT_UWRITE, |
1194 | 0 | new_mutex->pool); |
1195 | 0 | } |
1196 | 0 | else { |
1197 | 0 | new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX"); |
1198 | 0 | rv = apr_file_mktemp(&new_mutex->interproc, new_mutex->fname, |
1199 | 0 | APR_FOPEN_CREATE | APR_FOPEN_WRITE | APR_FOPEN_EXCL, |
1200 | 0 | new_mutex->pool); |
1201 | 0 | } |
1202 | |
|
1203 | 0 | if (rv != APR_SUCCESS) { |
1204 | 0 | proc_mutex_flock_cleanup(new_mutex); |
1205 | 0 | return rv; |
1206 | 0 | } |
1207 | | |
1208 | 0 | new_mutex->os.crossproc = new_mutex->interproc->filedes; |
1209 | 0 | new_mutex->interproc_closing = 1; |
1210 | 0 | new_mutex->curr_locked = 0; |
1211 | 0 | apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex, |
1212 | 0 | apr_proc_mutex_cleanup, |
1213 | 0 | apr_pool_cleanup_null); |
1214 | 0 | return APR_SUCCESS; |
1215 | 0 | } |
1216 | | |
1217 | | static apr_status_t proc_mutex_flock_acquire(apr_proc_mutex_t *mutex) |
1218 | 0 | { |
1219 | 0 | int rc; |
1220 | |
|
1221 | 0 | do { |
1222 | 0 | rc = flock(mutex->os.crossproc, LOCK_EX); |
1223 | 0 | } while (rc < 0 && errno == EINTR); |
1224 | 0 | if (rc < 0) { |
1225 | 0 | return errno; |
1226 | 0 | } |
1227 | 0 | mutex->curr_locked = 1; |
1228 | 0 | return APR_SUCCESS; |
1229 | 0 | } |
1230 | | |
1231 | | static apr_status_t proc_mutex_flock_tryacquire(apr_proc_mutex_t *mutex) |
1232 | 0 | { |
1233 | 0 | int rc; |
1234 | |
|
1235 | 0 | do { |
1236 | 0 | rc = flock(mutex->os.crossproc, LOCK_EX | LOCK_NB); |
1237 | 0 | } while (rc < 0 && errno == EINTR); |
1238 | 0 | if (rc < 0) { |
1239 | 0 | if (errno == EWOULDBLOCK || errno == EAGAIN) { |
1240 | 0 | return APR_EBUSY; |
1241 | 0 | } |
1242 | 0 | return errno; |
1243 | 0 | } |
1244 | 0 | mutex->curr_locked = 1; |
1245 | 0 | return APR_SUCCESS; |
1246 | 0 | } |
1247 | | |
1248 | | static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *mutex) |
1249 | 0 | { |
1250 | 0 | int rc; |
1251 | |
|
1252 | 0 | mutex->curr_locked = 0; |
1253 | 0 | do { |
1254 | 0 | rc = flock(mutex->os.crossproc, LOCK_UN); |
1255 | 0 | } while (rc < 0 && errno == EINTR); |
1256 | 0 | if (rc < 0) { |
1257 | 0 | return errno; |
1258 | 0 | } |
1259 | 0 | return APR_SUCCESS; |
1260 | 0 | } |
1261 | | |
1262 | | static apr_status_t proc_mutex_flock_child_init(apr_proc_mutex_t **mutex, |
1263 | | apr_pool_t *pool, |
1264 | | const char *fname) |
1265 | 0 | { |
1266 | 0 | apr_proc_mutex_t *new_mutex; |
1267 | 0 | int rv; |
1268 | |
|
1269 | 0 | if (!fname) { |
1270 | 0 | fname = (*mutex)->fname; |
1271 | 0 | if (!fname) { |
1272 | 0 | return APR_SUCCESS; |
1273 | 0 | } |
1274 | 0 | } |
1275 | | |
1276 | 0 | new_mutex = (apr_proc_mutex_t *)apr_pmemdup(pool, *mutex, |
1277 | 0 | sizeof(apr_proc_mutex_t)); |
1278 | 0 | new_mutex->pool = pool; |
1279 | 0 | new_mutex->fname = apr_pstrdup(pool, fname); |
1280 | 0 | rv = apr_file_open(&new_mutex->interproc, new_mutex->fname, |
1281 | 0 | APR_FOPEN_WRITE, 0, new_mutex->pool); |
1282 | 0 | if (rv != APR_SUCCESS) { |
1283 | 0 | return rv; |
1284 | 0 | } |
1285 | 0 | new_mutex->os.crossproc = new_mutex->interproc->filedes; |
1286 | 0 | new_mutex->interproc_closing = 1; |
1287 | |
|
1288 | 0 | *mutex = new_mutex; |
1289 | 0 | return APR_SUCCESS; |
1290 | 0 | } |
1291 | | |
1292 | | static apr_status_t proc_mutex_flock_perms_set(apr_proc_mutex_t *mutex, |
1293 | | apr_fileperms_t perms, |
1294 | | apr_uid_t uid, |
1295 | | apr_gid_t gid) |
1296 | 0 | { |
1297 | |
|
1298 | 0 | if (mutex->fname) { |
1299 | 0 | if (!(perms & APR_FPROT_GSETID)) |
1300 | 0 | gid = -1; |
1301 | 0 | if (fchown(mutex->os.crossproc, uid, gid) < 0) { |
1302 | 0 | return errno; |
1303 | 0 | } |
1304 | 0 | } |
1305 | 0 | return APR_SUCCESS; |
1306 | 0 | } |
1307 | | |
/* Method table for flock()-based locking.  Whether the lock also
 * serializes threads within one process is platform-dependent. */
static const apr_proc_mutex_unix_lock_methods_t mutex_flock_methods =
{
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FLOCK_IS_GLOBAL)
    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else
    0,
#endif
    proc_mutex_flock_create,
    proc_mutex_flock_acquire,
    proc_mutex_flock_tryacquire,
    proc_mutex_spinsleep_timedacquire,  /* no native timed wait for flock */
    proc_mutex_flock_release,
    proc_mutex_flock_cleanup,
    proc_mutex_flock_child_init,
    proc_mutex_flock_perms_set,
    APR_LOCK_FLOCK,
    "flock"
};
1326 | | |
1327 | | #endif /* flock implementation */ |
1328 | | |
/* One-time process initialization for the lock implementations that
 * need static state prepared before first use. */
void apr_proc_mutex_unix_setup_lock(void)
{
    /* setup only needed for sysvsem and fcntl */
#if APR_HAS_SYSVSEM_SERIALIZE
    proc_mutex_sysv_setup();
#endif
#if APR_HAS_FCNTL_SERIALIZE
    proc_mutex_fcntl_setup();
#endif
}
1339 | | |
/* Select the lock implementation for 'mech' and install its method table
 * in new_mutex->meth.  When ospmutex is non-NULL (apr_os_proc_mutex_put*
 * path) the caller-supplied OS handle is validated and adopted; otherwise
 * the relevant OS handle fields are merely reset.
 *
 * APR_LOCK_DEFAULT_TIMED prefers a mechanism with a native timed wait and
 * falls through to APR_LOCK_DEFAULT when none is compiled in.
 * Returns APR_EINVAL for an unusable ospmutex handle, APR_ENOTIMPL for a
 * mechanism not available on this platform. */
static apr_status_t proc_mutex_choose_method(apr_proc_mutex_t *new_mutex,
                                             apr_lockmech_e mech,
                                             apr_os_proc_mutex_t *ospmutex)
{
    /* Reset all OS-handle fields that exist in this build. */
#if APR_HAS_PROC_PTHREAD_SERIALIZE
    new_mutex->os.pthread_interproc = NULL;
#endif
#if APR_HAS_POSIXSEM_SERIALIZE
    new_mutex->os.psem_interproc = NULL;
#endif
#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
    new_mutex->os.crossproc = -1;

#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
    new_mutex->interproc = NULL;
    new_mutex->interproc_closing = 0;
#endif
#endif

    switch (mech) {
    case APR_LOCK_FCNTL:
#if APR_HAS_FCNTL_SERIALIZE
        new_mutex->meth = &mutex_fcntl_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_FLOCK:
#if APR_HAS_FLOCK_SERIALIZE
        new_mutex->meth = &mutex_flock_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_SYSVSEM:
#if APR_HAS_SYSVSEM_SERIALIZE
        new_mutex->meth = &mutex_sysv_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_POSIXSEM:
#if APR_HAS_POSIXSEM_SERIALIZE
        new_mutex->meth = &mutex_posixsem_methods;
        if (ospmutex) {
            if (ospmutex->psem_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.psem_interproc = ospmutex->psem_interproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_PROC_PTHREAD:
#if APR_HAS_PROC_PTHREAD_SERIALIZE
        new_mutex->meth = &mutex_proc_pthread_methods;
        if (ospmutex) {
            if (ospmutex->pthread_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    case APR_LOCK_DEFAULT_TIMED:
        /* Prefer a mechanism with a native timed acquire:
         * robust pthread (condvar or timedlock), then semtimedop(),
         * then sem_timedwait().  Otherwise fall through to the
         * plain default below (spin-sleep timed acquire). */
#if APR_HAS_PROC_PTHREAD_SERIALIZE \
    && (APR_USE_PROC_PTHREAD_MUTEX_COND \
        || defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)) \
    && defined(HAVE_PTHREAD_MUTEX_ROBUST)
#if APR_USE_PROC_PTHREAD_MUTEX_COND
        new_mutex->meth = &mutex_proc_pthread_cond_methods;
#else
        new_mutex->meth = &mutex_proc_pthread_methods;
#endif
        if (ospmutex) {
            if (ospmutex->pthread_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
        }
        break;
#elif APR_HAS_SYSVSEM_SERIALIZE && defined(HAVE_SEMTIMEDOP)
        new_mutex->meth = &mutex_sysv_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
        break;
#elif APR_HAS_POSIXSEM_SERIALIZE && defined(HAVE_SEM_TIMEDWAIT)
        new_mutex->meth = &mutex_posixsem_methods;
        if (ospmutex) {
            if (ospmutex->psem_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.psem_interproc = ospmutex->psem_interproc;
        }
        break;
#endif
        /* fall through */
    case APR_LOCK_DEFAULT:
#if APR_USE_FLOCK_SERIALIZE
        new_mutex->meth = &mutex_flock_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#elif APR_USE_SYSVSEM_SERIALIZE
        new_mutex->meth = &mutex_sysv_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#elif APR_USE_FCNTL_SERIALIZE
        new_mutex->meth = &mutex_fcntl_methods;
        if (ospmutex) {
            if (ospmutex->crossproc == -1) {
                return APR_EINVAL;
            }
            new_mutex->os.crossproc = ospmutex->crossproc;
        }
#elif APR_USE_PROC_PTHREAD_SERIALIZE
        new_mutex->meth = &mutex_proc_pthread_methods;
        if (ospmutex) {
            if (ospmutex->pthread_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
        }
#elif APR_USE_POSIXSEM_SERIALIZE
        new_mutex->meth = &mutex_posixsem_methods;
        if (ospmutex) {
            if (ospmutex->psem_interproc == NULL) {
                return APR_EINVAL;
            }
            new_mutex->os.psem_interproc = ospmutex->psem_interproc;
        }
#else
        return APR_ENOTIMPL;
#endif
        break;
    default:
        return APR_ENOTIMPL;
    }
    return APR_SUCCESS;
}
1512 | | |
1513 | | APR_DECLARE(const char *) apr_proc_mutex_defname(void) |
1514 | 0 | { |
1515 | 0 | apr_proc_mutex_t mutex; |
1516 | |
|
1517 | 0 | if (proc_mutex_choose_method(&mutex, APR_LOCK_DEFAULT, |
1518 | 0 | NULL) != APR_SUCCESS) { |
1519 | 0 | return "unknown"; |
1520 | 0 | } |
1521 | | |
1522 | 0 | return apr_proc_mutex_name(&mutex); |
1523 | 0 | } |
1524 | | |
1525 | | static apr_status_t proc_mutex_create(apr_proc_mutex_t *new_mutex, apr_lockmech_e mech, const char *fname) |
1526 | 0 | { |
1527 | 0 | apr_status_t rv; |
1528 | |
|
1529 | 0 | if ((rv = proc_mutex_choose_method(new_mutex, mech, |
1530 | 0 | NULL)) != APR_SUCCESS) { |
1531 | 0 | return rv; |
1532 | 0 | } |
1533 | | |
1534 | 0 | if ((rv = new_mutex->meth->create(new_mutex, fname)) != APR_SUCCESS) { |
1535 | 0 | return rv; |
1536 | 0 | } |
1537 | | |
1538 | 0 | return APR_SUCCESS; |
1539 | 0 | } |
1540 | | |
1541 | | APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex, |
1542 | | const char *fname, |
1543 | | apr_lockmech_e mech, |
1544 | | apr_pool_t *pool) |
1545 | 0 | { |
1546 | 0 | apr_proc_mutex_t *new_mutex; |
1547 | 0 | apr_status_t rv; |
1548 | |
|
1549 | 0 | new_mutex = apr_pcalloc(pool, sizeof(apr_proc_mutex_t)); |
1550 | 0 | new_mutex->pool = pool; |
1551 | |
|
1552 | 0 | if ((rv = proc_mutex_create(new_mutex, mech, fname)) != APR_SUCCESS) |
1553 | 0 | return rv; |
1554 | | |
1555 | 0 | *mutex = new_mutex; |
1556 | 0 | return APR_SUCCESS; |
1557 | 0 | } |
1558 | | |
/* Re-attach to a parent-created mutex in a child process; delegates to
 * the selected implementation's child_init hook. */
APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
                                                    const char *fname,
                                                    apr_pool_t *pool)
{
    return (*mutex)->meth->child_init(mutex, pool, fname);
}
1565 | | |
/* Acquire the mutex, blocking until it is available. */
APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
{
    return mutex->meth->acquire(mutex);
}
1570 | | |
/* Try to acquire the mutex without blocking; APR_EBUSY if already held. */
APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
{
    return mutex->meth->tryacquire(mutex);
}
1575 | | |
/* Acquire the mutex, waiting at most 'timeout'; the implementation may
 * use a native timed wait or a spin-sleep fallback. */
APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
                                                   apr_interval_time_t timeout)
{
    return mutex->meth->timedacquire(mutex, timeout);
}
1581 | | |
/* Release the mutex. */
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
    return mutex->meth->release(mutex);
}
1586 | | |
/* Pool-cleanup entry point: void* signature as required by
 * apr_pool_cleanup_register(); dispatches to the implementation. */
APR_DECLARE(apr_status_t) apr_proc_mutex_cleanup(void *mutex)
{
    return ((apr_proc_mutex_t *)mutex)->meth->cleanup(mutex);
}
1591 | | |
/* Report which locking mechanism backs this mutex. */
APR_DECLARE(apr_lockmech_e) apr_proc_mutex_mech(apr_proc_mutex_t *mutex)
{
    return mutex->meth->mech;
}
1596 | | |
/* Human-readable name of the mechanism backing this mutex. */
APR_DECLARE(const char *) apr_proc_mutex_name(apr_proc_mutex_t *mutex)
{
    return mutex->meth->name;
}
1601 | | |
/* Return the lock file path for file-backed mechanisms (flock/fcntl),
 * NULL otherwise. */
APR_DECLARE(const char *) apr_proc_mutex_lockfile(apr_proc_mutex_t *mutex)
{
    /* POSIX sems use the fname field but don't use a file,
     * so be careful. */
#if APR_HAS_FLOCK_SERIALIZE
    if (mutex->meth == &mutex_flock_methods) {
        return mutex->fname;
    }
#endif
#if APR_HAS_FCNTL_SERIALIZE
    if (mutex->meth == &mutex_fcntl_methods) {
        return mutex->fname;
    }
#endif
    return NULL;
}
1618 | | |
/* apr_proc_mutex_perms_set(): forward the ownership/permission change to
 * the implementation (a no-op for mechanisms without a backing file). */
APR_PERMS_SET_IMPLEMENT(proc_mutex)
{
    apr_proc_mutex_t *mutex = (apr_proc_mutex_t *)theproc_mutex;
    return mutex->meth->perms_set(mutex, perms, uid, gid);
}
1624 | | |
1625 | | APR_POOL_IMPLEMENT_ACCESSOR(proc_mutex) |
1626 | | |
1627 | | /* Implement OS-specific accessors defined in apr_portable.h */ |
1628 | | |
1629 | | APR_DECLARE(apr_status_t) apr_os_proc_mutex_get_ex(apr_os_proc_mutex_t *ospmutex, |
1630 | | apr_proc_mutex_t *pmutex, |
1631 | | apr_lockmech_e *mech) |
1632 | 0 | { |
1633 | 0 | *ospmutex = pmutex->os; |
1634 | 0 | if (mech) { |
1635 | 0 | *mech = pmutex->meth->mech; |
1636 | 0 | } |
1637 | 0 | return APR_SUCCESS; |
1638 | 0 | } |
1639 | | |
/* Legacy accessor: like apr_os_proc_mutex_get_ex() but without
 * reporting the mechanism. */
APR_DECLARE(apr_status_t) apr_os_proc_mutex_get(apr_os_proc_mutex_t *ospmutex,
                                                apr_proc_mutex_t *pmutex)
{
    return apr_os_proc_mutex_get_ex(ospmutex, pmutex, NULL);
}
1645 | | |
/* Wrap a native OS mutex handle in an apr_proc_mutex_t.  Allocates the
 * wrapper from 'pool' when '*pmutex' is NULL, binds the handle to the
 * method table for 'mech', and optionally registers a pool cleanup so
 * the handle is destroyed with the pool. */
APR_DECLARE(apr_status_t) apr_os_proc_mutex_put_ex(apr_proc_mutex_t **pmutex,
                                                   apr_os_proc_mutex_t *ospmutex,
                                                   apr_lockmech_e mech,
                                                   int register_cleanup,
                                                   apr_pool_t *pool)
{
    apr_status_t rv;
    if (pool == NULL) {
        return APR_ENOPOOL;
    }

    if ((*pmutex) == NULL) {
        (*pmutex) = (apr_proc_mutex_t *)apr_pcalloc(pool,
                                                    sizeof(apr_proc_mutex_t));
        (*pmutex)->pool = pool;
    }
    rv = proc_mutex_choose_method(*pmutex, mech, ospmutex);
#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
    /* Wrap the raw fd in an apr_file_t so the file-based mechanisms can
     * use their normal code paths. */
    if (rv == APR_SUCCESS) {
        rv = apr_os_file_put(&(*pmutex)->interproc, &(*pmutex)->os.crossproc,
                             0, pool);
    }
#endif

    if (rv == APR_SUCCESS && register_cleanup) {
        apr_pool_cleanup_register(pool, *pmutex, apr_proc_mutex_cleanup,
                                  apr_pool_cleanup_null);
    }
    return rv;
}
1676 | | |
/* Legacy wrapper: assumes the default mechanism and registers no
 * cleanup (the caller keeps ownership of the OS handle). */
APR_DECLARE(apr_status_t) apr_os_proc_mutex_put(apr_proc_mutex_t **pmutex,
                                                apr_os_proc_mutex_t *ospmutex,
                                                apr_pool_t *pool)
{
    return apr_os_proc_mutex_put_ex(pmutex, ospmutex, APR_LOCK_DEFAULT,
                                    0, pool);
}
1684 | | |