/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifndef EVENT__DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif

#ifndef EVENT__DISABLE_DEBUG_MODE
extern int event_debug_created_threadable_ctx_;
extern int event_debug_mode_on_;
#endif

/* globals */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

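/* Set the function libevent uses to obtain the calling thread's id; the
 * lock-debugging code relies on it to track which thread holds each lock. */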
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}

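/* Return the callback table that new lock/condition settings should apply
 * to: the saved "original" tables while lock debugging is enabled, the live
 * tables otherwise. */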
struct evthread_lock_callbacks *evthread_get_lock_callbacks(void)
{
	return evthread_lock_debugging_enabled_
	    ? &original_lock_fns_ : &evthread_lock_fns_;
}
struct evthread_condition_callbacks *evthread_get_condition_callbacks(void)
{
	return evthread_lock_debugging_enabled_
	    ? &original_cond_fns_ : &evthread_cond_fns_;
}
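/* Clear the lock-debugging flag; note that this does not restore the
 * original (pre-debugging) callbacks. */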
void evthreadimpl_disable_lock_debugging_(void)
{
	evthread_lock_debugging_enabled_ = 0;
}

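/* Install caller-provided lock callbacks.  Passing NULL clears them;
 * otherwise all four functions must be supplied.  Returns 0 on success,
 * -1 if a different set of callbacks was already installed. */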
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
			event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(evthread_lock_fns_));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up.*/
		if (target->lock_api_version == cbs->lock_api_version &&
		    target->supported_locktypes == cbs->supported_locktypes &&
		    target->alloc == cbs->alloc &&
		    target->free == cbs->free &&
		    target->lock == cbs->lock &&
		    target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(evthread_lock_fns_));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}

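/* A minimal usage sketch (the my_* helpers below are hypothetical, not part
 * of libevent; in practice most callers just use evthread_use_pthreads() or
 * evthread_use_windows_threads() instead of rolling their own):
 *
 *	struct evthread_lock_callbacks cbs = {
 *		EVTHREAD_LOCK_API_VERSION,
 *		EVTHREAD_LOCKTYPE_RECURSIVE,
 *		my_lock_alloc, my_lock_free,
 *		my_lock, my_unlock
 *	};
 *	evthread_set_lock_callbacks(&cbs);
 *	evthread_set_id_callback(my_thread_id);
 */

/* Install caller-provided condition-variable callbacks, with the same
 * all-or-nothing and no-reinstall rules as the lock callbacks above. */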
int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
			event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up.*/
		if (target->condition_api_version == cbs->condition_api_version &&
		    target->alloc_condition == cbs->alloc_condition &&
		    target->free_condition == cbs->free_condition &&
		    target->signal_condition == cbs->signal_condition &&
		    target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}

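/*
 * Debug locks: each debug lock wraps a real lock (forced to be recursive so
 * that debug_cond_wait can reacquire it) and records a signature, the lock
 * type, the holding thread's id, and a recursion count so that misuse can
 * be caught by assertions.
 */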
#define DEBUG_LOCK_SIG	0xdeb0b10c

struct debug_lock {
	unsigned signature;
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

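/* Allocate a debug lock wrapping a real lock of the given type (made
 * recursive), or a stand-alone debug lock if no alloc callback is set. */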
static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (original_lock_fns_.alloc) {
		if (!(result->lock = original_lock_fns_.alloc(
			    locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->signature = DEBUG_LOCK_SIG;
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

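/* Free a debug lock and the real lock inside it.  The assertions catch
 * freeing a held lock and type mismatches; clobbering the signature and
 * count afterwards helps catch use-after-free. */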
static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (original_lock_fns_.free) {
		original_lock_fns_.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;
	lock->signature = 0x12300fda;
	mm_free(lock);
}

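/* Record an acquisition: a non-recursive lock must not already be held, and
 * a recursive re-acquisition must come from the thread that holds it. */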
static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

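/* Acquire the wrapped lock (if any), checking that the read/write mode
 * matches the lock type, then record the acquisition. */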
static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (original_lock_fns_.lock)
		res = original_lock_fns_.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

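/* Record a release: the lock must be held by the calling thread, and the
 * recursion count must not go negative. */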
static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		EVUTIL_ASSERT(lock->held_by == me);
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

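/* Record the release first, then release the wrapped lock (if any). */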
static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (original_lock_fns_.unlock)
		res = original_lock_fns_.unlock(mode, lock->lock);
	return res;
}

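/* Wait on a condition using the real lock inside the debug lock.  The lock
 * must be held on entry; its debug state is cleared around the wait because
 * the underlying wait releases and reacquires the real lock. */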
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

/* misspelled version for backward compatibility */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}

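/* Turn on lock debugging: snapshot whatever lock/condition callbacks are
 * currently installed and interpose the debug wrappers above.  This should
 * be called after the real callbacks are installed (e.g. by
 * evthread_use_pthreads()) and before any locks are created, since locks
 * allocated earlier will not be wrapped. */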
void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (evthread_lock_debugging_enabled_)
		return;
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}

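/* Return true iff the given debug lock is held and, when a thread-id
 * callback is set, held by the calling thread. */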
int
evthread_is_debug_lock_held_(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (evthread_id_fn_) {
		unsigned long me = evthread_id_fn_();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

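/* Return the real lock wrapped inside a debug lock. */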
void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}

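/* (Re)build one of libevent's global locks when locking or lock debugging
 * is enabled after startup.  lock_ is the existing lock (or NULL), and
 * enable_locks says whether real locking, as opposed to debugging, is being
 * turned on. */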
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && original_lock_fns_.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			original_lock_fns_.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			original_lock_fns_.free(lock_, locktype);
			return NULL;
		}
		/* The signature must be set here too, or the assertions in
		 * the debug lock/unlock paths would fire on first use. */
		lock->signature = DEBUG_LOCK_SIG;
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return evthread_lock_fns_.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
		if (!lock)
			return NULL;
		EVUTIL_ASSERT(enable_locks && evthread_lock_debugging_enabled_);
		EVUTIL_ASSERT(lock->locktype == locktype);
		if (!lock->lock) {
			lock->lock = original_lock_fns_.alloc(
			    locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
			if (!lock->lock) {
				lock->count = -200;
				mm_free(lock);
				return NULL;
			}
		}
		return lock;
	}
}

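/* When the callback tables above are not exposed (GLOBAL == static), other
 * modules reach them through these wrapper functions.  Each wrapper falls
 * back to a safe default when no callback is installed. */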
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
evthreadimpl_get_id_(void)
{
	return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
evthreadimpl_lock_alloc_(unsigned locktype)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		event_debug_created_threadable_ctx_ = 1;
	}
#endif

	return evthread_lock_fns_.alloc ?
	    evthread_lock_fns_.alloc(locktype) : NULL;
}
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
int
evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.lock)
		return evthread_lock_fns_.lock(mode, lock);
	else
		return 0;
}
int
evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.unlock)
		return evthread_lock_fns_.unlock(mode, lock);
	else
		return 0;
}
void *
evthreadimpl_cond_alloc_(unsigned condtype)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		event_debug_created_threadable_ctx_ = 1;
	}
#endif

	return evthread_cond_fns_.alloc_condition ?
	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
int
evthreadimpl_cond_signal_(void *cond, int broadcast)
{
	if (evthread_cond_fns_.signal_condition)
		return evthread_cond_fns_.signal_condition(cond, broadcast);
	else
		return 0;
}
int
evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
	if (evthread_cond_fns_.wait_condition)
		return evthread_cond_fns_.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}

int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
#endif /* !EVTHREAD_EXPOSE_STRUCTS */

#endif /* !EVENT__DISABLE_THREAD_SUPPORT */