/src/libevent/evthread-internal.h
/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVTHREAD_INTERNAL_H_INCLUDED_
#define EVTHREAD_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include "event2/thread.h"
#include "util-internal.h"

struct event_base;

#ifndef _WIN32
/* On Windows, the way we currently build DLLs does not let us share global
 * structures between modules. Thus, we only take the direct-call-to-function
 * code path when we know that the local shared-library system supports it.
 */
#define EVTHREAD_EXPOSE_STRUCTS
#endif

#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
   enabled. */
EVENT2_EXPORT_SYMBOL
extern struct evthread_lock_callbacks evthread_lock_fns_;
EVENT2_EXPORT_SYMBOL
extern struct evthread_condition_callbacks evthread_cond_fns_;
extern unsigned long (*evthread_id_fn_)(void);
EVENT2_EXPORT_SYMBOL
extern int evthread_lock_debugging_enabled_;

/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
	(evthread_id_fn_ ? evthread_id_fn_() : 1)

/** Return true iff we're in the thread that is currently (or most recently)
 * running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base) \
	(evthread_id_fn_ == NULL || \
	    (base)->th_owner_id == evthread_id_fn_())

/** Return true iff we need to notify the base's main thread about changes to
 * its state, because it's currently running the main loop in another
 * thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base) \
	(evthread_id_fn_ != NULL && \
	    (base)->running_loop && \
	    (base)->th_owner_id != evthread_id_fn_())
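
/*
 * Usage sketch (illustrative only; "base" and the wakeup step are
 * hypothetical stand-ins): code that changes an event_base's state from an
 * arbitrary thread typically takes the base lock, makes its change, and
 * then pokes the loop awake if some other thread is currently running it.
 *
 *	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 *	... modify the base's state ...
 *	if (EVBASE_NEED_NOTIFY(base))
 *		... wake up the thread running the loop ...
 *	EVBASE_RELEASE_LOCK(base, th_base_lock);
 */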

/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
    NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
	((lockvar) = evthread_lock_fns_.alloc ? \
	    evthread_lock_fns_.alloc(locktype) : NULL)

/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
	do { \
		void *lock_tmp_ = (lockvar); \
		if (lock_tmp_ && evthread_lock_fns_.free) \
			evthread_lock_fns_.free(lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthread_lock_fns_.lock(mode, lockvar); \
	} while (0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthread_lock_fns_.unlock(mode, lockvar); \
	} while (0)
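
/*
 * Usage sketch (illustrative only; "my_lock" is hypothetical;
 * EVTHREAD_LOCKTYPE_RECURSIVE comes from event2/thread.h).  Each of these
 * macros degrades to a no-op when no locking callbacks are installed, so
 * the same code works in single-threaded builds:
 *
 *	void *my_lock = NULL;
 *	EVTHREAD_ALLOC_LOCK(my_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 *	EVLOCK_LOCK(my_lock, 0);
 *	... critical section ...
 *	EVLOCK_UNLOCK(my_lock, 0);
 *	EVTHREAD_FREE_LOCK(my_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 */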
/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
		EVLOCK_LOCK((base)->lockvar, 0); \
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
		EVLOCK_UNLOCK((base)->lockvar, 0); \
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
	do { \
		if ((lock) && evthread_lock_debugging_enabled_) { \
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		} \
	} while (0)

/** Try to grab 'lock' without blocking, and return 1 if we
 * manage to get it. */
static inline int EVLOCK_TRY_LOCK_(void *lock);
static inline int
EVLOCK_TRY_LOCK_(void *lock)
{
	if (lock && evthread_lock_fns_.lock) {
		int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this thing;
		 * of course we count as having the lock. */
		return 1;
	}
}
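
/*
 * Usage sketch (illustrative only; "lock" is hypothetical): a try-lock
 * lets a caller skip contended work instead of blocking.  On success the
 * caller holds the lock and must release it with EVLOCK_UNLOCK:
 *
 *	if (EVLOCK_TRY_LOCK_(lock)) {
 *		... do the optional work ...
 *		EVLOCK_UNLOCK(lock, 0);
 *	}
 */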

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
	do { \
		(condvar) = evthread_cond_fns_.alloc_condition ? \
		    evthread_cond_fns_.alloc_condition(0) : NULL; \
	} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
	do { \
		if (cond) \
			evthread_cond_fns_.free_condition((cond)); \
	} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
 * holding 'lock'. The lock will be released until the condition is
 * signalled, at which point it will be acquired again. Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
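
/*
 * Usage sketch (illustrative only; "lock", "cond", and the "ready" flag
 * are hypothetical).  As with pthreads, waits belong in a predicate loop:
 * the lock is dropped while waiting and wakeups may be spurious, so the
 * condition must be rechecked after every wakeup.
 *
 *	EVLOCK_LOCK(lock, 0);
 *	while (!ready)
 *		EVTHREAD_COND_WAIT(cond, lock);
 *	... "ready" is now true and we hold "lock" ...
 *	EVLOCK_UNLOCK(lock, 0);
 */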

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED() \
	(evthread_lock_fns_.lock != NULL)

#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)

unsigned long evthreadimpl_get_id_(void);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_is_lock_debugging_enabled_(void);
EVENT2_EXPORT_SYMBOL
void *evthreadimpl_lock_alloc_(unsigned locktype);
EVENT2_EXPORT_SYMBOL
void evthreadimpl_lock_free_(void *lock, unsigned locktype);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_lock_lock_(unsigned mode, void *lock);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
EVENT2_EXPORT_SYMBOL
void *evthreadimpl_cond_alloc_(unsigned condtype);
EVENT2_EXPORT_SYMBOL
void evthreadimpl_cond_free_(void *cond);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_cond_signal_(void *cond, int broadcast);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
int evthreadimpl_locking_enabled_(void);

#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
#define EVBASE_IN_THREAD(base) \
	((base)->th_owner_id == evthreadimpl_get_id_())
#define EVBASE_NEED_NOTIFY(base) \
	((base)->running_loop && \
	    ((base)->th_owner_id != evthreadimpl_get_id_()))

#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
	((lockvar) = evthreadimpl_lock_alloc_(locktype))

#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
	do { \
		void *lock_tmp_ = (lockvar); \
		if (lock_tmp_) \
			evthreadimpl_lock_free_(lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthreadimpl_lock_lock_(mode, lockvar); \
	} while (0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthreadimpl_lock_unlock_(mode, lockvar); \
	} while (0)

/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
		EVLOCK_LOCK((base)->lockvar, 0); \
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
		EVLOCK_UNLOCK((base)->lockvar, 0); \
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
	do { \
		if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		} \
	} while (0)

/** Try to grab 'lock' without blocking, and return 1 if we
 * manage to get it. */
static inline int EVLOCK_TRY_LOCK_(void *lock);
static inline int
EVLOCK_TRY_LOCK_(void *lock)
{
	if (lock) {
		int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this thing;
		 * of course we count as having the lock. */
		return 1;
	}
}

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
	do { \
		(condvar) = evthreadimpl_cond_alloc_(0); \
	} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
	do { \
		if (cond) \
			evthreadimpl_cond_free_((cond)); \
	} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
	( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
	( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
 * holding 'lock'. The lock will be released until the condition is
 * signalled, at which point it will be acquired again. Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )

#define EVTHREAD_LOCKING_ENABLED() \
	(evthreadimpl_locking_enabled_())

#else /* EVENT__DISABLE_THREAD_SUPPORT */

#define EVTHREAD_GET_ID() 1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_

#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_

#define EVBASE_IN_THREAD(base) 1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_

#define EVLOCK_TRY_LOCK_(lock) 1

#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_

#define EVTHREAD_LOCKING_ENABLED() 0

#endif

/* This code is shared between both lock impls */
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
	do { \
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
			void *tmp = lockvar1; \
			lockvar1 = lockvar2; \
			lockvar2 = tmp; \
		} \
	} while (0)

/** Acquire both lock1 and lock2.  Always acquires the locks in the same
 * order, so that two threads locking two locks with LOCK2 will not
 * deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *lock1_tmplock_ = (lock1); \
		void *lock2_tmplock_ = (lock2); \
		EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
		EVLOCK_LOCK(lock1_tmplock_,mode1); \
		if (lock2_tmplock_ != lock1_tmplock_) \
			EVLOCK_LOCK(lock2_tmplock_,mode2); \
	} while (0)
/** Release both lock1 and lock2. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *lock1_tmplock_ = (lock1); \
		void *lock2_tmplock_ = (lock2); \
		EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
		if (lock2_tmplock_ != lock1_tmplock_) \
			EVLOCK_UNLOCK(lock2_tmplock_,mode2); \
		EVLOCK_UNLOCK(lock1_tmplock_,mode1); \
	} while (0)
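
/*
 * Usage sketch (illustrative only; "lock_a" and "lock_b" are hypothetical):
 * two threads may name the same pair of locks in opposite orders, but the
 * pointerwise sort above makes both acquire them in the same order, so
 * neither can end up holding one lock while waiting on the other.
 *
 *	thread 1:  EVLOCK_LOCK2(lock_a, lock_b, 0, 0);
 *	thread 2:  EVLOCK_LOCK2(lock_b, lock_a, 0, 0);
 *	...
 *	EVLOCK_UNLOCK2(lock_a, lock_b, 0, 0);
 */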

EVENT2_EXPORT_SYMBOL
int evthread_is_debug_lock_held_(void *lock);
void *evthread_debug_get_real_lock_(void *lock);

void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
    int enable_locks);

#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype) \
	do { \
		lockvar = evthread_setup_global_lock_(lockvar, \
		    (locktype), enable_locks); \
		if (!lockvar) { \
			event_warn("Couldn't allocate %s", #lockvar); \
			return -1; \
		} \
	} while (0)

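/*
 * Usage sketch (illustrative only; "my_module_lock_" and the function name
 * are hypothetical).  Note that the macro refers to a variable named
 * "enable_locks" and may "return -1", so it must be expanded inside a
 * setup function with that parameter, like the ones declared below:
 *
 *	static void *my_module_lock_ = NULL;
 *
 *	int
 *	my_module_global_setup_locks_(const int enable_locks)
 *	{
 *		EVTHREAD_SETUP_GLOBAL_LOCK(my_module_lock_, 0);
 *		return 0;
 *	}
 */
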
int event_global_setup_locks_(const int enable_locks);
int evsig_global_setup_locks_(const int enable_locks);
int evutil_global_setup_locks_(const int enable_locks);
int evutil_secure_rng_global_setup_locks_(const int enable_locks);

/** Return current evthread_lock_callbacks */
EVENT2_EXPORT_SYMBOL
struct evthread_lock_callbacks *evthread_get_lock_callbacks(void);
/** Return current evthread_condition_callbacks */
struct evthread_condition_callbacks *evthread_get_condition_callbacks(void);
/** Disable lock debugging for internal usage (like global shutdown) */
void evthreadimpl_disable_lock_debugging_(void);

#endif

#ifdef __cplusplus
}
#endif

#endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */