/src/libevent/bufferevent.c
/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <sys/types.h>

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif

#ifdef _WIN32
#include <winsock2.h>
#endif

#include "event2/util.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/bufferevent_compat.h"
#include "event2/event.h"
#include "event-internal.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "evbuffer-internal.h"
#include "util-internal.h"

static void bufferevent_cancel_all_(struct bufferevent *bev);
static void bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_);

void
bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
  BEV_LOCK(bufev);
  if (!bufev_private->read_suspended)
    bufev->be_ops->disable(bufev, EV_READ);
  bufev_private->read_suspended |= what;
  BEV_UNLOCK(bufev);
}

void
bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
  BEV_LOCK(bufev);
  bufev_private->read_suspended &= ~what;
  if (!bufev_private->read_suspended && (bufev->enabled & EV_READ))
    bufev->be_ops->enable(bufev, EV_READ);
  BEV_UNLOCK(bufev);
}

void
bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
  BEV_LOCK(bufev);
  if (!bufev_private->write_suspended)
    bufev->be_ops->disable(bufev, EV_WRITE);
  bufev_private->write_suspended |= what;
  BEV_UNLOCK(bufev);
}

void
bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
  BEV_LOCK(bufev);
  bufev_private->write_suspended &= ~what;
  if (!bufev_private->write_suspended && (bufev->enabled & EV_WRITE))
    bufev->be_ops->enable(bufev, EV_WRITE);
  BEV_UNLOCK(bufev);
}
/**
 * Sometimes a bufferevent implementation can overrun its read high
 * watermark (the openssl implementation is one example).  In that case,
 * if the read callback does not drain enough data to get back under the
 * watermark, the read callback will never be invoked again, because
 * reading stays suspended (see the suspend logic above).
 *
 * To avoid this, we schedule the read callback again here, but only from
 * the user callback path, to avoid scheduling it more than once:
 * - when data has been added to the input buffer
 * - when data has been drained from it (by the user-specified read callback)
 */
static void bufferevent_inbuf_wm_check(struct bufferevent *bev)
{
  if (!bev->wm_read.high)
    return;
  if (!(bev->enabled & EV_READ))
    return;
  if (evbuffer_get_length(bev->input) < bev->wm_read.high)
    return;

  bufferevent_trigger(bev, EV_READ, BEV_OPT_DEFER_CALLBACKS);
}
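
/*
 * Illustrative usage sketch (not part of libevent): a reader with a read
 * high watermark whose callback drains the input buffer.  The "example_"
 * names are hypothetical; the block is guarded by "#if 0" so it is not
 * compiled.
 */
#if 0
static void
example_read_cb(struct bufferevent *bev, void *ctx)
{
  char chunk[4096];
  /* Drain whatever is available.  Once the input buffer drops back
   * under the high watermark, reading is unsuspended; and thanks to
   * bufferevent_inbuf_wm_check() above, the callback is re-armed even
   * if an implementation (e.g. openssl) overran the watermark. */
  while (bufferevent_read(bev, chunk, sizeof(chunk)) > 0)
    ;
}

static void
example_setup_reader(struct bufferevent *bev)
{
  /* Fire the read callback as soon as 1 byte is available; try not to
   * buffer more than 16384 bytes of input. */
  bufferevent_setwatermark(bev, EV_READ, 1, 16384);
  bufferevent_setcb(bev, example_read_cb, NULL, NULL, NULL);
  bufferevent_enable(bev, EV_READ);
}
#endif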

/* Callback to implement watermarks on the input buffer.  Only enabled
 * if the watermark is set. */
static void
bufferevent_inbuf_wm_cb(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
  struct bufferevent *bufev = arg;
  size_t size;

  size = evbuffer_get_length(buf);

  if (size >= bufev->wm_read.high)
    bufferevent_wm_suspend_read(bufev);
  else
    bufferevent_wm_unsuspend_read(bufev);
}

static void
bufferevent_run_deferred_callbacks_locked(struct event_callback *cb, void *arg)
{
  struct bufferevent_private *bufev_private = arg;
  struct bufferevent *bufev = &bufev_private->bev;

  BEV_LOCK(bufev);
  if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
      bufev->errorcb) {
    /* The "connected" happened before any reads or writes, so
       send it first. */
    bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
    bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg);
  }
  if (bufev_private->readcb_pending && bufev->readcb) {
    bufev_private->readcb_pending = 0;
    bufev->readcb(bufev, bufev->cbarg);
    bufferevent_inbuf_wm_check(bufev);
  }
  if (bufev_private->writecb_pending && bufev->writecb) {
    bufev_private->writecb_pending = 0;
    bufev->writecb(bufev, bufev->cbarg);
  }
  if (bufev_private->eventcb_pending && bufev->errorcb) {
    short what = bufev_private->eventcb_pending;
    int err = bufev_private->errno_pending;
    bufev_private->eventcb_pending = 0;
    bufev_private->errno_pending = 0;
    EVUTIL_SET_SOCKET_ERROR(err);
    bufev->errorcb(bufev, what, bufev->cbarg);
  }
  bufferevent_decref_and_unlock_(bufev);
}

static void
bufferevent_run_deferred_callbacks_unlocked(struct event_callback *cb, void *arg)
{
  struct bufferevent_private *bufev_private = arg;
  struct bufferevent *bufev = &bufev_private->bev;

  BEV_LOCK(bufev);
#define UNLOCKED(stmt) \
  do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)

  if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
      bufev->errorcb) {
    /* The "connected" happened before any reads or writes, so
       send it first. */
    bufferevent_event_cb errorcb = bufev->errorcb;
    void *cbarg = bufev->cbarg;
    bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
    UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg));
  }
  if (bufev_private->readcb_pending && bufev->readcb) {
    bufferevent_data_cb readcb = bufev->readcb;
    void *cbarg = bufev->cbarg;
    bufev_private->readcb_pending = 0;
    UNLOCKED(readcb(bufev, cbarg));
    bufferevent_inbuf_wm_check(bufev);
  }
  if (bufev_private->writecb_pending && bufev->writecb) {
    bufferevent_data_cb writecb = bufev->writecb;
    void *cbarg = bufev->cbarg;
    bufev_private->writecb_pending = 0;
    UNLOCKED(writecb(bufev, cbarg));
  }
  if (bufev_private->eventcb_pending && bufev->errorcb) {
    bufferevent_event_cb errorcb = bufev->errorcb;
    void *cbarg = bufev->cbarg;
    short what = bufev_private->eventcb_pending;
    int err = bufev_private->errno_pending;
    bufev_private->eventcb_pending = 0;
    bufev_private->errno_pending = 0;
    EVUTIL_SET_SOCKET_ERROR(err);
    UNLOCKED(errorcb(bufev, what, cbarg));
  }
  bufferevent_decref_and_unlock_(bufev);
#undef UNLOCKED
}

#define SCHEDULE_DEFERRED(bevp)                                 \
  do {                                                          \
    if (event_deferred_cb_schedule_(                            \
          (bevp)->bev.ev_base,                                  \
          &(bevp)->deferred))                                   \
      bufferevent_incref_(&(bevp)->bev);                        \
  } while (0)
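
/*
 * Illustrative sketch (not part of libevent): with BEV_OPT_DEFER_CALLBACKS,
 * the pending-callback machinery above runs user callbacks from a deferred
 * event_callback instead of from inside the I/O code, and
 * BEV_OPT_UNLOCK_CALLBACKS additionally drops the lock around each user
 * callback.  bufferevent_socket_new() lives in bufferevent_sock.c; the
 * "example_" name is hypothetical.
 */
#if 0
static struct bufferevent *
example_new_deferred(struct event_base *base, evutil_socket_t fd)
{
  return bufferevent_socket_new(base, fd,
      BEV_OPT_THREADSAFE |
      BEV_OPT_DEFER_CALLBACKS |   /* run callbacks from the event loop */
      BEV_OPT_UNLOCK_CALLBACKS);  /* release the lock while they run */
}
#endif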


void
bufferevent_run_readcb_(struct bufferevent *bufev, int options)
{
  /* Requires that we hold the lock and a reference */
  struct bufferevent_private *p = BEV_UPCAST(bufev);
  if (bufev->readcb == NULL)
    return;
  if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
    p->readcb_pending = 1;
    SCHEDULE_DEFERRED(p);
  } else {
    bufev->readcb(bufev, bufev->cbarg);
    bufferevent_inbuf_wm_check(bufev);
  }
}

void
bufferevent_run_writecb_(struct bufferevent *bufev, int options)
{
  /* Requires that we hold the lock and a reference */
  struct bufferevent_private *p = BEV_UPCAST(bufev);
  if (bufev->writecb == NULL)
    return;
  if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
    p->writecb_pending = 1;
    SCHEDULE_DEFERRED(p);
  } else {
    bufev->writecb(bufev, bufev->cbarg);
  }
}

#define BEV_TRIG_ALL_OPTS (             \
    BEV_TRIG_IGNORE_WATERMARKS|         \
    BEV_TRIG_DEFER_CALLBACKS            \
  )

void
bufferevent_trigger(struct bufferevent *bufev, short iotype, int options)
{
  bufferevent_incref_and_lock_(bufev);
  bufferevent_trigger_nolock_(bufev, iotype, options&BEV_TRIG_ALL_OPTS);
  bufferevent_decref_and_unlock_(bufev);
}

void
bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options)
{
  /* Requires that we hold the lock and a reference */
  struct bufferevent_private *p = BEV_UPCAST(bufev);
  if (bufev->errorcb == NULL)
    return;
  if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
    p->eventcb_pending |= what;
    p->errno_pending = EVUTIL_SOCKET_ERROR();
    SCHEDULE_DEFERRED(p);
  } else {
    bufev->errorcb(bufev, what, bufev->cbarg);
  }
}

void
bufferevent_trigger_event(struct bufferevent *bufev, short what, int options)
{
  bufferevent_incref_and_lock_(bufev);
  bufferevent_run_eventcb_(bufev, what, options&BEV_TRIG_ALL_OPTS);
  bufferevent_decref_and_unlock_(bufev);
}

int
bufferevent_init_common_(struct bufferevent_private *bufev_private,
    struct event_base *base,
    const struct bufferevent_ops *ops,
    enum bufferevent_options options)
{
  struct bufferevent *bufev = &bufev_private->bev;

  if (!bufev->input) {
    if ((bufev->input = evbuffer_new()) == NULL)
      goto err;
  }

  if (!bufev->output) {
    if ((bufev->output = evbuffer_new()) == NULL)
      goto err;
  }

  bufev_private->refcnt = 1;
  bufev->ev_base = base;

  /* Disable timeouts. */
  evutil_timerclear(&bufev->timeout_read);
  evutil_timerclear(&bufev->timeout_write);

  bufev->be_ops = ops;

  if (bufferevent_ratelim_init_(bufev_private))
    goto err;

  /*
   * Set to EV_WRITE so that using bufferevent_write is going to
   * trigger a callback.  Reading needs to be explicitly enabled
   * because otherwise no data will be available.
   */
  bufev->enabled = EV_WRITE;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
  if (options & BEV_OPT_THREADSAFE) {
    if (bufferevent_enable_locking_(bufev, NULL) < 0)
      goto err;
  }
#endif
  if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS))
      == BEV_OPT_UNLOCK_CALLBACKS) {
    event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
    goto err;
  }
  if (options & BEV_OPT_UNLOCK_CALLBACKS)
    event_deferred_cb_init_(
        &bufev_private->deferred,
        event_base_get_npriorities(base) / 2,
        bufferevent_run_deferred_callbacks_unlocked,
        bufev_private);
  else
    event_deferred_cb_init_(
        &bufev_private->deferred,
        event_base_get_npriorities(base) / 2,
        bufferevent_run_deferred_callbacks_locked,
        bufev_private);

  bufev_private->options = options;

  evbuffer_set_parent_(bufev->input, bufev);
  evbuffer_set_parent_(bufev->output, bufev);

  return 0;

err:
  if (bufev->input) {
    evbuffer_free(bufev->input);
    bufev->input = NULL;
  }
  if (bufev->output) {
    evbuffer_free(bufev->output);
    bufev->output = NULL;
  }
  return -1;
}

void
bufferevent_setcb(struct bufferevent *bufev,
    bufferevent_data_cb readcb, bufferevent_data_cb writecb,
    bufferevent_event_cb eventcb, void *cbarg)
{
  BEV_LOCK(bufev);

  bufev->readcb = readcb;
  bufev->writecb = writecb;
  bufev->errorcb = eventcb;

  bufev->cbarg = cbarg;
  BEV_UNLOCK(bufev);
}
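
/*
 * Illustrative sketch (not part of libevent): typical callback wiring.
 * Any of the three callbacks may be NULL to disable it.  The "example_"
 * names are hypothetical (example_read_cb is the sketch further above).
 */
#if 0
static void
example_event_cb(struct bufferevent *bev, short what, void *ctx)
{
  if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR))
    bufferevent_free(bev);
}

static void
example_wire_callbacks(struct bufferevent *bev, void *ctx)
{
  bufferevent_setcb(bev, example_read_cb, NULL, example_event_cb, ctx);
}
#endif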

void
bufferevent_getcb(struct bufferevent *bufev,
    bufferevent_data_cb *readcb_ptr,
    bufferevent_data_cb *writecb_ptr,
    bufferevent_event_cb *eventcb_ptr,
    void **cbarg_ptr)
{
  BEV_LOCK(bufev);
  if (readcb_ptr)
    *readcb_ptr = bufev->readcb;
  if (writecb_ptr)
    *writecb_ptr = bufev->writecb;
  if (eventcb_ptr)
    *eventcb_ptr = bufev->errorcb;
  if (cbarg_ptr)
    *cbarg_ptr = bufev->cbarg;

  BEV_UNLOCK(bufev);
}

struct evbuffer *
bufferevent_get_input(struct bufferevent *bufev)
{
  return bufev->input;
}

struct evbuffer *
bufferevent_get_output(struct bufferevent *bufev)
{
  return bufev->output;
}

struct event_base *
bufferevent_get_base(struct bufferevent *bufev)
{
  return bufev->ev_base;
}

int
bufferevent_get_priority(const struct bufferevent *bufev)
{
  if (event_initialized(&bufev->ev_read)) {
    return event_get_priority(&bufev->ev_read);
  } else {
    return event_base_get_npriorities(bufev->ev_base) / 2;
  }
}

int
bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
{
  if (evbuffer_add(bufev->output, data, size) == -1)
    return (-1);

  return 0;
}

int
bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
  if (evbuffer_add_buffer(bufev->output, buf) == -1)
    return (-1);

  return 0;
}

size_t
bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
{
  int r = evbuffer_remove(bufev->input, data, size);

  if (r == -1)
    return 0;

  return r;
}

int
bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
  return (evbuffer_add_buffer(buf, bufev->input));
}

int
bufferevent_enable(struct bufferevent *bufev, short event)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
  short impl_events = event;
  int r = 0;

  bufferevent_incref_and_lock_(bufev);
  if (bufev_private->read_suspended)
    impl_events &= ~EV_READ;
  if (bufev_private->write_suspended)
    impl_events &= ~EV_WRITE;

  bufev->enabled |= event;

  if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
    r = -1;
  if (r)
    event_debug(("%s: cannot enable 0x%hx on %p", __func__, event, (void *)bufev));

  bufferevent_decref_and_unlock_(bufev);
  return r;
}
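
/*
 * Illustrative sketch (not part of libevent): enabling I/O.  Only EV_WRITE
 * is enabled at init time (see bufferevent_init_common_() above), so reads
 * must be enabled explicitly.  The "example_" name is hypothetical.
 */
#if 0
static void
example_start_io(struct bufferevent *bev)
{
  bufferevent_enable(bev, EV_READ | EV_WRITE);
  /* Queue some data; the transport flushes it as the socket allows. */
  bufferevent_write(bev, "hello\r\n", 7);
}
#endif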

int
bufferevent_set_timeouts(struct bufferevent *bufev,
    const struct timeval *tv_read,
    const struct timeval *tv_write)
{
  int r = 0;
  BEV_LOCK(bufev);
  if (tv_read) {
    bufev->timeout_read = *tv_read;
  } else {
    evutil_timerclear(&bufev->timeout_read);
  }
  if (tv_write) {
    bufev->timeout_write = *tv_write;
  } else {
    evutil_timerclear(&bufev->timeout_write);
  }

  if (bufev->be_ops->adj_timeouts)
    r = bufev->be_ops->adj_timeouts(bufev);
  BEV_UNLOCK(bufev);

  return r;
}

/* Obsolete; use bufferevent_set_timeouts */
void
bufferevent_settimeout(struct bufferevent *bufev,
    int timeout_read, int timeout_write)
{
  struct timeval tv_read, tv_write;
  struct timeval *ptv_read = NULL, *ptv_write = NULL;

  memset(&tv_read, 0, sizeof(tv_read));
  memset(&tv_write, 0, sizeof(tv_write));

  if (timeout_read) {
    tv_read.tv_sec = timeout_read;
    ptv_read = &tv_read;
  }
  if (timeout_write) {
    tv_write.tv_sec = timeout_write;
    ptv_write = &tv_write;
  }

  bufferevent_set_timeouts(bufev, ptv_read, ptv_write);
}
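
/*
 * Illustrative sketch (not part of libevent): the modern interface gives
 * sub-second resolution and lets a NULL pointer clear a timeout.  The
 * "example_" name is hypothetical.
 */
#if 0
static void
example_set_timeouts(struct bufferevent *bev)
{
  struct timeval read_to = { 30, 0 };  /* 30-second read timeout */
  /* NULL write timeout means "never time out on write". */
  bufferevent_set_timeouts(bev, &read_to, NULL);
}
#endif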


int
bufferevent_disable_hard_(struct bufferevent *bufev, short event)
{
  int r = 0;
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

  BEV_LOCK(bufev);
  bufev->enabled &= ~event;

  bufev_private->connecting = 0;
  if (bufev->be_ops->disable(bufev, event) < 0)
    r = -1;

  BEV_UNLOCK(bufev);
  return r;
}

int
bufferevent_disable(struct bufferevent *bufev, short event)
{
  int r = 0;

  BEV_LOCK(bufev);
  bufev->enabled &= ~event;

  if (bufev->be_ops->disable(bufev, event) < 0)
    r = -1;
  if (r)
    event_debug(("%s: cannot disable 0x%hx on %p", __func__, event, (void *)bufev));

  BEV_UNLOCK(bufev);
  return r;
}

/*
 * Sets the water marks
 */

void
bufferevent_setwatermark(struct bufferevent *bufev, short events,
    size_t lowmark, size_t highmark)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

  BEV_LOCK(bufev);
  if (events & EV_WRITE) {
    bufev->wm_write.low = lowmark;
    bufev->wm_write.high = highmark;
  }

  if (events & EV_READ) {
    bufev->wm_read.low = lowmark;
    bufev->wm_read.high = highmark;

    if (highmark) {
      /* There is now a new high-water mark for read.
         Enable the callback if needed, and see whether we
         should suspend or unsuspend reading. */

      if (bufev_private->read_watermarks_cb == NULL) {
        bufev_private->read_watermarks_cb =
            evbuffer_add_cb(bufev->input,
                bufferevent_inbuf_wm_cb,
                bufev);
      }
      evbuffer_cb_set_flags(bufev->input,
          bufev_private->read_watermarks_cb,
          EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER);

      if (evbuffer_get_length(bufev->input) >= highmark)
        bufferevent_wm_suspend_read(bufev);
      else
        bufferevent_wm_unsuspend_read(bufev);
    } else {
      /* There is now no high-water mark for read. */
      if (bufev_private->read_watermarks_cb)
        evbuffer_cb_clear_flags(bufev->input,
            bufev_private->read_watermarks_cb,
            EVBUFFER_CB_ENABLED);
      bufferevent_wm_unsuspend_read(bufev);
    }
  }
  BEV_UNLOCK(bufev);
}
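
/*
 * Illustrative sketch (not part of libevent): watermark semantics.  For
 * reads, the callback is not invoked until at least `low` bytes are
 * buffered, and reading is suspended once `high` bytes pile up (0 means
 * no limit); for writes, the callback fires once the output buffer drops
 * to `low` bytes or fewer.  The "example_" name is hypothetical.
 */
#if 0
static void
example_set_watermarks(struct bufferevent *bev)
{
  /* Wake the read callback only with >= 128 bytes buffered; stop
   * reading from the transport past 64 KiB of unread input. */
  bufferevent_setwatermark(bev, EV_READ, 128, 65536);
  /* Fire the write callback only once the output buffer is empty. */
  bufferevent_setwatermark(bev, EV_WRITE, 0, 0);
}
#endif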

int
bufferevent_getwatermark(struct bufferevent *bufev, short events,
    size_t *lowmark, size_t *highmark)
{
  if (events == EV_WRITE) {
    BEV_LOCK(bufev);
    if (lowmark)
      *lowmark = bufev->wm_write.low;
    if (highmark)
      *highmark = bufev->wm_write.high;
    BEV_UNLOCK(bufev);
    return 0;
  }

  if (events == EV_READ) {
    BEV_LOCK(bufev);
    if (lowmark)
      *lowmark = bufev->wm_read.low;
    if (highmark)
      *highmark = bufev->wm_read.high;
    BEV_UNLOCK(bufev);
    return 0;
  }
  return -1;
}

int
bufferevent_flush(struct bufferevent *bufev,
    short iotype,
    enum bufferevent_flush_mode mode)
{
  int r = -1;
  BEV_LOCK(bufev);
  if (bufev->be_ops->flush)
    r = bufev->be_ops->flush(bufev, iotype, mode);
  BEV_UNLOCK(bufev);
  return r;
}

void
bufferevent_incref_and_lock_(struct bufferevent *bufev)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
  BEV_LOCK(bufev);
  ++bufev_private->refcnt;
}

#if 0
static void
bufferevent_transfer_lock_ownership_(struct bufferevent *donor,
    struct bufferevent *recipient)
{
  struct bufferevent_private *d = BEV_UPCAST(donor);
  struct bufferevent_private *r = BEV_UPCAST(recipient);
  if (d->lock != r->lock)
    return;
  if (r->own_lock)
    return;
  if (d->own_lock) {
    d->own_lock = 0;
    r->own_lock = 1;
  }
}
#endif

int
bufferevent_decref_and_unlock_(struct bufferevent *bufev)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
  int n_cbs = 0;
#define MAX_CBS 16
  struct event_callback *cbs[MAX_CBS];

  EVUTIL_ASSERT(bufev_private->refcnt > 0);

  if (--bufev_private->refcnt) {
    BEV_UNLOCK(bufev);
    return 0;
  }

  if (bufev->be_ops->unlink)
    bufev->be_ops->unlink(bufev);

  /* Okay, we're out of references. Let's finalize this once all the
   * callbacks are done running. */
  cbs[0] = &bufev->ev_read.ev_evcallback;
  cbs[1] = &bufev->ev_write.ev_evcallback;
  cbs[2] = &bufev_private->deferred;
  n_cbs = 3;
  if (bufev_private->rate_limiting) {
    struct event *e = &bufev_private->rate_limiting->refill_bucket_event;
    if (event_initialized(e))
      cbs[n_cbs++] = &e->ev_evcallback;
  }
  n_cbs += evbuffer_get_callbacks_(bufev->input, cbs+n_cbs, MAX_CBS-n_cbs);
  n_cbs += evbuffer_get_callbacks_(bufev->output, cbs+n_cbs, MAX_CBS-n_cbs);

  event_callback_finalize_many_(bufev->ev_base, n_cbs, cbs,
      bufferevent_finalize_cb_);

#undef MAX_CBS
  BEV_UNLOCK(bufev);

  return 1;
}

static void
bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_)
{
  struct bufferevent *bufev = arg_;
  struct bufferevent *underlying;
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

  BEV_LOCK(bufev);
  underlying = bufferevent_get_underlying(bufev);

  /* Clean up the shared info */
  if (bufev->be_ops->destruct)
    bufev->be_ops->destruct(bufev);

  /* XXX what happens if refcnt for these buffers is > 1?
   * The buffers can share a lock with this bufferevent object,
   * but the lock might be destroyed below. */
  /* evbuffer will free the callbacks */
  evbuffer_free(bufev->input);
  evbuffer_free(bufev->output);

  if (bufev_private->rate_limiting) {
    if (bufev_private->rate_limiting->group)
      bufferevent_remove_from_rate_limit_group_internal_(bufev, 0);
    mm_free(bufev_private->rate_limiting);
    bufev_private->rate_limiting = NULL;
  }

  BEV_UNLOCK(bufev);

  if (bufev_private->own_lock)
    EVTHREAD_FREE_LOCK(bufev_private->lock,
        EVTHREAD_LOCKTYPE_RECURSIVE);

  /* Free the actual allocated memory. */
  mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

  /* Release the reference to underlying now that we no longer need the
   * reference to it.  We wait this long mainly in case our lock is
   * shared with underlying.
   *
   * The 'destruct' function will also drop a reference to underlying
   * if BEV_OPT_CLOSE_ON_FREE is set.
   *
   * XXX Should we/can we just refcount evbuffer/bufferevent locks?
   * It would probably save us some headaches.
   */
  if (underlying)
    bufferevent_decref_(underlying);
}

int
bufferevent_decref(struct bufferevent *bufev)
{
  BEV_LOCK(bufev);
  return bufferevent_decref_and_unlock_(bufev);
}

void
bufferevent_free(struct bufferevent *bufev)
{
  BEV_LOCK(bufev);
  bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
  bufferevent_cancel_all_(bufev);
  bufferevent_decref_and_unlock_(bufev);
}
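
/*
 * Illustrative sketch (not part of libevent): holding an extra reference
 * so the bufferevent cannot be finalized underneath other code.  Note that
 * bufferevent_free() above only clears the callbacks, cancels pending I/O,
 * and drops a reference; the memory itself is reclaimed later by
 * bufferevent_finalize_cb_() once all pending callbacks have run.  The
 * "example_" name is hypothetical.
 */
#if 0
static void
example_hold_across_handoff(struct bufferevent *bev)
{
  bufferevent_incref(bev);
  /* ... hand bev to another subsystem ... */
  bufferevent_decref(bev);  /* may schedule deferred finalization */
}
#endif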

void
bufferevent_incref(struct bufferevent *bufev)
{
  struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

  /* XXX: now that this function is public, we might want to
   * - return the count from this function
   * - create a new function to atomically grab the current refcount
   */
  BEV_LOCK(bufev);
  ++bufev_private->refcnt;
  BEV_UNLOCK(bufev);
}

int
bufferevent_enable_locking_(struct bufferevent *bufev, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
  return -1;
#else
  struct bufferevent *underlying;

  if (BEV_UPCAST(bufev)->lock)
    return -1;
  underlying = bufferevent_get_underlying(bufev);

  if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
    lock = BEV_UPCAST(underlying)->lock;
    BEV_UPCAST(bufev)->lock = lock;
    BEV_UPCAST(bufev)->own_lock = 0;
  } else if (!lock) {
    EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
    if (!lock)
      return -1;
    BEV_UPCAST(bufev)->lock = lock;
    BEV_UPCAST(bufev)->own_lock = 1;
  } else {
    BEV_UPCAST(bufev)->lock = lock;
    BEV_UPCAST(bufev)->own_lock = 0;
  }
  evbuffer_enable_locking(bufev->input, lock);
  evbuffer_enable_locking(bufev->output, lock);

  if (underlying && !BEV_UPCAST(underlying)->lock)
    bufferevent_enable_locking_(underlying, lock);

  return 0;
#endif
}

int
bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd)
{
  union bufferevent_ctrl_data d;
  int res = -1;
  d.fd = fd;
  BEV_LOCK(bev);
  if (bev->be_ops->ctrl)
    res = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
  if (res)
    event_debug(("%s: cannot set fd for %p to "EV_SOCK_FMT, __func__, (void *)bev, fd));
  BEV_UNLOCK(bev);
  return res;
}

int
bufferevent_replacefd(struct bufferevent *bev, evutil_socket_t fd)
{
  union bufferevent_ctrl_data d;
  int err = -1;
  evutil_socket_t old_fd = EVUTIL_INVALID_SOCKET;

  BEV_LOCK(bev);
  if (bev->be_ops->ctrl) {
    err = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
    if (!err) {
      old_fd = d.fd;
      if (old_fd != EVUTIL_INVALID_SOCKET) {
        err = evutil_closesocket(old_fd);
      }
    }
    if (!err) {
      d.fd = fd;
      err = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
    }
  }
  if (err)
    event_debug(("%s: cannot replace fd for %p from "EV_SOCK_FMT" to "EV_SOCK_FMT, __func__, (void *)bev, old_fd, fd));
  BEV_UNLOCK(bev);

  return err;
}

evutil_socket_t
bufferevent_getfd(struct bufferevent *bev)
{
  union bufferevent_ctrl_data d;
  int res = -1;
  d.fd = -1;
  BEV_LOCK(bev);
  if (bev->be_ops->ctrl)
    res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
  if (res)
    event_debug(("%s: cannot get fd for %p", __func__, (void *)bev));
  BEV_UNLOCK(bev);
  return (res<0) ? -1 : d.fd;
}

enum bufferevent_options
bufferevent_get_options_(struct bufferevent *bev)
{
  struct bufferevent_private *bev_p = BEV_UPCAST(bev);
  enum bufferevent_options options;

  BEV_LOCK(bev);
  options = bev_p->options;
  BEV_UNLOCK(bev);
  return options;
}


static void
bufferevent_cancel_all_(struct bufferevent *bev)
{
  union bufferevent_ctrl_data d;
  memset(&d, 0, sizeof(d));
  BEV_LOCK(bev);
  if (bev->be_ops->ctrl)
    bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d);
  BEV_UNLOCK(bev);
}

short
bufferevent_get_enabled(struct bufferevent *bufev)
{
  short r;
  BEV_LOCK(bufev);
  r = bufev->enabled;
  BEV_UNLOCK(bufev);
  return r;
}

struct bufferevent *
bufferevent_get_underlying(struct bufferevent *bev)
{
  union bufferevent_ctrl_data d;
  int res = -1;
  d.ptr = NULL;
  BEV_LOCK(bev);
  if (bev->be_ops->ctrl)
    res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d);
  BEV_UNLOCK(bev);
  return (res<0) ? NULL : d.ptr;
}

static void
bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
  struct bufferevent *bev = ctx;
  bufferevent_incref_and_lock_(bev);
  bufferevent_disable(bev, EV_READ);
  bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0);
  bufferevent_decref_and_unlock_(bev);
}

static void
bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
  struct bufferevent *bev = ctx;
  bufferevent_incref_and_lock_(bev);
  bufferevent_disable(bev, EV_WRITE);
  bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0);
  bufferevent_decref_and_unlock_(bev);
}

void
bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
{
  event_assign(&bev->ev_read, bev->ev_base, -1, EV_FINALIZE,
      bufferevent_generic_read_timeout_cb, bev);
  event_assign(&bev->ev_write, bev->ev_base, -1, EV_FINALIZE,
      bufferevent_generic_write_timeout_cb, bev);
}

int
bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
{
  const short enabled = bev->enabled;
  struct bufferevent_private *bev_p = BEV_UPCAST(bev);
  int r1=0, r2=0;
  if ((enabled & EV_READ) && !bev_p->read_suspended &&
      evutil_timerisset(&bev->timeout_read))
    r1 = event_add(&bev->ev_read, &bev->timeout_read);
  else
    r1 = event_del(&bev->ev_read);

  if ((enabled & EV_WRITE) && !bev_p->write_suspended &&
      evutil_timerisset(&bev->timeout_write) &&
      evbuffer_get_length(bev->output))
    r2 = event_add(&bev->ev_write, &bev->timeout_write);
  else
    r2 = event_del(&bev->ev_write);
  if (r1 < 0 || r2 < 0)
    return -1;
  return 0;
}

int
bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev)
{
  int r = 0;
  if (event_pending(&bev->ev_read, EV_READ, NULL)) {
    if (evutil_timerisset(&bev->timeout_read)) {
      if (bufferevent_add_event_(&bev->ev_read, &bev->timeout_read) < 0)
        r = -1;
    } else {
      event_remove_timer(&bev->ev_read);
    }
  }
  if (event_pending(&bev->ev_write, EV_WRITE, NULL)) {
    if (evutil_timerisset(&bev->timeout_write)) {
      if (bufferevent_add_event_(&bev->ev_write, &bev->timeout_write) < 0)
        r = -1;
    } else {
      event_remove_timer(&bev->ev_write);
    }
  }
  return r;
}

int
bufferevent_add_event_(struct event *ev, const struct timeval *tv)
{
  if (!evutil_timerisset(tv))
    return event_add(ev, NULL);
  else
    return event_add(ev, tv);
}

/* For use by user programs only; internally, we should be calling
   either bufferevent_incref_and_lock_(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
  bufferevent_incref_and_lock_(bev);
}

void
bufferevent_unlock(struct bufferevent *bev)
{
  bufferevent_decref_and_unlock_(bev);
}
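
/*
 * Illustrative sketch (not part of libevent): a second thread using the
 * public lock to touch a BEV_OPT_THREADSAFE bufferevent safely.  As the
 * code above shows, bufferevent_lock() also takes a reference, so the
 * object stays alive until the matching bufferevent_unlock().  The
 * "example_" name is hypothetical.
 */
#if 0
static void
example_write_from_other_thread(struct bufferevent *bev,
    const void *msg, size_t len)
{
  bufferevent_lock(bev);
  evbuffer_add(bufferevent_get_output(bev), msg, len);
  bufferevent_unlock(bev);
}
#endif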