Line  | Count  | Source  | 
1  |  | /*  | 
2  |  |  * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>  | 
3  |  |  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson  | 
4  |  |  *  | 
5  |  |  * Redistribution and use in source and binary forms, with or without  | 
6  |  |  * modification, are permitted provided that the following conditions  | 
7  |  |  * are met:  | 
8  |  |  * 1. Redistributions of source code must retain the above copyright  | 
9  |  |  *    notice, this list of conditions and the following disclaimer.  | 
10  |  |  * 2. Redistributions in binary form must reproduce the above copyright  | 
11  |  |  *    notice, this list of conditions and the following disclaimer in the  | 
12  |  |  *    documentation and/or other materials provided with the distribution.  | 
13  |  |  * 3. The name of the author may not be used to endorse or promote products  | 
14  |  |  *    derived from this software without specific prior written permission.  | 
15  |  |  *  | 
16  |  |  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR  | 
17  |  |  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES  | 
18  |  |  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  | 
19  |  |  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,  | 
20  |  |  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT  | 
21  |  |  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,  | 
22  |  |  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY  | 
23  |  |  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT  | 
24  |  |  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF  | 
25  |  |  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.  | 
26  |  |  */  | 
27  |  |  | 
28  |  | #include "event2/event-config.h"  | 
29  |  | #include "evconfig-private.h"  | 
30  |  |  | 
31  |  | #ifdef _WIN32  | 
32  |  | #include <winsock2.h>  | 
33  |  | #include <windows.h>  | 
34  |  | #include <io.h>  | 
35  |  | #endif  | 
36  |  |  | 
37  |  | #include <sys/types.h>  | 
38  |  |  | 
39  |  | #ifdef EVENT__HAVE_SYS_TIME_H  | 
40  |  | #include <sys/time.h>  | 
41  |  | #endif  | 
42  |  |  | 
43  |  | #ifdef EVENT__HAVE_SYS_SOCKET_H  | 
44  |  | #include <sys/socket.h>  | 
45  |  | #endif  | 
46  |  |  | 
47  |  | #ifdef EVENT__HAVE_SYS_UIO_H  | 
48  |  | #include <sys/uio.h>  | 
49  |  | #endif  | 
50  |  |  | 
51  |  | #ifdef EVENT__HAVE_SYS_IOCTL_H  | 
52  |  | #include <sys/ioctl.h>  | 
53  |  | #endif  | 
54  |  |  | 
55  |  | #ifdef EVENT__HAVE_SYS_MMAN_H  | 
56  |  | #include <sys/mman.h>  | 
57  |  | #endif  | 
58  |  |  | 
59  |  | #ifdef EVENT__HAVE_SYS_SENDFILE_H  | 
60  |  | #include <sys/sendfile.h>  | 
61  |  | #endif  | 
62  |  | #ifdef EVENT__HAVE_SYS_STAT_H  | 
63  |  | #include <sys/stat.h>  | 
64  |  | #endif  | 
65  |  |  | 
66  |  |  | 
67  |  | #include <errno.h>  | 
68  |  | #include <stdio.h>  | 
69  |  | #include <stdlib.h>  | 
70  |  | #include <string.h>  | 
71  |  | #ifdef EVENT__HAVE_STDARG_H  | 
72  |  | #include <stdarg.h>  | 
73  |  | #endif  | 
74  |  | #ifdef EVENT__HAVE_UNISTD_H  | 
75  |  | #include <unistd.h>  | 
76  |  | #endif  | 
77  |  | #include <limits.h>  | 
78  |  |  | 
79  |  | #include "event2/event.h"  | 
80  |  | #include "event2/buffer.h"  | 
81  |  | #include "event2/buffer_compat.h"  | 
82  |  | #include "event2/bufferevent.h"  | 
83  |  | #include "event2/bufferevent_compat.h"  | 
84  |  | #include "event2/bufferevent_struct.h"  | 
85  |  | #include "event2/thread.h"  | 
86  |  | #include "log-internal.h"  | 
87  |  | #include "mm-internal.h"  | 
88  |  | #include "util-internal.h"  | 
89  |  | #include "evthread-internal.h"  | 
90  |  | #include "evbuffer-internal.h"  | 
91  |  | #include "bufferevent-internal.h"  | 
92  |  | #include "event-internal.h"  | 
93  |  |  | 
94  |  | /* some systems do not have MAP_FAILED */  | 
95  |  | #ifndef MAP_FAILED  | 
96  |  | #define MAP_FAILED  ((void *)-1)  | 
97  |  | #endif  | 
98  |  |  | 
99  |  | /* sendfile support */  | 
100  |  | #if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)  | 
101  |  | #define USE_SENDFILE    1  | 
102  |  | #define SENDFILE_IS_LINUX 1  | 
103  |  | #elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)  | 
104  |  | #define USE_SENDFILE    1  | 
105  |  | #define SENDFILE_IS_FREEBSD 1  | 
106  |  | #elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)  | 
107  |  | #define USE_SENDFILE    1  | 
108  |  | #define SENDFILE_IS_MACOSX  1  | 
109  |  | #elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)  | 
110  |  | #define USE_SENDFILE    1  | 
111  |  | #define SENDFILE_IS_SOLARIS 1  | 
112  |  | #endif  | 
113  |  |  | 
114  |  | /* Mask of user-selectable callback flags. */  | 
115  |  | #define EVBUFFER_CB_USER_FLAGS      0xffff  | 
116  |  | /* Mask of all internal-use-only flags. */  | 
117  | 0  | #define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000  | 
118  |  |  | 
119  |  | /* Flag set if the callback is using the cb_obsolete function pointer  */  | 
120  | 0  | #define EVBUFFER_CB_OBSOLETE         0x00040000  | 
121  |  |  | 
122  |  | /* evbuffer_chain support */  | 
123  | 0  | #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)  | 
124  | 0  | #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \  | 
125  | 0  |       0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))  | 
126  |  |  | 
127  | 0  | #define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)  | 
128  | 0  | #define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)  | 
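/*
 * Editor's illustration (not part of buffer.c and carrying no coverage data):
 * how the CHAIN_SPACE_* macros above partition a chain's storage.  With, say,
 * buffer_len = 4096, misalign = 10 and off = 100, bytes [0,10) are dead space
 * left by earlier drains, bytes [10,110) hold data, CHAIN_SPACE_PTR(ch) points
 * at buffer + 110, and CHAIN_SPACE_LEN(ch) is 4096 - (10 + 100) = 3986 bytes of
 * writable tail -- or 0 if the chain is flagged EVBUFFER_IMMUTABLE.
 */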
129  |  |  | 
130  |  | /* evbuffer_ptr support */  | 
131  | 0  | #define PTR_NOT_FOUND(ptr) do {     \ | 
132  | 0  |   (ptr)->pos = -1;          \  | 
133  | 0  |   (ptr)->internal_.chain = NULL;    \  | 
134  | 0  |   (ptr)->internal_.pos_in_chain = 0;  \  | 
135  | 0  | } while (0)  | 
136  |  |  | 
137  | 0  | #define EVBUFFER_MAX_READ_DEFAULT 4096  | 
138  |  |  | 
139  |  | static void evbuffer_chain_align(struct evbuffer_chain *chain);  | 
140  |  | static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,  | 
141  |  |     size_t datalen);  | 
142  |  | static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);  | 
143  |  | static int evbuffer_ptr_memcmp(const struct evbuffer *buf,  | 
144  |  |     const struct evbuffer_ptr *pos, const char *mem, size_t len);  | 
145  |  | static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,  | 
146  |  |     size_t datlen);  | 
147  |  | static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,  | 
148  |  |     size_t howfar);  | 
149  |  | static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);  | 
150  |  | static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);  | 
151  |  |  | 
152  |  | static struct evbuffer_chain *  | 
153  |  | evbuffer_chain_new(size_t size)  | 
154  | 0  | { | 
155  | 0  |   struct evbuffer_chain *chain;  | 
156  | 0  |   size_t to_alloc;  | 
157  |  | 
158  | 0  |   if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)  | 
159  | 0  |     return (NULL);  | 
160  |  |  | 
161  | 0  |   to_alloc = size + EVBUFFER_CHAIN_SIZE;  | 
162  |  |  | 
163  |  |   /* we get everything in one chunk */  | 
164  | 0  |   if ((chain = mm_malloc(to_alloc)) == NULL)  | 
165  | 0  |     return (NULL);  | 
166  |  |  | 
167  | 0  |   memset(chain, 0, EVBUFFER_CHAIN_SIZE);  | 
168  |  | 
169  | 0  |   chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;  | 
170  |  |  | 
171  |  |   /* this way the buffer pointer can be redirected to a different address,  | 
172  |  |    * which is required for mmap for example.  | 
173  |  |    */  | 
174  | 0  |   chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);  | 
175  |  | 
176  | 0  |   chain->refcnt = 1;  | 
177  |  | 
178  | 0  |   return (chain);  | 
179  | 0  | }  | 
180  |  |  | 
181  |  | static struct evbuffer_chain *  | 
182  |  | evbuffer_chain_new_membuf(size_t size)  | 
183  | 0  | { | 
184  | 0  |   size_t to_alloc;  | 
185  |  | 
186  | 0  |   if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)  | 
187  | 0  |     return (NULL);  | 
188  |  |  | 
189  | 0  |   size += EVBUFFER_CHAIN_SIZE;  | 
190  |  |  | 
191  |  |   /* round the allocation up to the next power-of-two size that can hold the buffer */  | 
192  | 0  |   if (size < EVBUFFER_CHAIN_MAX / 2) { | 
193  | 0  |     to_alloc = MIN_BUFFER_SIZE;  | 
194  | 0  |     while (to_alloc < size) { | 
195  | 0  |       to_alloc <<= 1;  | 
196  | 0  |     }  | 
197  | 0  |   } else { | 
198  | 0  |     to_alloc = size;  | 
199  | 0  |   }  | 
200  |  | 
201  | 0  |   return evbuffer_chain_new(to_alloc - EVBUFFER_CHAIN_SIZE);  | 
202  | 0  | }  | 
203  |  |  | 
204  |  | static inline void  | 
205  |  | evbuffer_chain_free(struct evbuffer_chain *chain)  | 
206  | 0  | { | 
207  | 0  |   EVUTIL_ASSERT(chain->refcnt > 0);  | 
208  | 0  |   if (--chain->refcnt > 0) { | 
209  |  |     /* chain is still referenced by other chains */  | 
210  | 0  |     return;  | 
211  | 0  |   }  | 
212  |  |  | 
213  | 0  |   if (CHAIN_PINNED(chain)) { | 
214  |  |     /* will get freed once no longer dangling */  | 
215  | 0  |     chain->refcnt++;  | 
216  | 0  |     chain->flags |= EVBUFFER_DANGLING;  | 
217  | 0  |     return;  | 
218  | 0  |   }  | 
219  |  |  | 
220  |  |   /* safe to release chain, it's either a referencing  | 
221  |  |    * chain or all references to it have been freed */  | 
222  | 0  |   if (chain->flags & EVBUFFER_REFERENCE) { | 
223  | 0  |     struct evbuffer_chain_reference *info =  | 
224  | 0  |         EVBUFFER_CHAIN_EXTRA(  | 
225  | 0  |           struct evbuffer_chain_reference,  | 
226  | 0  |           chain);  | 
227  | 0  |     if (info->cleanupfn)  | 
228  | 0  |       (*info->cleanupfn)(chain->buffer,  | 
229  | 0  |           chain->buffer_len,  | 
230  | 0  |           info->extra);  | 
231  | 0  |   }  | 
232  | 0  |   if (chain->flags & EVBUFFER_FILESEGMENT) { | 
233  | 0  |     struct evbuffer_chain_file_segment *info =  | 
234  | 0  |         EVBUFFER_CHAIN_EXTRA(  | 
235  | 0  |           struct evbuffer_chain_file_segment,  | 
236  | 0  |           chain);  | 
237  | 0  |     if (info->segment) { | 
238  |  | #ifdef _WIN32  | 
239  |  |       if (info->segment->is_mapping)  | 
240  |  |         UnmapViewOfFile(chain->buffer);  | 
241  |  | #endif  | 
242  | 0  |       evbuffer_file_segment_free(info->segment);  | 
243  | 0  |     }  | 
244  | 0  |   }  | 
245  | 0  |   if (chain->flags & EVBUFFER_MULTICAST) { | 
246  | 0  |     struct evbuffer_multicast_parent *info =  | 
247  | 0  |         EVBUFFER_CHAIN_EXTRA(  | 
248  | 0  |           struct evbuffer_multicast_parent,  | 
249  | 0  |           chain);  | 
250  |  |     /* referencing chain is being freed, decrease  | 
251  |  |      * refcounts of source chain and associated  | 
252  |  |      * evbuffer (which get freed once both reach  | 
253  |  |      * zero) */  | 
254  | 0  |     EVUTIL_ASSERT(info->source != NULL);  | 
255  | 0  |     EVUTIL_ASSERT(info->parent != NULL);  | 
256  | 0  |     EVBUFFER_LOCK(info->source);  | 
257  | 0  |     evbuffer_chain_free(info->parent);  | 
258  | 0  |     evbuffer_decref_and_unlock_(info->source);  | 
259  | 0  |   }  | 
260  |  | 
261  | 0  |   mm_free(chain);  | 
262  | 0  | }  | 
263  |  |  | 
264  |  | static void  | 
265  |  | evbuffer_free_all_chains(struct evbuffer_chain *chain)  | 
266  | 0  | { | 
267  | 0  |   struct evbuffer_chain *next;  | 
268  | 0  |   for (; chain; chain = next) { | 
269  | 0  |     next = chain->next;  | 
270  | 0  |     evbuffer_chain_free(chain);  | 
271  | 0  |   }  | 
272  | 0  | }  | 
273  |  |  | 
274  |  | #ifndef NDEBUG  | 
275  |  | static int  | 
276  |  | evbuffer_chains_all_empty(struct evbuffer_chain *chain)  | 
277  |  | { | 
278  |  |   for (; chain; chain = chain->next) { | 
279  |  |     if (chain->off)  | 
280  |  |       return 0;  | 
281  |  |   }  | 
282  |  |   return 1;  | 
283  |  | }  | 
284  |  | #else  | 
285  |  | /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid  | 
286  |  | "unused variable" warnings. */  | 
287  | 0  | static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) { | 
288  | 0  |   return 1;  | 
289  | 0  | }  | 
290  |  | #endif  | 
291  |  |  | 
292  |  | /* Free all trailing empty chains in 'buf' that are not pinned, prior  | 
293  |  |  * to replacing them all with a new chain.  Return a pointer to the place  | 
294  |  |  * where the new chain will go.  | 
295  |  |  *  | 
296  |  |  * Internal; requires lock.  The caller must fix up buf->last and buf->first  | 
297  |  |  * as needed; they might have been freed.  | 
298  |  |  */  | 
299  |  | static struct evbuffer_chain **  | 
300  |  | evbuffer_free_trailing_empty_chains(struct evbuffer *buf)  | 
301  | 0  | { | 
302  | 0  |   struct evbuffer_chain **ch = buf->last_with_datap;  | 
303  |  |   /* Find the first victim chain.  It might be *last_with_datap */  | 
304  | 0  |   while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))  | 
305  | 0  |     ch = &(*ch)->next;  | 
306  | 0  |   if (*ch) { | 
307  | 0  |     EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));  | 
308  | 0  |     evbuffer_free_all_chains(*ch);  | 
309  | 0  |     *ch = NULL;  | 
310  | 0  |   }  | 
311  | 0  |   return ch;  | 
312  | 0  | }  | 
313  |  |  | 
314  |  | /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty  | 
315  |  |  * chains as necessary.  Requires lock.  Does not schedule callbacks.  | 
316  |  |  */  | 
317  |  | static void  | 
318  |  | evbuffer_chain_insert(struct evbuffer *buf,  | 
319  |  |     struct evbuffer_chain *chain)  | 
320  | 0  | { | 
321  | 0  |   ASSERT_EVBUFFER_LOCKED(buf);  | 
322  | 0  |   if (*buf->last_with_datap == NULL) { | 
323  |  |     /* There are no chains with data on the buffer at all. */  | 
324  | 0  |     EVUTIL_ASSERT(buf->last_with_datap == &buf->first);  | 
325  | 0  |     EVUTIL_ASSERT(buf->first == NULL);  | 
326  | 0  |     buf->first = buf->last = chain;  | 
327  | 0  |   } else { | 
328  | 0  |     struct evbuffer_chain **chp;  | 
329  | 0  |     chp = evbuffer_free_trailing_empty_chains(buf);  | 
330  | 0  |     *chp = chain;  | 
331  | 0  |     if (chain->off)  | 
332  | 0  |       buf->last_with_datap = chp;  | 
333  | 0  |     buf->last = chain;  | 
334  | 0  |   }  | 
335  | 0  |   buf->total_len += chain->off;  | 
336  | 0  | }  | 
337  |  |  | 
338  |  | static inline struct evbuffer_chain *  | 
339  |  | evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)  | 
340  | 0  | { | 
341  | 0  |   struct evbuffer_chain *chain;  | 
342  | 0  |   if ((chain = evbuffer_chain_new_membuf(datlen)) == NULL)  | 
343  | 0  |     return NULL;  | 
344  | 0  |   evbuffer_chain_insert(buf, chain);  | 
345  | 0  |   return chain;  | 
346  | 0  | }  | 
347  |  |  | 
348  |  | void  | 
349  |  | evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)  | 
350  | 0  | { | 
351  | 0  |   EVUTIL_ASSERT((chain->flags & flag) == 0);  | 
352  | 0  |   chain->flags |= flag;  | 
353  | 0  | }  | 
354  |  |  | 
355  |  | void  | 
356  |  | evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)  | 
357  | 0  | { | 
358  | 0  |   EVUTIL_ASSERT((chain->flags & flag) != 0);  | 
359  | 0  |   chain->flags &= ~flag;  | 
360  | 0  |   if (chain->flags & EVBUFFER_DANGLING)  | 
361  | 0  |     evbuffer_chain_free(chain);  | 
362  | 0  | }  | 
363  |  |  | 
364  |  | static inline void  | 
365  |  | evbuffer_chain_incref(struct evbuffer_chain *chain)  | 
366  | 0  | { | 
367  | 0  |     ++chain->refcnt;  | 
368  | 0  | }  | 
369  |  |  | 
370  |  | struct evbuffer *  | 
371  |  | evbuffer_new(void)  | 
372  | 0  | { | 
373  | 0  |   struct evbuffer *buffer;  | 
374  |  | 
375  | 0  |   buffer = mm_calloc(1, sizeof(struct evbuffer));  | 
376  | 0  |   if (buffer == NULL)  | 
377  | 0  |     return (NULL);  | 
378  |  |  | 
379  | 0  |   LIST_INIT(&buffer->callbacks);  | 
380  | 0  |   buffer->refcnt = 1;  | 
381  | 0  |   buffer->last_with_datap = &buffer->first;  | 
382  | 0  |   buffer->max_read = EVBUFFER_MAX_READ_DEFAULT;  | 
383  |  | 
384  | 0  |   return (buffer);  | 
385  | 0  | }  | 
386  |  |  | 
387  |  | int  | 
388  |  | evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)  | 
389  | 0  | { | 
390  | 0  |   EVBUFFER_LOCK(buf);  | 
391  | 0  |   buf->flags |= (ev_uint32_t)flags;  | 
392  | 0  |   EVBUFFER_UNLOCK(buf);  | 
393  | 0  |   return 0;  | 
394  | 0  | }  | 
395  |  |  | 
396  |  | int  | 
397  |  | evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)  | 
398  | 0  | { | 
399  | 0  |   EVBUFFER_LOCK(buf);  | 
400  | 0  |   buf->flags &= ~(ev_uint32_t)flags;  | 
401  | 0  |   EVBUFFER_UNLOCK(buf);  | 
402  | 0  |   return 0;  | 
403  | 0  | }  | 
404  |  |  | 
405  |  | void  | 
406  |  | evbuffer_incref_(struct evbuffer *buf)  | 
407  | 0  | { | 
408  | 0  |   EVBUFFER_LOCK(buf);  | 
409  | 0  |   ++buf->refcnt;  | 
410  | 0  |   EVBUFFER_UNLOCK(buf);  | 
411  | 0  | }  | 
412  |  |  | 
413  |  | void  | 
414  |  | evbuffer_incref_and_lock_(struct evbuffer *buf)  | 
415  | 0  | { | 
416  | 0  |   EVBUFFER_LOCK(buf);  | 
417  | 0  |   ++buf->refcnt;  | 
418  | 0  | }  | 
419  |  |  | 
420  |  | int  | 
421  |  | evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)  | 
422  | 0  | { | 
423  | 0  |   EVBUFFER_LOCK(buffer);  | 
424  | 0  |   buffer->cb_queue = base;  | 
425  | 0  |   buffer->deferred_cbs = 1;  | 
426  | 0  |   event_deferred_cb_init_(&buffer->deferred,  | 
427  | 0  |       event_base_get_npriorities(base) / 2,  | 
428  | 0  |       evbuffer_deferred_callback, buffer);  | 
429  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
430  | 0  |   return 0;  | 
431  | 0  | }  | 
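/*
 * Editor's sketch (not part of buffer.c, no coverage data): how a caller might
 * use evbuffer_defer_callbacks() above so that change notifications run from
 * the event loop instead of inline.  Assumes <event2/buffer.h> and an existing
 * event_base `base`; evbuffer_add_cb() is the public registration API from that
 * header.
 *
 *     static void on_change(struct evbuffer *b,
 *         const struct evbuffer_cb_info *info, void *arg)
 *     {
 *         (void)b; (void)arg;
 *         printf("added %zu, deleted %zu bytes\n", info->n_added, info->n_deleted);
 *     }
 *
 *     struct evbuffer *buf = evbuffer_new();
 *     evbuffer_add_cb(buf, on_change, NULL);
 *     evbuffer_defer_callbacks(buf, base);  // queue on_change behind base
 *     evbuffer_add(buf, "hi", 2);           // on_change fires from the loop, later
 */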
432  |  |  | 
433  |  | int  | 
434  |  | evbuffer_enable_locking(struct evbuffer *buf, void *lock)  | 
435  | 0  | { | 
436  |  | #ifdef EVENT__DISABLE_THREAD_SUPPORT  | 
437  |  |   return -1;  | 
438  |  | #else  | 
439  | 0  |   if (buf->lock)  | 
440  | 0  |     return -1;  | 
441  |  |  | 
442  | 0  |   if (!lock) { | 
443  | 0  |     EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);  | 
444  | 0  |     if (!lock)  | 
445  | 0  |       return -1;  | 
446  | 0  |     buf->lock = lock;  | 
447  | 0  |     buf->own_lock = 1;  | 
448  | 0  |   } else { | 
449  | 0  |     buf->lock = lock;  | 
450  | 0  |     buf->own_lock = 0;  | 
451  | 0  |   }  | 
452  |  |  | 
453  | 0  |   return 0;  | 
454  | 0  | #endif  | 
455  | 0  | }  | 
456  |  |  | 
457  |  | void  | 
458  |  | evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)  | 
459  | 0  | { | 
460  | 0  |   EVBUFFER_LOCK(buf);  | 
461  | 0  |   buf->parent = bev;  | 
462  | 0  |   EVBUFFER_UNLOCK(buf);  | 
463  | 0  | }  | 
464  |  |  | 
465  |  | static void  | 
466  |  | evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)  | 
467  | 0  | { | 
468  | 0  |   struct evbuffer_cb_entry *cbent, *next;  | 
469  | 0  |   struct evbuffer_cb_info info;  | 
470  | 0  |   size_t new_size;  | 
471  | 0  |   ev_uint32_t mask, masked_val;  | 
472  | 0  |   int clear = 1;  | 
473  |  | 
474  | 0  |   if (running_deferred) { | 
475  | 0  |     mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;  | 
476  | 0  |     masked_val = EVBUFFER_CB_ENABLED;  | 
477  | 0  |   } else if (buffer->deferred_cbs) { | 
478  | 0  |     mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;  | 
479  | 0  |     masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;  | 
480  |  |     /* Don't zero-out n_add/n_del, since the deferred callbacks  | 
481  |  |        will want to see them. */  | 
482  | 0  |     clear = 0;  | 
483  | 0  |   } else { | 
484  | 0  |     mask = EVBUFFER_CB_ENABLED;  | 
485  | 0  |     masked_val = EVBUFFER_CB_ENABLED;  | 
486  | 0  |   }  | 
487  |  | 
488  | 0  |   ASSERT_EVBUFFER_LOCKED(buffer);  | 
489  |  | 
490  | 0  |   if (LIST_EMPTY(&buffer->callbacks)) { | 
491  | 0  |     buffer->n_add_for_cb = buffer->n_del_for_cb = 0;  | 
492  | 0  |     return;  | 
493  | 0  |   }  | 
494  | 0  |   if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)  | 
495  | 0  |     return;  | 
496  |  |  | 
497  | 0  |   new_size = buffer->total_len;  | 
498  | 0  |   info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;  | 
499  | 0  |   info.n_added = buffer->n_add_for_cb;  | 
500  | 0  |   info.n_deleted = buffer->n_del_for_cb;  | 
501  | 0  |   if (clear) { | 
502  | 0  |     buffer->n_add_for_cb = 0;  | 
503  | 0  |     buffer->n_del_for_cb = 0;  | 
504  | 0  |   }  | 
505  | 0  |   for (cbent = LIST_FIRST(&buffer->callbacks);  | 
506  | 0  |        cbent != LIST_END(&buffer->callbacks);  | 
507  | 0  |        cbent = next) { | 
508  |  |     /* Get the 'next' pointer now in case this callback decides  | 
509  |  |      * to remove itself or something. */  | 
510  | 0  |     next = LIST_NEXT(cbent, next);  | 
511  |  | 
512  | 0  |     if ((cbent->flags & mask) != masked_val)  | 
513  | 0  |       continue;  | 
514  |  |  | 
515  | 0  |     if ((cbent->flags & EVBUFFER_CB_OBSOLETE))  | 
516  | 0  |       cbent->cb.cb_obsolete(buffer,  | 
517  | 0  |           info.orig_size, new_size, cbent->cbarg);  | 
518  | 0  |     else  | 
519  | 0  |       cbent->cb.cb_func(buffer, &info, cbent->cbarg);  | 
520  | 0  |   }  | 
521  | 0  | }  | 
522  |  |  | 
523  |  | void  | 
524  |  | evbuffer_invoke_callbacks_(struct evbuffer *buffer)  | 
525  | 0  | { | 
526  | 0  |   if (LIST_EMPTY(&buffer->callbacks)) { | 
527  | 0  |     buffer->n_add_for_cb = buffer->n_del_for_cb = 0;  | 
528  | 0  |     return;  | 
529  | 0  |   }  | 
530  |  |  | 
531  | 0  |   if (buffer->deferred_cbs) { | 
532  | 0  |     if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) { | 
533  | 0  |       evbuffer_incref_and_lock_(buffer);  | 
534  | 0  |       if (buffer->parent)  | 
535  | 0  |         bufferevent_incref_(buffer->parent);  | 
536  | 0  |       EVBUFFER_UNLOCK(buffer);  | 
537  | 0  |     }  | 
538  | 0  |   }  | 
539  |  | 
540  | 0  |   evbuffer_run_callbacks(buffer, 0);  | 
541  | 0  | }  | 
542  |  |  | 
543  |  | static void  | 
544  |  | evbuffer_deferred_callback(struct event_callback *cb, void *arg)  | 
545  | 0  | { | 
546  | 0  |   struct bufferevent *parent = NULL;  | 
547  | 0  |   struct evbuffer *buffer = arg;  | 
548  |  |  | 
549  |  |   /* XXXX It would be better to run these callbacks without holding the  | 
550  |  |    * lock */  | 
551  | 0  |   EVBUFFER_LOCK(buffer);  | 
552  | 0  |   parent = buffer->parent;  | 
553  | 0  |   evbuffer_run_callbacks(buffer, 1);  | 
554  | 0  |   evbuffer_decref_and_unlock_(buffer);  | 
555  | 0  |   if (parent)  | 
556  | 0  |     bufferevent_decref_(parent);  | 
557  | 0  | }  | 
558  |  |  | 
559  |  | static void  | 
560  |  | evbuffer_remove_all_callbacks(struct evbuffer *buffer)  | 
561  | 0  | { | 
562  | 0  |   struct evbuffer_cb_entry *cbent;  | 
563  |  | 
564  | 0  |   while ((cbent = LIST_FIRST(&buffer->callbacks))) { | 
565  | 0  |     LIST_REMOVE(cbent, next);  | 
566  | 0  |     mm_free(cbent);  | 
567  | 0  |   }  | 
568  | 0  | }  | 
569  |  |  | 
570  |  | void  | 
571  |  | evbuffer_decref_and_unlock_(struct evbuffer *buffer)  | 
572  | 0  | { | 
573  | 0  |   struct evbuffer_chain *chain, *next;  | 
574  | 0  |   ASSERT_EVBUFFER_LOCKED(buffer);  | 
575  |  | 
576  | 0  |   EVUTIL_ASSERT(buffer->refcnt > 0);  | 
577  |  | 
578  | 0  |   if (--buffer->refcnt > 0) { | 
579  | 0  |     EVBUFFER_UNLOCK(buffer);  | 
580  | 0  |     return;  | 
581  | 0  |   }  | 
582  |  |  | 
583  | 0  |   for (chain = buffer->first; chain != NULL; chain = next) { | 
584  | 0  |     next = chain->next;  | 
585  | 0  |     evbuffer_chain_free(chain);  | 
586  | 0  |   }  | 
587  | 0  |   evbuffer_remove_all_callbacks(buffer);  | 
588  | 0  |   if (buffer->deferred_cbs)  | 
589  | 0  |     event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);  | 
590  |  | 
591  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
592  | 0  |   if (buffer->own_lock)  | 
593  | 0  |     EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);  | 
594  | 0  |   mm_free(buffer);  | 
595  | 0  | }  | 
596  |  |  | 
597  |  | void  | 
598  |  | evbuffer_free(struct evbuffer *buffer)  | 
599  | 0  | { | 
600  | 0  |   EVBUFFER_LOCK(buffer);  | 
601  | 0  |   evbuffer_decref_and_unlock_(buffer);  | 
602  | 0  | }  | 
603  |  |  | 
604  |  | int evbuffer_set_max_read(struct evbuffer *buf, size_t max)  | 
605  | 0  | { | 
606  | 0  |   if (max > INT_MAX) { | 
607  | 0  |     return -1;  | 
608  | 0  |   }  | 
609  |  |  | 
610  | 0  |   EVBUFFER_LOCK(buf);  | 
611  | 0  |   buf->max_read = max;  | 
612  | 0  |   EVBUFFER_UNLOCK(buf);  | 
613  | 0  |   return 0;  | 
614  | 0  | }  | 
615  |  | size_t evbuffer_get_max_read(struct evbuffer *buf)  | 
616  | 0  | { | 
617  | 0  |   size_t result;  | 
618  | 0  |   EVBUFFER_LOCK(buf);  | 
619  | 0  |   result = buf->max_read;  | 
620  | 0  |   EVBUFFER_UNLOCK(buf);  | 
621  | 0  |   return result;  | 
622  | 0  | }  | 
623  |  |  | 
624  |  | void  | 
625  |  | evbuffer_lock(struct evbuffer *buf)  | 
626  | 0  | { | 
627  | 0  |   EVBUFFER_LOCK(buf);  | 
628  | 0  | }  | 
629  |  |  | 
630  |  | void  | 
631  |  | evbuffer_unlock(struct evbuffer *buf)  | 
632  | 0  | { | 
633  | 0  |   EVBUFFER_UNLOCK(buf);  | 
634  | 0  | }  | 
635  |  |  | 
636  |  | size_t  | 
637  |  | evbuffer_get_length(const struct evbuffer *buffer)  | 
638  | 0  | { | 
639  | 0  |   size_t result;  | 
640  | 0  |   EVBUFFER_LOCK(buffer);  | 
641  | 0  |   result = buffer->total_len;  | 
642  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
643  | 0  |   return result;  | 
644  | 0  | }  | 
645  |  |  | 
646  |  | size_t  | 
647  |  | evbuffer_get_contiguous_space(const struct evbuffer *buf)  | 
648  | 0  | { | 
649  | 0  |   struct evbuffer_chain *chain;  | 
650  | 0  |   size_t result;  | 
651  |  | 
652  | 0  |   EVBUFFER_LOCK(buf);  | 
653  | 0  |   chain = buf->first;  | 
654  | 0  |   result = (chain != NULL ? chain->off : 0);  | 
655  | 0  |   EVBUFFER_UNLOCK(buf);  | 
656  |  | 
657  | 0  |   return result;  | 
658  | 0  | }  | 
659  |  |  | 
660  |  | size_t  | 
661  | 0  | evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) { | 
662  | 0  |   int n;  | 
663  | 0  |   size_t res;  | 
664  | 0  |   size_t to_alloc;  | 
665  |  | 
666  | 0  |   EVBUFFER_LOCK(buf);  | 
667  |  | 
668  | 0  |   res = to_alloc = 0;  | 
669  |  | 
670  | 0  |   for (n = 0; n < n_vec; n++) { | 
671  | 0  |     to_alloc += vec[n].iov_len;  | 
672  | 0  |   }  | 
673  |  | 
674  | 0  |   if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) { | 
675  | 0  |     goto done;  | 
676  | 0  |   }  | 
677  |  |  | 
678  | 0  |   for (n = 0; n < n_vec; n++) { | 
679  |  |     /* XXX each 'add' call here does a bunch of setup that's  | 
680  |  |      * obviated by evbuffer_expand_fast_, and some cleanup that we  | 
681  |  |      * would like to do only once.  Instead we should just extract  | 
682  |  |      * the part of the code that's needed. */  | 
683  |  | 
684  | 0  |     if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) { | 
685  | 0  |       goto done;  | 
686  | 0  |     }  | 
687  |  |  | 
688  | 0  |     res += vec[n].iov_len;  | 
689  | 0  |   }  | 
690  |  |  | 
691  | 0  | done:  | 
692  | 0  |     EVBUFFER_UNLOCK(buf);  | 
693  | 0  |     return res;  | 
694  | 0  | }  | 
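/*
 * Editor's sketch (not part of buffer.c, no coverage data): handing several
 * caller-owned extents to evbuffer_add_iovec() above in a single call.  `buf`
 * is any evbuffer owned by the caller.
 *
 *     struct evbuffer_iovec v[2];
 *     v[0].iov_base = (void *)"GET /";         v[0].iov_len = 5;
 *     v[1].iov_base = (void *)" HTTP/1.1\r\n"; v[1].iov_len = 11;
 *     size_t queued = evbuffer_add_iovec(buf, v, 2);  // copies all 16 bytes in
 */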
695  |  |  | 
696  |  | int  | 
697  |  | evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,  | 
698  |  |     struct evbuffer_iovec *vec, int n_vecs)  | 
699  | 0  | { | 
700  | 0  |   struct evbuffer_chain *chain, **chainp;  | 
701  | 0  |   int n = -1;  | 
702  |  | 
703  | 0  |   EVBUFFER_LOCK(buf);  | 
704  | 0  |   if (buf->freeze_end)  | 
705  | 0  |     goto done;  | 
706  | 0  |   if (n_vecs < 1)  | 
707  | 0  |     goto done;  | 
708  | 0  |   if (n_vecs == 1) { | 
709  | 0  |     if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)  | 
710  | 0  |       goto done;  | 
711  |  |  | 
712  | 0  |     vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain);  | 
713  | 0  |     vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain);  | 
714  | 0  |     EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);  | 
715  | 0  |     n = 1;  | 
716  | 0  |   } else { | 
717  | 0  |     if (evbuffer_expand_fast_(buf, size, n_vecs)<0)  | 
718  | 0  |       goto done;  | 
719  | 0  |     n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,  | 
720  | 0  |         &chainp, 0);  | 
721  | 0  |   }  | 
722  |  |  | 
723  | 0  | done:  | 
724  | 0  |   EVBUFFER_UNLOCK(buf);  | 
725  | 0  |   return n;  | 
726  |  | 
727  | 0  | }  | 
728  |  |  | 
729  |  | static int  | 
730  |  | advance_last_with_data(struct evbuffer *buf)  | 
731  | 0  | { | 
732  | 0  |   int n = 0;  | 
733  | 0  |   struct evbuffer_chain **chainp = buf->last_with_datap;  | 
734  |  | 
735  | 0  |   ASSERT_EVBUFFER_LOCKED(buf);  | 
736  |  | 
737  | 0  |   if (!*chainp)  | 
738  | 0  |     return 0;  | 
739  |  |  | 
740  | 0  |   while ((*chainp)->next) { | 
741  | 0  |     chainp = &(*chainp)->next;  | 
742  | 0  |     if ((*chainp)->off)  | 
743  | 0  |       buf->last_with_datap = chainp;  | 
744  | 0  |     ++n;  | 
745  | 0  |   }  | 
746  | 0  |   return n;  | 
747  | 0  | }  | 
748  |  |  | 
749  |  | int  | 
750  |  | evbuffer_commit_space(struct evbuffer *buf,  | 
751  |  |     struct evbuffer_iovec *vec, int n_vecs)  | 
752  | 0  | { | 
753  | 0  |   struct evbuffer_chain *chain, **firstchainp, **chainp;  | 
754  | 0  |   int result = -1;  | 
755  | 0  |   size_t added = 0;  | 
756  | 0  |   int i;  | 
757  |  | 
758  | 0  |   EVBUFFER_LOCK(buf);  | 
759  |  | 
760  | 0  |   if (buf->freeze_end)  | 
761  | 0  |     goto done;  | 
762  | 0  |   if (n_vecs == 0) { | 
763  | 0  |     result = 0;  | 
764  | 0  |     goto done;  | 
765  | 0  |   } else if (n_vecs == 1 &&  | 
766  | 0  |       (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) { | 
767  |  |     /* The user only got or used one chain; it might not  | 
768  |  |      * be the first one with space in it. */  | 
769  | 0  |     if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))  | 
770  | 0  |       goto done;  | 
771  | 0  |     buf->last->off += vec[0].iov_len;  | 
772  | 0  |     added = vec[0].iov_len;  | 
773  | 0  |     if (added)  | 
774  | 0  |       advance_last_with_data(buf);  | 
775  | 0  |     goto okay;  | 
776  | 0  |   }  | 
777  |  |  | 
778  |  |   /* Advance 'firstchain' to the first chain with space in it. */  | 
779  | 0  |   firstchainp = buf->last_with_datap;  | 
780  | 0  |   if (!*firstchainp)  | 
781  | 0  |     goto done;  | 
782  | 0  |   if (CHAIN_SPACE_LEN(*firstchainp) == 0) { | 
783  | 0  |     firstchainp = &(*firstchainp)->next;  | 
784  | 0  |   }  | 
785  |  | 
786  | 0  |   chain = *firstchainp;  | 
787  |  |   /* pass 1: make sure that the pointers and lengths of vecs[] are in  | 
788  |  |    * bounds before we try to commit anything. */  | 
789  | 0  |   for (i=0; i<n_vecs; ++i) { | 
790  | 0  |     if (!chain)  | 
791  | 0  |       goto done;  | 
792  | 0  |     if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) ||  | 
793  | 0  |         (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))  | 
794  | 0  |       goto done;  | 
795  | 0  |     chain = chain->next;  | 
796  | 0  |   }  | 
797  |  |   /* pass 2: actually adjust all the chains. */  | 
798  | 0  |   chainp = firstchainp;  | 
799  | 0  |   for (i=0; i<n_vecs; ++i) { | 
800  | 0  |     (*chainp)->off += vec[i].iov_len;  | 
801  | 0  |     added += vec[i].iov_len;  | 
802  | 0  |     if (vec[i].iov_len) { | 
803  | 0  |       buf->last_with_datap = chainp;  | 
804  | 0  |     }  | 
805  | 0  |     chainp = &(*chainp)->next;  | 
806  | 0  |   }  | 
807  |  | 
808  | 0  | okay:  | 
809  | 0  |   buf->total_len += added;  | 
810  | 0  |   buf->n_add_for_cb += added;  | 
811  | 0  |   result = 0;  | 
812  | 0  |   evbuffer_invoke_callbacks_(buf);  | 
813  |  | 
814  | 0  | done:  | 
815  | 0  |   EVBUFFER_UNLOCK(buf);  | 
816  | 0  |   return result;  | 
817  | 0  | }  | 
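/*
 * Editor's sketch (not part of buffer.c, no coverage data): the reserve/commit
 * protocol implemented by evbuffer_reserve_space() and evbuffer_commit_space()
 * above.  Error handling is trimmed; `buf` is the caller's evbuffer.
 *
 *     struct evbuffer_iovec v[2];
 *     int n = evbuffer_reserve_space(buf, 4096, v, 2);
 *     if (n > 0) {
 *         // Fill the reserved extents, then shrink each iov_len to the number
 *         // of bytes actually produced before committing.
 *         memcpy(v[0].iov_base, "hello", 5);
 *         v[0].iov_len = 5;
 *         if (n > 1)
 *             v[1].iov_len = 0;
 *         evbuffer_commit_space(buf, v, n);  // the 5 bytes become readable
 *     }
 */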
818  |  |  | 
819  |  | static inline int  | 
820  |  | HAS_PINNED_R(struct evbuffer *buf)  | 
821  | 0  | { | 
822  | 0  |   return (buf->last && CHAIN_PINNED_R(buf->last));  | 
823  | 0  | }  | 
824  |  |  | 
825  |  | static inline void  | 
826  |  | ZERO_CHAIN(struct evbuffer *dst)  | 
827  | 0  | { | 
828  | 0  |   ASSERT_EVBUFFER_LOCKED(dst);  | 
829  | 0  |   dst->first = NULL;  | 
830  | 0  |   dst->last = NULL;  | 
831  | 0  |   dst->last_with_datap = &(dst)->first;  | 
832  | 0  |   dst->total_len = 0;  | 
833  | 0  | }  | 
834  |  |  | 
835  |  | /* Prepares the contents of src to be moved to another buffer by removing  | 
836  |  |  * read-pinned chains. The first pinned chain is saved in first, and the  | 
837  |  |  * last in last. If src has no read-pinned chains, first and last are set  | 
838  |  |  * to NULL. */  | 
839  |  | static int  | 
840  |  | PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,  | 
841  |  |     struct evbuffer_chain **last)  | 
842  | 0  | { | 
843  | 0  |   struct evbuffer_chain *chain, **pinned;  | 
844  |  | 
845  | 0  |   ASSERT_EVBUFFER_LOCKED(src);  | 
846  |  | 
847  | 0  |   if (!HAS_PINNED_R(src)) { | 
848  | 0  |     *first = *last = NULL;  | 
849  | 0  |     return 0;  | 
850  | 0  |   }  | 
851  |  |  | 
852  | 0  |   pinned = src->last_with_datap;  | 
853  | 0  |   if (!CHAIN_PINNED_R(*pinned))  | 
854  | 0  |     pinned = &(*pinned)->next;  | 
855  | 0  |   EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));  | 
856  | 0  |   chain = *first = *pinned;  | 
857  | 0  |   *last = src->last;  | 
858  |  |  | 
859  |  |   /* If there's data in the first pinned chain, we need to allocate  | 
860  |  |    * a new chain and copy the data over. */  | 
861  | 0  |   if (chain->off) { | 
862  | 0  |     struct evbuffer_chain *tmp;  | 
863  |  | 
864  | 0  |     EVUTIL_ASSERT(pinned == src->last_with_datap);  | 
865  | 0  |     tmp = evbuffer_chain_new_membuf(chain->off);  | 
866  | 0  |     if (!tmp)  | 
867  | 0  |       return -1;  | 
868  | 0  |     memcpy(tmp->buffer, chain->buffer + chain->misalign,  | 
869  | 0  |       chain->off);  | 
870  | 0  |     tmp->off = chain->off;  | 
871  | 0  |     *src->last_with_datap = tmp;  | 
872  | 0  |     src->last = tmp;  | 
873  | 0  |     chain->misalign += chain->off;  | 
874  | 0  |     chain->off = 0;  | 
875  | 0  |   } else { | 
876  | 0  |     src->last = *src->last_with_datap;  | 
877  | 0  |     *pinned = NULL;  | 
878  | 0  |   }  | 
879  |  |  | 
880  | 0  |   return 0;  | 
881  | 0  | }  | 
882  |  |  | 
883  |  | static inline void  | 
884  |  | RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,  | 
885  |  |     struct evbuffer_chain *last)  | 
886  | 0  | { | 
887  | 0  |   ASSERT_EVBUFFER_LOCKED(src);  | 
888  |  | 
889  | 0  |   if (!pinned) { | 
890  | 0  |     ZERO_CHAIN(src);  | 
891  | 0  |     return;  | 
892  | 0  |   }  | 
893  |  |  | 
894  | 0  |   src->first = pinned;  | 
895  | 0  |   src->last = last;  | 
896  | 0  |   src->last_with_datap = &src->first;  | 
897  | 0  |   src->total_len = 0;  | 
898  | 0  | }  | 
899  |  |  | 
900  |  | static inline void  | 
901  |  | COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)  | 
902  | 0  | { | 
903  | 0  |   ASSERT_EVBUFFER_LOCKED(dst);  | 
904  | 0  |   ASSERT_EVBUFFER_LOCKED(src);  | 
905  | 0  |   dst->first = src->first;  | 
906  | 0  |   if (src->last_with_datap == &src->first)  | 
907  | 0  |     dst->last_with_datap = &dst->first;  | 
908  | 0  |   else  | 
909  | 0  |     dst->last_with_datap = src->last_with_datap;  | 
910  | 0  |   dst->last = src->last;  | 
911  | 0  |   dst->total_len = src->total_len;  | 
912  | 0  | }  | 
913  |  |  | 
914  |  | static void  | 
915  |  | APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)  | 
916  | 0  | { | 
917  | 0  |   struct evbuffer_chain **chp;  | 
918  |  | 
919  | 0  |   ASSERT_EVBUFFER_LOCKED(dst);  | 
920  | 0  |   ASSERT_EVBUFFER_LOCKED(src);  | 
921  |  | 
922  | 0  |   chp = evbuffer_free_trailing_empty_chains(dst);  | 
923  | 0  |   *chp = src->first;  | 
924  |  | 
925  | 0  |   if (src->last_with_datap == &src->first)  | 
926  | 0  |     dst->last_with_datap = chp;  | 
927  | 0  |   else  | 
928  | 0  |     dst->last_with_datap = src->last_with_datap;  | 
929  | 0  |   dst->last = src->last;  | 
930  | 0  |   dst->total_len += src->total_len;  | 
931  | 0  | }  | 
932  |  |  | 
933  |  | static inline void  | 
934  |  | APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)  | 
935  | 0  | { | 
936  | 0  |   struct evbuffer_chain *tmp;  | 
937  | 0  |   struct evbuffer_chain *chain = src->first;  | 
938  | 0  |   struct evbuffer_multicast_parent *extra;  | 
939  |  | 
940  | 0  |   ASSERT_EVBUFFER_LOCKED(dst);  | 
941  | 0  |   ASSERT_EVBUFFER_LOCKED(src);  | 
942  |  | 
943  | 0  |   for (; chain; chain = chain->next) { | 
944  | 0  |     if (!chain->off || chain->flags & EVBUFFER_DANGLING) { | 
945  |  |       /* skip empty chains */  | 
946  | 0  |       continue;  | 
947  | 0  |     }  | 
948  |  |  | 
949  | 0  |     tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));  | 
950  | 0  |     if (!tmp) { | 
951  | 0  |       event_warn("%s: out of memory", __func__); | 
952  | 0  |       return;  | 
953  | 0  |     }  | 
954  | 0  |     extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);  | 
955  |  |     /* reference evbuffer containing source chain so it  | 
956  |  |      * doesn't get released while the chain is still  | 
957  |  |      * being referenced */  | 
958  | 0  |     evbuffer_incref_(src);  | 
959  | 0  |     extra->source = src;  | 
960  |  |     /* reference source chain which now becomes immutable */  | 
961  | 0  |     evbuffer_chain_incref(chain);  | 
962  | 0  |     extra->parent = chain;  | 
963  | 0  |     chain->flags |= EVBUFFER_IMMUTABLE;  | 
964  | 0  |     tmp->buffer_len = chain->buffer_len;  | 
965  | 0  |     tmp->misalign = chain->misalign;  | 
966  | 0  |     tmp->off = chain->off;  | 
967  | 0  |     tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;  | 
968  | 0  |     tmp->buffer = chain->buffer;  | 
969  | 0  |     evbuffer_chain_insert(dst, tmp);  | 
970  | 0  |   }  | 
971  | 0  | }  | 
972  |  |  | 
973  |  | static void  | 
974  |  | PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)  | 
975  | 0  | { | 
976  | 0  |   ASSERT_EVBUFFER_LOCKED(dst);  | 
977  | 0  |   ASSERT_EVBUFFER_LOCKED(src);  | 
978  | 0  |   src->last->next = dst->first;  | 
979  | 0  |   dst->first = src->first;  | 
980  | 0  |   dst->total_len += src->total_len;  | 
981  | 0  |   if (*dst->last_with_datap == NULL) { | 
982  | 0  |     if (src->last_with_datap == &(src)->first)  | 
983  | 0  |       dst->last_with_datap = &dst->first;  | 
984  | 0  |     else  | 
985  | 0  |       dst->last_with_datap = src->last_with_datap;  | 
986  | 0  |   } else if (dst->last_with_datap == &dst->first) { | 
987  | 0  |     dst->last_with_datap = &src->last->next;  | 
988  | 0  |   }  | 
989  | 0  | }  | 
990  |  |  | 
991  |  | int  | 
992  |  | evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)  | 
993  | 0  | { | 
994  | 0  |   struct evbuffer_chain *pinned, *last;  | 
995  | 0  |   size_t in_total_len, out_total_len;  | 
996  | 0  |   int result = 0;  | 
997  |  | 
998  | 0  |   EVBUFFER_LOCK2(inbuf, outbuf);  | 
999  | 0  |   in_total_len = inbuf->total_len;  | 
1000  | 0  |   out_total_len = outbuf->total_len;  | 
1001  |  | 
1002  | 0  |   if (in_total_len == 0 || outbuf == inbuf)  | 
1003  | 0  |     goto done;  | 
1004  |  |  | 
1005  | 0  |   if (outbuf->freeze_end || inbuf->freeze_start) { | 
1006  | 0  |     result = -1;  | 
1007  | 0  |     goto done;  | 
1008  | 0  |   }  | 
1009  |  |  | 
1010  | 0  |   if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { | 
1011  | 0  |     result = -1;  | 
1012  | 0  |     goto done;  | 
1013  | 0  |   }  | 
1014  |  |  | 
1015  | 0  |   if (out_total_len == 0) { | 
1016  |  |     /* There might be an empty chain at the start of outbuf; free  | 
1017  |  |      * it. */  | 
1018  | 0  |     evbuffer_free_all_chains(outbuf->first);  | 
1019  | 0  |     COPY_CHAIN(outbuf, inbuf);  | 
1020  | 0  |   } else { | 
1021  | 0  |     APPEND_CHAIN(outbuf, inbuf);  | 
1022  | 0  |   }  | 
1023  |  | 
1024  | 0  |   RESTORE_PINNED(inbuf, pinned, last);  | 
1025  |  | 
1026  | 0  |   inbuf->n_del_for_cb += in_total_len;  | 
1027  | 0  |   outbuf->n_add_for_cb += in_total_len;  | 
1028  |  | 
1029  | 0  |   evbuffer_invoke_callbacks_(inbuf);  | 
1030  | 0  |   evbuffer_invoke_callbacks_(outbuf);  | 
1031  |  | 
1032  | 0  | done:  | 
1033  | 0  |   EVBUFFER_UNLOCK2(inbuf, outbuf);  | 
1034  | 0  |   return result;  | 
1035  | 0  | }  | 
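/*
 * Editor's sketch (not part of buffer.c, no coverage data): evbuffer_add_buffer()
 * above moves data by relinking chains rather than copying; the source buffer is
 * left empty.
 *
 *     evbuffer_add(pending, "abc", 3);
 *     evbuffer_add_buffer(outgoing, pending);       // chains migrate, no memcpy
 *     assert(evbuffer_get_length(pending) == 0);    // pending was drained
 */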
1036  |  |  | 
1037  |  | int  | 
1038  |  | evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)  | 
1039  | 0  | { | 
1040  | 0  |   size_t in_total_len, out_total_len;  | 
1041  | 0  |   struct evbuffer_chain *chain;  | 
1042  | 0  |   int result = 0;  | 
1043  |  | 
1044  | 0  |   EVBUFFER_LOCK2(inbuf, outbuf);  | 
1045  | 0  |   in_total_len = inbuf->total_len;  | 
1046  | 0  |   out_total_len = outbuf->total_len;  | 
1047  | 0  |   chain = inbuf->first;  | 
1048  |  | 
1049  | 0  |   if (in_total_len == 0)  | 
1050  | 0  |     goto done;  | 
1051  |  |  | 
1052  | 0  |   if (outbuf->freeze_end || outbuf == inbuf) { | 
1053  | 0  |     result = -1;  | 
1054  | 0  |     goto done;  | 
1055  | 0  |   }  | 
1056  |  |  | 
1057  | 0  |   for (; chain; chain = chain->next) { | 
1058  | 0  |     if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) { | 
1059  |  |       /* chain type can not be referenced */  | 
1060  | 0  |       result = -1;  | 
1061  | 0  |       goto done;  | 
1062  | 0  |     }  | 
1063  | 0  |   }  | 
1064  |  |  | 
1065  | 0  |   if (out_total_len == 0) { | 
1066  |  |     /* There might be an empty chain at the start of outbuf; free  | 
1067  |  |      * it. */  | 
1068  | 0  |     evbuffer_free_all_chains(outbuf->first);  | 
1069  | 0  |   }  | 
1070  | 0  |   APPEND_CHAIN_MULTICAST(outbuf, inbuf);  | 
1071  |  | 
1072  | 0  |   outbuf->n_add_for_cb += in_total_len;  | 
1073  | 0  |   evbuffer_invoke_callbacks_(outbuf);  | 
1074  |  | 
1075  | 0  | done:  | 
1076  | 0  |   EVBUFFER_UNLOCK2(inbuf, outbuf);  | 
1077  | 0  |   return result;  | 
1078  | 0  | }  | 
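/*
 * Editor's sketch (not part of buffer.c, no coverage data): the "multicast"
 * path above lets one payload be queued on several buffers without copying.
 * `payload`/`payload_len`, `out1` and `out2` stand in for the caller's data
 * and buffers.
 *
 *     struct evbuffer *src = evbuffer_new();
 *     evbuffer_add(src, payload, payload_len);
 *     evbuffer_add_buffer_reference(out1, src);  // out1 references src's chains
 *     evbuffer_add_buffer_reference(out2, src);  // src itself is left intact
 *     evbuffer_free(src);                        // safe: chains are refcounted
 */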
1079  |  |  | 
1080  |  | int  | 
1081  |  | evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)  | 
1082  | 0  | { | 
1083  | 0  |   struct evbuffer_chain *pinned, *last;  | 
1084  | 0  |   size_t in_total_len, out_total_len;  | 
1085  | 0  |   int result = 0;  | 
1086  |  | 
1087  | 0  |   EVBUFFER_LOCK2(inbuf, outbuf);  | 
1088  |  | 
1089  | 0  |   in_total_len = inbuf->total_len;  | 
1090  | 0  |   out_total_len = outbuf->total_len;  | 
1091  |  | 
1092  | 0  |   if (!in_total_len || inbuf == outbuf)  | 
1093  | 0  |     goto done;  | 
1094  |  |  | 
1095  | 0  |   if (outbuf->freeze_start || inbuf->freeze_start) { | 
1096  | 0  |     result = -1;  | 
1097  | 0  |     goto done;  | 
1098  | 0  |   }  | 
1099  |  |  | 
1100  | 0  |   if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { | 
1101  | 0  |     result = -1;  | 
1102  | 0  |     goto done;  | 
1103  | 0  |   }  | 
1104  |  |  | 
1105  | 0  |   if (out_total_len == 0) { | 
1106  |  |     /* There might be an empty chain at the start of outbuf; free  | 
1107  |  |      * it. */  | 
1108  | 0  |     evbuffer_free_all_chains(outbuf->first);  | 
1109  | 0  |     COPY_CHAIN(outbuf, inbuf);  | 
1110  | 0  |   } else { | 
1111  | 0  |     PREPEND_CHAIN(outbuf, inbuf);  | 
1112  | 0  |   }  | 
1113  |  | 
1114  | 0  |   RESTORE_PINNED(inbuf, pinned, last);  | 
1115  |  | 
1116  | 0  |   inbuf->n_del_for_cb += in_total_len;  | 
1117  | 0  |   outbuf->n_add_for_cb += in_total_len;  | 
1118  |  | 
1119  | 0  |   evbuffer_invoke_callbacks_(inbuf);  | 
1120  | 0  |   evbuffer_invoke_callbacks_(outbuf);  | 
1121  | 0  | done:  | 
1122  | 0  |   EVBUFFER_UNLOCK2(inbuf, outbuf);  | 
1123  | 0  |   return result;  | 
1124  | 0  | }  | 
1125  |  |  | 
1126  |  | int  | 
1127  |  | evbuffer_drain(struct evbuffer *buf, size_t len)  | 
1128  | 0  | { | 
1129  | 0  |   struct evbuffer_chain *chain, *next;  | 
1130  | 0  |   size_t remaining, old_len;  | 
1131  | 0  |   int result = 0;  | 
1132  |  | 
1133  | 0  |   EVBUFFER_LOCK(buf);  | 
1134  | 0  |   old_len = buf->total_len;  | 
1135  |  | 
1136  | 0  |   if (old_len == 0)  | 
1137  | 0  |     goto done;  | 
1138  |  |  | 
1139  | 0  |   if (buf->freeze_start) { | 
1140  | 0  |     result = -1;  | 
1141  | 0  |     goto done;  | 
1142  | 0  |   }  | 
1143  |  |  | 
1144  | 0  |   if (len >= old_len && !HAS_PINNED_R(buf)) { | 
1145  | 0  |     len = old_len;  | 
1146  | 0  |     for (chain = buf->first; chain != NULL; chain = next) { | 
1147  | 0  |       next = chain->next;  | 
1148  | 0  |       evbuffer_chain_free(chain);  | 
1149  | 0  |     }  | 
1150  |  | 
  | 
1151  | 0  |     ZERO_CHAIN(buf);  | 
1152  | 0  |   } else { | 
1153  | 0  |     if (len >= old_len)  | 
1154  | 0  |       len = old_len;  | 
1155  |  | 
1156  | 0  |     buf->total_len -= len;  | 
1157  | 0  |     remaining = len;  | 
1158  | 0  |     for (chain = buf->first;  | 
1159  | 0  |          remaining >= chain->off;  | 
1160  | 0  |          chain = next) { | 
1161  | 0  |       next = chain->next;  | 
1162  | 0  |       remaining -= chain->off;  | 
1163  |  | 
1164  | 0  |       if (chain == *buf->last_with_datap) { | 
1165  | 0  |         buf->last_with_datap = &buf->first;  | 
1166  | 0  |       }  | 
1167  | 0  |       if (&chain->next == buf->last_with_datap)  | 
1168  | 0  |         buf->last_with_datap = &buf->first;  | 
1169  |  | 
1170  | 0  |       if (CHAIN_PINNED_R(chain)) { | 
1171  | 0  |         EVUTIL_ASSERT(remaining == 0);  | 
1172  | 0  |         chain->misalign += chain->off;  | 
1173  | 0  |         chain->off = 0;  | 
1174  | 0  |         break;  | 
1175  | 0  |       } else  | 
1176  | 0  |         evbuffer_chain_free(chain);  | 
1177  | 0  |     }  | 
1178  |  | 
1179  | 0  |     buf->first = chain;  | 
1180  | 0  |     EVUTIL_ASSERT(remaining <= chain->off);  | 
1181  | 0  |     chain->misalign += remaining;  | 
1182  | 0  |     chain->off -= remaining;  | 
1183  | 0  |   }  | 
1184  |  | 
1185  | 0  |   buf->n_del_for_cb += len;  | 
1186  |  |   /* Tell someone about changes in this buffer */  | 
1187  | 0  |   evbuffer_invoke_callbacks_(buf);  | 
1188  |  | 
1189  | 0  | done:  | 
1190  | 0  |   EVBUFFER_UNLOCK(buf);  | 
1191  | 0  |   return result;  | 
1192  | 0  | }  | 
1193  |  |  | 
1194  |  | /* Reads data from an event buffer and drains the bytes read */  | 
1195  |  | int  | 
1196  |  | evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)  | 
1197  | 0  | { | 
1198  | 0  |   ev_ssize_t n;  | 
1199  | 0  |   EVBUFFER_LOCK(buf);  | 
1200  | 0  |   n = evbuffer_copyout_from(buf, NULL, data_out, datlen);  | 
1201  | 0  |   if (n > 0) { | 
1202  | 0  |     if (evbuffer_drain(buf, n)<0)  | 
1203  | 0  |       n = -1;  | 
1204  | 0  |   }  | 
1205  | 0  |   EVBUFFER_UNLOCK(buf);  | 
1206  | 0  |   return (int)n;  | 
1207  | 0  | }  | 
1208  |  |  | 
1209  |  | ev_ssize_t  | 
1210  |  | evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)  | 
1211  | 0  | { | 
1212  | 0  |   return evbuffer_copyout_from(buf, NULL, data_out, datlen);  | 
1213  | 0  | }  | 
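/*
 * Editor's sketch (not part of buffer.c, no coverage data): the difference
 * between the two calls above -- evbuffer_remove() drains what it reads,
 * evbuffer_copyout() only peeks.
 *
 *     char tmp[64];
 *     ev_ssize_t peeked = evbuffer_copyout(buf, tmp, sizeof(tmp)); // data stays queued
 *     int taken = evbuffer_remove(buf, tmp, sizeof(tmp));          // data is consumed
 */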
1214  |  |  | 
1215  |  | ev_ssize_t  | 
1216  |  | evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,  | 
1217  |  |     void *data_out, size_t datlen)  | 
1218  | 0  | { | 
1219  |  |   /*XXX fails badly on sendfile case. */  | 
1220  | 0  |   struct evbuffer_chain *chain;  | 
1221  | 0  |   char *data = data_out;  | 
1222  | 0  |   size_t nread;  | 
1223  | 0  |   ev_ssize_t result = 0;  | 
1224  | 0  |   size_t pos_in_chain;  | 
1225  |  | 
1226  | 0  |   EVBUFFER_LOCK(buf);  | 
1227  |  | 
1228  | 0  |   if (pos) { | 
1229  | 0  |     if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) { | 
1230  | 0  |       result = -1;  | 
1231  | 0  |       goto done;  | 
1232  | 0  |     }  | 
1233  | 0  |     chain = pos->internal_.chain;  | 
1234  | 0  |     pos_in_chain = pos->internal_.pos_in_chain;  | 
1235  | 0  |     if (datlen + pos->pos > buf->total_len)  | 
1236  | 0  |       datlen = buf->total_len - pos->pos;  | 
1237  | 0  |   } else { | 
1238  | 0  |     chain = buf->first;  | 
1239  | 0  |     pos_in_chain = 0;  | 
1240  | 0  |     if (datlen > buf->total_len)  | 
1241  | 0  |       datlen = buf->total_len;  | 
1242  | 0  |   }  | 
1243  |  |  | 
1244  |  |  | 
1245  | 0  |   if (datlen == 0)  | 
1246  | 0  |     goto done;  | 
1247  |  |  | 
1248  | 0  |   if (buf->freeze_start) { | 
1249  | 0  |     result = -1;  | 
1250  | 0  |     goto done;  | 
1251  | 0  |   }  | 
1252  |  |  | 
1253  | 0  |   nread = datlen;  | 
1254  |  | 
1255  | 0  |   while (datlen && datlen >= chain->off - pos_in_chain) { | 
1256  | 0  |     size_t copylen = chain->off - pos_in_chain;  | 
1257  | 0  |     memcpy(data,  | 
1258  | 0  |         chain->buffer + chain->misalign + pos_in_chain,  | 
1259  | 0  |         copylen);  | 
1260  | 0  |     data += copylen;  | 
1261  | 0  |     datlen -= copylen;  | 
1262  |  | 
1263  | 0  |     chain = chain->next;  | 
1264  | 0  |     pos_in_chain = 0;  | 
1265  | 0  |     EVUTIL_ASSERT(chain || datlen==0);  | 
1266  | 0  |   }  | 
1267  |  | 
1268  | 0  |   if (datlen) { | 
1269  | 0  |     EVUTIL_ASSERT(chain);  | 
1270  | 0  |     EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);  | 
1271  |  | 
1272  | 0  |     memcpy(data, chain->buffer + chain->misalign + pos_in_chain,  | 
1273  | 0  |         datlen);  | 
1274  | 0  |   }  | 
1275  |  | 
1276  | 0  |   result = nread;  | 
1277  | 0  | done:  | 
1278  | 0  |   EVBUFFER_UNLOCK(buf);  | 
1279  | 0  |   return result;  | 
1280  | 0  | }  | 
1281  |  |  | 
1282  |  | /* reads data from the src buffer to the dst buffer, avoiding memcpy where  | 
1283  |  |  * possible. */  | 
1284  |  | /*  XXXX should return ev_ssize_t */  | 
1285  |  | int  | 
1286  |  | evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,  | 
1287  |  |     size_t datlen)  | 
1288  | 0  | { | 
1289  |  |   /*XXX We should have an option to force this to be zero-copy.*/  | 
1290  |  |  | 
1291  |  |   /*XXX can fail badly on sendfile case. */  | 
1292  | 0  |   struct evbuffer_chain *chain, *previous;  | 
1293  | 0  |   size_t nread = 0;  | 
1294  | 0  |   int result;  | 
1295  |  | 
1296  | 0  |   EVBUFFER_LOCK2(src, dst);  | 
1297  |  | 
1298  | 0  |   chain = previous = src->first;  | 
1299  |  | 
1300  | 0  |   if (datlen == 0 || dst == src) { | 
1301  | 0  |     result = 0;  | 
1302  | 0  |     goto done;  | 
1303  | 0  |   }  | 
1304  |  |  | 
1305  | 0  |   if (dst->freeze_end || src->freeze_start) { | 
1306  | 0  |     result = -1;  | 
1307  | 0  |     goto done;  | 
1308  | 0  |   }  | 
1309  |  |  | 
1310  |  |   /* short-cut if we are asked for at least as much data as is buffered */  | 
1311  | 0  |   if (datlen >= src->total_len) { | 
1312  | 0  |     datlen = src->total_len;  | 
1313  | 0  |     evbuffer_add_buffer(dst, src);  | 
1314  | 0  |     result = (int)datlen; /*XXXX should return ev_ssize_t*/  | 
1315  | 0  |     goto done;  | 
1316  | 0  |   }  | 
1317  |  |  | 
1318  |  |   /* removes chains if possible */  | 
1319  | 0  |   while (chain->off <= datlen) { | 
1320  |  |     /* We can't remove the last chain with data from src unless we  | 
1321  |  |      * remove all chains, in which case we would have done the if  | 
1322  |  |      * block above */  | 
1323  | 0  |     EVUTIL_ASSERT(chain != *src->last_with_datap);  | 
1324  | 0  |     nread += chain->off;  | 
1325  | 0  |     datlen -= chain->off;  | 
1326  | 0  |     previous = chain;  | 
1327  | 0  |     if (src->last_with_datap == &chain->next)  | 
1328  | 0  |       src->last_with_datap = &src->first;  | 
1329  | 0  |     chain = chain->next;  | 
1330  | 0  |   }  | 
1331  |  | 
1332  | 0  |   if (chain != src->first) { | 
1333  |  |     /* we can remove the chain */  | 
1334  | 0  |     struct evbuffer_chain **chp;  | 
1335  | 0  |     chp = evbuffer_free_trailing_empty_chains(dst);  | 
1336  |  | 
1337  | 0  |     if (dst->first == NULL) { | 
1338  | 0  |       dst->first = src->first;  | 
1339  | 0  |     } else { | 
1340  | 0  |       *chp = src->first;  | 
1341  | 0  |     }  | 
1342  | 0  |     dst->last = previous;  | 
1343  | 0  |     previous->next = NULL;  | 
1344  | 0  |     src->first = chain;  | 
1345  | 0  |     advance_last_with_data(dst);  | 
1346  |  | 
1347  | 0  |     dst->total_len += nread;  | 
1348  | 0  |     dst->n_add_for_cb += nread;  | 
1349  | 0  |   }  | 
1350  |  |  | 
1351  |  |   /* we know that there is more data in the src buffer than  | 
1352  |  |    * we want to read, so we manually drain the chain */  | 
1353  | 0  |   evbuffer_add(dst, chain->buffer + chain->misalign, datlen);  | 
1354  | 0  |   chain->misalign += datlen;  | 
1355  | 0  |   chain->off -= datlen;  | 
1356  | 0  |   nread += datlen;  | 
1357  |  |  | 
1358  |  |   /* You might think we would want to increment dst->n_add_for_cb  | 
1359  |  |    * here too.  But evbuffer_add above already took care of that.  | 
1360  |  |    */  | 
1361  | 0  |   src->total_len -= nread;  | 
1362  | 0  |   src->n_del_for_cb += nread;  | 
1363  |  | 
1364  | 0  |   if (nread) { | 
1365  | 0  |     evbuffer_invoke_callbacks_(dst);  | 
1366  | 0  |     evbuffer_invoke_callbacks_(src);  | 
1367  | 0  |   }  | 
1368  | 0  |   result = (int)nread;/*XXXX should change return type */  | 
1369  |  | 
1370  | 0  | done:  | 
1371  | 0  |   EVBUFFER_UNLOCK2(src, dst);  | 
1372  | 0  |   return result;  | 
1373  | 0  | }  | 
1374  |  |  | 
1375  |  | unsigned char *  | 
1376  |  | evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)  | 
1377  | 0  | { | 
1378  | 0  |   struct evbuffer_chain *chain, *next, *tmp, *last_with_data;  | 
1379  | 0  |   unsigned char *buffer, *result = NULL;  | 
1380  | 0  |   ev_ssize_t remaining;  | 
1381  | 0  |   int removed_last_with_data = 0;  | 
1382  | 0  |   int removed_last_with_datap = 0;  | 
1383  |  | 
1384  | 0  |   EVBUFFER_LOCK(buf);  | 
1385  |  | 
1386  | 0  |   chain = buf->first;  | 
1387  |  | 
1388  | 0  |   if (size < 0)  | 
1389  | 0  |     size = buf->total_len;  | 
1390  |  |   /* if size > buf->total_len, we cannot guarantee to the user that she  | 
1391  |  |    * is going to have a long enough buffer afterwards; so we return  | 
1392  |  |    * NULL */  | 
1393  | 0  |   if (size == 0 || (size_t)size > buf->total_len)  | 
1394  | 0  |     goto done;  | 
1395  |  |  | 
1396  |  |   /* No need to pull up anything; the first size bytes are  | 
1397  |  |    * already here. */  | 
1398  | 0  |   if (chain->off >= (size_t)size) { | 
1399  | 0  |     result = chain->buffer + chain->misalign;  | 
1400  | 0  |     goto done;  | 
1401  | 0  |   }  | 
1402  |  |  | 
1403  |  |   /* Make sure that none of the chains we need to copy from is pinned. */  | 
1404  | 0  |   remaining = size - chain->off;  | 
1405  | 0  |   EVUTIL_ASSERT(remaining >= 0);  | 
1406  | 0  |   for (tmp=chain->next; tmp; tmp=tmp->next) { | 
1407  | 0  |     if (CHAIN_PINNED(tmp))  | 
1408  | 0  |       goto done;  | 
1409  | 0  |     if (tmp->off >= (size_t)remaining)  | 
1410  | 0  |       break;  | 
1411  | 0  |     remaining -= tmp->off;  | 
1412  | 0  |   }  | 
1413  |  |  | 
1414  | 0  |   if (CHAIN_PINNED(chain)) { | 
1415  | 0  |     size_t old_off = chain->off;  | 
1416  | 0  |     if (CHAIN_SPACE_LEN(chain) < size - chain->off) { | 
1417  |  |       /* not enough room at end of chunk. */  | 
1418  | 0  |       goto done;  | 
1419  | 0  |     }  | 
1420  | 0  |     buffer = CHAIN_SPACE_PTR(chain);  | 
1421  | 0  |     tmp = chain;  | 
1422  | 0  |     tmp->off = size;  | 
1423  | 0  |     size -= old_off;  | 
1424  | 0  |     chain = chain->next;  | 
1425  | 0  |   } else if (chain->buffer_len - chain->misalign >= (size_t)size) { | 
1426  |  |     /* already have enough space in the first chain */  | 
1427  | 0  |     size_t old_off = chain->off;  | 
1428  | 0  |     buffer = chain->buffer + chain->misalign + chain->off;  | 
1429  | 0  |     tmp = chain;  | 
1430  | 0  |     tmp->off = size;  | 
1431  | 0  |     size -= old_off;  | 
1432  | 0  |     chain = chain->next;  | 
1433  | 0  |   } else { | 
1434  | 0  |     if ((tmp = evbuffer_chain_new_membuf(size)) == NULL) { | 
1435  | 0  |       event_warn("%s: out of memory", __func__); | 
1436  | 0  |       goto done;  | 
1437  | 0  |     }  | 
1438  | 0  |     buffer = tmp->buffer;  | 
1439  | 0  |     tmp->off = size;  | 
1440  | 0  |     buf->first = tmp;  | 
1441  | 0  |   }  | 
1442  |  |  | 
1443  |  |   /* TODO(niels): deal with buffers that point to NULL like sendfile */  | 
1444  |  |  | 
1445  |  |   /* Copy and free every chunk that will be entirely pulled into tmp */  | 
1446  | 0  |   last_with_data = *buf->last_with_datap;  | 
1447  | 0  |   for (; chain != NULL && (size_t)size >= chain->off; chain = next) { | 
1448  | 0  |     next = chain->next;  | 
1449  |  | 
  | 
1450  | 0  |     if (chain->buffer) { | 
1451  | 0  |       memcpy(buffer, chain->buffer + chain->misalign, chain->off);  | 
1452  | 0  |       size -= chain->off;  | 
1453  | 0  |       buffer += chain->off;  | 
1454  | 0  |     }  | 
1455  | 0  |     if (chain == last_with_data)  | 
1456  | 0  |       removed_last_with_data = 1;  | 
1457  | 0  |     if (&chain->next == buf->last_with_datap)  | 
1458  | 0  |       removed_last_with_datap = 1;  | 
1459  |  | 
  | 
1460  | 0  |     evbuffer_chain_free(chain);  | 
1461  | 0  |   }  | 
1462  |  | 
  | 
1463  | 0  |   if (chain != NULL) { | 
1464  | 0  |     memcpy(buffer, chain->buffer + chain->misalign, size);  | 
1465  | 0  |     chain->misalign += size;  | 
1466  | 0  |     chain->off -= size;  | 
1467  | 0  |   } else { | 
1468  | 0  |     buf->last = tmp;  | 
1469  | 0  |   }  | 
1470  |  | 
  | 
1471  | 0  |   tmp->next = chain;  | 
1472  |  | 
  | 
1473  | 0  |   if (removed_last_with_data) { | 
1474  | 0  |     buf->last_with_datap = &buf->first;  | 
1475  | 0  |   } else if (removed_last_with_datap) { | 
1476  | 0  |     if (buf->first->next && buf->first->next->off)  | 
1477  | 0  |       buf->last_with_datap = &buf->first->next;  | 
1478  | 0  |     else  | 
1479  | 0  |       buf->last_with_datap = &buf->first;  | 
1480  | 0  |   }  | 
1481  |  | 
  | 
1482  | 0  |   result = (tmp->buffer + tmp->misalign);  | 
1483  |  | 
  | 
1484  | 0  | done:  | 
1485  | 0  |   EVBUFFER_UNLOCK(buf);  | 
1486  | 0  |   return result;  | 
1487  | 0  | }  | 
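/*
 * Illustrative usage sketch (not part of the original source): a protocol
 * parser that needs the first bytes of the buffer contiguous in memory can
 * call evbuffer_pullup() with exactly the amount it needs.  The 16-byte
 * header size and example_peek_header() are hypothetical.
 */
#include <event2/buffer.h>

static int
example_peek_header(struct evbuffer *input)
{
	unsigned char *mem;

	if (evbuffer_get_length(input) < 16)
		return 0;		/* not enough data buffered yet */

	/* Linearize only what we need; pulling up more copies more. */
	mem = evbuffer_pullup(input, 16);
	if (mem == NULL)
		return -1;		/* allocation failed or chains pinned */

	/* ... parse the 16 header bytes at mem; they stay in the buffer ... */
	return 1;
}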
1488  |  |  | 
1489  |  | /*  | 
1490  |  |  * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.  | 
1491  |  |  * The returned buffer needs to be freed by the caller.  | 
1492  |  |  */  | 
1493  |  | char *  | 
1494  |  | evbuffer_readline(struct evbuffer *buffer)  | 
1495  | 0  | { | 
1496  | 0  |   return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);  | 
1497  | 0  | }  | 
1498  |  |  | 
1499  |  | static inline ev_ssize_t  | 
1500  |  | evbuffer_strchr(struct evbuffer_ptr *it, const char chr)  | 
1501  | 0  | { | 
1502  | 0  |   struct evbuffer_chain *chain = it->internal_.chain;  | 
1503  | 0  |   size_t i = it->internal_.pos_in_chain;  | 
1504  | 0  |   while (chain != NULL) { | 
1505  | 0  |     char *buffer = (char *)chain->buffer + chain->misalign;  | 
1506  | 0  |     char *cp = memchr(buffer+i, chr, chain->off-i);  | 
1507  | 0  |     if (cp) { | 
1508  | 0  |       it->internal_.chain = chain;  | 
1509  | 0  |       it->internal_.pos_in_chain = cp - buffer;  | 
1510  | 0  |       it->pos += (cp - buffer - i);  | 
1511  | 0  |       return it->pos;  | 
1512  | 0  |     }  | 
1513  | 0  |     it->pos += chain->off - i;  | 
1514  | 0  |     i = 0;  | 
1515  | 0  |     chain = chain->next;  | 
1516  | 0  |   }  | 
1517  |  |  | 
1518  | 0  |   return (-1);  | 
1519  | 0  | }  | 
1520  |  |  | 
1521  |  | static inline char *  | 
1522  |  | find_eol_char(char *s, size_t len)  | 
1523  | 0  | { | 
1524  | 0  | #define CHUNK_SZ 128  | 
1525  |  |   /* Lots of benchmarking found this approach to be faster in practice  | 
1526  |  |    * than doing two memchrs over the whole buffer, doing a memchr on each  | 
1527  |  |    * char of the buffer, or trying to emulate memchr by hand. */  | 
1528  | 0  |   char *s_end, *cr, *lf;  | 
1529  | 0  |   s_end = s+len;  | 
1530  | 0  |   while (s < s_end) { | 
1531  | 0  |     size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);  | 
1532  | 0  |     cr = memchr(s, '\r', chunk);  | 
1533  | 0  |     lf = memchr(s, '\n', chunk);  | 
1534  | 0  |     if (cr) { | 
1535  | 0  |       if (lf && lf < cr)  | 
1536  | 0  |         return lf;  | 
1537  | 0  |       return cr;  | 
1538  | 0  |     } else if (lf) { | 
1539  | 0  |       return lf;  | 
1540  | 0  |     }  | 
1541  | 0  |     s += CHUNK_SZ;  | 
1542  | 0  |   }  | 
1543  |  |  | 
1544  | 0  |   return NULL;  | 
1545  | 0  | #undef CHUNK_SZ  | 
1546  | 0  | }  | 
1547  |  |  | 
1548  |  | static ev_ssize_t  | 
1549  |  | evbuffer_find_eol_char(struct evbuffer_ptr *it)  | 
1550  | 0  | { | 
1551  | 0  |   struct evbuffer_chain *chain = it->internal_.chain;  | 
1552  | 0  |   size_t i = it->internal_.pos_in_chain;  | 
1553  | 0  |   while (chain != NULL) { | 
1554  | 0  |     char *buffer = (char *)chain->buffer + chain->misalign;  | 
1555  | 0  |     char *cp = find_eol_char(buffer+i, chain->off-i);  | 
1556  | 0  |     if (cp) { | 
1557  | 0  |       it->internal_.chain = chain;  | 
1558  | 0  |       it->internal_.pos_in_chain = cp - buffer;  | 
1559  | 0  |       it->pos += (cp - buffer) - i;  | 
1560  | 0  |       return it->pos;  | 
1561  | 0  |     }  | 
1562  | 0  |     it->pos += chain->off - i;  | 
1563  | 0  |     i = 0;  | 
1564  | 0  |     chain = chain->next;  | 
1565  | 0  |   }  | 
1566  |  |  | 
1567  | 0  |   return (-1);  | 
1568  | 0  | }  | 
1569  |  |  | 
1570  |  | static inline size_t  | 
1571  |  | evbuffer_strspn(  | 
1572  |  |   struct evbuffer_ptr *ptr, const char *chrset)  | 
1573  | 0  | { | 
1574  | 0  |   size_t count = 0;  | 
1575  | 0  |   struct evbuffer_chain *chain = ptr->internal_.chain;  | 
1576  | 0  |   size_t i = ptr->internal_.pos_in_chain;  | 
1577  |  | 
  | 
1578  | 0  |   if (!chain)  | 
1579  | 0  |     return 0;  | 
1580  |  |  | 
1581  | 0  |   while (1) { | 
1582  | 0  |     char *buffer = (char *)chain->buffer + chain->misalign;  | 
1583  | 0  |     for (; i < chain->off; ++i) { | 
1584  | 0  |       const char *p = chrset;  | 
1585  | 0  |       while (*p) { | 
1586  | 0  |         if (buffer[i] == *p++)  | 
1587  | 0  |           goto next;  | 
1588  | 0  |       }  | 
1589  | 0  |       ptr->internal_.chain = chain;  | 
1590  | 0  |       ptr->internal_.pos_in_chain = i;  | 
1591  | 0  |       ptr->pos += count;  | 
1592  | 0  |       return count;  | 
1593  | 0  |     next:  | 
1594  | 0  |       ++count;  | 
1595  | 0  |     }  | 
1596  | 0  |     i = 0;  | 
1597  |  | 
  | 
1598  | 0  |     if (! chain->next) { | 
1599  | 0  |       ptr->internal_.chain = chain;  | 
1600  | 0  |       ptr->internal_.pos_in_chain = i;  | 
1601  | 0  |       ptr->pos += count;  | 
1602  | 0  |       return count;  | 
1603  | 0  |     }  | 
1604  |  |  | 
1605  | 0  |     chain = chain->next;  | 
1606  | 0  |   }  | 
1607  | 0  | }  | 
1608  |  |  | 
1609  |  |  | 
1610  |  | static inline int  | 
1611  |  | evbuffer_getchr(struct evbuffer_ptr *it)  | 
1612  | 0  | { | 
1613  | 0  |   struct evbuffer_chain *chain = it->internal_.chain;  | 
1614  | 0  |   size_t off = it->internal_.pos_in_chain;  | 
1615  |  | 
  | 
1616  | 0  |   if (chain == NULL)  | 
1617  | 0  |     return -1;  | 
1618  |  |  | 
1619  | 0  |   return (unsigned char)chain->buffer[chain->misalign + off];  | 
1620  | 0  | }  | 
1621  |  |  | 
1622  |  | struct evbuffer_ptr  | 
1623  |  | evbuffer_search_eol(struct evbuffer *buffer,  | 
1624  |  |     struct evbuffer_ptr *start, size_t *eol_len_out,  | 
1625  |  |     enum evbuffer_eol_style eol_style)  | 
1626  | 0  | { | 
1627  | 0  |   struct evbuffer_ptr it, it2;  | 
1628  | 0  |   size_t extra_drain = 0;  | 
1629  | 0  |   int ok = 0;  | 
1630  |  |  | 
1631  |  |   /* Avoid locking in trivial edge cases */  | 
1632  | 0  |   if (start && start->internal_.chain == NULL) { | 
1633  | 0  |     PTR_NOT_FOUND(&it);  | 
1634  | 0  |     if (eol_len_out)  | 
1635  | 0  |       *eol_len_out = extra_drain;  | 
1636  | 0  |     return it;  | 
1637  | 0  |   }  | 
1638  |  |  | 
1639  | 0  |   EVBUFFER_LOCK(buffer);  | 
1640  |  | 
  | 
1641  | 0  |   if (start) { | 
1642  | 0  |     memcpy(&it, start, sizeof(it));  | 
1643  | 0  |   } else { | 
1644  | 0  |     it.pos = 0;  | 
1645  | 0  |     it.internal_.chain = buffer->first;  | 
1646  | 0  |     it.internal_.pos_in_chain = 0;  | 
1647  | 0  |   }  | 
1648  |  |  | 
1649  |  |   /* the eol_style determines our first stop character and how many  | 
1650  |  |    * characters we are going to drain afterwards. */  | 
1651  | 0  |   switch (eol_style) { | 
1652  | 0  |   case EVBUFFER_EOL_ANY:  | 
1653  | 0  |     if (evbuffer_find_eol_char(&it) < 0)  | 
1654  | 0  |       goto done;  | 
1655  | 0  |     memcpy(&it2, &it, sizeof(it));  | 
1656  | 0  |     extra_drain = evbuffer_strspn(&it2, "\r\n");  | 
1657  | 0  |     break;  | 
1658  | 0  |   case EVBUFFER_EOL_CRLF_STRICT: { | 
1659  | 0  |     it = evbuffer_search(buffer, "\r\n", 2, &it);  | 
1660  | 0  |     if (it.pos < 0)  | 
1661  | 0  |       goto done;  | 
1662  | 0  |     extra_drain = 2;  | 
1663  | 0  |     break;  | 
1664  | 0  |   }  | 
1665  | 0  |   case EVBUFFER_EOL_CRLF: { | 
1666  | 0  |     ev_ssize_t start_pos = it.pos;  | 
1667  |  |     /* Look for a LF ... */  | 
1668  | 0  |     if (evbuffer_strchr(&it, '\n') < 0)  | 
1669  | 0  |       goto done;  | 
1670  | 0  |     extra_drain = 1;  | 
1671  |  |     /* ... optionally preceded by a CR. */  | 
1672  | 0  |     if (it.pos == start_pos)  | 
1673  | 0  |       break; /* If the first character is \n, don't back up */  | 
1674  |  |     /* This potentially does an extra linear walk over the first  | 
1675  |  |      * few chains.  Probably, that's not too expensive unless you  | 
1676  |  |      * have a really pathological setup. */  | 
1677  | 0  |     memcpy(&it2, &it, sizeof(it));  | 
1678  | 0  |     if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)  | 
1679  | 0  |       break;  | 
1680  | 0  |     if (evbuffer_getchr(&it2) == '\r') { | 
1681  | 0  |       memcpy(&it, &it2, sizeof(it));  | 
1682  | 0  |       extra_drain = 2;  | 
1683  | 0  |     }  | 
1684  | 0  |     break;  | 
1685  | 0  |   }  | 
1686  | 0  |   case EVBUFFER_EOL_LF:  | 
1687  | 0  |     if (evbuffer_strchr(&it, '\n') < 0)  | 
1688  | 0  |       goto done;  | 
1689  | 0  |     extra_drain = 1;  | 
1690  | 0  |     break;  | 
1691  | 0  |   case EVBUFFER_EOL_NUL:  | 
1692  | 0  |     if (evbuffer_strchr(&it, '\0') < 0)  | 
1693  | 0  |       goto done;  | 
1694  | 0  |     extra_drain = 1;  | 
1695  | 0  |     break;  | 
1696  | 0  |   default:  | 
1697  | 0  |     goto done;  | 
1698  | 0  |   }  | 
1699  |  |  | 
1700  | 0  |   ok = 1;  | 
1701  | 0  | done:  | 
1702  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
1703  |  | 
  | 
1704  | 0  |   if (!ok)  | 
1705  | 0  |     PTR_NOT_FOUND(&it);  | 
1706  | 0  |   if (eol_len_out)  | 
1707  | 0  |     *eol_len_out = extra_drain;  | 
1708  |  | 
  | 
1709  | 0  |   return it;  | 
1710  | 0  | }  | 
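/*
 * Illustrative usage sketch (not part of the original source): using
 * evbuffer_search_eol() to measure the next line without copying it.
 * example_next_line_length() is hypothetical; eol_len_out reports how many
 * terminator bytes would have to be drained after the line itself.
 */
#include <event2/buffer.h>

static ev_ssize_t
example_next_line_length(struct evbuffer *input, size_t *eol_len_out)
{
	struct evbuffer_ptr eol;

	eol = evbuffer_search_eol(input, NULL, eol_len_out, EVBUFFER_EOL_CRLF);
	if (eol.pos < 0)
		return -1;	/* no complete line buffered yet */
	return eol.pos;		/* bytes before the terminator */
}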
1711  |  |  | 
1712  |  | char *  | 
1713  |  | evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,  | 
1714  |  |     enum evbuffer_eol_style eol_style)  | 
1715  | 0  | { | 
1716  | 0  |   struct evbuffer_ptr it;  | 
1717  | 0  |   char *line;  | 
1718  | 0  |   size_t n_to_copy=0, extra_drain=0;  | 
1719  | 0  |   char *result = NULL;  | 
1720  |  | 
  | 
1721  | 0  |   EVBUFFER_LOCK(buffer);  | 
1722  |  | 
  | 
1723  | 0  |   if (buffer->freeze_start) { | 
1724  | 0  |     goto done;  | 
1725  | 0  |   }  | 
1726  |  |  | 
1727  | 0  |   it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);  | 
1728  | 0  |   if (it.pos < 0)  | 
1729  | 0  |     goto done;  | 
1730  | 0  |   n_to_copy = it.pos;  | 
1731  |  | 
  | 
1732  | 0  |   if ((line = mm_malloc(n_to_copy+1)) == NULL) { | 
1733  | 0  |     event_warn("%s: out of memory", __func__); | 
1734  | 0  |     goto done;  | 
1735  | 0  |   }  | 
1736  |  |  | 
1737  | 0  |   evbuffer_remove(buffer, line, n_to_copy);  | 
1738  | 0  |   line[n_to_copy] = '\0';  | 
1739  |  | 
  | 
1740  | 0  |   evbuffer_drain(buffer, extra_drain);  | 
1741  | 0  |   result = line;  | 
1742  | 0  | done:  | 
1743  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
1744  |  | 
  | 
1745  | 0  |   if (n_read_out)  | 
1746  | 0  |     *n_read_out = result ? n_to_copy : 0;  | 
1747  |  | 
  | 
1748  | 0  |   return result;  | 
1749  | 0  | }  | 
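/*
 * Illustrative usage sketch (not part of the original source): consuming an
 * evbuffer line by line with evbuffer_readln().  Each returned string is
 * heap-allocated and must be released with free(); NULL means no complete
 * line is buffered (or allocation failed).  example_print_lines() is
 * hypothetical.
 */
#include <event2/buffer.h>
#include <stdio.h>
#include <stdlib.h>

static void
example_print_lines(struct evbuffer *input)
{
	char *line;
	size_t len;

	while ((line = evbuffer_readln(input, &len, EVBUFFER_EOL_CRLF)) != NULL) {
		printf("got %lu bytes: %s\n", (unsigned long)len, line);
		free(line);
	}
}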
1750  |  |  | 
1751  | 0  | #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096  | 
1752  |  |  | 
1753  |  | /* Adds data to an event buffer */  | 
1754  |  |  | 
1755  |  | int  | 
1756  |  | evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)  | 
1757  | 0  | { | 
1758  | 0  |   struct evbuffer_chain *chain, *tmp;  | 
1759  | 0  |   const unsigned char *data = data_in;  | 
1760  | 0  |   size_t remain, to_alloc;  | 
1761  | 0  |   int result = -1;  | 
1762  |  | 
  | 
1763  | 0  |   EVBUFFER_LOCK(buf);  | 
1764  |  | 
  | 
1765  | 0  |   if (buf->freeze_end) { | 
1766  | 0  |     goto done;  | 
1767  | 0  |   }  | 
1768  |  |   /* Prevent buf->total_len overflow */  | 
1769  | 0  |   if (datlen > EV_SIZE_MAX - buf->total_len) { | 
1770  | 0  |     goto done;  | 
1771  | 0  |   }  | 
1772  |  |  | 
1773  | 0  |   if (*buf->last_with_datap == NULL) { | 
1774  | 0  |     chain = buf->last;  | 
1775  | 0  |   } else { | 
1776  | 0  |     chain = *buf->last_with_datap;  | 
1777  | 0  |   }  | 
1778  |  |  | 
1779  |  |   /* If there are no chains allocated for this buffer, allocate one  | 
1780  |  |    * big enough to hold all the data. */  | 
1781  | 0  |   if (chain == NULL) { | 
1782  | 0  |     chain = evbuffer_chain_insert_new(buf, datlen);  | 
1783  | 0  |     if (!chain)  | 
1784  | 0  |       goto done;  | 
1785  | 0  |   }  | 
1786  |  |  | 
1787  | 0  |   if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { | 
1788  |  |     /* Always true for mutable buffers */  | 
1789  | 0  |     EVUTIL_ASSERT(chain->misalign >= 0 &&  | 
1790  | 0  |         (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);  | 
1791  | 0  |     remain = chain->buffer_len - (size_t)chain->misalign - chain->off;  | 
1792  | 0  |     if (remain >= datlen) { | 
1793  |  |       /* there's enough space to hold all the data in the  | 
1794  |  |        * current last chain */  | 
1795  | 0  |       memcpy(chain->buffer + chain->misalign + chain->off,  | 
1796  | 0  |           data, datlen);  | 
1797  | 0  |       chain->off += datlen;  | 
1798  | 0  |       buf->total_len += datlen;  | 
1799  | 0  |       buf->n_add_for_cb += datlen;  | 
1800  | 0  |       goto out;  | 
1801  | 0  |     } else if (!CHAIN_PINNED(chain) &&  | 
1802  | 0  |         evbuffer_chain_should_realign(chain, datlen)) { | 
1803  |  |       /* we can fit the data into the misalignment */  | 
1804  | 0  |       evbuffer_chain_align(chain);  | 
1805  |  | 
  | 
1806  | 0  |       memcpy(chain->buffer + chain->off, data, datlen);  | 
1807  | 0  |       chain->off += datlen;  | 
1808  | 0  |       buf->total_len += datlen;  | 
1809  | 0  |       buf->n_add_for_cb += datlen;  | 
1810  | 0  |       goto out;  | 
1811  | 0  |     }  | 
1812  | 0  |   } else { | 
1813  |  |     /* we cannot write any data to the last chain */  | 
1814  | 0  |     remain = 0;  | 
1815  | 0  |   }  | 
1816  |  |  | 
1817  |  |   /* we need to add another chain */  | 
1818  | 0  |   to_alloc = chain->buffer_len;  | 
1819  | 0  |   if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)  | 
1820  | 0  |     to_alloc <<= 1;  | 
1821  | 0  |   if (datlen > to_alloc)  | 
1822  | 0  |     to_alloc = datlen;  | 
1823  | 0  |   tmp = evbuffer_chain_new_membuf(to_alloc);  | 
1824  | 0  |   if (tmp == NULL)  | 
1825  | 0  |     goto done;  | 
1826  |  |  | 
1827  | 0  |   if (remain) { | 
1828  | 0  |     memcpy(chain->buffer + chain->misalign + chain->off,  | 
1829  | 0  |         data, remain);  | 
1830  | 0  |     chain->off += remain;  | 
1831  | 0  |     buf->total_len += remain;  | 
1832  | 0  |     buf->n_add_for_cb += remain;  | 
1833  | 0  |   }  | 
1834  |  | 
  | 
1835  | 0  |   data += remain;  | 
1836  | 0  |   datlen -= remain;  | 
1837  |  | 
  | 
1838  | 0  |   memcpy(tmp->buffer, data, datlen);  | 
1839  | 0  |   tmp->off = datlen;  | 
1840  | 0  |   evbuffer_chain_insert(buf, tmp);  | 
1841  | 0  |   buf->n_add_for_cb += datlen;  | 
1842  |  | 
  | 
1843  | 0  | out:  | 
1844  | 0  |   evbuffer_invoke_callbacks_(buf);  | 
1845  | 0  |   result = 0;  | 
1846  | 0  | done:  | 
1847  | 0  |   EVBUFFER_UNLOCK(buf);  | 
1848  | 0  |   return result;  | 
1849  | 0  | }  | 
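/*
 * Illustrative usage sketch (not part of the original source): appending
 * data to an evbuffer.  evbuffer_add() returns 0 on success and -1 on
 * failure (e.g. frozen end or out of memory); evbuffer_add_printf()
 * returns the number of bytes added, or -1 on error.  example_append()
 * and its "name" parameter are hypothetical.
 */
#include <event2/buffer.h>
#include <string.h>

static int
example_append(struct evbuffer *out, const char *name)
{
	static const char prefix[] = "Hello, ";

	if (evbuffer_add(out, prefix, strlen(prefix)) < 0)
		return -1;
	if (evbuffer_add_printf(out, "%s!\r\n", name) < 0)
		return -1;
	return 0;
}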
1850  |  |  | 
1851  |  | int  | 
1852  |  | evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)  | 
1853  | 0  | { | 
1854  | 0  |   struct evbuffer_chain *chain, *tmp;  | 
1855  | 0  |   int result = -1;  | 
1856  |  | 
  | 
1857  | 0  |   EVBUFFER_LOCK(buf);  | 
1858  |  | 
  | 
1859  | 0  |   if (datlen == 0) { | 
1860  | 0  |     result = 0;  | 
1861  | 0  |     goto done;  | 
1862  | 0  |   }  | 
1863  | 0  |   if (buf->freeze_start) { | 
1864  | 0  |     goto done;  | 
1865  | 0  |   }  | 
1866  | 0  |   if (datlen > EV_SIZE_MAX - buf->total_len) { | 
1867  | 0  |     goto done;  | 
1868  | 0  |   }  | 
1869  |  |  | 
1870  | 0  |   chain = buf->first;  | 
1871  |  | 
  | 
1872  | 0  |   if (chain == NULL) { | 
1873  | 0  |     chain = evbuffer_chain_insert_new(buf, datlen);  | 
1874  | 0  |     if (!chain)  | 
1875  | 0  |       goto done;  | 
1876  | 0  |   }  | 
1877  |  |  | 
1878  |  |   /* we cannot touch immutable buffers */  | 
1879  | 0  |   if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { | 
1880  |  |     /* Always true for mutable buffers */  | 
1881  | 0  |     EVUTIL_ASSERT(chain->misalign >= 0 &&  | 
1882  | 0  |         (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);  | 
1883  |  |  | 
1884  |  |     /* If this chain is empty, we can treat it as  | 
1885  |  |      * 'empty at the beginning' rather than 'empty at the end' */  | 
1886  | 0  |     if (chain->off == 0)  | 
1887  | 0  |       chain->misalign = chain->buffer_len;  | 
1888  |  | 
  | 
1889  | 0  |     if ((size_t)chain->misalign >= datlen) { | 
1890  |  |       /* we have enough space to fit everything */  | 
1891  | 0  |       memcpy(chain->buffer + chain->misalign - datlen,  | 
1892  | 0  |           data, datlen);  | 
1893  | 0  |       chain->off += datlen;  | 
1894  | 0  |       chain->misalign -= datlen;  | 
1895  | 0  |       buf->total_len += datlen;  | 
1896  | 0  |       buf->n_add_for_cb += datlen;  | 
1897  | 0  |       goto out;  | 
1898  | 0  |     } else if (chain->misalign) { | 
1899  |  |       /* we can only fit some of the data. */  | 
1900  | 0  |       memcpy(chain->buffer,  | 
1901  | 0  |           (char*)data + datlen - chain->misalign,  | 
1902  | 0  |           (size_t)chain->misalign);  | 
1903  | 0  |       chain->off += (size_t)chain->misalign;  | 
1904  | 0  |       buf->total_len += (size_t)chain->misalign;  | 
1905  | 0  |       buf->n_add_for_cb += (size_t)chain->misalign;  | 
1906  | 0  |       datlen -= (size_t)chain->misalign;  | 
1907  | 0  |       chain->misalign = 0;  | 
1908  | 0  |     }  | 
1909  | 0  |   }  | 
1910  |  |  | 
1911  |  |   /* we need to add another chain */  | 
1912  | 0  |   if ((tmp = evbuffer_chain_new_membuf(datlen)) == NULL)  | 
1913  | 0  |     goto done;  | 
1914  | 0  |   buf->first = tmp;  | 
1915  | 0  |   if (buf->last_with_datap == &buf->first && chain->off)  | 
1916  | 0  |     buf->last_with_datap = &tmp->next;  | 
1917  |  | 
  | 
1918  | 0  |   tmp->next = chain;  | 
1919  |  | 
  | 
1920  | 0  |   tmp->off = datlen;  | 
1921  | 0  |   EVUTIL_ASSERT(datlen <= tmp->buffer_len);  | 
1922  | 0  |   tmp->misalign = tmp->buffer_len - datlen;  | 
1923  |  | 
  | 
1924  | 0  |   memcpy(tmp->buffer + tmp->misalign, data, datlen);  | 
1925  | 0  |   buf->total_len += datlen;  | 
1926  | 0  |   buf->n_add_for_cb += datlen;  | 
1927  |  | 
  | 
1928  | 0  | out:  | 
1929  | 0  |   evbuffer_invoke_callbacks_(buf);  | 
1930  | 0  |   result = 0;  | 
1931  | 0  | done:  | 
1932  | 0  |   EVBUFFER_UNLOCK(buf);  | 
1933  | 0  |   return result;  | 
1934  | 0  | }  | 
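/*
 * Illustrative usage sketch (not part of the original source): the usual
 * reason to evbuffer_prepend() is to add a header once the body length is
 * known.  The textual "Content-Length" framing, example_frame_message(),
 * and the assumption that "out" starts out empty are all hypothetical.
 */
#include <event2/buffer.h>
#include <event2/util.h>

static int
example_frame_message(struct evbuffer *out, const void *body, size_t bodylen)
{
	char header[64];
	int n;

	if (evbuffer_add(out, body, bodylen) < 0)
		return -1;
	/* Now that the body is in place, stick the header in front of it. */
	n = evutil_snprintf(header, sizeof(header),
	    "Content-Length: %lu\r\n\r\n", (unsigned long)bodylen);
	if (n < 0 || evbuffer_prepend(out, header, (size_t)n) < 0)
		return -1;
	return 0;
}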
1935  |  |  | 
1936  |  | /** Helper: realigns the memory in chain->buffer so that misalign is 0. */  | 
1937  |  | static void  | 
1938  |  | evbuffer_chain_align(struct evbuffer_chain *chain)  | 
1939  | 0  | { | 
1940  | 0  |   EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));  | 
1941  | 0  |   EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));  | 
1942  | 0  |   memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);  | 
1943  | 0  |   chain->misalign = 0;  | 
1944  | 0  | }  | 
1945  |  |  | 
1946  | 0  | #define MAX_TO_COPY_IN_EXPAND 4096  | 
1947  | 0  | #define MAX_TO_REALIGN_IN_EXPAND 2048  | 
1948  |  |  | 
1949  |  | /** Helper: return true iff we should realign chain to fit datalen bytes of  | 
1950  |  |     data in it. */  | 
1951  |  | static int  | 
1952  |  | evbuffer_chain_should_realign(struct evbuffer_chain *chain,  | 
1953  |  |     size_t datlen)  | 
1954  | 0  | { | 
1955  | 0  |   return chain->buffer_len - chain->off >= datlen &&  | 
1956  | 0  |       (chain->off < chain->buffer_len / 2) &&  | 
1957  | 0  |       (chain->off <= MAX_TO_REALIGN_IN_EXPAND);  | 
1958  | 0  | }  | 
1959  |  |  | 
1960  |  | /* Expands the available space in the event buffer to at least datlen, all in  | 
1961  |  |  * a single chunk.  Return that chunk. */  | 
1962  |  | static struct evbuffer_chain *  | 
1963  |  | evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)  | 
1964  | 0  | { | 
1965  | 0  |   struct evbuffer_chain *chain, **chainp;  | 
1966  | 0  |   struct evbuffer_chain *result = NULL;  | 
1967  | 0  |   ASSERT_EVBUFFER_LOCKED(buf);  | 
1968  |  | 
  | 
1969  | 0  |   chainp = buf->last_with_datap;  | 
1970  |  |  | 
1971  |  |   /* XXX If *chainp is no longer writeable, but has enough space in its  | 
1972  |  |    * misalign, this might be a bad idea: we could still use *chainp, not  | 
1973  |  |    * (*chainp)->next. */  | 
1974  | 0  |   if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)  | 
1975  | 0  |     chainp = &(*chainp)->next;  | 
1976  |  |  | 
1977  |  |   /* 'chain' now points to the first chain with writable space (if any).  | 
1978  |  |    * We will either use it, realign it, replace it, or resize it. */  | 
1979  | 0  |   chain = *chainp;  | 
1980  |  | 
  | 
1981  | 0  |   if (chain == NULL ||  | 
1982  | 0  |       (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { | 
1983  |  |     /* We can't use the last_with_data chain at all.  Just add a  | 
1984  |  |      * new one that's big enough. */  | 
1985  | 0  |     goto insert_new;  | 
1986  | 0  |   }  | 
1987  |  |  | 
1988  |  |   /* If we can fit all the data, then we don't have to do anything */  | 
1989  | 0  |   if (CHAIN_SPACE_LEN(chain) >= datlen) { | 
1990  | 0  |     result = chain;  | 
1991  | 0  |     goto ok;  | 
1992  | 0  |   }  | 
1993  |  |  | 
1994  |  |   /* If the chain is completely empty, just replace it by adding a new  | 
1995  |  |    * empty chain. */  | 
1996  | 0  |   if (chain->off == 0) { | 
1997  | 0  |     goto insert_new;  | 
1998  | 0  |   }  | 
1999  |  |  | 
2000  |  |   /* If the misalignment plus the remaining space fulfills our data  | 
2001  |  |    * needs, we could just force an alignment to happen.  Afterwards, we  | 
2002  |  |    * have enough space.  But only do this if we're saving a lot of space  | 
2003  |  |    * and not moving too much data.  Otherwise the space savings are  | 
2004  |  |    * probably offset by the time lost in copying.  | 
2005  |  |    */  | 
2006  | 0  |   if (evbuffer_chain_should_realign(chain, datlen)) { | 
2007  | 0  |     evbuffer_chain_align(chain);  | 
2008  | 0  |     result = chain;  | 
2009  | 0  |     goto ok;  | 
2010  | 0  |   }  | 
2011  |  |  | 
2012  |  |   /* At this point, we can either resize the last chunk with space in  | 
2013  |  |    * it, use the next chunk after it, or add a new chunk.  If we add a new  | 
2014  |  |    * chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former last chunk.  | 
2015  |  |    * If we resize, we have to copy chain->off bytes.  | 
2016  |  |    */  | 
2017  |  |  | 
2018  |  |   /* Would expanding this chunk be affordable and worthwhile? */  | 
2019  | 0  |   if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||  | 
2020  | 0  |       chain->off > MAX_TO_COPY_IN_EXPAND ||  | 
2021  | 0  |     datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) { | 
2022  |  |     /* It's not worth resizing this chain. Can the next one be  | 
2023  |  |      * used? */  | 
2024  | 0  |     if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { | 
2025  |  |       /* Yes, we can just use the next chain (which should  | 
2026  |  |        * be empty). */  | 
2027  | 0  |       result = chain->next;  | 
2028  | 0  |       goto ok;  | 
2029  | 0  |     } else { | 
2030  |  |       /* No; append a new chain (which will free all  | 
2031  |  |        * terminal empty chains). */  | 
2032  | 0  |       goto insert_new;  | 
2033  | 0  |     }  | 
2034  | 0  |   } else { | 
2035  |  |     /* Okay, we're going to try to resize this chain: Not doing so  | 
2036  |  |      * would waste at least 1/8 of its current allocation, and we  | 
2037  |  |      * can do so without having to copy more than  | 
2038  |  |      * MAX_TO_COPY_IN_EXPAND bytes. */  | 
2039  |  |     /* figure out how much space we need */  | 
2040  | 0  |     size_t length = chain->off + datlen;  | 
2041  | 0  |     struct evbuffer_chain *tmp = evbuffer_chain_new_membuf(length);  | 
2042  | 0  |     if (tmp == NULL)  | 
2043  | 0  |       goto err;  | 
2044  |  |  | 
2045  |  |     /* copy the data over that we had so far */  | 
2046  | 0  |     tmp->off = chain->off;  | 
2047  | 0  |     memcpy(tmp->buffer, chain->buffer + chain->misalign,  | 
2048  | 0  |         chain->off);  | 
2049  |  |     /* fix up the list */  | 
2050  | 0  |     EVUTIL_ASSERT(*chainp == chain);  | 
2051  | 0  |     result = *chainp = tmp;  | 
2052  |  | 
  | 
2053  | 0  |     if (buf->last == chain)  | 
2054  | 0  |       buf->last = tmp;  | 
2055  |  | 
  | 
2056  | 0  |     tmp->next = chain->next;  | 
2057  | 0  |     evbuffer_chain_free(chain);  | 
2058  | 0  |     goto ok;  | 
2059  | 0  |   }  | 
2060  |  |  | 
2061  | 0  | insert_new:  | 
2062  | 0  |   result = evbuffer_chain_insert_new(buf, datlen);  | 
2063  | 0  |   if (!result)  | 
2064  | 0  |     goto err;  | 
2065  | 0  | ok:  | 
2066  | 0  |   EVUTIL_ASSERT(result);  | 
2067  | 0  |   EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);  | 
2068  | 0  | err:  | 
2069  | 0  |   return result;  | 
2070  | 0  | }  | 
2071  |  |  | 
2072  |  | /* Make sure that datlen bytes are available for writing in the last n  | 
2073  |  |  * chains.  Never copies or moves data. */  | 
2074  |  | int  | 
2075  |  | evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)  | 
2076  | 0  | { | 
2077  | 0  |   struct evbuffer_chain *chain = buf->last, *tmp, *next;  | 
2078  | 0  |   size_t avail;  | 
2079  | 0  |   int used;  | 
2080  |  | 
  | 
2081  | 0  |   ASSERT_EVBUFFER_LOCKED(buf);  | 
2082  | 0  |   EVUTIL_ASSERT(n >= 2);  | 
2083  |  | 
  | 
2084  | 0  |   if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { | 
2085  |  |     /* There is no last chunk, or we can't touch the last chunk.  | 
2086  |  |      * Just add a new chunk. */  | 
2087  | 0  |     chain = evbuffer_chain_insert_new(buf, datlen);  | 
2088  | 0  |     if (chain == NULL)  | 
2089  | 0  |       return (-1);  | 
2090  | 0  |     else  | 
2091  | 0  |       return (0);  | 
2092  | 0  |   }  | 
2093  |  |  | 
2094  | 0  |   used = 0; /* number of chains we're using space in. */  | 
2095  | 0  |   avail = 0; /* how much space they have. */  | 
2096  |  |   /* How many bytes can we stick at the end of buffer as it is?  Iterate  | 
2097  |  |    * over the chains at the end of the buffer, trying to see how much  | 
2098  |  |    * space we have in the first n. */  | 
2099  | 0  |   for (chain = *buf->last_with_datap; chain; chain = chain->next) { | 
2100  | 0  |     if (chain->off) { | 
2101  | 0  |       size_t space = (size_t) CHAIN_SPACE_LEN(chain);  | 
2102  | 0  |       EVUTIL_ASSERT(chain == *buf->last_with_datap);  | 
2103  | 0  |       if (space) { | 
2104  | 0  |         avail += space;  | 
2105  | 0  |         ++used;  | 
2106  | 0  |       }  | 
2107  | 0  |     } else { | 
2108  |  |       /* No data in chain; realign it. */  | 
2109  | 0  |       chain->misalign = 0;  | 
2110  | 0  |       avail += chain->buffer_len;  | 
2111  | 0  |       ++used;  | 
2112  | 0  |     }  | 
2113  | 0  |     if (avail >= datlen) { | 
2114  |  |       /* There is already enough space.  Just return */  | 
2115  | 0  |       return (0);  | 
2116  | 0  |     }  | 
2117  | 0  |     if (used == n)  | 
2118  | 0  |       break;  | 
2119  | 0  |   }  | 
2120  |  |  | 
2121  |  |   /* There wasn't enough space in the first n chains with space in  | 
2122  |  |    * them. Either add a new chain with enough space, or replace all  | 
2123  |  |    * empty chains with one that has enough space, depending on n. */  | 
2124  | 0  |   if (used < n) { | 
2125  |  |     /* The loop ran off the end of the chains before it hit n  | 
2126  |  |      * chains; we can add another. */  | 
2127  | 0  |     EVUTIL_ASSERT(chain == NULL);  | 
2128  |  | 
  | 
2129  | 0  |     tmp = evbuffer_chain_new_membuf(datlen - avail);  | 
2130  | 0  |     if (tmp == NULL)  | 
2131  | 0  |       return (-1);  | 
2132  |  |  | 
2133  | 0  |     buf->last->next = tmp;  | 
2134  | 0  |     buf->last = tmp;  | 
2135  |  |     /* (we would only set last_with_data if we added the first  | 
2136  |  |      * chain. But if the buffer had no chains, we would have  | 
2137  |  |      * just allocated a new chain earlier) */  | 
2138  | 0  |     return (0);  | 
2139  | 0  |   } else { | 
2140  |  |     /* Nuke _all_ the empty chains. */  | 
2141  | 0  |     int rmv_all = 0; /* True iff we removed last_with_data. */  | 
2142  | 0  |     chain = *buf->last_with_datap;  | 
2143  | 0  |     if (!chain->off) { | 
2144  | 0  |       EVUTIL_ASSERT(chain == buf->first);  | 
2145  | 0  |       rmv_all = 1;  | 
2146  | 0  |       avail = 0;  | 
2147  | 0  |     } else { | 
2148  |  |       /* can't overflow, since only mutable chains have  | 
2149  |  |        * huge misaligns. */  | 
2150  | 0  |       avail = (size_t) CHAIN_SPACE_LEN(chain);  | 
2151  | 0  |       chain = chain->next;  | 
2152  | 0  |     }  | 
2153  |  |  | 
2154  |  | 
  | 
2155  | 0  |     for (; chain; chain = next) { | 
2156  | 0  |       next = chain->next;  | 
2157  | 0  |       EVUTIL_ASSERT(chain->off == 0);  | 
2158  | 0  |       evbuffer_chain_free(chain);  | 
2159  | 0  |     }  | 
2160  | 0  |     EVUTIL_ASSERT(datlen >= avail);  | 
2161  | 0  |     tmp = evbuffer_chain_new_membuf(datlen - avail);  | 
2162  | 0  |     if (tmp == NULL) { | 
2163  | 0  |       if (rmv_all) { | 
2164  | 0  |         ZERO_CHAIN(buf);  | 
2165  | 0  |       } else { | 
2166  | 0  |         buf->last = *buf->last_with_datap;  | 
2167  | 0  |         (*buf->last_with_datap)->next = NULL;  | 
2168  | 0  |       }  | 
2169  | 0  |       return (-1);  | 
2170  | 0  |     }  | 
2171  |  |  | 
2172  | 0  |     if (rmv_all) { | 
2173  | 0  |       buf->first = buf->last = tmp;  | 
2174  | 0  |       buf->last_with_datap = &buf->first;  | 
2175  | 0  |     } else { | 
2176  | 0  |       (*buf->last_with_datap)->next = tmp;  | 
2177  | 0  |       buf->last = tmp;  | 
2178  | 0  |     }  | 
2179  | 0  |     return (0);  | 
2180  | 0  |   }  | 
2181  | 0  | }  | 
2182  |  |  | 
2183  |  | int  | 
2184  |  | evbuffer_expand(struct evbuffer *buf, size_t datlen)  | 
2185  | 0  | { | 
2186  | 0  |   struct evbuffer_chain *chain;  | 
2187  |  | 
  | 
2188  | 0  |   EVBUFFER_LOCK(buf);  | 
2189  | 0  |   chain = evbuffer_expand_singlechain(buf, datlen);  | 
2190  | 0  |   EVBUFFER_UNLOCK(buf);  | 
2191  | 0  |   return chain ? 0 : -1;  | 
2192  | 0  | }  | 
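/*
 * Illustrative usage sketch (not part of the original source): reserving
 * space with evbuffer_expand() before a burst of small appends, so the
 * buffer does not have to grow chain by chain.  The 4096-byte estimate
 * and example_bulk_append() are hypothetical.
 */
#include <event2/buffer.h>
#include <string.h>

static int
example_bulk_append(struct evbuffer *out, const char **items, int n_items)
{
	int i;

	if (evbuffer_expand(out, 4096) < 0)
		return -1;	/* could not reserve the space */
	for (i = 0; i < n_items; ++i) {
		if (evbuffer_add(out, items[i], strlen(items[i])) < 0)
			return -1;
	}
	return 0;
}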
2193  |  |  | 
2194  |  | /*  | 
2195  |  |  * Reads data from a file descriptor into a buffer.  | 
2196  |  |  */  | 
2197  |  |  | 
2198  |  | #if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)  | 
2199  |  | #define USE_IOVEC_IMPL  | 
2200  |  | #endif  | 
2201  |  |  | 
2202  |  | #ifdef USE_IOVEC_IMPL  | 
2203  |  |  | 
2204  |  | #ifdef EVENT__HAVE_SYS_UIO_H  | 
2205  |  | /* number of iovecs we use for writev; fragmentation is going to determine  | 
2206  |  |  * how much we end up writing */  | 
2207  |  |  | 
2208  | 0  | #define DEFAULT_WRITE_IOVEC 128  | 
2209  |  |  | 
2210  |  | #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC  | 
2211  |  | #define NUM_WRITE_IOVEC UIO_MAXIOV  | 
2212  |  | #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC  | 
2213  |  | #define NUM_WRITE_IOVEC IOV_MAX  | 
2214  |  | #else  | 
2215  | 0  | #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC  | 
2216  |  | #endif  | 
2217  |  |  | 
2218  | 0  | #define IOV_TYPE struct iovec  | 
2219  | 0  | #define IOV_PTR_FIELD iov_base  | 
2220  | 0  | #define IOV_LEN_FIELD iov_len  | 
2221  |  | #define IOV_LEN_TYPE size_t  | 
2222  |  | #else  | 
2223  |  | #define NUM_WRITE_IOVEC 16  | 
2224  |  | #define IOV_TYPE WSABUF  | 
2225  |  | #define IOV_PTR_FIELD buf  | 
2226  |  | #define IOV_LEN_FIELD len  | 
2227  |  | #define IOV_LEN_TYPE unsigned long  | 
2228  |  | #endif  | 
2229  |  | #endif  | 
2230  | 0  | #define NUM_READ_IOVEC 4  | 
2231  |  |  | 
2232  |  | /** Helper function to figure out which space to use for reading data into  | 
2233  |  |     an evbuffer.  Internal use only.  | 
2234  |  |  | 
2235  |  |     @param buf The buffer to read into  | 
2236  |  |     @param howmuch How much we want to read.  | 
2237  |  |     @param vecs An array of two or more iovecs or WSABUFs.  | 
2238  |  |     @param n_vecs_avail The length of vecs  | 
2239  |  |     @param chainp A pointer to a variable to hold the first chain we're  | 
2240  |  |       reading into.  | 
2241  |  |     @param exact Boolean: if true, we do not provide more than 'howmuch'  | 
2242  |  |       space in the vectors, even if more space is available.  | 
2243  |  |     @return The number of buffers we're using.  | 
2244  |  |  */  | 
2245  |  | int  | 
2246  |  | evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,  | 
2247  |  |     struct evbuffer_iovec *vecs, int n_vecs_avail,  | 
2248  |  |     struct evbuffer_chain ***chainp, int exact)  | 
2249  | 0  | { | 
2250  | 0  |   struct evbuffer_chain *chain;  | 
2251  | 0  |   struct evbuffer_chain **firstchainp;  | 
2252  | 0  |   size_t so_far;  | 
2253  | 0  |   int i;  | 
2254  | 0  |   ASSERT_EVBUFFER_LOCKED(buf);  | 
2255  |  | 
  | 
2256  | 0  |   if (howmuch < 0)  | 
2257  | 0  |     return -1;  | 
2258  |  |  | 
2259  | 0  |   so_far = 0;  | 
2260  |  |   /* Let firstchain be the first chain with any space on it */  | 
2261  | 0  |   firstchainp = buf->last_with_datap;  | 
2262  | 0  |   EVUTIL_ASSERT(*firstchainp);  | 
2263  | 0  |   if (CHAIN_SPACE_LEN(*firstchainp) == 0) { | 
2264  | 0  |     firstchainp = &(*firstchainp)->next;  | 
2265  | 0  |   }  | 
2266  |  | 
  | 
2267  | 0  |   chain = *firstchainp;  | 
2268  | 0  |   EVUTIL_ASSERT(chain);  | 
2269  | 0  |   for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { | 
2270  | 0  |     size_t avail = (size_t) CHAIN_SPACE_LEN(chain);  | 
2271  | 0  |     if (avail > (howmuch - so_far) && exact)  | 
2272  | 0  |       avail = howmuch - so_far;  | 
2273  | 0  |     vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain);  | 
2274  | 0  |     vecs[i].iov_len = avail;  | 
2275  | 0  |     so_far += avail;  | 
2276  | 0  |     chain = chain->next;  | 
2277  | 0  |   }  | 
2278  |  | 
  | 
2279  | 0  |   *chainp = firstchainp;  | 
2280  | 0  |   return i;  | 
2281  | 0  | }  | 
2282  |  |  | 
2283  |  | static int  | 
2284  |  | get_n_bytes_readable_on_socket(evutil_socket_t fd)  | 
2285  | 0  | { | 
2286  |  | #if defined(FIONREAD) && defined(_WIN32)  | 
2287  |  |   unsigned long lng = EVBUFFER_MAX_READ_DEFAULT;  | 
2288  |  |   if (ioctlsocket(fd, FIONREAD, &lng) < 0)  | 
2289  |  |     return -1;  | 
2290  |  |   /* Can overflow, but mostly harmlessly. XXXX */  | 
2291  |  |   return (int)lng;  | 
2292  |  | #elif defined(FIONREAD)  | 
2293  | 0  |   int n = EVBUFFER_MAX_READ_DEFAULT;  | 
2294  | 0  |   if (ioctl(fd, FIONREAD, &n) < 0)  | 
2295  | 0  |     return -1;  | 
2296  | 0  |   return n;  | 
2297  |  | #else  | 
2298  |  |   return EVBUFFER_MAX_READ_DEFAULT;  | 
2299  |  | #endif  | 
2300  | 0  | }  | 
2301  |  |  | 
2302  |  | /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t  | 
2303  |  |  * as howmuch? */  | 
2304  |  | int  | 
2305  |  | evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)  | 
2306  | 0  | { | 
2307  | 0  |   int n;  | 
2308  | 0  |   int result;  | 
2309  |  | 
  | 
2310  | 0  | #ifdef USE_IOVEC_IMPL  | 
2311  | 0  |   struct evbuffer_chain **chainp;  | 
2312  | 0  |   int nvecs, i, remaining;  | 
2313  |  | #else  | 
2314  |  |   struct evbuffer_chain *chain;  | 
2315  |  |   unsigned char *p;  | 
2316  |  | #endif  | 
2317  |  | 
  | 
2318  | 0  |   EVBUFFER_LOCK(buf);  | 
2319  |  | 
  | 
2320  | 0  |   if (buf->freeze_end) { | 
2321  | 0  |     result = -1;  | 
2322  | 0  |     goto done;  | 
2323  | 0  |   }  | 
2324  |  |  | 
2325  | 0  |   n = get_n_bytes_readable_on_socket(fd);  | 
2326  | 0  |   if (n <= 0 || n > (int)buf->max_read)  | 
2327  | 0  |     n = (int)buf->max_read;  | 
2328  | 0  |   if (howmuch < 0 || howmuch > n)  | 
2329  | 0  |     howmuch = n;  | 
2330  |  | 
  | 
2331  | 0  | #ifdef USE_IOVEC_IMPL  | 
2332  |  |   /* Since we can use iovecs, we're willing to use the last  | 
2333  |  |    * NUM_READ_IOVEC chains. */  | 
2334  | 0  |   if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { | 
2335  | 0  |     result = -1;  | 
2336  | 0  |     goto done;  | 
2337  | 0  |   } else { | 
2338  | 0  |     IOV_TYPE vecs[NUM_READ_IOVEC];  | 
2339  | 0  | #ifdef EVBUFFER_IOVEC_IS_NATIVE_  | 
2340  | 0  |     nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,  | 
2341  | 0  |         NUM_READ_IOVEC, &chainp, 1);  | 
2342  |  | #else  | 
2343  |  |     /* We aren't using the native struct iovec.  Therefore,  | 
2344  |  |        we are on win32. */  | 
2345  |  |     struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];  | 
2346  |  |     nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,  | 
2347  |  |         &chainp, 1);  | 
2348  |  |  | 
2349  |  |     for (i=0; i < nvecs; ++i)  | 
2350  |  |       WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);  | 
2351  |  | #endif  | 
2352  |  | 
  | 
2353  |  | #ifdef _WIN32  | 
2354  |  |     { | 
2355  |  |       DWORD bytesRead;  | 
2356  |  |       DWORD flags=0;  | 
2357  |  |       if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { | 
2358  |  |         /* The read failed. It might be a close,  | 
2359  |  |          * or it might be an error. */  | 
2360  |  |         if (WSAGetLastError() == WSAECONNABORTED)  | 
2361  |  |           n = 0;  | 
2362  |  |         else  | 
2363  |  |           n = -1;  | 
2364  |  |       } else  | 
2365  |  |         n = bytesRead;  | 
2366  |  |     }  | 
2367  |  | #else  | 
2368  |  |     /* TODO(panjf2000): wrap it with `unlikely` as compiler hint? */  | 
2369  | 0  |     if (nvecs == 1)  | 
2370  | 0  |       n = read(fd, vecs[0].IOV_PTR_FIELD, vecs[0].IOV_LEN_FIELD);  | 
2371  | 0  |     else  | 
2372  | 0  |       n = readv(fd, vecs, nvecs);  | 
2373  | 0  | #endif  | 
2374  | 0  |   }  | 
2375  |  |  | 
2376  |  | #else /* !USE_IOVEC_IMPL */  | 
2377  |  |   /* If we don't have FIONREAD, we might waste some space here */  | 
2378  |  |   /* XXX we _will_ waste some space here if there is any space left  | 
2379  |  |    * over on buf->last. */  | 
2380  |  |   if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { | 
2381  |  |     result = -1;  | 
2382  |  |     goto done;  | 
2383  |  |   }  | 
2384  |  |  | 
2385  |  |   /* We can append new data at this point */  | 
2386  |  |   p = chain->buffer + chain->misalign + chain->off;  | 
2387  |  |  | 
2388  |  | #ifndef _WIN32  | 
2389  |  |   n = read(fd, p, howmuch);  | 
2390  |  | #else  | 
2391  |  |   n = recv(fd, p, howmuch, 0);  | 
2392  |  | #endif  | 
2393  |  | #endif /* USE_IOVEC_IMPL */  | 
2394  |  |  | 
2395  | 0  |   if (n == -1) { | 
2396  | 0  |     result = -1;  | 
2397  | 0  |     goto done;  | 
2398  | 0  |   }  | 
2399  | 0  |   if (n == 0) { | 
2400  | 0  |     result = 0;  | 
2401  | 0  |     goto done;  | 
2402  | 0  |   }  | 
2403  |  |  | 
2404  | 0  | #ifdef USE_IOVEC_IMPL  | 
2405  | 0  |   remaining = n;  | 
2406  | 0  |   for (i=0; i < nvecs; ++i) { | 
2407  |  |     /* can't overflow, since only mutable chains have  | 
2408  |  |      * huge misaligns. */  | 
2409  | 0  |     size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);  | 
2410  |  |     /* XXXX This is a kludge that can waste space in perverse  | 
2411  |  |      * situations. */  | 
2412  | 0  |     if (space > EVBUFFER_CHAIN_MAX)  | 
2413  | 0  |       space = EVBUFFER_CHAIN_MAX;  | 
2414  | 0  |     if ((ev_ssize_t)space < remaining) { | 
2415  | 0  |       (*chainp)->off += space;  | 
2416  | 0  |       remaining -= (int)space;  | 
2417  | 0  |     } else { | 
2418  | 0  |       (*chainp)->off += remaining;  | 
2419  | 0  |       buf->last_with_datap = chainp;  | 
2420  | 0  |       break;  | 
2421  | 0  |     }  | 
2422  | 0  |     chainp = &(*chainp)->next;  | 
2423  | 0  |   }  | 
2424  |  | #else  | 
2425  |  |   chain->off += n;  | 
2426  |  |   advance_last_with_data(buf);  | 
2427  |  | #endif  | 
2428  | 0  |   buf->total_len += n;  | 
2429  | 0  |   buf->n_add_for_cb += n;  | 
2430  |  |  | 
2431  |  |   /* Tell someone about changes in this buffer */  | 
2432  | 0  |   evbuffer_invoke_callbacks_(buf);  | 
2433  | 0  |   result = n;  | 
2434  | 0  | done:  | 
2435  | 0  |   EVBUFFER_UNLOCK(buf);  | 
2436  | 0  |   return result;  | 
2437  | 0  | }  | 
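/*
 * Illustrative usage sketch (not part of the original source): pulling
 * pending bytes from a readable socket into an evbuffer.  Passing -1 as
 * howmuch lets evbuffer_read() size the read itself (bounded by the
 * buffer's read limit / FIONREAD estimate).  example_fill_from_socket()
 * is hypothetical.
 */
#include <event2/buffer.h>
#include <event2/util.h>
#include <stdio.h>

static void
example_fill_from_socket(struct evbuffer *in, evutil_socket_t fd)
{
	int n = evbuffer_read(in, fd, -1);

	if (n > 0)
		printf("buffered %d more bytes (total %lu)\n",
		    n, (unsigned long)evbuffer_get_length(in));
	else if (n == 0)
		printf("peer closed the connection\n");
	else
		perror("evbuffer_read");	/* or WSAGetLastError() on Windows */
}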
2438  |  |  | 
2439  |  | #ifdef USE_IOVEC_IMPL  | 
2440  |  | static inline int  | 
2441  |  | evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,  | 
2442  |  |     ev_ssize_t howmuch)  | 
2443  | 0  | { | 
2444  | 0  |   IOV_TYPE iov[NUM_WRITE_IOVEC];  | 
2445  | 0  |   struct evbuffer_chain *chain = buffer->first;  | 
2446  | 0  |   int n, i = 0;  | 
2447  |  | 
  | 
2448  | 0  |   if (howmuch < 0)  | 
2449  | 0  |     return -1;  | 
2450  |  |  | 
2451  | 0  |   ASSERT_EVBUFFER_LOCKED(buffer);  | 
2452  |  |   /* XXX make this top out at some maximal data length?  if the  | 
2453  |  |    * buffer has (say) 1MB in it, split over 128 chains, there's  | 
2454  |  |    * no way it all gets written in one go. */  | 
2455  | 0  |   while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { | 
2456  | 0  | #ifdef USE_SENDFILE  | 
2457  |  |     /* we cannot write the file info via writev */  | 
2458  | 0  |     if (chain->flags & EVBUFFER_SENDFILE)  | 
2459  | 0  |       break;  | 
2460  | 0  | #endif  | 
2461  | 0  |     iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);  | 
2462  | 0  |     if ((size_t)howmuch >= chain->off) { | 
2463  |  |       /* XXX could be problematic when windows supports mmap */  | 
2464  | 0  |       iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;  | 
2465  | 0  |       howmuch -= chain->off;  | 
2466  | 0  |     } else { | 
2467  |  |       /* XXX could be problematic when windows supports mmap */  | 
2468  | 0  |       iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;  | 
2469  | 0  |       break;  | 
2470  | 0  |     }  | 
2471  | 0  |     chain = chain->next;  | 
2472  | 0  |   }  | 
2473  | 0  |   if (! i)  | 
2474  | 0  |     return 0;  | 
2475  |  |  | 
2476  |  | #ifdef _WIN32  | 
2477  |  |   { | 
2478  |  |     DWORD bytesSent;  | 
2479  |  |     if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))  | 
2480  |  |       n = -1;  | 
2481  |  |     else  | 
2482  |  |       n = bytesSent;  | 
2483  |  |   }  | 
2484  |  | #else  | 
2485  |  |   /* TODO(panjf2000): wrap it with `unlikely` as compiler hint? */  | 
2486  | 0  |   if (i == 1)  | 
2487  | 0  |     n = write(fd, iov[0].IOV_PTR_FIELD, iov[0].IOV_LEN_FIELD);  | 
2488  | 0  |   else  | 
2489  | 0  |     n = writev(fd, iov, i);  | 
2490  | 0  | #endif  | 
2491  | 0  |   return (n);  | 
2492  | 0  | }  | 
2493  |  | #endif  | 
2494  |  |  | 
2495  |  | #ifdef USE_SENDFILE  | 
2496  |  | static inline int  | 
2497  |  | evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,  | 
2498  |  |     ev_ssize_t howmuch)  | 
2499  | 0  | { | 
2500  | 0  |   struct evbuffer_chain *chain = buffer->first;  | 
2501  | 0  |   struct evbuffer_chain_file_segment *info =  | 
2502  | 0  |       EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,  | 
2503  | 0  |     chain);  | 
2504  | 0  |   const int source_fd = info->segment->fd;  | 
2505  |  | #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)  | 
2506  |  |   int res;  | 
2507  |  |   ev_off_t len = chain->off;  | 
2508  |  | #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)  | 
2509  | 0  |   ev_ssize_t res;  | 
2510  | 0  |   off_t offset = chain->misalign;  | 
2511  | 0  | #endif  | 
2512  |  | 
  | 
2513  | 0  |   ASSERT_EVBUFFER_LOCKED(buffer);  | 
2514  |  | 
  | 
2515  |  | #if defined(SENDFILE_IS_MACOSX)  | 
2516  |  |   res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);  | 
2517  |  |   if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))  | 
2518  |  |     return (-1);  | 
2519  |  |  | 
2520  |  |   return (len);  | 
2521  |  | #elif defined(SENDFILE_IS_FREEBSD)  | 
2522  |  |   res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);  | 
2523  |  |   if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))  | 
2524  |  |     return (-1);  | 
2525  |  |  | 
2526  |  |   return (len);  | 
2527  |  | #elif defined(SENDFILE_IS_LINUX)  | 
2528  |  |   res = sendfile(dest_fd, source_fd, &offset, chain->off);  | 
2529  | 0  |   if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { | 
2530  |  |     /* if this is EAGAIN or EINTR return 0; otherwise, -1 */  | 
2531  | 0  |     return (0);  | 
2532  | 0  |   }  | 
2533  | 0  |   return (res);  | 
2534  |  | #elif defined(SENDFILE_IS_SOLARIS)  | 
2535  |  |   { | 
2536  |  |     const off_t offset_orig = offset;  | 
2537  |  |     res = sendfile(dest_fd, source_fd, &offset, chain->off);  | 
2538  |  |     if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { | 
2539  |  |       if (offset - offset_orig)  | 
2540  |  |         return offset - offset_orig;  | 
2541  |  |       /* if this is EAGAIN or EINTR and no bytes were  | 
2542  |  |        * written, return 0 */  | 
2543  |  |       return (0);  | 
2544  |  |     }  | 
2545  |  |     return (res);  | 
2546  |  |   }  | 
2547  |  | #endif  | 
2548  | 0  | }  | 
2549  |  | #endif  | 
2550  |  |  | 
2551  |  | int  | 
2552  |  | evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,  | 
2553  |  |     ev_ssize_t howmuch)  | 
2554  | 0  | { | 
2555  | 0  |   int n = -1;  | 
2556  |  | 
  | 
2557  | 0  |   EVBUFFER_LOCK(buffer);  | 
2558  |  | 
  | 
2559  | 0  |   if (buffer->freeze_start) { | 
2560  | 0  |     goto done;  | 
2561  | 0  |   }  | 
2562  |  |  | 
2563  | 0  |   if (howmuch < 0 || (size_t)howmuch > buffer->total_len)  | 
2564  | 0  |     howmuch = buffer->total_len;  | 
2565  |  | 
  | 
2566  | 0  |   if (howmuch > 0) { | 
2567  | 0  | #ifdef USE_SENDFILE  | 
2568  | 0  |     struct evbuffer_chain *chain = buffer->first;  | 
2569  | 0  |     if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))  | 
2570  | 0  |       n = evbuffer_write_sendfile(buffer, fd, howmuch);  | 
2571  | 0  |     else { | 
2572  | 0  | #endif  | 
2573  | 0  | #ifdef USE_IOVEC_IMPL  | 
2574  | 0  |     n = evbuffer_write_iovec(buffer, fd, howmuch);  | 
2575  |  | #elif defined(_WIN32)  | 
2576  |  |     /* XXX(nickm) Don't disable this code until we know if  | 
2577  |  |      * the WSARecv code above works. */  | 
2578  |  |     void *p = evbuffer_pullup(buffer, howmuch);  | 
2579  |  |     EVUTIL_ASSERT(p || !howmuch);  | 
2580  |  |     n = send(fd, p, howmuch, 0);  | 
2581  |  | #else  | 
2582  |  |     void *p = evbuffer_pullup(buffer, howmuch);  | 
2583  |  |     EVUTIL_ASSERT(p || !howmuch);  | 
2584  |  |     n = write(fd, p, howmuch);  | 
2585  |  | #endif  | 
2586  | 0  | #ifdef USE_SENDFILE  | 
2587  | 0  |     }  | 
2588  | 0  | #endif  | 
2589  | 0  |   }  | 
2590  |  | 
  | 
2591  | 0  |   if (n > 0)  | 
2592  | 0  |     evbuffer_drain(buffer, n);  | 
2593  |  | 
  | 
2594  | 0  | done:  | 
2595  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
2596  | 0  |   return (n);  | 
2597  | 0  | }  | 
2598  |  |  | 
2599  |  | int  | 
2600  |  | evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)  | 
2601  | 0  | { | 
2602  | 0  |   return evbuffer_write_atmost(buffer, fd, -1);  | 
2603  | 0  | }  | 
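/*
 * Illustrative usage sketch (not part of the original source): flushing an
 * evbuffer to a socket.  evbuffer_write_atmost() drains whatever it wrote,
 * so on a non-blocking socket the caller just stops when no progress is
 * made and waits for the next writable event.  example_flush() is
 * hypothetical.
 */
#include <event2/buffer.h>
#include <event2/util.h>

static int
example_flush(struct evbuffer *out, evutil_socket_t fd)
{
	while (evbuffer_get_length(out) > 0) {
		int n = evbuffer_write_atmost(out, fd, -1);
		if (n < 0)
			return -1;	/* error or would-block; caller decides */
		if (n == 0)
			break;		/* no progress right now */
	}
	return 0;
}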
2604  |  |  | 
2605  |  | unsigned char *  | 
2606  |  | evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)  | 
2607  | 0  | { | 
2608  | 0  |   unsigned char *search;  | 
2609  | 0  |   struct evbuffer_ptr ptr;  | 
2610  |  | 
  | 
2611  | 0  |   EVBUFFER_LOCK(buffer);  | 
2612  |  | 
  | 
2613  | 0  |   ptr = evbuffer_search(buffer, (const char *)what, len, NULL);  | 
2614  | 0  |   if (ptr.pos < 0) { | 
2615  | 0  |     search = NULL;  | 
2616  | 0  |   } else { | 
2617  | 0  |     search = evbuffer_pullup(buffer, ptr.pos + len);  | 
2618  | 0  |     if (search)  | 
2619  | 0  |       search += ptr.pos;  | 
2620  | 0  |   }  | 
2621  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
2622  | 0  |   return search;  | 
2623  | 0  | }  | 
2624  |  |  | 
2625  |  | /* Subtract <b>howfar</b> from the position of <b>pos</b> within  | 
2626  |  |  * <b>buf</b>. Returns 0 on success, -1 on failure.  | 
2627  |  |  *  | 
2628  |  |  * This isn't exposed yet, because of potential inefficiency issues.  | 
2629  |  |  * Maybe it should be. */  | 
2630  |  | static int  | 
2631  |  | evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,  | 
2632  |  |     size_t howfar)  | 
2633  | 0  | { | 
2634  | 0  |   if (pos->pos < 0)  | 
2635  | 0  |     return -1;  | 
2636  | 0  |   if (howfar > (size_t)pos->pos)  | 
2637  | 0  |     return -1;  | 
2638  | 0  |   if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) { | 
2639  | 0  |     pos->internal_.pos_in_chain -= howfar;  | 
2640  | 0  |     pos->pos -= howfar;  | 
2641  | 0  |     return 0;  | 
2642  | 0  |   } else { | 
2643  | 0  |     const size_t newpos = pos->pos - howfar;  | 
2644  |  |     /* Here's the inefficient part: it walks over the  | 
2645  |  |      * chains until we hit newpos. */  | 
2646  | 0  |     return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);  | 
2647  | 0  |   }  | 
2648  | 0  | }  | 
2649  |  |  | 
2650  |  | int  | 
2651  |  | evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,  | 
2652  |  |     size_t position, enum evbuffer_ptr_how how)  | 
2653  | 0  | { | 
2654  | 0  |   size_t left = position;  | 
2655  | 0  |   struct evbuffer_chain *chain = NULL;  | 
2656  | 0  |   int result = 0;  | 
2657  |  | 
  | 
2658  | 0  |   EVBUFFER_LOCK(buf);  | 
2659  |  | 
  | 
2660  | 0  |   switch (how) { | 
2661  | 0  |   case EVBUFFER_PTR_SET:  | 
2662  | 0  |     chain = buf->first;  | 
2663  | 0  |     pos->pos = position;  | 
2664  | 0  |     position = 0;  | 
2665  | 0  |     break;  | 
2666  | 0  |   case EVBUFFER_PTR_ADD:  | 
2667  |  |     /* this avoids iterating over all previous chains if  | 
2668  |  |        we just want to advance the position */  | 
2669  | 0  |     if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) { | 
2670  | 0  |       EVBUFFER_UNLOCK(buf);  | 
2671  | 0  |       return -1;  | 
2672  | 0  |     }  | 
2673  | 0  |     chain = pos->internal_.chain;  | 
2674  | 0  |     pos->pos += position;  | 
2675  | 0  |     position = pos->internal_.pos_in_chain;  | 
2676  | 0  |     break;  | 
2677  | 0  |   }  | 
2678  |  |  | 
2679  | 0  |   EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);  | 
2680  | 0  |   while (chain && position + left >= chain->off) { | 
2681  | 0  |     left -= chain->off - position;  | 
2682  | 0  |     chain = chain->next;  | 
2683  | 0  |     position = 0;  | 
2684  | 0  |   }  | 
2685  | 0  |   if (chain) { | 
2686  | 0  |     pos->internal_.chain = chain;  | 
2687  | 0  |     pos->internal_.pos_in_chain = position + left;  | 
2688  | 0  |   } else if (left == 0) { | 
2689  |  |     /* The first byte in the (nonexistent) chain after the last chain */  | 
2690  | 0  |     pos->internal_.chain = NULL;  | 
2691  | 0  |     pos->internal_.pos_in_chain = 0;  | 
2692  | 0  |   } else { | 
2693  | 0  |     PTR_NOT_FOUND(pos);  | 
2694  | 0  |     result = -1;  | 
2695  | 0  |   }  | 
2696  |  | 
  | 
2697  | 0  |   EVBUFFER_UNLOCK(buf);  | 
2698  |  | 
  | 
2699  | 0  |   return result;  | 
2700  | 0  | }  | 
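/*
 * Illustrative usage sketch (not part of the original source): positioning
 * an evbuffer_ptr.  EVBUFFER_PTR_SET seeks to an absolute offset from the
 * start of the buffer; EVBUFFER_PTR_ADD advances from the current position
 * without rescanning earlier chains.  example_seek() and its header_len /
 * record_len parameters are hypothetical.
 */
#include <event2/buffer.h>

static int
example_seek(struct evbuffer *buf, struct evbuffer_ptr *ptr,
    size_t header_len, size_t record_len)
{
	/* Jump just past a fixed-size header... */
	if (evbuffer_ptr_set(buf, ptr, header_len, EVBUFFER_PTR_SET) < 0)
		return -1;
	/* ...then step forward one record without restarting at the front. */
	if (evbuffer_ptr_set(buf, ptr, record_len, EVBUFFER_PTR_ADD) < 0)
		return -1;
	return 0;
}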
2701  |  |  | 
2702  |  | /**  | 
2703  |  |    Compare the bytes in buf at position pos to the len bytes in mem.  Return  | 
2704  |  |    less than 0, 0, or greater than 0 as memcmp.  | 
2705  |  |  */  | 
2706  |  | static int  | 
2707  |  | evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,  | 
2708  |  |     const char *mem, size_t len)  | 
2709  | 0  | { | 
2710  | 0  |   struct evbuffer_chain *chain;  | 
2711  | 0  |   size_t position;  | 
2712  | 0  |   int r;  | 
2713  |  | 
  | 
2714  | 0  |   ASSERT_EVBUFFER_LOCKED(buf);  | 
2715  |  | 
  | 
2716  | 0  |   if (pos->pos < 0 ||  | 
2717  | 0  |       EV_SIZE_MAX - len < (size_t)pos->pos ||  | 
2718  | 0  |       pos->pos + len > buf->total_len)  | 
2719  | 0  |     return -1;  | 
2720  |  |  | 
2721  | 0  |   chain = pos->internal_.chain;  | 
2722  | 0  |   position = pos->internal_.pos_in_chain;  | 
2723  | 0  |   while (len && chain) { | 
2724  | 0  |     size_t n_comparable;  | 
2725  | 0  |     if (len + position > chain->off)  | 
2726  | 0  |       n_comparable = chain->off - position;  | 
2727  | 0  |     else  | 
2728  | 0  |       n_comparable = len;  | 
2729  | 0  |     r = memcmp(chain->buffer + chain->misalign + position, mem,  | 
2730  | 0  |         n_comparable);  | 
2731  | 0  |     if (r)  | 
2732  | 0  |       return r;  | 
2733  | 0  |     mem += n_comparable;  | 
2734  | 0  |     len -= n_comparable;  | 
2735  | 0  |     position = 0;  | 
2736  | 0  |     chain = chain->next;  | 
2737  | 0  |   }  | 
2738  |  |  | 
2739  | 0  |   return 0;  | 
2740  | 0  | }  | 
2741  |  |  | 
2742  |  | struct evbuffer_ptr  | 
2743  |  | evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)  | 
2744  | 0  | { | 
2745  | 0  |   return evbuffer_search_range(buffer, what, len, start, NULL);  | 
2746  | 0  | }  | 
2747  |  |  | 
2748  |  | struct evbuffer_ptr  | 
2749  |  | evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)  | 
2750  | 0  | { | 
2751  | 0  |   struct evbuffer_ptr pos;  | 
2752  | 0  |   struct evbuffer_chain *chain, *last_chain = NULL;  | 
2753  | 0  |   const unsigned char *p;  | 
2754  | 0  |   char first;  | 
2755  |  | 
  | 
2756  | 0  |   EVBUFFER_LOCK(buffer);  | 
2757  |  | 
  | 
2758  | 0  |   if (start) { | 
2759  | 0  |     memcpy(&pos, start, sizeof(pos));  | 
2760  | 0  |     chain = pos.internal_.chain;  | 
2761  | 0  |   } else { | 
2762  | 0  |     pos.pos = 0;  | 
2763  | 0  |     chain = pos.internal_.chain = buffer->first;  | 
2764  | 0  |     pos.internal_.pos_in_chain = 0;  | 
2765  | 0  |   }  | 
2766  |  | 
  | 
2767  | 0  |   if (end)  | 
2768  | 0  |     last_chain = end->internal_.chain;  | 
2769  |  | 
  | 
2770  | 0  |   if (!len || len > EV_SSIZE_MAX)  | 
2771  | 0  |     goto done;  | 
2772  |  |  | 
2773  | 0  |   first = what[0];  | 
2774  |  |  | 
2775  | 0  |   while (chain) { | 
2776  | 0  |     const unsigned char *start_at =  | 
2777  | 0  |         chain->buffer + chain->misalign +  | 
2778  | 0  |         pos.internal_.pos_in_chain;  | 
2779  | 0  |     p = memchr(start_at, first,  | 
2780  | 0  |         chain->off - pos.internal_.pos_in_chain);  | 
2781  | 0  |     if (p) { | 
2782  | 0  |       pos.pos += p - start_at;  | 
2783  | 0  |       pos.internal_.pos_in_chain += p - start_at;  | 
2784  | 0  |       if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { | 
2785  | 0  |         if (end && pos.pos + (ev_ssize_t)len > end->pos)  | 
2786  | 0  |           goto not_found;  | 
2787  | 0  |         else  | 
2788  | 0  |           goto done;  | 
2789  | 0  |       }  | 
2790  | 0  |       ++pos.pos;  | 
2791  | 0  |       ++pos.internal_.pos_in_chain;  | 
2792  | 0  |       if (pos.internal_.pos_in_chain == chain->off) { | 
2793  | 0  |         chain = pos.internal_.chain = chain->next;  | 
2794  | 0  |         pos.internal_.pos_in_chain = 0;  | 
2795  | 0  |       }  | 
2796  | 0  |     } else { | 
2797  | 0  |       if (chain == last_chain)  | 
2798  | 0  |         goto not_found;  | 
2799  | 0  |       pos.pos += chain->off - pos.internal_.pos_in_chain;  | 
2800  | 0  |       chain = pos.internal_.chain = chain->next;  | 
2801  | 0  |       pos.internal_.pos_in_chain = 0;  | 
2802  | 0  |     }  | 
2803  | 0  |   }  | 
2804  |  |  | 
2805  | 0  | not_found:  | 
2806  | 0  |   PTR_NOT_FOUND(&pos);  | 
2807  | 0  | done:  | 
2808  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
2809  | 0  |   return pos;  | 
2810  | 0  | }  | 
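For reference, a minimal caller-side sketch of evbuffer_search() (which delegates to evbuffer_search_range() above): it scans the buffer for a byte pattern and returns an evbuffer_ptr whose pos field is -1 when the pattern is absent. The buffer contents and the "\r\n" pattern below are placeholders for illustration only, not part of buffer.c:

#include <event2/buffer.h>
#include <stdio.h>

static void
example_search(void)
{
    struct evbuffer *buf = evbuffer_new();
    struct evbuffer_ptr hit;

    evbuffer_add(buf, "HTTP/1.1 200 OK\r\n", 17);
    hit = evbuffer_search(buf, "\r\n", 2, NULL);
    if (hit.pos == -1)
        printf("pattern not found\n");
    else
        printf("pattern starts at offset %ld\n", (long)hit.pos);
    evbuffer_free(buf);
}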
2811  |  |  | 
2812  |  | int  | 
2813  |  | evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,  | 
2814  |  |     struct evbuffer_ptr *start_at,  | 
2815  |  |     struct evbuffer_iovec *vec, int n_vec)  | 
2816  | 0  | { | 
2817  | 0  |   struct evbuffer_chain *chain;  | 
2818  | 0  |   int idx = 0;  | 
2819  | 0  |   ev_ssize_t len_so_far = 0;  | 
2820  |  |  | 
2821  |  |   /* Avoid locking in trivial edge cases */  | 
2822  | 0  |   if (start_at && start_at->internal_.chain == NULL)  | 
2823  | 0  |     return 0;  | 
2824  |  |  | 
2825  | 0  |   EVBUFFER_LOCK(buffer);  | 
2826  |  |  | 
2827  | 0  |   if (start_at) { | 
2828  | 0  |     chain = start_at->internal_.chain;  | 
2829  | 0  |     len_so_far = chain->off  | 
2830  | 0  |         - start_at->internal_.pos_in_chain;  | 
2831  | 0  |     idx = 1;  | 
2832  | 0  |     if (n_vec > 0) { | 
2833  | 0  |       vec[0].iov_base = (void *)(chain->buffer + chain->misalign  | 
2834  | 0  |           + start_at->internal_.pos_in_chain);  | 
2835  | 0  |       vec[0].iov_len = len_so_far;  | 
2836  | 0  |     }  | 
2837  | 0  |     chain = chain->next;  | 
2838  | 0  |   } else { | 
2839  | 0  |     chain = buffer->first;  | 
2840  | 0  |   }  | 
2841  |  |  | 
2842  | 0  |   if (n_vec == 0 && len < 0) { | 
2843  |  |     /* If no vectors are provided and they asked for "everything",  | 
2844  |  |      * pretend they asked for the actual available amount. */  | 
2845  | 0  |     len = buffer->total_len;  | 
2846  | 0  |     if (start_at) { | 
2847  | 0  |       len -= start_at->pos;  | 
2848  | 0  |     }  | 
2849  | 0  |   }  | 
2850  |  |  | 
2851  | 0  |   while (chain) { | 
2852  | 0  |     if (len >= 0 && len_so_far >= len)  | 
2853  | 0  |       break;  | 
2854  | 0  |     if (idx<n_vec) { | 
2855  | 0  |       vec[idx].iov_base = (void *)(chain->buffer + chain->misalign);  | 
2856  | 0  |       vec[idx].iov_len = chain->off;  | 
2857  | 0  |     } else if (len<0) { | 
2858  | 0  |       break;  | 
2859  | 0  |     }  | 
2860  | 0  |     ++idx;  | 
2861  | 0  |     len_so_far += chain->off;  | 
2862  | 0  |     chain = chain->next;  | 
2863  | 0  |   }  | 
2864  |  |  | 
2865  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
2866  |  |  | 
2867  | 0  |   return idx;  | 
2868  | 0  | }  | 
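A minimal sketch of how a caller might use evbuffer_peek() to inspect buffered data without copying or draining it; the 512-byte limit and the two-element vector are arbitrary choices for the example. Note that the return value is the number of extents needed, which may exceed the number of vectors supplied:

#include <event2/buffer.h>
#include <stdio.h>

static void
example_peek(struct evbuffer *buf)
{
    struct evbuffer_iovec vec[2];
    int n, i;

    /* Ask for up to 512 bytes; only the first 2 extents are filled in. */
    n = evbuffer_peek(buf, 512, NULL, vec, 2);
    for (i = 0; i < n && i < 2; ++i)
        printf("extent %d: %zu bytes at %p\n",
            i, vec[i].iov_len, vec[i].iov_base);
}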
2869  |  |  | 
2870  |  |  | 
2871  |  | int  | 
2872  |  | evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)  | 
2873  | 0  | { | 
2874  | 0  |   char *buffer;  | 
2875  | 0  |   size_t space;  | 
2876  | 0  |   int sz, result = -1;  | 
2877  | 0  |   va_list aq;  | 
2878  | 0  |   struct evbuffer_chain *chain;  | 
2879  |  |  | 
2880  |  |  | 
2881  | 0  |   EVBUFFER_LOCK(buf);  | 
2882  |  |  | 
2883  | 0  |   if (buf->freeze_end) { | 
2884  | 0  |     goto done;  | 
2885  | 0  |   }  | 
2886  |  |  | 
2887  |  |   /* make sure that at least some space is available */  | 
2888  | 0  |   if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)  | 
2889  | 0  |     goto done;  | 
2890  |  |  | 
2891  | 0  |   for (;;) { | 
2892  |  | #if 0  | 
2893  |  |     size_t used = chain->misalign + chain->off;  | 
2894  |  |     buffer = (char *)chain->buffer + chain->misalign + chain->off;  | 
2895  |  |     EVUTIL_ASSERT(chain->buffer_len >= used);  | 
2896  |  |     space = chain->buffer_len - used;  | 
2897  |  | #endif  | 
2898  | 0  |     buffer = (char*) CHAIN_SPACE_PTR(chain);  | 
2899  | 0  |     space = (size_t) CHAIN_SPACE_LEN(chain);  | 
2900  |  |  | 
2901  |  | #ifndef va_copy  | 
2902  |  | #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))  | 
2903  |  | #endif  | 
2904  | 0  |     va_copy(aq, ap);  | 
2905  |  |  | 
2906  | 0  |     sz = evutil_vsnprintf(buffer, space, fmt, aq);  | 
2907  |  |  | 
2908  | 0  |     va_end(aq);  | 
2909  |  |  | 
2910  | 0  |     if (sz < 0)  | 
2911  | 0  |       goto done;  | 
2912  | 0  |     if (INT_MAX >= EVBUFFER_CHAIN_MAX &&  | 
2913  | 0  |         (size_t)sz >= EVBUFFER_CHAIN_MAX)  | 
2914  | 0  |       goto done;  | 
2915  | 0  |     if ((size_t)sz < space) { | 
2916  | 0  |       chain->off += sz;  | 
2917  | 0  |       buf->total_len += sz;  | 
2918  | 0  |       buf->n_add_for_cb += sz;  | 
2919  |  |  | 
2920  | 0  |       advance_last_with_data(buf);  | 
2921  | 0  |       evbuffer_invoke_callbacks_(buf);  | 
2922  | 0  |       result = sz;  | 
2923  | 0  |       goto done;  | 
2924  | 0  |     }  | 
2925  | 0  |     if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)  | 
2926  | 0  |       goto done;  | 
2927  | 0  |   }  | 
2928  |  |   /* NOTREACHED */  | 
2929  |  |  | 
2930  | 0  | done:  | 
2931  | 0  |   EVBUFFER_UNLOCK(buf);  | 
2932  | 0  |   return result;  | 
2933  | 0  | }  | 
2934  |  |  | 
2935  |  | int  | 
2936  |  | evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)  | 
2937  | 0  | { | 
2938  | 0  |   int res = -1;  | 
2939  | 0  |   va_list ap;  | 
2940  |  |  | 
2941  | 0  |   va_start(ap, fmt);  | 
2942  | 0  |   res = evbuffer_add_vprintf(buf, fmt, ap);  | 
2943  | 0  |   va_end(ap);  | 
2944  |  |  | 
2945  | 0  |   return (res);  | 
2946  | 0  | }  | 
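A short caller-side sketch of evbuffer_add_printf(): on success it returns the number of bytes appended, and it returns -1 on error (for example when the end of the buffer is frozen, per the freeze_end check above). The header line formatted here is only an example:

#include <event2/buffer.h>

static int
example_add_header(struct evbuffer *out, size_t body_len)
{
    int n = evbuffer_add_printf(out, "Content-Length: %lu\r\n",
        (unsigned long)body_len);
    return (n < 0) ? -1 : 0;
}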
2947  |  |  | 
2948  |  | int  | 
2949  |  | evbuffer_add_reference(struct evbuffer *outbuf,  | 
2950  |  |     const void *data, size_t datlen,  | 
2951  |  |     evbuffer_ref_cleanup_cb cleanupfn, void *extra)  | 
2952  | 0  | { | 
2953  | 0  |   return evbuffer_add_reference_with_offset(outbuf, data, /* offset= */ 0, datlen, cleanupfn, extra);  | 
2954  | 0  | }  | 
2955  |  |  | 
2956  |  | int  | 
2957  |  | evbuffer_add_reference_with_offset(struct evbuffer *outbuf, const void *data,  | 
2958  |  |   size_t offset, size_t datlen, evbuffer_ref_cleanup_cb cleanupfn,  | 
2959  |  |   void *extra)  | 
2960  | 0  | { | 
2961  | 0  |   struct evbuffer_chain *chain;  | 
2962  | 0  |   struct evbuffer_chain_reference *info;  | 
2963  | 0  |   int result = -1;  | 
2964  |  |  | 
2965  | 0  |   chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));  | 
2966  | 0  |   if (!chain)  | 
2967  | 0  |     return (-1);  | 
2968  | 0  |   chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;  | 
2969  | 0  |   chain->buffer = (unsigned char *)data;  | 
2970  | 0  |   chain->misalign = offset;  | 
2971  | 0  |   chain->buffer_len = offset + datlen;  | 
2972  | 0  |   chain->off = datlen;  | 
2973  |  |  | 
2974  | 0  |   info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);  | 
2975  | 0  |   info->cleanupfn = cleanupfn;  | 
2976  | 0  |   info->extra = extra;  | 
2977  |  |  | 
2978  | 0  |   EVBUFFER_LOCK(outbuf);  | 
2979  | 0  |   if (outbuf->freeze_end) { | 
2980  |  |     /* don't call chain_free; we do not want to actually invoke  | 
2981  |  |      * the cleanup function */  | 
2982  | 0  |     mm_free(chain);  | 
2983  | 0  |     goto done;  | 
2984  | 0  |   }  | 
2985  | 0  |   evbuffer_chain_insert(outbuf, chain);  | 
2986  | 0  |   outbuf->n_add_for_cb += datlen;  | 
2987  |  |  | 
2988  | 0  |   evbuffer_invoke_callbacks_(outbuf);  | 
2989  |  |  | 
2990  | 0  |   result = 0;  | 
2991  | 0  | done:  | 
2992  | 0  |   EVBUFFER_UNLOCK(outbuf);  | 
2993  |  |  | 
2994  | 0  |   return result;  | 
2995  | 0  | }  | 
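A sketch of evbuffer_add_reference() with a cleanup callback: the buffer references the caller's memory instead of copying it, and the callback runs once the last chain that uses the block is released. The strdup()'d payload and the free_block() helper are illustrative only:

#include <event2/buffer.h>
#include <stdlib.h>
#include <string.h>

static void
free_block(const void *data, size_t datalen, void *extra)
{
    (void)datalen;
    (void)extra;
    free((void *)data);
}

static int
example_add_reference(struct evbuffer *out)
{
    char *block = strdup("zero-copy payload");
    if (!block)
        return -1;
    /* Note: per the code above, a failed add does not invoke
     * free_block(), so a production caller should free the block
     * itself when this returns -1. */
    return evbuffer_add_reference(out, block, strlen(block),
        free_block, NULL);
}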
2996  |  |  | 
2997  |  | /* TODO(niels): we may want to automagically convert to mmap, in  | 
2998  |  |  * case evbuffer_remove() or evbuffer_pullup() is being used.  | 
2999  |  |  */  | 
3000  |  | struct evbuffer_file_segment *  | 
3001  |  | evbuffer_file_segment_new(  | 
3002  |  |   int fd, ev_off_t offset, ev_off_t length, unsigned flags)  | 
3003  | 0  | { | 
3004  | 0  |   struct evbuffer_file_segment *seg =  | 
3005  | 0  |       mm_calloc(1, sizeof(struct evbuffer_file_segment));  | 
3006  | 0  |   if (!seg)  | 
3007  | 0  |     return NULL;  | 
3008  | 0  |   seg->refcnt = 1;  | 
3009  | 0  |   seg->fd = fd;  | 
3010  | 0  |   seg->flags = flags;  | 
3011  | 0  |   seg->file_offset = offset;  | 
3012  | 0  |   seg->cleanup_cb = NULL;  | 
3013  | 0  |   seg->cleanup_cb_arg = NULL;  | 
3014  | 0  |   if (length == -1) { | 
3015  | 0  |     length = evutil_fd_filesize(fd);  | 
3016  | 0  |     if (length == -1)  | 
3017  | 0  |       goto err;  | 
3018  | 0  |   }  | 
3019  | 0  |   seg->length = length;  | 
3020  |  |  | 
3021  | 0  |   if (offset < 0 || length < 0 ||  | 
3022  | 0  |       ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||  | 
3023  | 0  |       (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))  | 
3024  | 0  |     goto err;  | 
3025  |  |  | 
3026  | 0  | #if defined(USE_SENDFILE)  | 
3027  | 0  |   if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { | 
3028  | 0  |     seg->can_sendfile = 1;  | 
3029  | 0  |     goto done;  | 
3030  | 0  |   }  | 
3031  | 0  | #endif  | 
3032  |  |  | 
3033  | 0  |   if (evbuffer_file_segment_materialize(seg)<0)  | 
3034  | 0  |     goto err;  | 
3035  |  |  | 
3036  | 0  | #if defined(USE_SENDFILE)  | 
3037  | 0  | done:  | 
3038  | 0  | #endif  | 
3039  | 0  |   if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { | 
3040  | 0  |     EVTHREAD_ALLOC_LOCK(seg->lock, 0);  | 
3041  | 0  |   }  | 
3042  | 0  |   return seg;  | 
3043  | 0  | err:  | 
3044  | 0  |   mm_free(seg);  | 
3045  | 0  |   return NULL;  | 
3046  | 0  | }  | 
3047  |  |  | 
3048  |  | #ifdef EVENT__HAVE_MMAP  | 
3049  |  | static long  | 
3050  |  | get_page_size(void)  | 
3051  | 0  | { | 
3052  |  | #ifdef SC_PAGE_SIZE  | 
3053  |  |   return sysconf(SC_PAGE_SIZE);  | 
3054  |  | #elif defined(_SC_PAGE_SIZE)  | 
3055  | 0  |   return sysconf(_SC_PAGE_SIZE);  | 
3056  |  | #else  | 
3057  |  |   return 1;  | 
3058  |  | #endif  | 
3059  | 0  | }  | 
3060  |  | #endif  | 
3061  |  |  | 
3062  |  | /* Materialize a file segment: map the file into memory where possible (mmap or CreateFileMapping), otherwise read its contents into a newly allocated block.  Returns 0 on success, -1 on failure. */  | 
3063  |  | /* Requires lock */  | 
3064  |  | static int  | 
3065  |  | evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)  | 
3066  | 0  | { | 
3067  | 0  | #if defined(EVENT__HAVE_MMAP) || defined(_WIN32)  | 
3068  | 0  |   const unsigned flags = seg->flags;  | 
3069  | 0  | #endif  | 
3070  | 0  |   const int fd = seg->fd;  | 
3071  | 0  |   const ev_off_t length = seg->length;  | 
3072  | 0  |   const ev_off_t offset = seg->file_offset;  | 
3073  |  |  | 
3074  | 0  |   if (seg->contents || seg->is_mapping)  | 
3075  | 0  |     return 0; /* already materialized */  | 
3076  |  |  | 
3077  | 0  | #if defined(EVENT__HAVE_MMAP)  | 
3078  | 0  |   if (!(flags & EVBUF_FS_DISABLE_MMAP)) { | 
3079  | 0  |     off_t offset_rounded = 0, offset_leftover = 0;  | 
3080  | 0  |     int mmap_flags =  | 
3081  |  | #ifdef MAP_NOCACHE  | 
3082  |  |       MAP_NOCACHE | /* ??? */  | 
3083  |  | #endif  | 
3084  | 0  | #ifdef MAP_FILE  | 
3085  | 0  |       MAP_FILE |  | 
3086  | 0  | #endif  | 
3087  | 0  |       MAP_PRIVATE;  | 
3088  | 0  |     void *mapped;  | 
3089  | 0  |     if (offset) { | 
3090  |  |       /* mmap implementations don't generally like us  | 
3091  |  |        * to have an offset that isn't a multiple of the page size. */  | 
3092  | 0  |       long page_size = get_page_size();  | 
3093  | 0  |       if (page_size == -1)  | 
3094  | 0  |         goto err;  | 
3095  | 0  |       offset_leftover = offset % page_size;  | 
3096  | 0  |       offset_rounded = offset - offset_leftover;  | 
3097  | 0  |     }  | 
3098  | 0  | #if defined(EVENT__HAVE_MMAP64)  | 
3099  | 0  |     mapped = mmap64(NULL, length + offset_leftover,  | 
3100  |  | #else  | 
3101  |  |     mapped = mmap(NULL, length + offset_leftover,  | 
3102  |  | #endif  | 
3103  | 0  |       PROT_READ, mmap_flags, fd, offset_rounded);  | 
3104  | 0  |     if (mapped == MAP_FAILED) { | 
3105  | 0  |       event_warn("%s: mmap(NULL, %zu, %d, %d, %d, %lld) failed", __func__, | 
3106  | 0  |         (size_t)(length + offset_leftover), PROT_READ, mmap_flags, fd,  | 
3107  | 0  |         (long long)offset_rounded);  | 
3108  | 0  |     } else { | 
3109  | 0  |       seg->mapping = mapped;  | 
3110  | 0  |       seg->contents = (char*)mapped+offset_leftover;  | 
3111  | 0  |       seg->mmap_offset = 0;  | 
3112  | 0  |       seg->is_mapping = 1;  | 
3113  | 0  |       goto done;  | 
3114  | 0  |     }  | 
3115  | 0  |   }  | 
3116  | 0  | #endif  | 
3117  |  | #ifdef _WIN32  | 
3118  |  |   if (!(flags & EVBUF_FS_DISABLE_MMAP)) { | 
3119  |  |     intptr_t h = _get_osfhandle(fd);  | 
3120  |  |     HANDLE m;  | 
3121  |  |     ev_uint64_t total_size = length+offset;  | 
3122  |  |     if ((HANDLE)h == INVALID_HANDLE_VALUE)  | 
3123  |  |       goto err;  | 
3124  |  |     m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,  | 
3125  |  |         (total_size >> 32), total_size & 0xfffffffful,  | 
3126  |  |         NULL);  | 
3127  |  |     if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */ | 
3128  |  |       seg->mapping_handle = m;  | 
3129  |  |       seg->mmap_offset = offset;  | 
3130  |  |       seg->is_mapping = 1;  | 
3131  |  |       goto done;  | 
3132  |  |     }  | 
3133  |  |   }  | 
3134  |  | #endif  | 
3135  | 0  |   { | 
3136  | 0  |     ev_off_t read_so_far = 0;  | 
3137  | 0  |     ev_ssize_t n = 0;  | 
3138  | 0  |     char *mem;  | 
3139  |  | #ifndef EVENT__HAVE_PREAD  | 
3140  |  | #ifdef _WIN32  | 
3141  |  | #ifndef lseek  | 
3142  |  | #define lseek _lseeki64  | 
3143  |  | #endif  | 
3144  |  | #endif  | 
3145  |  |     ev_off_t start_pos = lseek(fd, 0, SEEK_CUR);  | 
3146  |  |     ev_off_t pos;  | 
3147  |  |     int e;  | 
3148  |  | #endif /* no pread() */  | 
3149  | 0  |     if (!(mem = mm_malloc(length)))  | 
3150  | 0  |       goto err;  | 
3151  | 0  | #ifdef EVENT__HAVE_PREAD  | 
3152  | 0  |     while (read_so_far < length) { | 
3153  | 0  |       n = pread(fd, mem + read_so_far, length - read_so_far,  | 
3154  | 0  |           offset + read_so_far);  | 
3155  | 0  |       if (n <= 0)  | 
3156  | 0  |         break;  | 
3157  | 0  |       read_so_far += n;  | 
3158  | 0  |     }  | 
3159  | 0  |     if (n < 0 || (n == 0 && length > read_so_far)) { | 
3160  | 0  |       mm_free(mem);  | 
3161  | 0  |       goto err;  | 
3162  | 0  |     }  | 
3163  |  | #else /* fallback to seek() and read() */  | 
3164  |  |     if (start_pos < 0) { | 
3165  |  |       mm_free(mem);  | 
3166  |  |       goto err;  | 
3167  |  |     }  | 
3168  |  |     if (lseek(fd, offset, SEEK_SET) < 0) { | 
3169  |  |       mm_free(mem);  | 
3170  |  |       goto err;  | 
3171  |  |     }  | 
3172  |  |     while (read_so_far < length) { | 
3173  |  |       n = read(fd, mem+read_so_far, length-read_so_far);  | 
3174  |  |       if (n <= 0)  | 
3175  |  |         break;  | 
3176  |  |       read_so_far += n;  | 
3177  |  |     }  | 
3178  |  |  | 
3179  |  |     e = errno;  | 
3180  |  |     pos = lseek(fd, start_pos, SEEK_SET);  | 
3181  |  |     if (n < 0 || (n == 0 && length > read_so_far)) { | 
3182  |  |       mm_free(mem);  | 
3183  |  |       errno = e;  | 
3184  |  |       goto err;  | 
3185  |  |     } else if (pos < 0) { | 
3186  |  |       mm_free(mem);  | 
3187  |  |       goto err;  | 
3188  |  |     }  | 
3189  |  | #endif /* pread */  | 
3190  |  |  | 
3191  | 0  |     seg->contents = mem;  | 
3192  | 0  |   }  | 
3193  | 0  | #if defined(EVENT__HAVE_MMAP) || defined(_WIN32)  | 
3194  | 0  | done:  | 
3195  | 0  | #endif  | 
3196  | 0  |   return 0;  | 
3197  | 0  | err:  | 
3198  | 0  |   return -1;  | 
3199  | 0  | }  | 
3200  |  |  | 
3201  |  | void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,  | 
3202  |  |   evbuffer_file_segment_cleanup_cb cb, void* arg)  | 
3203  | 0  | { | 
3204  | 0  |   EVUTIL_ASSERT(seg->refcnt > 0);  | 
3205  | 0  |   seg->cleanup_cb = cb;  | 
3206  | 0  |   seg->cleanup_cb_arg = arg;  | 
3207  | 0  | }  | 
3208  |  |  | 
3209  |  | void  | 
3210  |  | evbuffer_file_segment_free(struct evbuffer_file_segment *seg)  | 
3211  | 0  | { | 
3212  | 0  |   int refcnt;  | 
3213  | 0  |   EVLOCK_LOCK(seg->lock, 0);  | 
3214  | 0  |   refcnt = --seg->refcnt;  | 
3215  | 0  |   EVLOCK_UNLOCK(seg->lock, 0);  | 
3216  | 0  |   if (refcnt > 0)  | 
3217  | 0  |     return;  | 
3218  | 0  |   EVUTIL_ASSERT(refcnt == 0);  | 
3219  |  |  | 
3220  | 0  |   if (seg->is_mapping) { | 
3221  |  | #ifdef _WIN32  | 
3222  |  |     CloseHandle(seg->mapping_handle);  | 
3223  |  | #elif defined (EVENT__HAVE_MMAP)  | 
3224  |  |     off_t offset_leftover;  | 
3225  | 0  |     offset_leftover = seg->file_offset % get_page_size();  | 
3226  | 0  |     if (munmap(seg->mapping, seg->length + offset_leftover) == -1)  | 
3227  | 0  |       event_warn("%s: munmap failed", __func__); | 
3228  | 0  | #endif  | 
3229  | 0  |   } else if (seg->contents) { | 
3230  | 0  |     mm_free(seg->contents);  | 
3231  | 0  |   }  | 
3232  |  |  | 
3233  | 0  |   if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { | 
3234  | 0  |     close(seg->fd);  | 
3235  | 0  |   }  | 
3236  |  |  | 
3237  | 0  |   if (seg->cleanup_cb) { | 
3238  | 0  |     (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,  | 
3239  | 0  |         seg->flags, seg->cleanup_cb_arg);  | 
3240  | 0  |     seg->cleanup_cb = NULL;  | 
3241  | 0  |     seg->cleanup_cb_arg = NULL;  | 
3242  | 0  |   }  | 
3243  |  |  | 
3244  | 0  |   EVTHREAD_FREE_LOCK(seg->lock, 0);  | 
3245  | 0  |   mm_free(seg);  | 
3246  | 0  | }  | 
3247  |  |  | 
3248  |  | int  | 
3249  |  | evbuffer_add_file_segment(struct evbuffer *buf,  | 
3250  |  |     struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length)  | 
3251  | 0  | { | 
3252  | 0  |   struct evbuffer_chain *chain;  | 
3253  | 0  |   struct evbuffer_chain_file_segment *extra;  | 
3254  | 0  |   int can_use_sendfile = 0;  | 
3255  |  |  | 
3256  | 0  |   EVBUFFER_LOCK(buf);  | 
3257  | 0  |   EVLOCK_LOCK(seg->lock, 0);  | 
3258  | 0  |   if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { | 
3259  | 0  |     can_use_sendfile = 1;  | 
3260  | 0  |   } else { | 
3261  | 0  |     if (evbuffer_file_segment_materialize(seg)<0) { | 
3262  | 0  |       EVLOCK_UNLOCK(seg->lock, 0);  | 
3263  | 0  |       goto err;  | 
3264  | 0  |     }  | 
3265  | 0  |   }  | 
3266  | 0  |   EVLOCK_UNLOCK(seg->lock, 0);  | 
3267  |  |  | 
3268  | 0  |   if (buf->freeze_end)  | 
3269  | 0  |     goto err;  | 
3270  |  |  | 
3271  | 0  |   if (length < 0) { | 
3272  | 0  |     if (offset > seg->length)  | 
3273  | 0  |       goto err;  | 
3274  | 0  |     length = seg->length - offset;  | 
3275  | 0  |   }  | 
3276  |  |  | 
3277  |  |   /* Can we actually add this? */  | 
3278  | 0  |   if (offset+length > seg->length)  | 
3279  | 0  |     goto err;  | 
3280  |  |  | 
3281  | 0  |   chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment));  | 
3282  | 0  |   if (!chain)  | 
3283  | 0  |     goto err;  | 
3284  | 0  |   extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain);  | 
3285  |  |  | 
3286  | 0  |   chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT;  | 
3287  | 0  |   if (can_use_sendfile && seg->can_sendfile) { | 
3288  | 0  |     chain->flags |= EVBUFFER_SENDFILE;  | 
3289  | 0  |     chain->misalign = seg->file_offset + offset;  | 
3290  | 0  |     chain->off = length;  | 
3291  | 0  |     chain->buffer_len = chain->misalign + length;  | 
3292  | 0  |   } else if (seg->is_mapping) { | 
3293  |  | #ifdef _WIN32  | 
3294  |  |     ev_uint64_t total_offset = seg->mmap_offset+offset;  | 
3295  |  |     ev_uint64_t offset_rounded=0, offset_remaining=0;  | 
3296  |  |     LPVOID data;  | 
3297  |  |     if (total_offset) { | 
3298  |  |       SYSTEM_INFO si;  | 
3299  |  |       memset(&si, 0, sizeof(si)); /* cargo cult */  | 
3300  |  |       GetSystemInfo(&si);  | 
3301  |  |       offset_remaining = total_offset % si.dwAllocationGranularity;  | 
3302  |  |       offset_rounded = total_offset - offset_remaining;  | 
3303  |  |     }  | 
3304  |  |     data = MapViewOfFile(  | 
3305  |  |       seg->mapping_handle,  | 
3306  |  |       FILE_MAP_READ,  | 
3307  |  |       offset_rounded >> 32,  | 
3308  |  |       offset_rounded & 0xfffffffful,  | 
3309  |  |       length + offset_remaining);  | 
3310  |  |     if (data == NULL) { | 
3311  |  |       mm_free(chain);  | 
3312  |  |       goto err;  | 
3313  |  |     }  | 
3314  |  |     chain->buffer = (unsigned char*) data;  | 
3315  |  |     chain->buffer_len = length+offset_remaining;  | 
3316  |  |     chain->misalign = offset_remaining;  | 
3317  |  |     chain->off = length;  | 
3318  |  | #else  | 
3319  | 0  |     chain->buffer = (unsigned char*)(seg->contents + offset);  | 
3320  | 0  |     chain->buffer_len = length;  | 
3321  | 0  |     chain->off = length;  | 
3322  | 0  | #endif  | 
3323  | 0  |   } else { | 
3324  | 0  |     chain->buffer = (unsigned char*)(seg->contents + offset);  | 
3325  | 0  |     chain->buffer_len = length;  | 
3326  | 0  |     chain->off = length;  | 
3327  | 0  |   }  | 
3328  |  |  | 
3329  | 0  |   EVLOCK_LOCK(seg->lock, 0);  | 
3330  | 0  |   ++seg->refcnt;  | 
3331  | 0  |   EVLOCK_UNLOCK(seg->lock, 0);  | 
3332  | 0  |   extra->segment = seg;  | 
3333  | 0  |   buf->n_add_for_cb += length;  | 
3334  | 0  |   evbuffer_chain_insert(buf, chain);  | 
3335  |  |  | 
3336  | 0  |   evbuffer_invoke_callbacks_(buf);  | 
3337  |  |  | 
3338  | 0  |   EVBUFFER_UNLOCK(buf);  | 
3339  |  |  | 
3340  | 0  |   return 0;  | 
3341  | 0  | err:  | 
3342  | 0  |   EVBUFFER_UNLOCK(buf);  | 
3343  | 0  |   evbuffer_file_segment_free(seg); /* Lowers the refcount */  | 
3344  | 0  |   return -1;  | 
3345  | 0  | }  | 
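A sketch of the file-segment API as implemented above: a segment created with evbuffer_file_segment_new() can be added to more than one evbuffer, each successful add taking its own reference, and the creator drops its reference with evbuffer_file_segment_free(). The open() call and the file path are placeholders for the example:

#include <event2/buffer.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_share_segment(struct evbuffer *a, struct evbuffer *b, const char *path)
{
    struct evbuffer_file_segment *seg;
    int fd = open(path, O_RDONLY);

    if (fd < 0)
        return -1;
    /* EVBUF_FS_CLOSE_ON_FREE hands ownership of fd to the segment. */
    seg = evbuffer_file_segment_new(fd, 0, -1, EVBUF_FS_CLOSE_ON_FREE);
    if (!seg) {
        close(fd); /* the err: path above does not close the descriptor */
        return -1;
    }
    if (evbuffer_add_file_segment(a, seg, 0, -1) < 0 ||
        evbuffer_add_file_segment(b, seg, 0, -1) < 0) {
        /* A failed add has already dropped one reference (see the err:
         * path above), so do not free the segment again here. */
        return -1;
    }
    evbuffer_file_segment_free(seg); /* drop the creator's reference */
    return 0;
}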
3346  |  |  | 
3347  |  | int  | 
3348  |  | evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length)  | 
3349  | 0  | { | 
3350  | 0  |   struct evbuffer_file_segment *seg;  | 
3351  | 0  |   unsigned flags = EVBUF_FS_CLOSE_ON_FREE;  | 
3352  | 0  |   int r;  | 
3353  |  |  | 
3354  | 0  |   seg = evbuffer_file_segment_new(fd, offset, length, flags);  | 
3355  | 0  |   if (!seg)  | 
3356  | 0  |     return -1;  | 
3357  | 0  |   r = evbuffer_add_file_segment(buf, seg, 0, length);  | 
3358  | 0  |   if (r == 0)  | 
3359  | 0  |     evbuffer_file_segment_free(seg);  | 
3360  | 0  |   return r;  | 
3361  | 0  | }  | 
3362  |  |  | 
3363  |  | int  | 
3364  |  | evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)  | 
3365  | 0  | { | 
3366  | 0  |   EVBUFFER_LOCK(buffer);  | 
3367  |  |  | 
3368  | 0  |   if (!LIST_EMPTY(&buffer->callbacks))  | 
3369  | 0  |     evbuffer_remove_all_callbacks(buffer);  | 
3370  |  |  | 
3371  | 0  |   if (cb) { | 
3372  | 0  |     struct evbuffer_cb_entry *ent =  | 
3373  | 0  |         evbuffer_add_cb(buffer, NULL, cbarg);  | 
3374  | 0  |     if (!ent) { | 
3375  | 0  |       EVBUFFER_UNLOCK(buffer);  | 
3376  | 0  |       return -1;  | 
3377  | 0  |     }  | 
3378  | 0  |     ent->cb.cb_obsolete = cb;  | 
3379  | 0  |     ent->flags |= EVBUFFER_CB_OBSOLETE;  | 
3380  | 0  |   }  | 
3381  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3382  | 0  |   return 0;  | 
3383  | 0  | }  | 
3384  |  |  | 
3385  |  | struct evbuffer_cb_entry *  | 
3386  |  | evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)  | 
3387  | 0  | { | 
3388  | 0  |   struct evbuffer_cb_entry *e;  | 
3389  | 0  |   if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))  | 
3390  | 0  |     return NULL;  | 
3391  | 0  |   EVBUFFER_LOCK(buffer);  | 
3392  | 0  |   e->cb.cb_func = cb;  | 
3393  | 0  |   e->cbarg = cbarg;  | 
3394  | 0  |   e->flags = EVBUFFER_CB_ENABLED;  | 
3395  | 0  |   LIST_INSERT_HEAD(&buffer->callbacks, e, next);  | 
3396  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3397  | 0  |   return e;  | 
3398  | 0  | }  | 
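A sketch of registering an evbuffer_cb_func with evbuffer_add_cb(); the callback below simply logs how much data was added or drained, and the logging itself is an arbitrary choice for the example:

#include <event2/buffer.h>
#include <stdio.h>

static void
log_changes(struct evbuffer *buffer, const struct evbuffer_cb_info *info,
    void *arg)
{
    (void)buffer;
    (void)arg;
    if (info->n_added || info->n_deleted)
        fprintf(stderr, "evbuffer: +%zu -%zu bytes\n",
            (size_t)info->n_added, (size_t)info->n_deleted);
}

static void
example_watch(struct evbuffer *buf)
{
    evbuffer_add_cb(buf, log_changes, NULL);
    evbuffer_add(buf, "hello", 5); /* normally invokes the callback */
}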
3399  |  |  | 
3400  |  | int  | 
3401  |  | evbuffer_remove_cb_entry(struct evbuffer *buffer,  | 
3402  |  |        struct evbuffer_cb_entry *ent)  | 
3403  | 0  | { | 
3404  | 0  |   EVBUFFER_LOCK(buffer);  | 
3405  | 0  |   LIST_REMOVE(ent, next);  | 
3406  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3407  | 0  |   mm_free(ent);  | 
3408  | 0  |   return 0;  | 
3409  | 0  | }  | 
3410  |  |  | 
3411  |  | int  | 
3412  |  | evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)  | 
3413  | 0  | { | 
3414  | 0  |   struct evbuffer_cb_entry *cbent;  | 
3415  | 0  |   int result = -1;  | 
3416  | 0  |   EVBUFFER_LOCK(buffer);  | 
3417  | 0  |   LIST_FOREACH(cbent, &buffer->callbacks, next) { | 
3418  | 0  |     if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { | 
3419  | 0  |       result = evbuffer_remove_cb_entry(buffer, cbent);  | 
3420  | 0  |       goto done;  | 
3421  | 0  |     }  | 
3422  | 0  |   }  | 
3423  | 0  | done:  | 
3424  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3425  | 0  |   return result;  | 
3426  | 0  | }  | 
3427  |  |  | 
3428  |  | int  | 
3429  |  | evbuffer_cb_set_flags(struct evbuffer *buffer,  | 
3430  |  |           struct evbuffer_cb_entry *cb, ev_uint32_t flags)  | 
3431  | 0  | { | 
3432  |  |   /* the user isn't allowed to mess with these. */  | 
3433  | 0  |   flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;  | 
3434  | 0  |   EVBUFFER_LOCK(buffer);  | 
3435  | 0  |   cb->flags |= flags;  | 
3436  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3437  | 0  |   return 0;  | 
3438  | 0  | }  | 
3439  |  |  | 
3440  |  | int  | 
3441  |  | evbuffer_cb_clear_flags(struct evbuffer *buffer,  | 
3442  |  |           struct evbuffer_cb_entry *cb, ev_uint32_t flags)  | 
3443  | 0  | { | 
3444  |  |   /* the user isn't allowed to mess with these. */  | 
3445  | 0  |   flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;  | 
3446  | 0  |   EVBUFFER_LOCK(buffer);  | 
3447  | 0  |   cb->flags &= ~flags;  | 
3448  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3449  | 0  |   return 0;  | 
3450  | 0  | }  | 
3451  |  |  | 
3452  |  | int  | 
3453  |  | evbuffer_freeze(struct evbuffer *buffer, int start)  | 
3454  | 0  | { | 
3455  | 0  |   EVBUFFER_LOCK(buffer);  | 
3456  | 0  |   if (start)  | 
3457  | 0  |     buffer->freeze_start = 1;  | 
3458  | 0  |   else  | 
3459  | 0  |     buffer->freeze_end = 1;  | 
3460  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3461  | 0  |   return 0;  | 
3462  | 0  | }  | 
3463  |  |  | 
3464  |  | int  | 
3465  |  | evbuffer_unfreeze(struct evbuffer *buffer, int start)  | 
3466  | 0  | { | 
3467  | 0  |   EVBUFFER_LOCK(buffer);  | 
3468  | 0  |   if (start)  | 
3469  | 0  |     buffer->freeze_start = 0;  | 
3470  | 0  |   else  | 
3471  | 0  |     buffer->freeze_end = 0;  | 
3472  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3473  | 0  |   return 0;  | 
3474  | 0  | }  | 
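A small sketch of the freeze API: freezing the end of a buffer (start == 0) makes append operations such as evbuffer_add() fail until the buffer is unfrozen, while freezing the front (start != 0) blocks drains instead. The one-byte payload below is a placeholder:

#include <event2/buffer.h>

static void
example_freeze(struct evbuffer *out)
{
    evbuffer_freeze(out, 0);          /* no more data may be appended */
    if (evbuffer_add(out, "x", 1) < 0) {
        /* expected: the end of the buffer is frozen */
    }
    evbuffer_unfreeze(out, 0);        /* appends are allowed again */
    evbuffer_add(out, "x", 1);
}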
3475  |  |  | 
3476  |  | #if 0  | 
3477  |  | void  | 
3478  |  | evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)  | 
3479  |  | { | 
3480  |  |   if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { | 
3481  |  |     cb->size_before_suspend = evbuffer_get_length(buffer);  | 
3482  |  |     cb->flags |= EVBUFFER_CB_SUSPENDED;  | 
3483  |  |   }  | 
3484  |  | }  | 
3485  |  |  | 
3486  |  | void  | 
3487  |  | evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)  | 
3488  |  | { | 
3489  |  |   if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { | 
3490  |  |     unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);  | 
3491  |  |     size_t sz = cb->size_before_suspend;  | 
3492  |  |     cb->flags &= ~(EVBUFFER_CB_SUSPENDED|  | 
3493  |  |              EVBUFFER_CB_CALL_ON_UNSUSPEND);  | 
3494  |  |     cb->size_before_suspend = 0;  | 
3495  |  |     if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { | 
3496  |  |       cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);  | 
3497  |  |     }  | 
3498  |  |   }  | 
3499  |  | }  | 
3500  |  | #endif  | 
3501  |  |  | 
3502  |  | int  | 
3503  |  | evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs,  | 
3504  |  |     int max_cbs)  | 
3505  | 0  | { | 
3506  | 0  |   int r = 0;  | 
3507  | 0  |   EVBUFFER_LOCK(buffer);  | 
3508  | 0  |   if (buffer->deferred_cbs) { | 
3509  | 0  |     if (max_cbs < 1) { | 
3510  | 0  |       r = -1;  | 
3511  | 0  |       goto done;  | 
3512  | 0  |     }  | 
3513  | 0  |     cbs[0] = &buffer->deferred;  | 
3514  | 0  |     r = 1;  | 
3515  | 0  |   }  | 
3516  | 0  | done:  | 
3517  | 0  |   EVBUFFER_UNLOCK(buffer);  | 
3518  | 0  |   return r;  | 
3519  | 0  | }  |