/src/brpc/src/butil/iobuf.h
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | // iobuf - A non-contiguous zero-copy buffer |
19 | | |
20 | | // Date: Thu Nov 22 13:57:56 CST 2012 |
21 | | |
22 | | #ifndef BUTIL_IOBUF_H |
23 | | #define BUTIL_IOBUF_H |
24 | | |
25 | | #include <sys/uio.h> // iovec |
26 | | #include <stdint.h> // uint32_t |
27 | | #include <functional> |
28 | | #include <string> // std::string |
29 | | #include <ostream> // std::ostream |
30 | | #include <google/protobuf/io/zero_copy_stream.h> // ZeroCopyInputStream |
31 | | #include "butil/strings/string_piece.h" // butil::StringPiece |
32 | | #include "butil/third_party/snappy/snappy-sinksource.h" |
33 | | #include "butil/zero_copy_stream_as_streambuf.h" |
34 | | #include "butil/macros.h" |
35 | | #include "butil/reader_writer.h" |
36 | | #include "butil/binary_printer.h" |
37 | | |
38 | | // For IOBuf::appendv(const const_iovec*, size_t). The only difference between |
39 | | // this struct and iovec (defined in sys/uio.h) is that iov_base is |
40 | | // `const void*', which can be assigned from const pointers without error. |
41 | | extern "C" { |
42 | | struct const_iovec { |
43 | | const void* iov_base; |
44 | | size_t iov_len; |
45 | | }; |
46 | | #ifndef USE_MESALINK |
47 | | struct ssl_st; |
48 | | #else |
49 | | #define ssl_st MESALINK_SSL |
50 | | #endif |
51 | | } |
52 | | |
53 | | namespace butil { |
54 | | |
55 | | // IOBuf is a non-contiguous buffer that can be cut and combined w/o copying |
56 | | // payload. It can be read from or flushed into file descriptors as well. |
57 | | // IOBuf is [thread-compatible]. Namely, using different IOBufs in different |
58 | | // threads simultaneously is safe, and reading a static IOBuf from different |
59 | | // threads is safe as well. |
60 | | // IOBuf is [NOT thread-safe]. Modifying the same IOBuf from different threads |
61 | | // simultaneously is unsafe and likely to crash. |
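| | // A minimal usage sketch (editor's addition, using only methods declared |
| | // below); the contents are illustrative: |
| | //   butil::IOBuf buf; |
| | //   buf.append("hello ");              // copies the bytes in |
| | //   buf.append(std::string("world")); |
| | //   butil::IOBuf prefix; |
| | //   buf.cutn(&prefix, 5);              // zero-copy: blocks are shared |
| | //   std::string rest = buf.to_string();  // " world" |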
62 | | class IOBuf { |
63 | | friend class IOBufAsZeroCopyInputStream; |
64 | | friend class IOBufAsZeroCopyOutputStream; |
65 | | friend class IOBufBytesIterator; |
66 | | friend class IOBufCutter; |
67 | | public: |
68 | | static const size_t DEFAULT_BLOCK_SIZE = 8192; |
69 | | static const size_t INITIAL_CAP = 32; // must be power of 2 |
70 | | |
71 | | struct Block; |
72 | | |
73 | | // can't directly use `struct iovec' here because we also need to access the |
74 | | // reference counter(nshared) in Block* |
75 | | struct BlockRef { |
76 | | // NOTICE: first bit of `offset' is shared with BigView::start |
77 | | uint32_t offset; |
78 | | uint32_t length; |
79 | | Block* block; |
80 | | }; |
81 | | |
82 | | // IOBuf is essentially a tiny queue of BlockRefs. |
83 | | struct SmallView { |
84 | | BlockRef refs[2]; |
85 | | }; |
86 | | |
87 | | struct BigView { |
88 | | int32_t magic; |
89 | | uint32_t start; |
90 | | BlockRef* refs; |
91 | | uint32_t nref; |
92 | | uint32_t cap_mask; |
93 | | size_t nbytes; |
94 | | |
95 | | const BlockRef& ref_at(uint32_t i) const |
96 | 0 | { return refs[(start + i) & cap_mask]; } |
97 | | |
98 | | BlockRef& ref_at(uint32_t i) |
99 | 9.30k | { return refs[(start + i) & cap_mask]; } |
100 | | |
101 | 2.56k | uint32_t capacity() const { return cap_mask + 1; } |
102 | | }; |
103 | | |
104 | | struct Movable { |
105 | 0 | explicit Movable(IOBuf& v) : _v(&v) { } |
106 | 0 | IOBuf& value() const { return *_v; } |
107 | | private: |
108 | | IOBuf *_v; |
109 | | }; |
110 | | |
111 | | typedef uint64_t Area; |
112 | | static const Area INVALID_AREA = 0; |
113 | | |
114 | | IOBuf(); |
115 | | IOBuf(const IOBuf&); |
116 | | IOBuf(const Movable&); |
117 | 17.5k | ~IOBuf() { clear(); } |
118 | | void operator=(const IOBuf&); |
119 | | void operator=(const Movable&); |
120 | | void operator=(const char*); |
121 | | void operator=(const std::string&); |
122 | | |
123 | | // Exchange internal fields with another IOBuf. |
124 | | void swap(IOBuf&); |
125 | | |
126 | | // Pop n bytes from front side |
127 | | // If n == 0, nothing popped; if n >= length(), all bytes are popped |
128 | | // Returns bytes popped. |
129 | | size_t pop_front(size_t n); |
130 | | |
131 | | // Pop n bytes from back side |
132 | | // If n == 0, nothing popped; if n >= length(), all bytes are popped |
133 | | // Returns bytes popped. |
134 | | size_t pop_back(size_t n); |
135 | | |
136 | | // Cut off n bytes from front side and APPEND to `out' |
137 | | // If n == 0, nothing cut; if n >= length(), all bytes are cut |
138 | | // Returns bytes cut. |
139 | | size_t cutn(IOBuf* out, size_t n); |
140 | | size_t cutn(void* out, size_t n); |
141 | | size_t cutn(std::string* out, size_t n); |
142 | | // Cut off 1 byte from the front side and set to *c |
143 | | // Return true on cut, false otherwise. |
144 | | bool cut1(void* c); |
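| | // Example (editor's sketch) of cutting a length-prefixed message out of a |
| | // hypothetical `source' IOBuf; assumes a 4-byte big-endian prefix and |
| | // ntohl() from <arpa/inet.h>: |
| | //   uint32_t be_len = 0; |
| | //   if (source.cutn(&be_len, sizeof(be_len)) == sizeof(be_len)) { |
| | //       butil::IOBuf body; |
| | //       source.cutn(&body, ntohl(be_len));  // body shares the blocks |
| | //   } |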
145 | | |
146 | | // Cut from the front side until the data matches `delim', and append the |
147 | | // bytes before the matched characters to `out'. |
148 | | // Returns 0 on success, -1 when there's no match (including empty `delim') |
149 | | // or other errors. |
150 | | int cut_until(IOBuf* out, char const* delim); |
151 | | |
152 | | // std::string version, `delim' could be binary |
153 | | int cut_until(IOBuf* out, const std::string& delim); |
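| | // Example (editor's sketch): cutting one CRLF-terminated line out of a |
| | // hypothetical `buf': |
| | //   butil::IOBuf line; |
| | //   if (buf.cut_until(&line, "\r\n") == 0) { |
| | //       // `line' holds the bytes before the first "\r\n" |
| | //   } |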
154 | | |
155 | | // Cut at most `size_hint' bytes(approximately) into the writer |
156 | | // Returns bytes cut on success, -1 otherwise and errno is set. |
157 | | ssize_t cut_into_writer(IWriter* writer, size_t size_hint = 1024*1024); |
158 | | |
159 | | // Cut at most `size_hint' bytes(approximately) into the file descriptor |
160 | | // Returns bytes cut on success, -1 otherwise and errno is set. |
161 | | ssize_t cut_into_file_descriptor(int fd, size_t size_hint = 1024*1024); |
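| | // Example (editor's sketch): flushing a whole IOBuf into a writable fd; |
| | // `fd' is a hypothetical non-blocking descriptor and the EAGAIN handling |
| | // is only indicative: |
| | //   while (!buf.empty()) { |
| | //       ssize_t nw = buf.cut_into_file_descriptor(fd); |
| | //       if (nw < 0) { |
| | //           if (errno == EAGAIN) continue;   // or wait for writability |
| | //           break;                           // handle the error |
| | //       } |
| | //   } |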
162 | | |
163 | | // Cut at most `size_hint' bytes(approximately) into the file descriptor at |
164 | | // a given offset(from the start of the file). The file offset is not changed. |
165 | | // If `offset' is negative, does exactly what cut_into_file_descriptor does. |
166 | | // Returns bytes cut on success, -1 otherwise and errno is set. |
167 | | // |
168 | | // NOTE: POSIX requires that opening a file with the O_APPEND flag shall |
169 | | // have no effect on where pwrite() writes. However, on Linux, if |fd| is |
170 | | // open with O_APPEND, pwrite() appends data to the end of the file, |
171 | | // regardless of the value of |offset|. |
172 | | ssize_t pcut_into_file_descriptor(int fd, off_t offset /*NOTE*/, |
173 | | size_t size_hint = 1024*1024); |
174 | | |
175 | | // Cut into SSL channel `ssl'. Returns what `SSL_write' returns |
176 | | // and the ssl error code will be filled into `ssl_error' |
177 | | ssize_t cut_into_SSL_channel(struct ssl_st* ssl, int* ssl_error); |
178 | | |
179 | | // Cut `count' number of `pieces' into the writer. |
180 | | // Returns bytes cut on success, -1 otherwise and errno is set. |
181 | | static ssize_t cut_multiple_into_writer( |
182 | | IWriter* writer, IOBuf* const* pieces, size_t count); |
183 | | |
184 | | // Cut `count' number of `pieces' into the file descriptor. |
185 | | // Returns bytes cut on success, -1 otherwise and errno is set. |
186 | | static ssize_t cut_multiple_into_file_descriptor( |
187 | | int fd, IOBuf* const* pieces, size_t count); |
188 | | |
189 | | // Cut `count' number of `pieces' into file descriptor `fd' at a given |
190 | | // offset. The file offset is not changed. |
191 | | // If `offset' is negative, does exactly what cut_multiple_into_file_descriptor |
192 | | // does. |
193 | | // Read NOTE of pcut_into_file_descriptor. |
194 | | // Returns bytes cut on success, -1 otherwise and errno is set. |
195 | | static ssize_t pcut_multiple_into_file_descriptor( |
196 | | int fd, off_t offset, IOBuf* const* pieces, size_t count); |
197 | | |
198 | | // Cut `count' number of `pieces' into SSL channel `ssl'. |
199 | | // Returns bytes cut on success, -1 otherwise and errno is set. |
200 | | static ssize_t cut_multiple_into_SSL_channel( |
201 | | struct ssl_st* ssl, IOBuf* const* pieces, size_t count, int* ssl_error); |
202 | | |
203 | | // Append another IOBuf to back side, payload of the IOBuf is shared |
204 | | // rather than copied. |
205 | | void append(const IOBuf& other); |
206 | | // Append content of `other' to self and clear `other'. |
207 | | void append(const Movable& other); |
208 | | |
209 | | // =================================================================== |
210 | | // Following push_back()/append() functions are implemented for convenience |
211 | | // and occasional usage; they're relatively slow because of the overhead |
212 | | // of frequent BlockRef management and reference counting. If you have |
213 | | // a lot of push_back()/append() calls to make, use IOBufAppender or |
214 | | // IOBufBuilder instead, which reduce the overhead by owning IOBuf::Block. |
215 | | // =================================================================== |
216 | | |
217 | | // Append a character to back side. (with copying) |
218 | | // Returns 0 on success, -1 otherwise. |
219 | | int push_back(char c); |
220 | | |
221 | | // Append `data' with `count' bytes to back side. (with copying) |
222 | | // Returns 0 on success(include count == 0), -1 otherwise. |
223 | | int append(void const* data, size_t count); |
224 | | |
225 | | // Append multiple data to back side in one call, faster than appending |
226 | | // one by one separately. |
227 | | // Returns 0 on success, -1 otherwise. |
228 | | // Example: |
229 | | // const_iovec vec[] = { { data1, len1 }, |
230 | | // { data2, len2 }, |
231 | | // { data3, len3 } }; |
232 | | // foo.appendv(vec, arraysize(vec)); |
233 | | int appendv(const const_iovec vec[], size_t n); |
234 | | int appendv(const iovec* vec, size_t n) |
235 | 0 | { return appendv((const const_iovec*)vec, n); } |
236 | | |
237 | | // Append a c-style string to back side. (with copying) |
238 | | // Returns 0 on success, -1 otherwise. |
239 | | // NOTE: Returns 0 when `s' is empty. |
240 | | int append(char const* s); |
241 | | |
242 | | // Append a std::string to back side. (with copying) |
243 | | // Returns 0 on success, -1 otherwise. |
244 | | // NOTE: Returns 0 when `s' is empty. |
245 | | int append(const std::string& s); |
246 | | |
247 | | // Append the user-data to back side WITHOUT copying. |
248 | | // The user-data can be split and shared by smaller IOBufs and will be |
249 | | // deleted using the deleter func when no IOBuf references it anymore. |
250 | | int append_user_data(void* data, size_t size, std::function<void(void*)> deleter); |
251 | | |
252 | | // Append the user-data to back side WITHOUT copying. |
253 | | // The meta is associated with this piece of user-data. |
254 | | int append_user_data_with_meta(void* data, size_t size, std::function<void(void*)> deleter, uint64_t meta); |
255 | | |
256 | | // Get the data meta of the first byte in this IOBuf. |
257 | | // The meta is specified with append_user_data_with_meta before. |
258 | | // 0 means the meta is invalid. |
259 | | uint64_t get_first_data_meta(); |
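| | // Example (editor's sketch): handing malloc()-ed memory to IOBuf without |
| | // copying; `buf', `buf2', `q' and `qlen' are hypothetical, and free() is |
| | // used as the deleter: |
| | //   void* p = malloc(4096); |
| | //   buf.append_user_data(p, 4096, free);   // free(p) runs when unreferenced |
| | //   buf2.append_user_data_with_meta(q, qlen, free, /*meta=*/42); |
| | //   uint64_t m = buf2.get_first_data_meta();  // 42; 0 would mean "no meta" |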
260 | | |
261 | | // Resizes the buf to a length of n characters. |
262 | | // If n is smaller than the current length, all bytes after n will be |
263 | | // truncated. |
264 | | // If n is greater than the current length, the buffer is appended with |
265 | | // as many |c| as needed to reach a size of n. If c is not specified, |
266 | | // null characters ('\0') are appended. |
267 | | // Returns 0 on success, -1 otherwise. |
268 | 0 | int resize(size_t n) { return resize(n, '\0'); } |
269 | | int resize(size_t n, char c); |
270 | | |
271 | | // Reserve `n' uninitialized bytes at back-side. |
272 | | // Returns an object representing the reserved area, INVALID_AREA on failure. |
273 | | // NOTE: reserve(0) returns INVALID_AREA. |
274 | | Area reserve(size_t n); |
275 | | |
276 | | // [EXTREMELY UNSAFE] |
277 | | // Copy `data' to the reserved `area'. `data' must be as long as the |
278 | | // reserved size. |
279 | | // Returns 0 on success, -1 otherwise. |
280 | | // [Rules] |
281 | | // 1. Make sure the IOBuf to be assigned was NOT cut/popped from the front |
282 | | // side after reserving, otherwise the behavior of this function is |
283 | | // undefined, even if it returns 0. |
284 | | // 2. Make sure the IOBuf to be assigned was NOT copied to/from another |
285 | | // IOBuf after reserving, to prevent underlying blocks from being shared; |
286 | | // otherwise the assignment affects all IOBufs sharing the blocks, which |
287 | | // is probably not what we want. |
288 | | int unsafe_assign(Area area, const void* data); |
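| | // Example (editor's sketch): the "reserve a length prefix, backfill it |
| | // later" pattern; `payload' is a hypothetical IOBuf and htonl() comes from |
| | // <arpa/inet.h>: |
| | //   butil::IOBuf out; |
| | //   butil::IOBuf::Area area = out.reserve(sizeof(uint32_t)); |
| | //   out.append(payload); |
| | //   uint32_t len = htonl(out.length() - sizeof(uint32_t)); |
| | //   out.unsafe_assign(area, &len);  // mind rules 1 and 2 above |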
289 | | |
290 | | // Append min(n, length()) bytes starting from `pos' at front side to `buf'. |
291 | | // The real payload is shared rather than copied. |
292 | | // Returns bytes copied. |
293 | | size_t append_to(IOBuf* buf, size_t n = (size_t)-1L, size_t pos = 0) const; |
294 | | |
295 | | // Explicitly declare this overload as error to avoid copy_to(butil::IOBuf*) |
296 | | // from being interpreted as copy_to(void*) by the compiler (which causes |
297 | | // undefined behavior). |
298 | | size_t copy_to(IOBuf* buf, size_t n = (size_t)-1L, size_t pos = 0) const |
299 | | // the error attribute in not available in gcc 3.4 |
300 | | #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) |
301 | | __attribute__ (( error("Call append_to(IOBuf*) instead") )) |
302 | | #endif |
303 | | ; |
304 | | |
305 | | // Copy min(n, length()) bytes starting from `pos' at front side into `buf'. |
306 | | // Returns bytes copied. |
307 | | size_t copy_to(void* buf, size_t n = (size_t)-1L, size_t pos = 0) const; |
308 | | |
309 | | // NOTE: the first parameter is not std::string& because users may pass in |
310 | | // a pointer to std::string by mistake, in which case the void* overload |
311 | | // would be wrongly called. |
312 | | size_t copy_to(std::string* s, size_t n = (size_t)-1L, size_t pos = 0) const; |
313 | | size_t append_to(std::string* s, size_t n = (size_t)-1L, size_t pos = 0) const; |
314 | | |
315 | | // Copy min(n, length()) bytes starting from `pos' at front side into |
316 | | // `cstr' and end it with '\0'. |
317 | | // `cstr' must be as long as min(n, length())+1. |
318 | | // Returns bytes copied (not including ending '\0') |
319 | | size_t copy_to_cstr(char* cstr, size_t n = (size_t)-1L, size_t pos = 0) const; |
320 | | |
321 | | // Convert all data in this buffer to a std::string. |
322 | | std::string to_string() const; |
323 | | |
324 | | // Get `n' front-side bytes with minimum copying. Length of `aux_buffer' |
325 | | // must not be less than `n'. |
326 | | // Returns: |
327 | | // NULL - n is greater than length() |
328 | | // aux_buffer - n bytes are copied into aux_buffer |
329 | | // internal buffer - the bytes are stored continuously in the internal |
330 | | // buffer, no copying is needed. This function does not |
331 | | // add additional reference to the underlying block, |
332 | | // so the user should not change this IOBuf while using |
333 | | // the internal buffer. |
334 | | // If n == 0 and buffer is empty, return value is undefined. |
335 | | const void* fetch(void* aux_buffer, size_t n) const; |
336 | | // Fetch one character from front side. |
337 | | // Returns pointer to the character, NULL on empty. |
338 | | const void* fetch1() const; |
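| | // Example (editor's sketch): peeking at a fixed-size header without cutting: |
| | //   char aux[8]; |
| | //   const char* p = (const char*)buf.fetch(aux, sizeof(aux)); |
| | //   if (p != NULL) { |
| | //       // p points into the IOBuf or into `aux'; don't modify `buf' while |
| | //       // reading through p. |
| | //   } |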
339 | | |
340 | | // Remove all data |
341 | | void clear(); |
342 | | |
343 | | // True iff there's no data |
344 | | bool empty() const; |
345 | | |
346 | | // Number of bytes |
347 | | size_t length() const; |
348 | 4.44k | size_t size() const { return length(); } |
349 | | |
350 | | // Get number of Blocks in use. block_memory = block_count * BLOCK_SIZE |
351 | | static size_t block_count(); |
352 | | static size_t block_memory(); |
353 | | static size_t new_bigview_count(); |
354 | | static size_t block_count_hit_tls_threshold(); |
355 | | |
356 | | // Whether this IOBuf equals the given string/IOBuf. |
357 | | bool equals(const butil::StringPiece&) const; |
358 | | bool equals(const IOBuf& other) const; |
359 | | |
360 | | // Get the number of backing blocks |
361 | 7.64k | size_t backing_block_num() const { return _ref_num(); } |
362 | | |
363 | | // Get #i backing_block, an empty StringPiece is returned if no such block |
364 | | StringPiece backing_block(size_t i) const; |
365 | | |
366 | | // Make a movable version of self |
367 | 0 | Movable movable() { return Movable(*this); } |
368 | | |
369 | | protected: |
370 | | int _cut_by_char(IOBuf* out, char); |
371 | | int _cut_by_delim(IOBuf* out, char const* dbegin, size_t ndelim); |
372 | | |
373 | | // Returns: true iff this should be viewed as SmallView |
374 | | bool _small() const; |
375 | | |
376 | | template <bool MOVE> |
377 | | void _push_or_move_back_ref_to_smallview(const BlockRef&); |
378 | | template <bool MOVE> |
379 | | void _push_or_move_back_ref_to_bigview(const BlockRef&); |
380 | | |
381 | | // Push a BlockRef to back side |
382 | | // NOTICE: All fields of the ref must be initialized or assigned |
383 | | // properly, or it will ruin this queue |
384 | | void _push_back_ref(const BlockRef&); |
385 | | // Move a BlockRef to back side. After calling this function, content of |
386 | | // the BlockRef will be invalid and should never be used again. |
387 | | void _move_back_ref(const BlockRef&); |
388 | | |
389 | | // Pop a BlockRef from front side. |
390 | | // Returns: 0 on success and -1 on empty. |
391 | 48 | int _pop_front_ref() { return _pop_or_moveout_front_ref<false>(); } |
392 | | |
393 | | // Move a BlockRef out from front side. |
394 | | // Returns: 0 on success and -1 on empty. |
395 | 103 | int _moveout_front_ref() { return _pop_or_moveout_front_ref<true>(); } |
396 | | |
397 | | template <bool MOVEOUT> |
398 | | int _pop_or_moveout_front_ref(); |
399 | | |
400 | | // Pop a BlockRef from back side. |
401 | | // Returns: 0 on success and -1 on empty. |
402 | | int _pop_back_ref(); |
403 | | |
404 | | // Number of refs in the queue |
405 | | size_t _ref_num() const; |
406 | | |
407 | | // Get reference to front/back BlockRef in the queue |
408 | | // should not be called if the queue is empty, otherwise the behavior is undefined |
409 | | BlockRef& _front_ref(); |
410 | | const BlockRef& _front_ref() const; |
411 | | BlockRef& _back_ref(); |
412 | | const BlockRef& _back_ref() const; |
413 | | |
414 | | // Get reference to the i-th BlockRef (counting from front) in the queue |
415 | | // NOTICE: should not be called if the queue is empty, and `i' must |
416 | | // be inside [0, _ref_num()-1], or the behavior is undefined |
417 | | BlockRef& _ref_at(size_t i); |
418 | | const BlockRef& _ref_at(size_t i) const; |
419 | | |
420 | | // Get pointer to the i-th BlockRef (counting from front) |
421 | | // If i is out-of-range, NULL is returned. |
422 | | const BlockRef* _pref_at(size_t i) const; |
423 | | |
424 | | private: |
425 | | union { |
426 | | BigView _bv; |
427 | | SmallView _sv; |
428 | | }; |
429 | | }; |
430 | | |
431 | | std::ostream& operator<<(std::ostream&, const IOBuf& buf); |
432 | | |
433 | | inline bool operator==(const butil::IOBuf& b, const butil::StringPiece& s) |
434 | 0 | { return b.equals(s); } |
435 | | inline bool operator==(const butil::StringPiece& s, const butil::IOBuf& b) |
436 | 0 | { return b.equals(s); } |
437 | | inline bool operator!=(const butil::IOBuf& b, const butil::StringPiece& s) |
438 | 0 | { return !b.equals(s); } |
439 | | inline bool operator!=(const butil::StringPiece& s, const butil::IOBuf& b) |
440 | 0 | { return !b.equals(s); } |
441 | | inline bool operator==(const butil::IOBuf& b1, const butil::IOBuf& b2) |
442 | 0 | { return b1.equals(b2); } |
443 | | inline bool operator!=(const butil::IOBuf& b1, const butil::IOBuf& b2) |
444 | 0 | { return !b1.equals(b2); } |
445 | | |
446 | | // IOPortal is a subclass of IOBuf that can read from file descriptors. |
447 | | // Typically used as the buffer to store bytes from sockets. |
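| | // Example (editor's sketch): reading from a hypothetical socket `sockfd' and |
| | // cutting out a message whose size `msg_size' was parsed elsewhere: |
| | //   butil::IOPortal portal; |
| | //   ssize_t nr = portal.append_from_file_descriptor(sockfd, 65536); |
| | //   if (nr > 0) { |
| | //       butil::IOBuf msg; |
| | //       portal.cutn(&msg, msg_size); |
| | //   } |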
448 | | class IOPortal : public IOBuf { |
449 | | public: |
450 | 0 | IOPortal() : _block(NULL) { } |
451 | 0 | IOPortal(const IOPortal& rhs) : IOBuf(rhs), _block(NULL) { } |
452 | | ~IOPortal(); |
453 | | IOPortal& operator=(const IOPortal& rhs); |
454 | | |
455 | | // Read at most `max_count' bytes from the reader and append to self. |
456 | | ssize_t append_from_reader(IReader* reader, size_t max_count); |
457 | | |
458 | | // Read at most `max_count' bytes from file descriptor `fd' and |
459 | | // append to self. |
460 | | ssize_t append_from_file_descriptor(int fd, size_t max_count); |
461 | | |
462 | | // Read at most `max_count' bytes from file descriptor `fd' at a given |
463 | | // offset and append to self. The file offset is not changed. |
464 | | // If `offset' is negative, does exactly what append_from_file_descriptor does. |
465 | | ssize_t pappend_from_file_descriptor(int fd, off_t offset, size_t max_count); |
466 | | |
467 | | // Read as many bytes as possible from SSL channel `ssl', stopping once `max_count' bytes are read. |
468 | | // Returns total bytes read and the ssl error code will be filled into `ssl_error' |
469 | | ssize_t append_from_SSL_channel(struct ssl_st* ssl, int* ssl_error, |
470 | | size_t max_count = 1024*1024); |
471 | | |
472 | | // Remove all data inside and return cached blocks. |
473 | | void clear(); |
474 | | |
475 | | // Return cached blocks to TLS. This function should be called by users |
476 | | // when this IOPortal has been cut into intact messages and becomes empty, |
477 | | // so that subsequent code using IOBuf can reuse the blocks. Calling this |
478 | | // function after each call to append_xxx() does not make sense and may |
479 | | // hurt performance. Read comments on field `_block' below. |
480 | | void return_cached_blocks(); |
481 | | |
482 | | private: |
483 | | static void return_cached_blocks_impl(Block*); |
484 | | |
485 | | // Cached blocks for appending. Notice that the blocks are not released |
486 | | // until return_cached_blocks()/clear()/dtor() are called, rather than |
487 | | // after each append_xxx(), which makes messages read from one |
488 | | // file descriptor more likely to share blocks and have fewer BlockRefs. |
489 | | Block* _block; |
490 | | }; |
491 | | |
492 | | class IOReserveAlignedBuf : public IOBuf { |
493 | | public: |
494 | | IOReserveAlignedBuf(size_t alignment) |
495 | 0 | : _alignment(alignment), _reserved(false) {} |
496 | | Area reserve(size_t count); |
497 | | |
498 | | private: |
499 | | size_t _alignment; |
500 | | bool _reserved; |
501 | | }; |
502 | | |
503 | | // Specialized utility to cut from IOBuf faster than using corresponding |
504 | | // methods in IOBuf. |
505 | | // Designed for efficiently parsing data from IOBuf. |
506 | | // The IOBuf being cut from can still be appended to during cutting. |
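| | // Example (editor's sketch): repeatedly reading fixed-size records from a |
| | // hypothetical `source' IOBuf: |
| | //   butil::IOBufCutter cutter(&source); |
| | //   char header[12]; |
| | //   while (cutter.copy_to(header, sizeof(header)) == sizeof(header)) { |
| | //       cutter.pop_front(sizeof(header)); |
| | //       // ... decode `header', then cut the body with cutter.cutn(...) |
| | //   } |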
507 | | class IOBufCutter { |
508 | | public: |
509 | | explicit IOBufCutter(butil::IOBuf* buf); |
510 | | ~IOBufCutter(); |
511 | | |
512 | | // Cut off n bytes and APPEND to `out' |
513 | | // Returns bytes cut. |
514 | | size_t cutn(butil::IOBuf* out, size_t n); |
515 | | size_t cutn(std::string* out, size_t n); |
516 | | size_t cutn(void* out, size_t n); |
517 | | |
518 | | // Cut off 1 byte from the front side and set to *c |
519 | | // Return true on cut, false otherwise. |
520 | | bool cut1(void* data); |
521 | | |
522 | | // Copy n bytes into `data' |
523 | | // Returns bytes copied. |
524 | | size_t copy_to(void* data, size_t n); |
525 | | |
526 | | // Fetch one character. |
527 | | // Returns pointer to the character, NULL on empty |
528 | | const void* fetch1(); |
529 | | |
530 | | // Pop n bytes from front side |
531 | | // Returns bytes popped. |
532 | | size_t pop_front(size_t n); |
533 | | |
534 | | // Uncut bytes |
535 | | size_t remaining_bytes() const; |
536 | | |
537 | | private: |
538 | | size_t slower_copy_to(void* data, size_t n); |
539 | | bool load_next_ref(); |
540 | | |
541 | | private: |
542 | | void* _data; |
543 | | void* _data_end; |
544 | | IOBuf::Block* _block; |
545 | | IOBuf* _buf; |
546 | | }; |
547 | | |
548 | | // Parse a protobuf message from an IOBuf. Notice that this wrapper does not |
549 | | // change the source IOBuf, which also should not change during the lifetime |
550 | | // of the wrapper. Even if an IOBufAsZeroCopyInputStream is created but not |
551 | | // parsed from yet, the source IOBuf still should not be changed, because the |
552 | | // constructor of the stream saves internal information of the source IOBuf |
553 | | // which is assumed to stay unchanged. |
554 | | // Example: |
555 | | // IOBufAsZeroCopyInputStream wrapper(the_iobuf_with_protobuf_format_data); |
556 | | // some_pb_message.ParseFromZeroCopyStream(&wrapper); |
557 | | class IOBufAsZeroCopyInputStream |
558 | | : public google::protobuf::io::ZeroCopyInputStream { |
559 | | public: |
560 | | explicit IOBufAsZeroCopyInputStream(const IOBuf&); |
561 | | |
562 | | bool Next(const void** data, int* size) override; |
563 | | void BackUp(int count) override; |
564 | | bool Skip(int count) override; |
565 | | google::protobuf::int64 ByteCount() const override; |
566 | | |
567 | | private: |
568 | | int _ref_index; |
569 | | int _add_offset; |
570 | | google::protobuf::int64 _byte_count; |
571 | | const IOBuf* _buf; |
572 | | }; |
573 | | |
574 | | // Serialize a protobuf message into an IOBuf. This wrapper does not clear the |
575 | | // source IOBuf before appending. You can change the source IOBuf when the |
576 | | // stream is not in use (append something to the IOBuf, serialize a protobuf |
577 | | // message, append again, serialize again, ...). This is different from |
578 | | // IOBufAsZeroCopyInputStream, which needs the source IOBuf to be unchanged. |
579 | | // Example: |
580 | | // IOBufAsZeroCopyOutputStream wrapper(&the_iobuf_to_put_data_in); |
581 | | // some_pb_message.SerializeToZeroCopyStream(&wrapper); |
582 | | // |
583 | | // NOTE: Blocks are by default shared among all ZeroCopyOutputStreams in one |
584 | | // thread. If many streams are manipulated at the same time, there may be many |
585 | | // fragments. You can create a ZeroCopyOutputStream that has its own blocks by |
586 | | // passing a positive `block_size' argument to avoid this problem. |
587 | | class IOBufAsZeroCopyOutputStream |
588 | | : public google::protobuf::io::ZeroCopyOutputStream { |
589 | | public: |
590 | | explicit IOBufAsZeroCopyOutputStream(IOBuf*); |
591 | | IOBufAsZeroCopyOutputStream(IOBuf*, uint32_t block_size); |
592 | | ~IOBufAsZeroCopyOutputStream(); |
593 | | |
594 | | bool Next(void** data, int* size) override; |
595 | | void BackUp(int count) override; // `count' can be as long as ByteCount() |
596 | | google::protobuf::int64 ByteCount() const override; |
597 | | |
598 | | private: |
599 | | void _release_block(); |
600 | | |
601 | | IOBuf* _buf; |
602 | | uint32_t _block_size; |
603 | | IOBuf::Block *_cur_block; |
604 | | google::protobuf::int64 _byte_count; |
605 | | }; |
606 | | |
607 | | // Wrap IOBuf into input of snappy compression. |
608 | | class IOBufAsSnappySource : public butil::snappy::Source { |
609 | | public: |
610 | | explicit IOBufAsSnappySource(const butil::IOBuf& buf) |
611 | 0 | : _buf(&buf), _stream(buf) {} |
612 | 0 | virtual ~IOBufAsSnappySource() {} |
613 | | |
614 | | // Return the number of bytes left to read from the source |
615 | | size_t Available() const override; |
616 | | |
617 | | // Peek at the next flat region of the source. |
618 | | const char* Peek(size_t* len) override; |
619 | | |
620 | | // Skip the next n bytes. Invalidates any buffer returned by |
621 | | // a previous call to Peek(). |
622 | | void Skip(size_t n) override; |
623 | | |
624 | | private: |
625 | | const butil::IOBuf* _buf; |
626 | | butil::IOBufAsZeroCopyInputStream _stream; |
627 | | }; |
628 | | |
629 | | // Wrap IOBuf into output of snappy compression. |
630 | | class IOBufAsSnappySink : public butil::snappy::Sink { |
631 | | public: |
632 | | explicit IOBufAsSnappySink(butil::IOBuf& buf); |
633 | 0 | virtual ~IOBufAsSnappySink() {} |
634 | | |
635 | | // Append "bytes[0,n-1]" to this. |
636 | | void Append(const char* bytes, size_t n) override; |
637 | | |
638 | | // Returns a writable buffer of the specified length for appending. |
639 | | char* GetAppendBuffer(size_t length, char* scratch) override; |
640 | | |
641 | | private: |
642 | | char* _cur_buf; |
643 | | int _cur_len; |
644 | | butil::IOBuf* _buf; |
645 | | butil::IOBufAsZeroCopyOutputStream _buf_stream; |
646 | | }; |
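| | // Example (editor's sketch): compressing one hypothetical IOBuf into another. |
| | // The call to butil::snappy::Compress(Source*, Sink*) is assumed to be |
| | // available from the bundled snappy (butil/third_party/snappy); it is not |
| | // declared in this header. |
| | //   butil::IOBufAsSnappySource source(uncompressed); |
| | //   butil::IOBufAsSnappySink sink(compressed); |
| | //   butil::snappy::Compress(&source, &sink); |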
647 | | |
648 | | // A std::ostream to build IOBuf. |
649 | | // Example: |
650 | | // IOBufBuilder builder; |
651 | | // builder << "Anything that can be sent to std::ostream"; |
652 | | // // You have several methods to fetch the IOBuf. |
653 | | // target_iobuf.append(builder.buf()); // builder.buf() was not changed |
654 | | // OR |
655 | | // builder.move_to(target_iobuf); // builder.buf() was clear()-ed. |
656 | | class IOBufBuilder : |
657 | | // Have to use private inheritance to arrange initialization order. |
658 | | virtual private IOBuf, |
659 | | virtual private IOBufAsZeroCopyOutputStream, |
660 | | virtual private ZeroCopyStreamAsStreamBuf, |
661 | | public std::ostream { |
662 | | public: |
663 | | explicit IOBufBuilder() |
664 | 0 | : IOBufAsZeroCopyOutputStream(this) |
665 | 0 | , ZeroCopyStreamAsStreamBuf(this) |
666 | 0 | , std::ostream(this) |
667 | 0 | { } |
668 | | |
669 | 0 | IOBuf& buf() { |
670 | 0 | this->shrink(); |
671 | 0 | return *this; |
672 | 0 | } |
673 | 0 | void buf(const IOBuf& buf) { |
674 | 0 | *static_cast<IOBuf*>(this) = buf; |
675 | 0 | } |
676 | 0 | void move_to(IOBuf& target) { |
677 | 0 | target = Movable(buf()); |
678 | 0 | } |
679 | | }; |
680 | | |
681 | | // Create IOBuf by appending data *faster* |
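| | // Example (editor's sketch): |
| | //   butil::IOBufAppender appender; |
| | //   appender.append("value=", 6); |
| | //   appender.append_decimal(12345); |
| | //   appender.push_back('\n'); |
| | //   butil::IOBuf result; |
| | //   appender.move_to(result);  // or: result.append(appender.buf()) |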
682 | | class IOBufAppender { |
683 | | public: |
684 | | IOBufAppender(); |
685 | | |
686 | | // Append `n' bytes starting from `data' to back side of the internal buffer |
687 | | // Costs about 2/3 of the time of IOBuf::append() for short data/strings on |
688 | | // Intel(R) Xeon(R) CPU E5-2620 @ 2.00GHz; longer data/strings narrow the gap. |
689 | | // Returns 0 on success, -1 otherwise. |
690 | | int append(const void* data, size_t n); |
691 | | int append(const butil::StringPiece& str); |
692 | | |
693 | | // Format integer |d| to back side of the internal buffer, which is much faster |
694 | | // than snprintf(..., "%lu", d). |
695 | | // Returns 0 on success, -1 otherwise. |
696 | | int append_decimal(long d); |
697 | | |
698 | | // Push the character to back side of the internal buffer. |
699 | | // Costs ~3ns while IOBuf.push_back costs ~13ns on Intel(R) Xeon(R) CPU |
700 | | // E5-2620 @ 2.00GHz |
701 | | // Returns 0 on success, -1 otherwise. |
702 | | int push_back(char c); |
703 | | |
704 | 0 | IOBuf& buf() { |
705 | 0 | shrink(); |
706 | 0 | return _buf; |
707 | 0 | } |
708 | 0 | void move_to(IOBuf& target) { |
709 | 0 | target = IOBuf::Movable(buf()); |
710 | 0 | } |
711 | | |
712 | | private: |
713 | | void shrink(); |
714 | | int add_block(); |
715 | | |
716 | | void* _data; |
717 | | // Saving _data_end instead of _size avoids modifying _data and _size |
718 | | // in each push_back(), which is probably a hotspot. |
719 | | void* _data_end; |
720 | | IOBuf _buf; |
721 | | IOBufAsZeroCopyOutputStream _zc_stream; |
722 | | }; |
723 | | |
724 | | // Iterate over the bytes of an IOBuf. |
725 | | // During iteration, the IOBuf should NOT be changed. |
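| | // Example (editor's sketch): summing all bytes of a hypothetical `buf'; the |
| | // conversion to const void* below makes the iterator usable as a loop |
| | // condition: |
| | //   uint32_t sum = 0; |
| | //   for (butil::IOBufBytesIterator it(buf); it; ++it) { |
| | //       sum += *it; |
| | //   } |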
726 | | class IOBufBytesIterator { |
727 | | public: |
728 | | explicit IOBufBytesIterator(const butil::IOBuf& buf); |
729 | | // Construct from another iterator. |
730 | | IOBufBytesIterator(const IOBufBytesIterator& it); |
731 | | IOBufBytesIterator(const IOBufBytesIterator& it, size_t bytes_left); |
732 | | // Returning unsigned char is safer than char, which is more error-prone in |
733 | | // bitwise operations. For example, in "uint32_t value = *it", value is |
734 | | // (unexpectedly) 4294967168 when *it returns (char)128. |
735 | 62.9k | unsigned char operator*() const { return (unsigned char)*_block_begin; } |
736 | 63.2k | operator const void*() const { return (const void*)!!_bytes_left; } |
737 | | void operator++(); |
738 | 0 | void operator++(int) { return operator++(); } |
739 | | // Copy at most n bytes into buf, forwarding this iterator. |
740 | | // Returns bytes copied. |
741 | | size_t copy_and_forward(void* buf, size_t n); |
742 | | size_t copy_and_forward(std::string* s, size_t n); |
743 | | // Just forward this iterator for at most n bytes. |
744 | | size_t forward(size_t n); |
745 | | // Append at most n bytes into buf, forwarding this iterator. Data are |
746 | | // referenced rather than copied. |
747 | | size_t append_and_forward(butil::IOBuf* buf, size_t n); |
748 | | bool forward_one_block(const void** data, size_t* size); |
749 | 566 | size_t bytes_left() const { return _bytes_left; } |
750 | | private: |
751 | | void try_next_block(); |
752 | | const char* _block_begin; |
753 | | const char* _block_end; |
754 | | uint32_t _block_count; |
755 | | uint32_t _bytes_left; |
756 | | const butil::IOBuf* _buf; |
757 | | }; |
758 | | |
759 | | } // namespace butil |
760 | | |
761 | | // Specialize std::swap for IOBuf |
762 | | #if __cplusplus < 201103L // < C++11 |
763 | | #include <algorithm> // std::swap until C++11 |
764 | | #else |
765 | | #include <utility> // std::swap since C++11 |
766 | | #endif // __cplusplus < 201103L |
767 | | namespace std { |
768 | | template <> |
769 | 0 | inline void swap(butil::IOBuf& a, butil::IOBuf& b) { |
770 | 0 | return a.swap(b); |
771 | 0 | } |
772 | | } // namespace std |
773 | | |
774 | | #include "butil/iobuf_inl.h" |
775 | | |
776 | | #endif // BUTIL_IOBUF_H |