/src/brpc/src/butil/iobuf.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | // iobuf - A non-continuous zero-copied buffer |
19 | | |
20 | | // Date: Thu Nov 22 13:57:56 CST 2012 |
21 | | |
22 | | #include "butil/ssl_compat.h" // BIO_fd_non_fatal_error |
23 | | #include <openssl/err.h> |
24 | | #include <openssl/ssl.h> // SSL_* |
25 | | #ifdef USE_MESALINK |
26 | | #include <mesalink/openssl/ssl.h> |
27 | | #include <mesalink/openssl/err.h> |
28 | | #endif |
29 | | #include <sys/syscall.h> // syscall |
30 | | #include <fcntl.h> // O_RDONLY |
31 | | #include <errno.h> // errno |
32 | | #include <limits.h> // CHAR_BIT |
33 | | #include <stdexcept> // std::invalid_argument |
34 | | #include "butil/build_config.h" // ARCH_CPU_X86_64 |
35 | | #include "butil/atomicops.h" // butil::atomic |
36 | | #include "butil/thread_local.h" // thread_atexit |
37 | | #include "butil/macros.h" // BAIDU_CASSERT |
38 | | #include "butil/logging.h" // CHECK, LOG |
39 | | #include "butil/fd_guard.h" // butil::fd_guard |
40 | | #include "butil/iobuf.h" |
41 | | #include "butil/iobuf_profiler.h" |
42 | | |
43 | | namespace butil { |
44 | | namespace iobuf { |
45 | | |
// Signature shared by the preadv/pwritev-style implementations below, so the
// best available one can be selected at runtime (see get_preadv_func()).
typedef ssize_t (*iov_function)(int fd, const struct iovec *vector,
                                int count, off_t offset);
48 | | |
// Userspace fallback for preadv(2): emulate the vectored positional read
// with one pread() per iovec. Returns the total bytes read, or the first
// error/EOF result if nothing was read at all.
static ssize_t user_preadv(int fd, const struct iovec *vector,
                           int count, off_t offset) {
    ssize_t nread = 0;
    for (int i = 0; i < count; ++i) {
        const ssize_t r = ::pread(fd, vector[i].iov_base, vector[i].iov_len, offset);
        if (r <= 0) {
            // Report partial progress; propagate error/EOF only when the
            // very first read failed.
            return nread > 0 ? nread : r;
        }
        nread += r;
        offset += r;
        if ((size_t)r < vector[i].iov_len) {
            break;  // short read: stop early, just as preadv would
        }
    }
    return nread;
}
66 | | |
// Userspace fallback for pwritev(2): emulate the vectored positional write
// with one pwrite() per iovec. Returns the total bytes written, or the
// first error result if nothing was written at all.
static ssize_t user_pwritev(int fd, const struct iovec* vector,
                            int count, off_t offset) {
    ssize_t nwritten = 0;
    for (int i = 0; i < count; ++i) {
        const ssize_t r = ::pwrite(fd, vector[i].iov_base, vector[i].iov_len, offset);
        if (r <= 0) {
            // Report partial progress; propagate the failure only when the
            // very first write failed.
            return nwritten > 0 ? nwritten : r;
        }
        nwritten += r;
        offset += r;
        if ((size_t)r < vector[i].iov_len) {
            break;  // short write: stop early, just as pwritev would
        }
    }
    return nwritten;
}
83 | | |
84 | | #if ARCH_CPU_X86_64 |
85 | | |
86 | | #ifndef SYS_preadv |
87 | | #define SYS_preadv 295 |
88 | | #endif // SYS_preadv |
89 | | |
90 | | #ifndef SYS_pwritev |
91 | | #define SYS_pwritev 296 |
92 | | #endif // SYS_pwritev |
93 | | |
// SYS_preadv/SYS_pwritev is available since Linux 2.6.30
// Thin wrappers over the raw syscalls; used only after the runtime probe in
// get_preadv_func()/get_pwritev_func() confirms kernel support.
static ssize_t sys_preadv(int fd, const struct iovec *vector,
                          int count, off_t offset) {
    return syscall(SYS_preadv, fd, vector, count, offset);
}

static ssize_t sys_pwritev(int fd, const struct iovec *vector,
                           int count, off_t offset) {
    return syscall(SYS_pwritev, fd, vector, count, offset);
}
104 | | |
105 | 0 | inline iov_function get_preadv_func() { |
106 | | #if defined(OS_MACOSX) |
107 | | return user_preadv; |
108 | | #endif |
109 | 0 | butil::fd_guard fd(open("/dev/zero", O_RDONLY)); |
110 | 0 | if (fd < 0) { |
111 | 0 | PLOG(WARNING) << "Fail to open /dev/zero"; |
112 | 0 | return user_preadv; |
113 | 0 | } |
114 | 0 | char dummy[1]; |
115 | 0 | iovec vec = { dummy, sizeof(dummy) }; |
116 | 0 | const int rc = syscall(SYS_preadv, (int)fd, &vec, 1, 0); |
117 | 0 | if (rc < 0) { |
118 | 0 | PLOG(WARNING) << "The kernel doesn't support SYS_preadv, " |
119 | 0 | " use user_preadv instead"; |
120 | 0 | return user_preadv; |
121 | 0 | } |
122 | 0 | return sys_preadv; |
123 | 0 | } |
124 | | |
125 | 0 | inline iov_function get_pwritev_func() { |
126 | 0 | butil::fd_guard fd(open("/dev/null", O_WRONLY)); |
127 | 0 | if (fd < 0) { |
128 | 0 | PLOG(ERROR) << "Fail to open /dev/null"; |
129 | 0 | return user_pwritev; |
130 | 0 | } |
131 | | #if defined(OS_MACOSX) |
132 | | return user_pwritev; |
133 | | #endif |
134 | 0 | char dummy[1]; |
135 | 0 | iovec vec = { dummy, sizeof(dummy) }; |
136 | 0 | const int rc = syscall(SYS_pwritev, (int)fd, &vec, 1, 0); |
137 | 0 | if (rc < 0) { |
138 | 0 | PLOG(WARNING) << "The kernel doesn't support SYS_pwritev, " |
139 | 0 | " use user_pwritev instead"; |
140 | 0 | return user_pwritev; |
141 | 0 | } |
142 | 0 | return sys_pwritev; |
143 | 0 | } |
144 | | |
145 | | #else // ARCH_CPU_X86_64 |
146 | | |
147 | | #warning "We don't check if the kernel supports SYS_preadv or SYS_pwritev on non-X86_64, use implementation on pread/pwrite directly." |
148 | | |
// On non-x86_64 we have no hard-coded syscall numbers to probe with, so the
// pread/pwrite based emulations are used unconditionally (see #warning above).
inline iov_function get_preadv_func() {
    return user_preadv;
}

inline iov_function get_pwritev_func() {
    return user_pwritev;
}
156 | | |
157 | | #endif // ARCH_CPU_X86_64 |
158 | | |
// Copy `n' bytes from `src' to `dest'; regions must not overlap
// (__restrict). Returns `dest', like memcpy.
void* cp(void *__restrict dest, const void *__restrict src, size_t n) {
    // memcpy in gcc 4.8 seems to be faster enough.
    return memcpy(dest, src, n);
}
163 | | |
// Function pointers to allocate or deallocate memory for a IOBuf::Block.
// Replaceable hooks (restored by reset_blockmem_allocate_and_deallocate()).
void* (*blockmem_allocate)(size_t) = ::malloc;
void (*blockmem_deallocate)(void*) = ::free;
167 | | |
168 | | void remove_tls_block_chain(); |
169 | | |
170 | | // Use default function pointers |
171 | 0 | void reset_blockmem_allocate_and_deallocate() { |
172 | | // There maybe block allocated by previous hooks, it's wrong to free them using |
173 | | // mismatched hook. |
174 | 0 | remove_tls_block_chain(); |
175 | 0 | blockmem_allocate = ::malloc; |
176 | 0 | blockmem_deallocate = ::free; |
177 | 0 | } |
178 | | |
// Global statistics: number of live Blocks, total bytes they occupy, and
// how many times a BigView ref-array was freshly allocated.
butil::static_atomic<size_t> g_nblock = BUTIL_STATIC_ATOMIC_INIT(0);
butil::static_atomic<size_t> g_blockmem = BUTIL_STATIC_ATOMIC_INIT(0);
butil::static_atomic<size_t> g_newbigview = BUTIL_STATIC_ATOMIC_INIT(0);
182 | | |
183 | | } // namespace iobuf |
184 | | |
// Read-only accessors for the iobuf-wide counters above. Relaxed loads:
// these values are statistics, not synchronization points.
size_t IOBuf::block_count() {
    return iobuf::g_nblock.load(butil::memory_order_relaxed);
}

size_t IOBuf::block_memory() {
    return iobuf::g_blockmem.load(butil::memory_order_relaxed);
}

size_t IOBuf::new_bigview_count() {
    return iobuf::g_newbigview.load(butil::memory_order_relaxed);
}
196 | | |
// Bits stored in Block::flags.
const uint16_t IOBUF_BLOCK_FLAGS_USER_DATA = 1 << 0;  // data is caller-owned
const uint16_t IOBUF_BLOCK_FLAGS_SAMPLED = 1 << 1;    // tracked by IOBufProfiler
using UserDataDeleter = std::function<void(void*)>;

// Trailer placed right after a user-data Block header; holds the callback
// that releases the caller-owned buffer (see Block::dec_ref()).
struct UserDataExtension {
    UserDataDeleter deleter;
};
204 | | |
// Reference-counted header of a memory block. For ordinary blocks the
// payload immediately follows this header in the same allocation; for
// user-data blocks `data' points at caller-owned memory and a
// UserDataExtension (holding the deleter) follows the header instead.
struct IOBuf::Block {
    butil::atomic<int> nshared;   // reference count
    uint16_t flags;               // IOBUF_BLOCK_FLAGS_*
    uint16_t abi_check; // original cap, never be zero.
    uint32_t size;                // bytes already filled in the payload
    uint32_t cap;                 // payload capacity in bytes
    // When flag is 0, portal_next is valid.
    // When flag & IOBUF_BLOCK_FLAGS_USER_DATA is non-0, data_meta is valid.
    union {
        Block* portal_next;
        uint64_t data_meta;
    } u;
    // When flag is 0, data points to `size` bytes starting at `(char*)this+sizeof(Block)'
    // When flag & IOBUF_BLOCK_FLAGS_USER_DATA is non-0, data points to the user data and
    // the deleter is put in UserDataExtension at `(char*)this+sizeof(Block)'
    char* data;

    // Ordinary block: the payload of `data_size' bytes lives right after
    // this header in the same allocation (see iobuf::create_block()).
    Block(char* data_in, uint32_t data_size)
        : nshared(1)
        , flags(0)
        , abi_check(0)
        , size(0)
        , cap(data_size)
        , u({NULL})
        , data(data_in) {
        iobuf::g_nblock.fetch_add(1, butil::memory_order_relaxed);
        iobuf::g_blockmem.fetch_add(data_size + sizeof(Block),
                                    butil::memory_order_relaxed);
        if (is_samplable()) {
            SubmitIOBufSample(this, 1);
        }
    }

    // User-data block: `data_in' is caller-owned and released via `deleter'
    // when the last reference drops. Note g_nblock/g_blockmem are not
    // updated here, matching dec_ref() which skips them for user data.
    Block(char* data_in, uint32_t data_size, UserDataDeleter deleter)
        : nshared(1)
        , flags(IOBUF_BLOCK_FLAGS_USER_DATA)
        , abi_check(0)
        , size(data_size)
        , cap(data_size)
        , u({0})
        , data(data_in) {
        auto ext = new (get_user_data_extension()) UserDataExtension();
        ext->deleter = std::move(deleter);
        if (is_samplable()) {
            SubmitIOBufSample(this, 1);
        }
    }

    // Undefined behavior when (flags & IOBUF_BLOCK_FLAGS_USER_DATA) is 0.
    UserDataExtension* get_user_data_extension() {
        char* p = (char*)this;
        return (UserDataExtension*)(p + sizeof(Block));
    }

    // Detect two ABI-incompatible IOBuf implementations linked into one
    // binary: this layout always stores 0 here, while (per the field
    // comment) an older layout apparently stored `cap', which is never 0.
    // Compiled out in NDEBUG builds.
    inline void check_abi() {
#ifndef NDEBUG
        if (abi_check != 0) {
            LOG(FATAL) << "Your program seems to wrongly contain two "
                "ABI-incompatible implementations of IOBuf";
        }
#endif
    }

    void inc_ref() {
        check_abi();
        nshared.fetch_add(1, butil::memory_order_relaxed);
        if (sampled()) {
            SubmitIOBufSample(this, 1);
        }
    }

    // Drop one reference; the thread that drops the last one destroys the
    // block. The release fetch_sub + acquire fence pair makes all prior
    // writes visible to the destroying thread.
    void dec_ref() {
        check_abi();
        if (sampled()) {
            SubmitIOBufSample(this, -1);
        }
        if (nshared.fetch_sub(1, butil::memory_order_release) == 1) {
            butil::atomic_thread_fence(butil::memory_order_acquire);
            if (!is_user_data()) {
                iobuf::g_nblock.fetch_sub(1, butil::memory_order_relaxed);
                iobuf::g_blockmem.fetch_sub(cap + sizeof(Block),
                                            butil::memory_order_relaxed);
                this->~Block();
                iobuf::blockmem_deallocate(this);
            } else if (flags & IOBUF_BLOCK_FLAGS_USER_DATA) {
                // NOTE(review): this condition equals is_user_data() and is
                // therefore always true in this branch.
                auto ext = get_user_data_extension();
                ext->deleter(data);   // release the caller-owned buffer
                ext->~UserDataExtension();
                this->~Block();
                free(this);           // header was malloc'ed, not hook-allocated
            }
        }
    }

    int ref_count() const {
        return nshared.load(butil::memory_order_relaxed);
    }

    bool full() const { return size >= cap; }
    size_t left_space() const { return cap - size; }

private:
    // Ask the profiler whether this block should be sampled and remember
    // the decision in `flags' so inc_ref/dec_ref report matching deltas.
    bool is_samplable() {
        if (IsIOBufProfilerSamplable()) {
            flags |= IOBUF_BLOCK_FLAGS_SAMPLED;
            return true;
        }
        return false;
    }

    bool sampled() const {
        return flags & IOBUF_BLOCK_FLAGS_SAMPLED;
    }

    bool is_user_data() const {
        return flags & IOBUF_BLOCK_FLAGS_USER_DATA;
    }
};
323 | | |
324 | | namespace iobuf { |
325 | | |
// Accessors used only by unit tests; Block is an opaque type outside this
// translation unit.
int block_shared_count(IOBuf::Block const* b) { return b->ref_count(); }

IOBuf::Block* get_portal_next(IOBuf::Block const* b) {
    return b->u.portal_next;
}

uint32_t block_cap(IOBuf::Block const* b) {
    return b->cap;
}

uint32_t block_size(IOBuf::Block const* b) {
    return b->size;
}
340 | | |
341 | 79 | inline IOBuf::Block* create_block(const size_t block_size) { |
342 | 79 | if (block_size > 0xFFFFFFFFULL) { |
343 | 0 | LOG(FATAL) << "block_size=" << block_size << " is too large"; |
344 | 0 | return NULL; |
345 | 0 | } |
346 | 79 | char* mem = (char*)iobuf::blockmem_allocate(block_size); |
347 | 79 | if (mem == NULL) { |
348 | 0 | return NULL; |
349 | 0 | } |
350 | 79 | return new (mem) IOBuf::Block(mem + sizeof(IOBuf::Block), |
351 | 79 | block_size - sizeof(IOBuf::Block)); |
352 | 79 | } |
353 | | |
// Create a block of DEFAULT_BLOCK_SIZE total bytes (declared in iobuf.h).
inline IOBuf::Block* create_block() {
    return create_block(IOBuf::DEFAULT_BLOCK_SIZE);
}
357 | | |
// === Share TLS blocks between appending operations ===
// Max number of blocks in each TLS. This is a soft limit namely
// release_tls_block_chain() may exceed this limit sometimes.
const int MAX_BLOCKS_PER_THREAD = 8;

// Effective TLS cache limit for the calling thread.
inline int max_blocks_per_thread() {
    // If IOBufProfiler is enabled, do not cache blocks in TLS.
    return IsIOBufProfilerEnabled() ? 0 : MAX_BLOCKS_PER_THREAD;
}
367 | | |
// Per-thread cache of blocks shared by appending operations.
struct TLSData {
    // Head of the TLS block chain.
    IOBuf::Block* block_head;

    // Number of TLS blocks
    int num_blocks;

    // True if remove_tls_block_chain is registered to run at thread exit.
    bool registered;
};

static __thread TLSData g_tls_data = { NULL, 0, false };
380 | | |
// Used in UT: expose the calling thread's TLS cache state.
IOBuf::Block* get_tls_block_head() { return g_tls_data.block_head; }
int get_tls_block_count() { return g_tls_data.num_blocks; }
384 | | |
// Number of blocks that can't be returned to TLS which has too many block
// already. This counter should be 0 in most scenarios, otherwise performance
// of appending functions in IOPortal may be lowered.
static butil::static_atomic<size_t> g_num_hit_tls_threshold = BUTIL_STATIC_ATOMIC_INIT(0);
389 | | |
390 | | // Called in UT. |
391 | 0 | void remove_tls_block_chain() { |
392 | 0 | TLSData& tls_data = g_tls_data; |
393 | 0 | IOBuf::Block* b = tls_data.block_head; |
394 | 0 | if (!b) { |
395 | 0 | return; |
396 | 0 | } |
397 | 0 | tls_data.block_head = NULL; |
398 | 0 | int n = 0; |
399 | 0 | do { |
400 | 0 | IOBuf::Block* const saved_next = b->u.portal_next; |
401 | 0 | b->dec_ref(); |
402 | 0 | b = saved_next; |
403 | 0 | ++n; |
404 | 0 | } while (b); |
405 | 0 | CHECK_EQ(n, tls_data.num_blocks); |
406 | 0 | tls_data.num_blocks = 0; |
407 | 0 | } |
408 | | |
// Get a (non-full) block from TLS.
// Notice that the block is not removed from TLS.
IOBuf::Block* share_tls_block() {
    TLSData& tls_data = g_tls_data;
    IOBuf::Block* const b = tls_data.block_head;
    // Fast path: the head block still has room.
    if (b != NULL && !b->full()) {
        return b;
    }
    IOBuf::Block* new_block = NULL;
    if (b) {
        new_block = b;
        // Pop and release full blocks from the head until a non-full one
        // (or the end of the chain) is reached.
        while (new_block && new_block->full()) {
            IOBuf::Block* const saved_next = new_block->u.portal_next;
            new_block->dec_ref();
            --tls_data.num_blocks;
            new_block = saved_next;
        }
    } else if (!tls_data.registered) {
        tls_data.registered = true;
        // Only register atexit at the first time
        butil::thread_atexit(remove_tls_block_chain);
    }
    if (!new_block) {
        new_block = create_block(); // may be NULL
        if (new_block) {
            ++tls_data.num_blocks;
        }
    }
    // The surviving (possibly NULL) block becomes the new chain head.
    tls_data.block_head = new_block;
    return new_block;
}
440 | | |
441 | | // Return one block to TLS. |
442 | 0 | inline void release_tls_block(IOBuf::Block* b) { |
443 | 0 | if (!b) { |
444 | 0 | return; |
445 | 0 | } |
446 | 0 | TLSData& tls_data = g_tls_data; |
447 | 0 | if (b->full()) { |
448 | 0 | b->dec_ref(); |
449 | 0 | } else if (tls_data.num_blocks >= max_blocks_per_thread()) { |
450 | 0 | b->dec_ref(); |
451 | 0 | g_num_hit_tls_threshold.fetch_add(1, butil::memory_order_relaxed); |
452 | 0 | } else { |
453 | 0 | b->u.portal_next = tls_data.block_head; |
454 | 0 | tls_data.block_head = b; |
455 | 0 | ++tls_data.num_blocks; |
456 | 0 | if (!tls_data.registered) { |
457 | 0 | tls_data.registered = true; |
458 | 0 | butil::thread_atexit(remove_tls_block_chain); |
459 | 0 | } |
460 | 0 | } |
461 | 0 | } |
462 | | |
// Return chained blocks to TLS.
// NOTE: b MUST be non-NULL and all blocks linked SHOULD not be full.
void release_tls_block_chain(IOBuf::Block* b) {
    TLSData& tls_data = g_tls_data;
    size_t n = 0;
    // Cache already at (or over) the limit: release the whole chain.
    if (tls_data.num_blocks >= max_blocks_per_thread()) {
        do {
            ++n;
            IOBuf::Block* const saved_next = b->u.portal_next;
            b->dec_ref();
            b = saved_next;
        } while (b);
        g_num_hit_tls_threshold.fetch_add(n, butil::memory_order_relaxed);
        return;
    }
    // Walk to the tail of the chain, counting blocks along the way.
    IOBuf::Block* first_b = b;
    IOBuf::Block* last_b = NULL;
    do {
        ++n;
        CHECK(!b->full());
        if (b->u.portal_next == NULL) {
            last_b = b;
            break;
        }
        b = b->u.portal_next;
    } while (true);
    // Splice the whole chain in front of the existing TLS chain. This may
    // push num_blocks past the soft limit (documented above).
    last_b->u.portal_next = tls_data.block_head;
    tls_data.block_head = first_b;
    tls_data.num_blocks += n;
    if (!tls_data.registered) {
        tls_data.registered = true;
        butil::thread_atexit(remove_tls_block_chain);
    }
}
497 | | |
// Get and remove one (non-full) block from TLS. If TLS is empty, create one.
// Ownership of one reference is transferred to the caller.
IOBuf::Block* acquire_tls_block() {
    TLSData& tls_data = g_tls_data;
    IOBuf::Block* b = tls_data.block_head;
    if (!b) {
        return create_block();
    }
    // Discard full blocks from the head of the chain.
    while (b->full()) {
        IOBuf::Block* const saved_next = b->u.portal_next;
        b->dec_ref();
        tls_data.block_head = saved_next;
        --tls_data.num_blocks;
        b = saved_next;
        if (!b) {
            return create_block();
        }
    }
    // Detach the head block and hand its reference to the caller.
    tls_data.block_head = b->u.portal_next;
    --tls_data.num_blocks;
    b->u.portal_next = NULL;
    return b;
}
520 | | |
// Allocate an array of `cap' BlockRefs for a BigView, counting the
// allocation in the g_newbigview statistic.
inline IOBuf::BlockRef* acquire_blockref_array(size_t cap) {
    iobuf::g_newbigview.fetch_add(1, butil::memory_order_relaxed);
    return new IOBuf::BlockRef[cap];
}

inline IOBuf::BlockRef* acquire_blockref_array() {
    return acquire_blockref_array(IOBuf::INITIAL_CAP);
}

// `cap' is currently unused.
inline void release_blockref_array(IOBuf::BlockRef* refs, size_t cap) {
    delete[] refs;
}
533 | | |
534 | | } // namespace iobuf |
535 | | |
// Times a block could not be cached because the TLS chain was full
// (see g_num_hit_tls_threshold); expected to stay near 0.
size_t IOBuf::block_count_hit_tls_threshold() {
    return iobuf::g_num_hit_tls_threshold.load(butil::memory_order_relaxed);
}
539 | | |
// SmallView and BigView overlay the same storage inside IOBuf, so their
// sizes must match exactly.
BAIDU_CASSERT(sizeof(IOBuf::SmallView) == sizeof(IOBuf::BigView),
              sizeof_small_and_big_view_should_equal);

// The default block size must be page (4096) aligned.
BAIDU_CASSERT(IOBuf::DEFAULT_BLOCK_SIZE/4096*4096 == IOBuf::DEFAULT_BLOCK_SIZE,
              sizeof_block_should_be_multiply_of_4096);

const IOBuf::Area IOBuf::INVALID_AREA;
547 | | |
// Copy-construct by sharing the underlying blocks with `rhs' — one extra
// reference is taken per block; no payload bytes are copied.
IOBuf::IOBuf(const IOBuf& rhs) {
    if (rhs._small()) {
        _sv = rhs._sv;
        if (_sv.refs[0].block) {
            _sv.refs[0].block->inc_ref();
        }
        if (_sv.refs[1].block) {
            _sv.refs[1].block->inc_ref();
        }
    } else {
        _bv.magic = -1;
        _bv.start = 0;
        _bv.nref = rhs._bv.nref;
        _bv.cap_mask = rhs._bv.cap_mask;
        _bv.nbytes = rhs._bv.nbytes;
        _bv.refs = iobuf::acquire_blockref_array(_bv.capacity());
        // Compact rhs's (possibly wrapped) ring of refs into [0, nref).
        for (size_t i = 0; i < _bv.nref; ++i) {
            _bv.refs[i] = rhs._bv.ref_at(i);
            _bv.refs[i].block->inc_ref();
        }
    }
}
570 | | |
// Copy-assign: like the copy constructor, shares blocks rather than copying
// bytes. Reuses the existing ref array when both sides are BigViews with
// the same capacity; otherwise destroys and rebuilds in place.
void IOBuf::operator=(const IOBuf& rhs) {
    if (this == &rhs) {
        return;
    }
    if (!rhs._small() && !_small() && _bv.cap_mask == rhs._bv.cap_mask) {
        // Reuse array of refs
        // Remove references to previous blocks.
        for (size_t i = 0; i < _bv.nref; ++i) {
            _bv.ref_at(i).block->dec_ref();
        }
        // References blocks in rhs.
        _bv.start = 0;
        _bv.nref = rhs._bv.nref;
        _bv.nbytes = rhs._bv.nbytes;
        for (size_t i = 0; i < _bv.nref; ++i) {
            _bv.refs[i] = rhs._bv.ref_at(i);
            _bv.refs[i].block->inc_ref();
        }
    } else {
        // Layouts differ: destroy and reconstruct via the copy constructor.
        this->~IOBuf();
        new (this) IOBuf(rhs);
    }
}
594 | | |
// Append ref `r' to an IOBuf currently in SmallView form. Merges with the
// last ref when `r' continues the same block contiguously; converts to
// BigView when a third distinct ref is needed.
// MOVE=true transfers ownership of r's reference (no inc_ref; a dec_ref
// when the ref is merged away instead of stored).
template <bool MOVE>
void IOBuf::_push_or_move_back_ref_to_smallview(const BlockRef& r) {
    BlockRef* const refs = _sv.refs;
    if (NULL == refs[0].block) {
        // Buf was empty: r becomes the first ref.
        refs[0] = r;
        if (!MOVE) {
            r.block->inc_ref();
        }
        return;
    }
    if (NULL == refs[1].block) {
        if (refs[0].block == r.block &&
            refs[0].offset + refs[0].length == r.offset) { // Merge ref
            refs[0].length += r.length;
            if (MOVE) {
                r.block->dec_ref();
            }
            return;
        }
        refs[1] = r;
        if (!MOVE) {
            r.block->inc_ref();
        }
        return;
    }
    if (refs[1].block == r.block &&
        refs[1].offset + refs[1].length == r.offset) { // Merge ref
        refs[1].length += r.length;
        if (MOVE) {
            r.block->dec_ref();
        }
        return;
    }
    // Convert to BigView
    BlockRef* new_refs = iobuf::acquire_blockref_array();
    new_refs[0] = refs[0];
    new_refs[1] = refs[1];
    new_refs[2] = r;
    const size_t new_nbytes = refs[0].length + refs[1].length + r.length;
    if (!MOVE) {
        r.block->inc_ref();
    }
    // Overwrite the union with BigView state; magic=-1 appears to be the
    // marker _small() tests (declared in iobuf.h).
    _bv.magic = -1;
    _bv.start = 0;
    _bv.refs = new_refs;
    _bv.nref = 3;
    _bv.cap_mask = INITIAL_CAP - 1;
    _bv.nbytes = new_nbytes;
}
// Explicitly initialize templates.
template void IOBuf::_push_or_move_back_ref_to_smallview<true>(const BlockRef&);
template void IOBuf::_push_or_move_back_ref_to_smallview<false>(const BlockRef&);
647 | | |
// Append ref `r' to an IOBuf already in BigView form. Merges with the last
// ref when contiguous in the same block; otherwise appends, doubling the
// ref ring when it is full. MOVE semantics as in
// _push_or_move_back_ref_to_smallview.
template <bool MOVE>
void IOBuf::_push_or_move_back_ref_to_bigview(const BlockRef& r) {
    BlockRef& back = _bv.ref_at(_bv.nref - 1);
    if (back.block == r.block && back.offset + back.length == r.offset) {
        // Merge ref
        back.length += r.length;
        _bv.nbytes += r.length;
        if (MOVE) {
            r.block->dec_ref();
        }
        return;
    }

    if (_bv.nref != _bv.capacity()) {
        _bv.ref_at(_bv.nref++) = r;
        _bv.nbytes += r.length;
        if (!MOVE) {
            r.block->inc_ref();
        }
        return;
    }
    // resize, don't modify bv until new_refs is fully assigned
    const uint32_t new_cap = _bv.capacity() * 2;
    BlockRef* new_refs = iobuf::acquire_blockref_array(new_cap);
    // Copy refs in logical order so the new array starts at index 0.
    for (uint32_t i = 0; i < _bv.nref; ++i) {
        new_refs[i] = _bv.ref_at(i);
    }
    new_refs[_bv.nref++] = r;

    // Change other variables
    _bv.start = 0;
    iobuf::release_blockref_array(_bv.refs, _bv.capacity());
    _bv.refs = new_refs;
    _bv.cap_mask = new_cap - 1;
    _bv.nbytes += r.length;
    if (!MOVE) {
        r.block->inc_ref();
    }
}
// Explicitly initialize templates.
template void IOBuf::_push_or_move_back_ref_to_bigview<true>(const BlockRef&);
template void IOBuf::_push_or_move_back_ref_to_bigview<false>(const BlockRef&);
690 | | |
// Remove the first ref of this IOBuf. MOVEOUT=true keeps the caller's
// ownership of the reference (no dec_ref). Returns 0 on success, -1 when
// the buf is empty. A BigView falls back to SmallView at 2 remaining refs.
template <bool MOVEOUT>
int IOBuf::_pop_or_moveout_front_ref() {
    if (_small()) {
        if (_sv.refs[0].block != NULL) {
            if (!MOVEOUT) {
                _sv.refs[0].block->dec_ref();
            }
            // Shift the second ref into the first slot.
            _sv.refs[0] = _sv.refs[1];
            reset_block_ref(_sv.refs[1]);
            return 0;
        }
        return -1;
    } else {
        // _bv.nref must be greater than 2
        const uint32_t start = _bv.start;
        if (!MOVEOUT) {
            _bv.refs[start].block->dec_ref();
        }
        if (--_bv.nref > 2) {
            _bv.start = (start + 1) & _bv.cap_mask;
            _bv.nbytes -= _bv.refs[start].length;
        } else { // count==2, fall back to SmallView
            // SmallView has no nbytes field, so no size bookkeeping here.
            BlockRef* const saved_refs = _bv.refs;
            const uint32_t saved_cap_mask = _bv.cap_mask;
            _sv.refs[0] = saved_refs[(start + 1) & saved_cap_mask];
            _sv.refs[1] = saved_refs[(start + 2) & saved_cap_mask];
            iobuf::release_blockref_array(saved_refs, saved_cap_mask + 1);
        }
        return 0;
    }
}
// Explicitly initialize templates.
template int IOBuf::_pop_or_moveout_front_ref<true>();
template int IOBuf::_pop_or_moveout_front_ref<false>();
725 | | |
// Removes the last BlockRef and releases its reference on the block.
// Returns 0 on success, -1 if the buffer has no refs.
int IOBuf::_pop_back_ref() {
    if (_small()) {
        // SmallView: refs[1] is the back if present, otherwise refs[0].
        if (_sv.refs[1].block != NULL) {
            _sv.refs[1].block->dec_ref();
            reset_block_ref(_sv.refs[1]);
            return 0;
        } else if (_sv.refs[0].block != NULL) {
            _sv.refs[0].block->dec_ref();
            reset_block_ref(_sv.refs[0]);
            return 0;
        }
        return -1;
    } else {
        // _bv.nref must be greater than 2
        const uint32_t start = _bv.start;
        // The back ref sits at (start + nref - 1) in the circular array.
        IOBuf::BlockRef& back = _bv.refs[(start + _bv.nref - 1) & _bv.cap_mask];
        back.block->dec_ref();
        if (--_bv.nref > 2) {
            _bv.nbytes -= back.length;
        } else { // count==2, fall back to SmallView
            // Copy the two surviving refs out before freeing the array:
            // _sv and _bv overlap in a union, so writing _sv clobbers _bv.
            BlockRef* const saved_refs = _bv.refs;
            const uint32_t saved_cap_mask = _bv.cap_mask;
            _sv.refs[0] = saved_refs[start];
            _sv.refs[1] = saved_refs[(start + 1) & saved_cap_mask];
            iobuf::release_blockref_array(saved_refs, saved_cap_mask + 1);
        }
        return 0;
    }
}
755 | | |
// Releases all block references and resets this buffer to the empty state.
void IOBuf::clear() {
    if (_small()) {
        // SmallView refs are front-packed: refs[1] can only be set when
        // refs[0] is, so an empty refs[0] means the buffer is empty.
        if (_sv.refs[0].block != NULL) {
            _sv.refs[0].block->dec_ref();
            reset_block_ref(_sv.refs[0]);

            if (_sv.refs[1].block != NULL) {
                _sv.refs[1].block->dec_ref();
                reset_block_ref(_sv.refs[1]);
            }
        }
    } else {
        // BigView: drop every ref, free the ref array, then placement-new
        // this object back into the default (small, empty) state.
        for (uint32_t i = 0; i < _bv.nref; ++i) {
            _bv.ref_at(i).block->dec_ref();
        }
        iobuf::release_blockref_array(_bv.refs, _bv.capacity());
        new (this) IOBuf;
    }
}
775 | | |
// Removes `n' bytes from the front of the buffer.
// Returns the number of bytes actually popped, i.e. min(n, length()).
size_t IOBuf::pop_front(size_t n) {
    const size_t len = length();
    if (n >= len) {
        clear();
        return len;
    }
    const size_t saved_n = n;
    while (n) { // length() == 0 does not enter
        IOBuf::BlockRef &r = _front_ref();
        if (r.length > n) {
            // Remaining bytes fit inside the front ref: shrink it in place.
            r.offset += n;
            r.length -= n;
            if (!_small()) {
                _bv.nbytes -= n; // BigView caches total bytes explicitly
            }
            return saved_n;
        }
        // The whole front ref is consumed; release it and continue.
        n -= r.length;
        _pop_front_ref();
    }
    return saved_n;
}
798 | | |
// Pops the first byte of the buffer into `*c'.
// Returns false if the buffer is empty.
bool IOBuf::cut1(void* c) {
    if (empty()) {
        return false;
    }
    IOBuf::BlockRef &r = _front_ref();
    *(char*)c = r.block->data[r.offset];
    if (r.length > 1) {
        ++r.offset;
        --r.length;
        if (!_small()) {
            --_bv.nbytes;
        }
    } else {
        // Front ref had exactly one byte left; release it.
        _pop_front_ref();
    }
    return true;
}
816 | | |
// Removes `n' bytes from the back of the buffer.
// Returns the number of bytes actually popped, i.e. min(n, length()).
size_t IOBuf::pop_back(size_t n) {
    const size_t len = length();
    if (n >= len) {
        clear();
        return len;
    }
    const size_t saved_n = n;
    while (n) { // length() == 0 does not enter
        IOBuf::BlockRef &r = _back_ref();
        if (r.length > n) {
            // Shrink the back ref in place (its offset is unchanged).
            r.length -= n;
            if (!_small()) {
                _bv.nbytes -= n;
            }
            return saved_n;
        }
        n -= r.length;
        _pop_back_ref();
    }
    return saved_n;
}
838 | | |
// Moves up to `n' front bytes from this buffer into `out'. Zero-copy:
// block refs are transferred or shared, no byte data is copied.
// Returns the number of bytes actually moved.
size_t IOBuf::cutn(IOBuf* out, size_t n) {
    const size_t len = length();
    if (n > len) {
        n = len;
    }
    const size_t saved_n = n;
    while (n) { // length() == 0 does not enter
        IOBuf::BlockRef &r = _front_ref();
        if (r.length <= n) {
            // Transfer the whole ref without touching the refcount:
            // _move_back_ref takes ownership, _moveout_front_ref skips dec_ref.
            n -= r.length;
            out->_move_back_ref(r);
            _moveout_front_ref();
        } else {
            // Split the front ref: `out' shares the first n bytes of the block.
            const IOBuf::BlockRef cr = { r.offset, (uint32_t)n, r.block };
            out->_push_back_ref(cr);

            r.offset += n;
            r.length -= n;
            if (!_small()) {
                _bv.nbytes -= n;
            }
            return saved_n;
        }
    }
    return saved_n;
}
865 | | |
// Copies up to `n' front bytes into the flat buffer `out' and removes
// them from this buffer. Returns the number of bytes actually cut.
size_t IOBuf::cutn(void* out, size_t n) {
    const size_t len = length();
    if (n > len) {
        n = len;
    }
    const size_t saved_n = n;
    while (n) { // length() == 0 does not enter
        IOBuf::BlockRef &r = _front_ref();
        if (r.length <= n) {
            // The whole front ref is consumed.
            iobuf::cp(out, r.block->data + r.offset, r.length);
            out = (char*)out + r.length;
            n -= r.length;
            _pop_front_ref();
        } else {
            // Partial consumption: copy n bytes and shrink the front ref.
            iobuf::cp(out, r.block->data + r.offset, n);
            out = (char*)out + n;
            r.offset += n;
            r.length -= n;
            if (!_small()) {
                _bv.nbytes -= n;
            }
            return saved_n;
        }
    }
    return saved_n;
}
892 | | |
893 | 0 | size_t IOBuf::cutn(std::string* out, size_t n) { |
894 | 0 | if (n == 0) { |
895 | 0 | return 0; |
896 | 0 | } |
897 | 0 | const size_t len = length(); |
898 | 0 | if (n > len) { |
899 | 0 | n = len; |
900 | 0 | } |
901 | 0 | const size_t old_size = out->size(); |
902 | 0 | out->resize(out->size() + n); |
903 | 0 | return cutn(&(*out)[old_size], n); |
904 | 0 | } |
905 | | |
906 | 0 | int IOBuf::_cut_by_char(IOBuf* out, char d) { |
907 | 0 | const size_t nref = _ref_num(); |
908 | 0 | size_t n = 0; |
909 | | |
910 | 0 | for (size_t i = 0; i < nref; ++i) { |
911 | 0 | IOBuf::BlockRef const& r = _ref_at(i); |
912 | 0 | char const* const s = r.block->data + r.offset; |
913 | 0 | for (uint32_t j = 0; j < r.length; ++j, ++n) { |
914 | 0 | if (s[j] == d) { |
915 | | // There's no way cutn/pop_front fails |
916 | 0 | cutn(out, n); |
917 | 0 | pop_front(1); |
918 | 0 | return 0; |
919 | 0 | } |
920 | 0 | } |
921 | 0 | } |
922 | | |
923 | 0 | return -1; |
924 | 0 | } |
925 | | |
// Cuts everything before the first occurrence of the ndelim-byte
// delimiter [dbegin, dbegin+ndelim) into `out' and removes the delimiter
// from this buffer. Matching uses a rolling byte signature packed into an
// unsigned long, so ndelim must not exceed sizeof(unsigned long).
// Returns 0 on success, -1 if the delimiter is too long or not found.
int IOBuf::_cut_by_delim(IOBuf* out, char const* dbegin, size_t ndelim) {
    typedef unsigned long SigType;
    const size_t NMAX = sizeof(SigType);

    if (ndelim > NMAX || ndelim > length()) {
        return -1;
    }

    // Pack the delimiter bytes into the target signature.
    SigType dsig = 0;
    for (size_t i = 0; i < ndelim; ++i) {
        dsig = (dsig << CHAR_BIT) | static_cast<SigType>(dbegin[i]);
    }

    // Mask keeping only the low ndelim bytes of the rolling signature.
    const SigType SIGMASK =
        (ndelim == NMAX ? (SigType)-1 : (((SigType)1 << (ndelim * CHAR_BIT)) - 1));

    const size_t nref = _ref_num();
    SigType sig = 0;
    size_t n = 0;

    for (size_t i = 0; i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        char const* const s = r.block->data + r.offset;

        // Slide one byte at a time; when the rolling signature equals the
        // delimiter's, position n+1 is the end of the first match.
        for (uint32_t j = 0; j < r.length; ++j, ++n) {
            sig = ((sig << CHAR_BIT) | static_cast<SigType>(s[j])) & SIGMASK;
            if (sig == dsig) {
                // There's no way cutn/pop_front fails
                cutn(out, n + 1 - ndelim);
                pop_front(ndelim);
                return 0;
            }
        }
    }

    return -1;
}
963 | | |
// Since cut_into_file_descriptor() allocates iovec on stack, IOV_MAX=1024
// is too large(in the worst case) for bthreads with small stacks.
// This caps the number of iovec entries batched per writev/pwritev call.
static const size_t IOBUF_IOV_MAX = 256;
967 | | |
// Writes front refs to `fd' with writev (or pwritev at `offset' when
// offset >= 0) and pops whatever was written. At most
// min(_ref_num(), IOBUF_IOV_MAX) refs are batched, stopping early once
// `size_hint' bytes have been gathered.
// Returns the (possibly partial or negative) writev/pwritev result.
ssize_t IOBuf::pcut_into_file_descriptor(int fd, off_t offset, size_t size_hint) {
    if (empty()) {
        return 0;
    }

    const size_t nref = std::min(_ref_num(), IOBUF_IOV_MAX);
    struct iovec vec[nref];  // VLA (GCC/Clang extension), bounded by IOBUF_IOV_MAX
    size_t nvec = 0;
    size_t cur_len = 0;

    do {
        IOBuf::BlockRef const& r = _ref_at(nvec);
        vec[nvec].iov_base = r.block->data + r.offset;
        vec[nvec].iov_len = r.length;
        ++nvec;
        cur_len += r.length;
    } while (nvec < nref && cur_len < size_hint);

    ssize_t nw = 0;

    if (offset >= 0) {
        // Resolved once per process; may fall back to an emulated pwritev.
        static iobuf::iov_function pwritev_func = iobuf::get_pwritev_func();
        nw = pwritev_func(fd, vec, nvec, offset);
    } else {
        nw = ::writev(fd, vec, nvec);
    }
    if (nw > 0) {
        pop_front(nw); // handles partial writes correctly
    }
    return nw;
}
999 | | |
// Gathers front refs into an iovec batch (capped by IOBUF_IOV_MAX and
// `size_hint') and hands them to `writer'. Pops whatever was written.
// Returns WriteV's result.
ssize_t IOBuf::cut_into_writer(IWriter* writer, size_t size_hint) {
    if (empty()) {
        return 0;
    }
    const size_t nref = std::min(_ref_num(), IOBUF_IOV_MAX);
    struct iovec vec[nref];  // VLA (GCC/Clang extension), bounded by IOBUF_IOV_MAX
    size_t nvec = 0;
    size_t cur_len = 0;

    do {
        IOBuf::BlockRef const& r = _ref_at(nvec);
        vec[nvec].iov_base = r.block->data + r.offset;
        vec[nvec].iov_len = r.length;
        ++nvec;
        cur_len += r.length;
    } while (nvec < nref && cur_len < size_hint);

    const ssize_t nw = writer->WriteV(vec, nvec);
    if (nw > 0) {
        pop_front(nw); // handles partial writes correctly
    }
    return nw;
}
1023 | | |
// Writes the first block ref into `ssl' and pops the written bytes.
// Only one ref is attempted per call. `*ssl_error' receives
// SSL_get_error() for the SSL_write result.
ssize_t IOBuf::cut_into_SSL_channel(SSL* ssl, int* ssl_error) {
    *ssl_error = SSL_ERROR_NONE;
    if (empty()) {
        return 0;
    }

    IOBuf::BlockRef const& r = _ref_at(0);
    // Clear the thread's error queue so SSL_get_error reflects this call.
    ERR_clear_error();
    const int nw = SSL_write(ssl, r.block->data + r.offset, r.length);
    if (nw > 0) {
        pop_front(nw);
    }
    *ssl_error = SSL_get_error(ssl, nw);
    return nw;
}
1039 | | |
// Writes `count' IOBufs into `ssl' in order, popping written bytes from
// each. Stops at the first short/failed write. Returns total bytes
// written (or the failing rc when nothing was written); `*ssl_error'
// distinguishes retryable (SSL_ERROR_WANT_WRITE) from fatal errors.
ssize_t IOBuf::cut_multiple_into_SSL_channel(SSL* ssl, IOBuf* const* pieces,
                                             size_t count, int* ssl_error) {
    ssize_t nw = 0;
    *ssl_error = SSL_ERROR_NONE;
    for (size_t i = 0; i < count; ) {
        if (pieces[i]->empty()) {
            ++i;
            continue;
        }

        ssize_t rc = pieces[i]->cut_into_SSL_channel(ssl, ssl_error);
        if (rc > 0) {
            nw += rc;
        } else {
            if (rc < 0) {
                if (*ssl_error == SSL_ERROR_WANT_WRITE
                    || (*ssl_error == SSL_ERROR_SYSCALL
                        && BIO_fd_non_fatal_error(errno) == 1)) {
                    // Non fatal error, tell caller to write again
                    *ssl_error = SSL_ERROR_WANT_WRITE;
                } else {
                    // Other errors are fatal
                    return rc;
                }
            }
            if (nw == 0) {
                nw = rc; // Nothing written yet, overwrite nw
            }
            break;
        }
    }

#ifndef USE_MESALINK
    // Flush remaining data inside the BIO buffer layer
    BIO* wbio = SSL_get_wbio(ssl);
    if (BIO_wpending(wbio) > 0) {
        int rc = BIO_flush(wbio);
        if (rc <= 0 && BIO_fd_non_fatal_error(errno) == 0) {
            // Fatal error during BIO_flush
            *ssl_error = SSL_ERROR_SYSCALL;
            return rc;
        }
    }
#else
    int rc = SSL_flush(ssl);
    if (rc <= 0) {
        *ssl_error = SSL_ERROR_SYSCALL;
        return rc;
    }
#endif

    return nw;
}
1093 | | |
// Gathers refs from all `pieces' (up to IOBUF_IOV_MAX entries total) into
// one iovec and writes them to `fd' with writev (or pwritev at `offset'
// when offset >= 0). Written bytes are then popped from the pieces in
// order. Returns the writev/pwritev result.
ssize_t IOBuf::pcut_multiple_into_file_descriptor(
    int fd, off_t offset, IOBuf* const* pieces, size_t count) {
    if (BAIDU_UNLIKELY(count == 0)) {
        return 0;
    }
    if (1UL == count) {
        // Single piece: reuse the simpler path.
        return pieces[0]->pcut_into_file_descriptor(fd, offset);
    }
    struct iovec vec[IOBUF_IOV_MAX];
    size_t nvec = 0;
    for (size_t i = 0; i < count; ++i) {
        const IOBuf* p = pieces[i];
        const size_t nref = p->_ref_num();
        // Inner bound stops filling once the iovec is full.
        for (size_t j = 0; j < nref && nvec < IOBUF_IOV_MAX; ++j, ++nvec) {
            IOBuf::BlockRef const& r = p->_ref_at(j);
            vec[nvec].iov_base = r.block->data + r.offset;
            vec[nvec].iov_len = r.length;
        }
    }

    ssize_t nw = 0;
    if (offset >= 0) {
        static iobuf::iov_function pwritev_func = iobuf::get_pwritev_func();
        nw = pwritev_func(fd, vec, nvec, offset);
    } else {
        nw = ::writev(fd, vec, nvec);
    }
    if (nw <= 0) {
        return nw;
    }
    // Distribute the written byte count across the pieces front-to-back.
    size_t npop_all = nw;
    for (size_t i = 0; i < count; ++i) {
        npop_all -= pieces[i]->pop_front(npop_all); // pop_front caps at piece length
        if (npop_all == 0) {
            break;
        }
    }
    return nw;
}
1133 | | |
// Same as pcut_multiple_into_file_descriptor but targets an IWriter:
// gathers up to IOBUF_IOV_MAX refs from all pieces, calls WriteV once,
// then pops the written bytes from the pieces in order.
ssize_t IOBuf::cut_multiple_into_writer(
    IWriter* writer, IOBuf* const* pieces, size_t count) {
    if (BAIDU_UNLIKELY(count == 0)) {
        return 0;
    }
    if (1UL == count) {
        // Single piece: reuse the simpler path.
        return pieces[0]->cut_into_writer(writer);
    }
    struct iovec vec[IOBUF_IOV_MAX];
    size_t nvec = 0;
    for (size_t i = 0; i < count; ++i) {
        const IOBuf* p = pieces[i];
        const size_t nref = p->_ref_num();
        // Inner bound stops filling once the iovec is full.
        for (size_t j = 0; j < nref && nvec < IOBUF_IOV_MAX; ++j, ++nvec) {
            IOBuf::BlockRef const& r = p->_ref_at(j);
            vec[nvec].iov_base = r.block->data + r.offset;
            vec[nvec].iov_len = r.length;
        }
    }

    const ssize_t nw = writer->WriteV(vec, nvec);
    if (nw <= 0) {
        return nw;
    }
    // Distribute the written byte count across the pieces front-to-back.
    size_t npop_all = nw;
    for (size_t i = 0; i < count; ++i) {
        npop_all -= pieces[i]->pop_front(npop_all); // pop_front caps at piece length
        if (npop_all == 0) {
            break;
        }
    }
    return nw;
}
1167 | | |
1168 | | |
1169 | 0 | void IOBuf::append(const IOBuf& other) { |
1170 | 0 | const size_t nref = other._ref_num(); |
1171 | 0 | for (size_t i = 0; i < nref; ++i) { |
1172 | 0 | _push_back_ref(other._ref_at(i)); |
1173 | 0 | } |
1174 | 0 | } |
1175 | | |
// Steals all refs from `movable_other' and appends them to this buffer;
// the source is left empty. Transferred refs keep their refcounts
// (ownership moves rather than being shared).
void IOBuf::append(const Movable& movable_other) {
    if (empty()) {
        // Cheapest path: just exchange internal state with the source.
        swap(movable_other.value());
    } else {
        butil::IOBuf& other = movable_other.value();
        const size_t nref = other._ref_num();
        for (size_t i = 0; i < nref; ++i) {
            _move_back_ref(other._ref_at(i)); // no inc/dec of block refcounts
        }
        if (!other._small()) {
            iobuf::release_blockref_array(other._bv.refs, other._bv.capacity());
        }
        // Reset the source to the default empty state.
        new (&other) IOBuf;
    }
}
1191 | | |
// Appends a single byte, writing it into the shared thread-local block.
// Returns 0 on success, -1 when no block could be acquired.
int IOBuf::push_back(char c) {
    IOBuf::Block* b = iobuf::share_tls_block();
    if (BAIDU_UNLIKELY(!b)) {
        return -1;
    }
    b->data[b->size] = c;
    const IOBuf::BlockRef r = { b->size, 1, b };
    ++b->size;
    // _push_back_ref may merge this 1-byte ref with the previous one.
    _push_back_ref(r);
    return 0;
}
1203 | | |
1204 | 0 | int IOBuf::append(char const* s) { |
1205 | 0 | if (BAIDU_LIKELY(s != NULL)) { |
1206 | 0 | return append(s, strlen(s)); |
1207 | 0 | } |
1208 | 0 | return -1; |
1209 | 0 | } |
1210 | | |
// Appends `count' bytes copied from `data'. Returns 0 on success, -1 on
// NULL data or block-allocation failure.
int IOBuf::append(void const* data, size_t count) {
    if (BAIDU_UNLIKELY(!data)) {
        return -1;
    }
    if (count == 1) {
        // Fast path for single bytes.
        return push_back(*((char const*)data));
    }
    size_t total_nc = 0;
    while (total_nc < count) { // excluded count == 0
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return -1;
        }
        // Fill as much of the shared block as fits.
        const size_t nc = std::min(count - total_nc, b->left_space());
        iobuf::cp(b->data + b->size, (char*)data + total_nc, nc);

        const IOBuf::BlockRef r = { (uint32_t)b->size, (uint32_t)nc, b };
        _push_back_ref(r);
        b->size += nc;
        total_nc += nc;
    }
    return 0;
}
1234 | | |
// Appends the `n' iovec entries by copying their bytes. Multiple small
// entries are packed into a single shared TLS block per iteration of the
// outer loop. Returns 0 on success, -1 on block-allocation failure.
int IOBuf::appendv(const const_iovec* vec, size_t n) {
    size_t offset = 0; // resume position within vec[i] across blocks
    for (size_t i = 0; i < n;) {
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return -1;
        }
        uint32_t total_cp = 0; // bytes copied into `b' this round
        for (; i < n; ++i, offset = 0) {
            const const_iovec & vec_i = vec[i];
            const size_t nc = std::min(vec_i.iov_len - offset, b->left_space() - total_cp);
            iobuf::cp(b->data + b->size + total_cp, (char*)vec_i.iov_base + offset, nc);
            total_cp += nc;
            offset += nc;
            if (offset != vec_i.iov_len) {
                // Block is full; continue this entry with a fresh block.
                break;
            }
        }

        const IOBuf::BlockRef r = { (uint32_t)b->size, total_cp, b };
        b->size += total_cp;
        _push_back_ref(r);
    }
    return 0;
}
1260 | | |
// Wraps user-owned memory [data, data+size) as a Block without copying.
// `deleter' (defaulting to ::free) is invoked on `data' when the last
// reference is released. `meta' is stored in the block and retrievable
// via get_first_data_meta(). Returns 0 on success, -1 on failure (the
// deleter is NOT called on failure paths other than size == 0).
int IOBuf::append_user_data_with_meta(void* data,
                                      size_t size,
                                      std::function<void(void*)> deleter,
                                      uint64_t meta) {
    // BlockRef.length is 32-bit; leave headroom below UINT32_MAX.
    if (size > 0xFFFFFFFFULL - 100) {
        LOG(FATAL) << "data_size=" << size << " is too large";
        return -1;
    }
    if (!deleter) {
        deleter = ::free;
    }
    if (!size) {
        // Nothing to reference; release the user memory immediately.
        deleter(data);
        return 0;
    }
    // Extra space holds the UserDataExtension (deleter storage).
    char* mem = (char*)malloc(sizeof(IOBuf::Block) + sizeof(UserDataExtension));
    if (mem == NULL) {
        return -1;
    }
    IOBuf::Block* b = new (mem) IOBuf::Block((char*)data, size, std::move(deleter));
    b->u.data_meta = meta;
    const IOBuf::BlockRef r = { 0, b->cap, b };
    // Move (not push): the block starts with exactly one reference, ours.
    _move_back_ref(r);
    return 0;
}
1286 | | |
1287 | 0 | uint64_t IOBuf::get_first_data_meta() { |
1288 | 0 | if (_ref_num() == 0) { |
1289 | 0 | return 0; |
1290 | 0 | } |
1291 | 0 | IOBuf::BlockRef const& r = _ref_at(0); |
1292 | 0 | if (!(r.block->flags & IOBUF_BLOCK_FLAGS_USER_DATA)) { |
1293 | 0 | return 0; |
1294 | 0 | } |
1295 | 0 | return r.block->u.data_meta; |
1296 | 0 | } |
1297 | | |
// Resizes the buffer to exactly `n' bytes: shrinks from the back, or
// grows by appending bytes equal to `c'. Returns 0 on success, -1 on
// block-allocation failure (the buffer may have partially grown).
int IOBuf::resize(size_t n, char c) {
    const size_t saved_len = length();
    if (n < saved_len) {
        pop_back(saved_len - n);
        return 0;
    }
    const size_t count = n - saved_len;
    size_t total_nc = 0;
    while (total_nc < count) { // excluded count == 0
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return -1;
        }
        // Fill as much of the shared block as fits.
        const size_t nc = std::min(count - total_nc, b->left_space());
        memset(b->data + b->size, c, nc);

        const IOBuf::BlockRef r = { (uint32_t)b->size, (uint32_t)nc, b };
        _push_back_ref(r);
        b->size += nc;
        total_nc += nc;
    }
    return 0;
}
1321 | | |
// NOTE: We don't use C++ bitwise fields which make copying slower.
// Layout of a 64-bit Area handle: [ref_index:19][ref_offset:15][size:30].
static const int REF_INDEX_BITS = 19;
static const int REF_OFFSET_BITS = 15;
static const int AREA_SIZE_BITS = 30;
// Maximum value representable in each field (also used as extraction masks).
static const uint32_t MAX_REF_INDEX = (((uint32_t)1) << REF_INDEX_BITS) - 1;
static const uint32_t MAX_REF_OFFSET = (((uint32_t)1) << REF_OFFSET_BITS) - 1;
static const uint32_t MAX_AREA_SIZE = (((uint32_t)1) << AREA_SIZE_BITS) - 1;
1329 | | |
1330 | | inline IOBuf::Area make_area(uint32_t ref_index, uint32_t ref_offset, |
1331 | 0 | uint32_t size) { |
1332 | 0 | if (ref_index > MAX_REF_INDEX || |
1333 | 0 | ref_offset > MAX_REF_OFFSET || |
1334 | 0 | size > MAX_AREA_SIZE) { |
1335 | 0 | LOG(ERROR) << "Too big parameters!"; |
1336 | 0 | return IOBuf::INVALID_AREA; |
1337 | 0 | } |
1338 | 0 | return (((uint64_t)ref_index) << (REF_OFFSET_BITS + AREA_SIZE_BITS)) |
1339 | 0 | | (((uint64_t)ref_offset) << AREA_SIZE_BITS) |
1340 | 0 | | size; |
1341 | 0 | } |
// Extracts the ref-index field of an Area handle (see make_area).
inline uint32_t get_area_ref_index(IOBuf::Area c) {
    return (c >> (REF_OFFSET_BITS + AREA_SIZE_BITS)) & MAX_REF_INDEX;
}
// Extracts the byte offset within the starting ref.
inline uint32_t get_area_ref_offset(IOBuf::Area c) {
    return (c >> AREA_SIZE_BITS) & MAX_REF_OFFSET;
}
// Extracts the total size in bytes of the reserved area.
inline uint32_t get_area_size(IOBuf::Area c) {
    return (c & MAX_AREA_SIZE);
}
1351 | | |
// Appends `count' uninitialized bytes and returns an Area handle that
// unsafe_assign() can later fill in. Returns INVALID_AREA on failure.
IOBuf::Area IOBuf::reserve(size_t count) {
    IOBuf::Area result = INVALID_AREA;
    size_t total_nc = 0;
    while (total_nc < count) { // excluded count == 0
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return INVALID_AREA;
        }
        const size_t nc = std::min(count - total_nc, b->left_space());
        const IOBuf::BlockRef r = { (uint32_t)b->size, (uint32_t)nc, b };
        _push_back_ref(r);
        if (total_nc == 0) {
            // Encode the area at first time. Notice that the pushed ref may
            // be merged with existing ones.
            result = make_area(_ref_num() - 1, _back_ref().length - nc, count);
        }
        total_nc += nc;
        b->size += nc;
    }
    return result;
}
1373 | | |
// Writes bytes from `data' into an area previously returned by
// reserve(). "Unsafe" because it mutates block contents in place with no
// sharing checks. Returns 0 on success, -1 on invalid arguments or when
// the buffer is shorter than the reserved area.
int IOBuf::unsafe_assign(Area area, const void* data) {
    if (area == INVALID_AREA || data == NULL) {
        LOG(ERROR) << "Invalid parameters";
        return -1;
    }
    const uint32_t ref_index = get_area_ref_index(area);
    uint32_t ref_offset = get_area_ref_offset(area);
    uint32_t length = get_area_size(area);
    const size_t nref = _ref_num();
    for (size_t i = ref_index; i < nref; ++i) {
        IOBuf::BlockRef& r = _ref_at(i);
        // NOTE: we can't check if the block is shared with another IOBuf or
        // not since even a single IOBuf may reference a block multiple times
        // (by different BlockRef-s)

        const size_t nc = std::min(length, r.length - ref_offset);
        iobuf::cp(r.block->data + r.offset + ref_offset, data, nc);
        if (length == nc) {
            return 0;
        }
        // Subsequent refs are written from their beginning.
        ref_offset = 0;
        length -= nc;
        data = (char*)data + nc;
    }

    // Use check because we need to see the stack here.
    CHECK(false) << "IOBuf(" << size() << ", nref=" << _ref_num()
                 << ") is shorter than what we reserved("
                 << "ref=" << get_area_ref_index(area)
                 << " off=" << get_area_ref_offset(area)
                 << " size=" << get_area_size(area)
                 << "), this assignment probably corrupted something...";
    return -1;
}
1408 | | |
// Appends up to `n' bytes starting at position `pos' of this buffer to
// `buf' by sharing refs (zero-copy). This buffer is unchanged.
// Returns the number of bytes appended.
size_t IOBuf::append_to(IOBuf* buf, size_t n, size_t pos) const {
    const size_t nref = _ref_num();
    // Skip `pos' bytes. `offset' is the starting position in starting BlockRef.
    size_t offset = pos;
    size_t i = 0;
    for (; offset != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        if (offset < (size_t)r.length) {
            break;
        }
        offset -= r.length;
    }
    size_t m = n;
    for (; m != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        const size_t nc = std::min(m, (size_t)r.length - offset);
        // Share a (possibly trimmed) view of the source ref.
        const IOBuf::BlockRef r2 = { (uint32_t)(r.offset + offset),
                                     (uint32_t)nc, r.block };
        buf->_push_back_ref(r2);
        offset = 0; // only the first copied ref starts mid-block
        m -= nc;
    }
    // If nref == 0, here returns 0 correctly
    return n - m;
}
1434 | | |
// Copies up to `n' bytes starting at position `pos' into the flat buffer
// `d'. This buffer is unchanged. Returns the number of bytes copied.
size_t IOBuf::copy_to(void* d, size_t n, size_t pos) const {
    const size_t nref = _ref_num();
    // Skip `pos' bytes. `offset' is the starting position in starting BlockRef.
    size_t offset = pos;
    size_t i = 0;
    for (; offset != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        if (offset < (size_t)r.length) {
            break;
        }
        offset -= r.length;
    }
    size_t m = n;
    for (; m != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        const size_t nc = std::min(m, (size_t)r.length - offset);
        iobuf::cp(d, r.block->data + r.offset + offset, nc);
        offset = 0; // only the first copied ref starts mid-block
        d = (char*)d + nc;
        m -= nc;
    }
    // If nref == 0, here returns 0 correctly
    return n - m;
}
1459 | | |
1460 | 0 | size_t IOBuf::copy_to(std::string* s, size_t n, size_t pos) const { |
1461 | 0 | const size_t len = length(); |
1462 | 0 | if (len <= pos) { |
1463 | 0 | return 0; |
1464 | 0 | } |
1465 | 0 | if (n > len - pos) { // note: n + pos may overflow |
1466 | 0 | n = len - pos; |
1467 | 0 | } |
1468 | 0 | s->resize(n); |
1469 | 0 | return copy_to(&(*s)[0], n, pos); |
1470 | 0 | } |
1471 | | |
1472 | 0 | size_t IOBuf::append_to(std::string* s, size_t n, size_t pos) const { |
1473 | 0 | const size_t len = length(); |
1474 | 0 | if (len <= pos) { |
1475 | 0 | return 0; |
1476 | 0 | } |
1477 | 0 | if (n > len - pos) { // note: n + pos may overflow |
1478 | 0 | n = len - pos; |
1479 | 0 | } |
1480 | 0 | const size_t old_size = s->size(); |
1481 | 0 | s->resize(old_size + n); |
1482 | 0 | return copy_to(&(*s)[old_size], n, pos); |
1483 | 0 | } |
1484 | | |
1485 | | |
// Copies at most `n' bytes starting at `pos' into `s' and appends a
// terminating '\0'. NOTE: `s' must have room for n + 1 bytes since the
// terminator is written after the copied data.
// Returns the number of bytes copied, excluding the terminator.
size_t IOBuf::copy_to_cstr(char* s, size_t n, size_t pos) const {
    const size_t nc = copy_to(s, n, pos);
    s[nc] = '\0';
    return nc;
}
1491 | | |
// Returns a pointer to the first `n' contiguous bytes of the buffer.
// If they already lie within the first block, an internal pointer is
// returned without copying; otherwise the bytes are gathered into the
// caller-provided scratch `d' and `d' is returned.
// Returns NULL when the buffer holds fewer than `n' bytes.
void const* IOBuf::fetch(void* d, size_t n) const {
    if (n <= length()) {
        IOBuf::BlockRef const& r0 = _ref_at(0);
        if (n <= r0.length) {
            // Fast path: no copy needed.
            return r0.block->data + r0.offset;
        }

        iobuf::cp(d, r0.block->data + r0.offset, r0.length);
        size_t total_nc = r0.length;
        const size_t nref = _ref_num();
        for (size_t i = 1; i < nref; ++i) {
            IOBuf::BlockRef const& r = _ref_at(i);
            if (n <= r.length + total_nc) {
                // Last (possibly partial) chunk.
                iobuf::cp((char*)d + total_nc,
                          r.block->data + r.offset, n - total_nc);
                return d;
            }
            iobuf::cp((char*)d + total_nc, r.block->data + r.offset, r.length);
            total_nc += r.length;
        }
    }
    return NULL;
}
1515 | | |
1516 | 12.7k | const void* IOBuf::fetch1() const { |
1517 | 12.7k | if (!empty()) { |
1518 | 12.6k | const IOBuf::BlockRef& r0 = _front_ref(); |
1519 | 12.6k | return r0.block->data + r0.offset; |
1520 | 12.6k | } |
1521 | 153 | return NULL; |
1522 | 12.7k | } |
1523 | | |
1524 | 0 | std::ostream& operator<<(std::ostream& os, const IOBuf& buf) { |
1525 | 0 | const size_t n = buf.backing_block_num(); |
1526 | 0 | for (size_t i = 0; i < n; ++i) { |
1527 | 0 | StringPiece blk = buf.backing_block(i); |
1528 | 0 | os.write(blk.data(), blk.size()); |
1529 | 0 | } |
1530 | 0 | return os; |
1531 | 0 | } |
1532 | | |
// Byte-wise equality against `s'. The up-front size check guarantees
// every ref's span fits within the remainder of `s'.
bool IOBuf::equals(const butil::StringPiece& s) const {
    if (size() != s.size()) {
        return false;
    }
    const size_t nref = _ref_num();
    size_t soff = 0; // current comparison position within `s'
    for (size_t i = 0; i < nref; ++i) {
        const BlockRef& r = _ref_at(i);
        if (memcmp(r.block->data + r.offset, s.data() + soff, r.length) != 0) {
            return false;
        }
        soff += r.length;
    }
    return true;
}
1548 | | |
1549 | 4.94k | StringPiece IOBuf::backing_block(size_t i) const { |
1550 | 4.94k | if (i < _ref_num()) { |
1551 | 4.94k | const BlockRef& r = _ref_at(i); |
1552 | 4.94k | return StringPiece(r.block->data + r.offset, r.length); |
1553 | 4.94k | } |
1554 | 0 | return StringPiece(); |
1555 | 4.94k | } |
1556 | | |
// Byte-wise equality against another IOBuf. Uses a two-cursor merge:
// each side keeps a (pointer, remaining-length) cursor into its current
// ref, comparing min(len1, len2) bytes per step and advancing whichever
// cursor(s) got exhausted. The equal sizes guarantee both sides run out
// of refs simultaneously.
bool IOBuf::equals(const butil::IOBuf& other) const {
    const size_t sz1 = size();
    if (sz1 != other.size()) {
        return false;
    }
    if (!sz1) {
        return true; // both empty
    }
    const BlockRef& r1 = _ref_at(0);
    const char* d1 = r1.block->data + r1.offset;
    size_t len1 = r1.length;
    const BlockRef& r2 = other._ref_at(0);
    const char* d2 = r2.block->data + r2.offset;
    size_t len2 = r2.length;
    const size_t nref1 = _ref_num();
    const size_t nref2 = other._ref_num();
    size_t i = 1; // next ref index on this side
    size_t j = 1; // next ref index on other side
    do {
        const size_t cmplen = std::min(len1, len2);
        if (memcmp(d1, d2, cmplen) != 0) {
            return false;
        }
        len1 -= cmplen;
        if (!len1) {
            if (i >= nref1) {
                // This side exhausted; sizes are equal so the other is too.
                return true;
            }
            const BlockRef& r = _ref_at(i++);
            d1 = r.block->data + r.offset;
            len1 = r.length;
        } else {
            d1 += cmplen;
        }
        len2 -= cmplen;
        if (!len2) {
            if (j >= nref2) {
                return true;
            }
            const BlockRef& r = other._ref_at(j++);
            d2 = r.block->data + r.offset;
            len2 = r.length;
        } else {
            d2 += cmplen;
        }
    } while (true);
    return true; // unreachable; the loop only exits via returns above
}
1605 | | |
1606 | | ////////////////////////////// IOPortal ////////////////// |
1607 | 0 | IOPortal::~IOPortal() { return_cached_blocks(); } |
1608 | | |
1609 | 0 | IOPortal& IOPortal::operator=(const IOPortal& rhs) { |
1610 | 0 | IOBuf::operator=(rhs); |
1611 | 0 | return *this; |
1612 | 0 | } |
1613 | | |
1614 | 0 | void IOPortal::clear() { |
1615 | 0 | IOBuf::clear(); |
1616 | 0 | return_cached_blocks(); |
1617 | 0 | } |
1618 | | |
// Upper bound on iovec entries prepared per readv/preadv/ReadV call below.
const int MAX_APPEND_IOVEC = 64;
1620 | | |
// Reads up to `max_count' bytes from `fd' into blocks cached by this
// portal and appends the filled parts to the buffer. When `offset' is
// non-negative the read is positioned (preadv-style); otherwise readv is
// used at the current file position. Returns bytes read, 0 on EOF, or
// -1 with errno set.
ssize_t IOPortal::pappend_from_file_descriptor(
    int fd, off_t offset, size_t max_count) {
    iovec vec[MAX_APPEND_IOVEC];
    int nvec = 0;
    size_t space = 0;
    Block* prev_p = NULL;
    Block* p = _block;
    // Prepare at most MAX_APPEND_IOVEC blocks or space of blocks >= max_count
    do {
        if (p == NULL) {
            // Extend the cached block chain with a fresh TLS block.
            p = iobuf::acquire_tls_block();
            if (BAIDU_UNLIKELY(!p)) {
                errno = ENOMEM;
                return -1;
            }
            if (prev_p != NULL) {
                prev_p->u.portal_next = p;
            } else {
                _block = p;
            }
        }
        // Point the next iovec at the free tail of this block.
        vec[nvec].iov_base = p->data + p->size;
        vec[nvec].iov_len = std::min(p->left_space(), max_count - space);
        space += vec[nvec].iov_len;
        ++nvec;
        if (space >= max_count || nvec >= MAX_APPEND_IOVEC) {
            break;
        }
        prev_p = p;
        p = p->u.portal_next;
    } while (1);

    ssize_t nr = 0;
    if (offset < 0) {
        nr = readv(fd, vec, nvec);
    } else {
        // Resolved once; preadv may be emulated on platforms lacking it.
        static iobuf::iov_function preadv_func = iobuf::get_preadv_func();
        nr = preadv_func(fd, vec, nvec, offset);
    }
    if (nr <= 0) {  // -1 or 0
        if (empty()) {
            // Nothing buffered at all: recycle the cached chain.
            return_cached_blocks();
        }
        return nr;
    }

    // Turn the bytes just read into BlockRefs appended to this IOBuf.
    size_t total_len = nr;
    do {
        const size_t len = std::min(total_len, _block->left_space());
        total_len -= len;
        const IOBuf::BlockRef r = { _block->size, (uint32_t)len, _block };
        _push_back_ref(r);
        _block->size += len;
        if (_block->full()) {
            // Advance to the next cached block, dropping the portal's ref.
            Block* const saved_next = _block->u.portal_next;
            _block->dec_ref();  // _block may be deleted
            _block = saved_next;
        }
    } while (total_len);
    return nr;
}
1682 | | |
// Same strategy as pappend_from_file_descriptor, but the bytes come from
// a user-supplied IReader::ReadV instead of a file descriptor. Returns
// the reader's return value (<= 0 is passed through unchanged).
ssize_t IOPortal::append_from_reader(IReader* reader, size_t max_count) {
    iovec vec[MAX_APPEND_IOVEC];
    int nvec = 0;
    size_t space = 0;
    Block* prev_p = NULL;
    Block* p = _block;
    // Prepare at most MAX_APPEND_IOVEC blocks or space of blocks >= max_count
    do {
        if (p == NULL) {
            // Extend the cached block chain with a fresh TLS block.
            p = iobuf::acquire_tls_block();
            if (BAIDU_UNLIKELY(!p)) {
                errno = ENOMEM;
                return -1;
            }
            if (prev_p != NULL) {
                prev_p->u.portal_next = p;
            } else {
                _block = p;
            }
        }
        // Point the next iovec at the free tail of this block.
        vec[nvec].iov_base = p->data + p->size;
        vec[nvec].iov_len = std::min(p->left_space(), max_count - space);
        space += vec[nvec].iov_len;
        ++nvec;
        if (space >= max_count || nvec >= MAX_APPEND_IOVEC) {
            break;
        }
        prev_p = p;
        p = p->u.portal_next;
    } while (1);

    const ssize_t nr = reader->ReadV(vec, nvec);
    if (nr <= 0) {  // -1 or 0
        if (empty()) {
            // Nothing buffered at all: recycle the cached chain.
            return_cached_blocks();
        }
        return nr;
    }

    // Turn the bytes just read into BlockRefs appended to this IOBuf.
    size_t total_len = nr;
    do {
        const size_t len = std::min(total_len, _block->left_space());
        total_len -= len;
        const IOBuf::BlockRef r = { _block->size, (uint32_t)len, _block };
        _push_back_ref(r);
        _block->size += len;
        if (_block->full()) {
            // Advance to the next cached block, dropping the portal's ref.
            Block* const saved_next = _block->u.portal_next;
            _block->dec_ref();  // _block may be deleted
            _block = saved_next;
        }
    } while (total_len);
    return nr;
}
1737 | | |
1738 | | |
// Reads up to `max_count' bytes from `ssl' into cached blocks, looping
// until the limit is reached or SSL_read stops making progress. On
// return *ssl_error holds the SSL_get_error() result of the last read.
// Returns bytes read (> 0), or the last SSL_read rc (<= 0) when nothing
// was read.
ssize_t IOPortal::append_from_SSL_channel(
    SSL* ssl, int* ssl_error, size_t max_count) {
    size_t nr = 0;
    do {
        if (!_block) {
            _block = iobuf::acquire_tls_block();
            if (BAIDU_UNLIKELY(!_block)) {
                errno = ENOMEM;
                *ssl_error = SSL_ERROR_SYSCALL;
                return -1;
            }
        }

        const size_t read_len = std::min(_block->left_space(), max_count - nr);
        // Clear OpenSSL's error queue so SSL_get_error reflects this call.
        ERR_clear_error();
        const int rc = SSL_read(ssl, _block->data + _block->size, read_len);
        *ssl_error = SSL_get_error(ssl, rc);
        if (rc > 0) {
            // Reference the bytes just written into the block.
            const IOBuf::BlockRef r = { (uint32_t)_block->size, (uint32_t)rc, _block };
            _push_back_ref(r);
            _block->size += rc;
            if (_block->full()) {
                // Advance to the next cached block, dropping our ref.
                Block* const saved_next = _block->u.portal_next;
                _block->dec_ref();  // _block may be deleted
                _block = saved_next;
            }
            nr += rc;
        } else {
            if (rc < 0) {
                if (*ssl_error == SSL_ERROR_WANT_READ
                    || (*ssl_error == SSL_ERROR_SYSCALL
                        && BIO_fd_non_fatal_error(errno) == 1)) {
                    // Non fatal error, tell caller to read again
                    *ssl_error = SSL_ERROR_WANT_READ;
                } else {
                    // Other errors are fatal
                    return rc;
                }
            }
            // rc == 0 (shutdown) or retryable error: report progress so
            // far, or propagate rc if nothing was read.
            return (nr > 0 ? nr : rc);
        }
    } while (nr < max_count);
    return nr;
}
1783 | | |
1784 | 0 | void IOPortal::return_cached_blocks_impl(Block* b) { |
1785 | 0 | iobuf::release_tls_block_chain(b); |
1786 | 0 | } |
1787 | | |
1788 | | //////////////// IOBufCutter //////////////// |
1789 | | |
// Constructs a cutter over `buf'. The first block is loaded lazily by
// load_next_ref() when the first cutting operation runs.
IOBufCutter::IOBufCutter(butil::IOBuf* buf)
    : _data(NULL)
    , _data_end(NULL)
    , _block(NULL)
    , _buf(buf) {
}
1796 | | |
// Writes the cutter's progress back into the source IOBuf: shrinks the
// front BlockRef to the unread remainder, or pops it when fully consumed.
IOBufCutter::~IOBufCutter() {
    if (_block) {
        if (_data != _data_end) {
            // Unread bytes remain: narrow the front ref to [_data, _data_end).
            IOBuf::BlockRef& fr = _buf->_front_ref();
            CHECK_EQ(fr.block, _block);
            fr.offset = (uint32_t)((char*)_data - _block->data);
            fr.length = (uint32_t)((char*)_data_end - (char*)_data);
        } else {
            // The front ref was consumed entirely.
            _buf->_pop_front_ref();
        }
    }
}
1809 | 0 | bool IOBufCutter::load_next_ref() { |
1810 | 0 | if (_block) { |
1811 | 0 | _buf->_pop_front_ref(); |
1812 | 0 | } |
1813 | 0 | if (!_buf->_ref_num()) { |
1814 | 0 | _data = NULL; |
1815 | 0 | _data_end = NULL; |
1816 | 0 | _block = NULL; |
1817 | 0 | return false; |
1818 | 0 | } else { |
1819 | 0 | const IOBuf::BlockRef& r = _buf->_front_ref(); |
1820 | 0 | _data = r.block->data + r.offset; |
1821 | 0 | _data_end = (char*)_data + r.length; |
1822 | 0 | _block = r.block; |
1823 | 0 | return true; |
1824 | 0 | } |
1825 | 0 | } |
1826 | | |
// Slow path of copy_to(): the currently-loaded slice has fewer than `n'
// bytes. Copies what the slice holds, then continues over the buffer's
// later refs without consuming them. Returns the bytes actually copied.
size_t IOBufCutter::slower_copy_to(void* dst, size_t n) {
    size_t size = (char*)_data_end - (char*)_data;
    if (size == 0) {
        // Current slice exhausted; load the next ref.
        if (!load_next_ref()) {
            return 0;
        }
        size = (char*)_data_end - (char*)_data;
        if (n <= size) {
            // The freshly loaded block alone satisfies the request.
            memcpy(dst, _data, n);
            return n;
        }
    }
    void* const saved_dst = dst;
    memcpy(dst, _data, size);
    dst = (char*)dst + size;
    n -= size;
    // Index 0 is the ref the cutter currently slices, so continue from 1.
    const size_t nref = _buf->_ref_num();
    for (size_t i = 1; i < nref; ++i) {
        const IOBuf::BlockRef& r = _buf->_ref_at(i);
        const size_t nc = std::min(n, (size_t)r.length);
        memcpy(dst, r.block->data + r.offset, nc);
        dst = (char*)dst + nc;
        n -= nc;
        if (n == 0) {
            break;
        }
    }
    return (char*)dst - (char*)saved_dst;
}
1856 | | |
// Cuts up to `n' bytes into `out' without copying payload: bytes are
// transferred as BlockRefs. Returns the number of bytes actually cut.
size_t IOBufCutter::cutn(butil::IOBuf* out, size_t n) {
    if (n == 0) {
        return 0;
    }
    const size_t size = (char*)_data_end - (char*)_data;
    if (n <= size) {
        // Entirely inside the loaded slice: reference the sub-range.
        const IOBuf::BlockRef r = { (uint32_t)((char*)_data - _block->data),
                                    (uint32_t)n,
                                    _block };
        out->_push_back_ref(r);
        _data = (char*)_data + n;
        return n;
    } else if (size != 0) {
        // Take the rest of the loaded slice, then let the underlying
        // buffer cut the remainder.
        const IOBuf::BlockRef r = { (uint32_t)((char*)_data - _block->data),
                                    (uint32_t)size,
                                    _block };
        out->_push_back_ref(r);
        _buf->_pop_front_ref();
        _data = NULL;
        _data_end = NULL;
        _block = NULL;
        return _buf->cutn(out, n - size) + size;
    } else {
        // Nothing loaded (or slice exhausted): release it first, then
        // delegate entirely to the buffer.
        if (_block) {
            _data = NULL;
            _data_end = NULL;
            _block = NULL;
            _buf->_pop_front_ref();
        }
        return _buf->cutn(out, n);
    }
}
1889 | | |
// Cuts up to `n' bytes into the flat buffer `out' (copying). Returns the
// number of bytes actually cut.
size_t IOBufCutter::cutn(void* out, size_t n) {
    if (n == 0) {
        return 0;
    }
    const size_t size = (char*)_data_end - (char*)_data;
    if (n <= size) {
        // Fully served by the loaded slice.
        memcpy(out, _data, n);
        _data = (char*)_data + n;
        return n;
    } else if (size != 0) {
        // Copy the loaded remainder, then let the buffer cut the rest.
        memcpy(out, _data, size);
        _buf->_pop_front_ref();
        _data = NULL;
        _data_end = NULL;
        _block = NULL;
        return _buf->cutn((char*)out + size, n - size) + size;
    } else {
        // Slice exhausted: release it before delegating to the buffer.
        if (_block) {
            _data = NULL;
            _data_end = NULL;
            _block = NULL;
            _buf->_pop_front_ref();
        }
        return _buf->cutn(out, n);
    }
}
1916 | | |
// Reads `buf' without copying. The stream does not own the buffer;
// `buf' must outlive this stream.
IOBufAsZeroCopyInputStream::IOBufAsZeroCopyInputStream(const IOBuf& buf)
    : _ref_index(0)
    , _add_offset(0)
    , _byte_count(0)
    , _buf(&buf) {
}
1923 | | |
// Exposes the next unconsumed region of the buffer through *data/*size.
// Returns false when the end of the buffer has been reached.
bool IOBufAsZeroCopyInputStream::Next(const void** data, int* size) {
    const IOBuf::BlockRef* cur_ref = _buf->_pref_at(_ref_index);
    if (cur_ref == NULL) {
        return false;
    }
    // _add_offset carries leftover position from a prior BackUp()/Skip().
    *data = cur_ref->block->data + cur_ref->offset + _add_offset;
    // Impl. of Backup/Skip guarantees that _add_offset < cur_ref->length.
    *size = cur_ref->length - _add_offset;
    _byte_count += cur_ref->length - _add_offset;
    _add_offset = 0;
    ++_ref_index;
    return true;
}
1937 | | |
// Returns the last `count' bytes obtained from Next() to the stream.
// Must immediately follow a Next(), and `count' must not exceed the size
// that Next() reported.
void IOBufAsZeroCopyInputStream::BackUp(int count) {
    if (_ref_index > 0) {
        // Step back to the ref Next() just consumed and remember how far
        // into it the caller actually read.
        const IOBuf::BlockRef* cur_ref = _buf->_pref_at(--_ref_index);
        CHECK(_add_offset == 0 && cur_ref->length >= (uint32_t)count)
            << "BackUp() is not after a Next()";
        _add_offset = cur_ref->length - count;
        _byte_count -= count;
    } else {
        LOG(FATAL) << "BackUp an empty ZeroCopyInputStream";
    }
}
1949 | | |
// Skips a number of bytes. Returns false if the end of the stream is
// reached or some input error occurred. In the end-of-stream case, the
// stream is advanced to the end of the stream (so ByteCount() will return
// the total size of the stream).
bool IOBufAsZeroCopyInputStream::Skip(int count) {
    const IOBuf::BlockRef* cur_ref = _buf->_pref_at(_ref_index);
    while (cur_ref) {
        const int left_bytes = cur_ref->length - _add_offset;
        if (count < left_bytes) {
            // The skip ends inside the current ref.
            _add_offset += count;
            _byte_count += count;
            return true;
        }
        // Consume the rest of this ref and move to the next one.
        count -= left_bytes;
        _add_offset = 0;
        _byte_count += left_bytes;
        cur_ref = _buf->_pref_at(++_ref_index);
    }
    return false;
}
1970 | | |
1971 | 0 | google::protobuf::int64 IOBufAsZeroCopyInputStream::ByteCount() const { |
1972 | 0 | return _byte_count; |
1973 | 0 | } |
1974 | | |
// Appends into `buf' using blocks from the TLS cache (_block_size == 0
// selects the TLS path in Next()/_release_block()).
IOBufAsZeroCopyOutputStream::IOBufAsZeroCopyOutputStream(IOBuf* buf)
    : _buf(buf), _block_size(0), _cur_block(NULL), _byte_count(0) {
}
1978 | | |
// Appends into `buf' using blocks of a caller-chosen size, allocated per
// stream rather than taken from the TLS cache.
// Throws std::invalid_argument when block_size cannot even hold the
// Block header (no room left for payload).
IOBufAsZeroCopyOutputStream::IOBufAsZeroCopyOutputStream(
    IOBuf *buf, uint32_t block_size)
    : _buf(buf)
    , _block_size(block_size)
    , _cur_block(NULL)
    , _byte_count(0) {

    if (_block_size <= offsetof(IOBuf::Block, data)) {
        throw std::invalid_argument("block_size is too small");
    }
}
1990 | | |
1991 | 0 | IOBufAsZeroCopyOutputStream::~IOBufAsZeroCopyOutputStream() { |
1992 | 0 | _release_block(); |
1993 | 0 | } |
1994 | | |
// Hands out the writable tail of the current block. The whole free space
// is referenced into the buffer immediately; bytes the caller does not
// write are expected to be returned later via BackUp().
bool IOBufAsZeroCopyOutputStream::Next(void** data, int* size) {
    if (_cur_block == NULL || _cur_block->full()) {
        // Need a fresh block: custom-sized or from the TLS cache.
        _release_block();
        if (_block_size > 0) {
            _cur_block = iobuf::create_block(_block_size);
        } else {
            _cur_block = iobuf::acquire_tls_block();
        }
        if (_cur_block == NULL) {
            return false;
        }
    }
    const IOBuf::BlockRef r = { _cur_block->size,
                                (uint32_t)_cur_block->left_space(),
                                _cur_block };
    *data = _cur_block->data + r.offset;
    *size = r.length;
    // Mark the block fully used up front; BackUp() undoes the unused part.
    _cur_block->size = _cur_block->cap;
    _buf->_push_back_ref(r);
    _byte_count += r.length;
    return true;
}
2017 | | |
// Returns the trailing `count' bytes handed out by Next(). Beyond the
// standard ZeroCopyOutputStream contract, this also supports backing up
// across multiple trailing BlockRefs of the wrapped IOBuf.
void IOBufAsZeroCopyOutputStream::BackUp(int count) {
    while (!_buf->empty()) {
        IOBuf::BlockRef& r = _buf->_back_ref();
        if (_cur_block) {
            // A ordinary BackUp that should be supported by all ZeroCopyOutputStream
            // _cur_block must match end of the IOBuf
            if (r.block != _cur_block) {
                LOG(FATAL) << "r.block=" << r.block
                           << " does not match _cur_block=" << _cur_block;
                return;
            }
            if (r.offset + r.length != _cur_block->size) {
                LOG(FATAL) << "r.offset(" << r.offset << ") + r.length("
                           << r.length << ") != _cur_block->size("
                           << _cur_block->size << ")";
                return;
            }
        } else {
            // An extended BackUp which is undefined in regular
            // ZeroCopyOutputStream. The `count' given by user is larger than
            // size of last _cur_block (already released in last iteration).
            if (r.block->ref_count() == 1) {
                // A special case: the block is only referenced by last
                // BlockRef of _buf. Safe to allocate more on the block.
                if (r.offset + r.length != r.block->size) {
                    LOG(FATAL) << "r.offset(" << r.offset << ") + r.length("
                               << r.length << ") != r.block->size("
                               << r.block->size << ")";
                    return;
                }
            } else if (r.offset + r.length != r.block->size) {
                // Last BlockRef does not match end of the block (which is
                // used by other IOBuf already). Unsafe to re-reference
                // the block and allocate more, just pop the bytes.
                _byte_count -= _buf->pop_back(count);
                return;
            } // else Last BlockRef matches end of the block. Even if the
              // block is shared by other IOBuf, it's safe to allocate bytes
              // after block->size.
            // Re-adopt the block so the shrink logic below can run.
            _cur_block = r.block;
            _cur_block->inc_ref();
        }
        if (BAIDU_LIKELY(r.length > (uint32_t)count)) {
            // Partial back-up inside the last ref: shrink it in place.
            r.length -= count;
            if (!_buf->_small()) {
                _buf->_bv.nbytes -= count;
            }
            _cur_block->size -= count;
            _byte_count -= count;
            // Release block for TLS before quiting BackUp() for other
            // code to reuse the block even if this wrapper object is
            // not destructed. Example:
            //   IOBufAsZeroCopyOutputStream wrapper(...);
            //   ParseFromZeroCopyStream(&wrapper, ...); // Calls BackUp
            //   IOBuf buf;
            //   buf.append("foobar"); // can reuse the TLS block.
            if (_block_size == 0) {
                iobuf::release_tls_block(_cur_block);
                _cur_block = NULL;
            }
            return;
        }
        // The entire last ref is backed up: pop it and keep going.
        _cur_block->size -= r.length;
        _byte_count -= r.length;
        count -= r.length;
        _buf->_pop_back_ref();
        _release_block();
        if (count == 0) {
            return;
        }
    }
    LOG_IF(FATAL, count != 0) << "BackUp an empty IOBuf";
}
2091 | | |
2092 | 0 | google::protobuf::int64 IOBufAsZeroCopyOutputStream::ByteCount() const { |
2093 | 0 | return _byte_count; |
2094 | 0 | } |
2095 | | |
2096 | 0 | void IOBufAsZeroCopyOutputStream::_release_block() { |
2097 | 0 | if (_block_size > 0) { |
2098 | 0 | if (_cur_block) { |
2099 | 0 | _cur_block->dec_ref(); |
2100 | 0 | } |
2101 | 0 | } else { |
2102 | 0 | iobuf::release_tls_block(_cur_block); |
2103 | 0 | } |
2104 | 0 | _cur_block = NULL; |
2105 | 0 | } |
2106 | | |
// Sink adapter letting snappy write directly into `buf' via the wrapped
// zero-copy output stream. _cur_buf/_cur_len track an outstanding region
// handed out by GetAppendBuffer().
IOBufAsSnappySink::IOBufAsSnappySink(butil::IOBuf& buf)
    : _cur_buf(NULL), _cur_len(0), _buf(&buf), _buf_stream(&buf) {
}
2110 | | |
2111 | 0 | void IOBufAsSnappySink::Append(const char* bytes, size_t n) { |
2112 | 0 | if (_cur_len > 0) { |
2113 | 0 | CHECK(bytes == _cur_buf && static_cast<int>(n) <= _cur_len) |
2114 | 0 | << "bytes must be _cur_buf"; |
2115 | 0 | _buf_stream.BackUp(_cur_len - n); |
2116 | 0 | _cur_len = 0; |
2117 | 0 | } else { |
2118 | 0 | _buf->append(bytes, n); |
2119 | 0 | } |
2120 | 0 | } |
2121 | | |
// Returns a buffer of at least `length' bytes for snappy to write into.
// Tries to hand out IOBuf block memory directly (zero-copy); falls back
// to `scratch' for large requests or when the stream's block is too small.
char* IOBufAsSnappySink::GetAppendBuffer(size_t length, char* scratch) {
    // TODO: butil::IOBuf supports dynamic sized blocks.
    if (length <= 8000/*just a hint*/) {
        if (_buf_stream.Next(reinterpret_cast<void**>(&_cur_buf), &_cur_len)) {
            if (_cur_len >= static_cast<int>(length)) {
                // Big enough; Append() will BackUp() the unused tail.
                return _cur_buf;
            } else {
                // Too small: undo the Next() and fall through to scratch.
                _buf_stream.BackUp(_cur_len);
            }
        } else {
            LOG(FATAL) << "Fail to alloc buffer";
        }
    } // else no need to try.
    _cur_buf = NULL;
    _cur_len = 0;
    return scratch;
}
2139 | | |
2140 | 0 | size_t IOBufAsSnappySource::Available() const { |
2141 | 0 | return _buf->length() - _stream.ByteCount(); |
2142 | 0 | } |
2143 | | |
2144 | 0 | void IOBufAsSnappySource::Skip(size_t n) { |
2145 | 0 | _stream.Skip(n); |
2146 | 0 | } |
2147 | | |
2148 | 0 | const char* IOBufAsSnappySource::Peek(size_t* len) { |
2149 | 0 | const char* buffer = NULL; |
2150 | 0 | int res = 0; |
2151 | 0 | if (_stream.Next((const void**)&buffer, &res)) { |
2152 | 0 | *len = res; |
2153 | | // Source::Peek requires no reposition. |
2154 | 0 | _stream.BackUp(*len); |
2155 | 0 | return buffer; |
2156 | 0 | } else { |
2157 | 0 | *len = 0; |
2158 | 0 | return NULL; |
2159 | 0 | } |
2160 | 0 | } |
2161 | | |
// Starts with no reserved write region; _data/_data_end are filled when
// the wrapped zero-copy stream hands out the first block.
IOBufAppender::IOBufAppender()
    : _data(NULL)
    , _data_end(NULL)
    , _zc_stream(&_buf) {
}
2167 | | |
// Appends up to `n' bytes from the iterator's current position into `buf'
// by referencing blocks (zero-copy) and advances the iterator. Returns
// the number of bytes actually appended.
size_t IOBufBytesIterator::append_and_forward(butil::IOBuf* buf, size_t n) {
    size_t nc = 0;
    while (nc < n && _bytes_left != 0) {
        // NOTE(review): _block_count - 1 appears to index the ref the
        // iterator currently points into — confirm against the iterator's
        // declaration/ctor (not visible here).
        const IOBuf::BlockRef& r = _buf->_ref_at(_block_count - 1);
        const size_t block_size = _block_end - _block_begin;
        const size_t to_copy = std::min(block_size, n - nc);
        // Reference the [_block_begin, _block_begin + to_copy) slice
        // instead of copying its bytes.
        IOBuf::BlockRef r2 = { (uint32_t)(_block_begin - r.block->data),
                               (uint32_t)to_copy, r.block };
        buf->_push_back_ref(r2);
        _block_begin += to_copy;
        _bytes_left -= to_copy;
        nc += to_copy;
        if (_block_begin == _block_end) {
            // Current block exhausted: advance to the next one.
            try_next_block();
        }
    }
    return nc;
}
2186 | | |
2187 | 0 | bool IOBufBytesIterator::forward_one_block(const void** data, size_t* size) { |
2188 | 0 | if (_bytes_left == 0) { |
2189 | 0 | return false; |
2190 | 0 | } |
2191 | 0 | const size_t block_size = _block_end - _block_begin; |
2192 | 0 | *data = _block_begin; |
2193 | 0 | *size = block_size; |
2194 | 0 | _bytes_left -= block_size; |
2195 | 0 | try_next_block(); |
2196 | 0 | return true; |
2197 | 0 | } |
2198 | | |
2199 | | } // namespace butil |
2200 | | |
2201 | 0 | void* fast_memcpy(void *__restrict dest, const void *__restrict src, size_t n) { |
2202 | 0 | return butil::iobuf::cp(dest, src, n); |
2203 | 0 | } |