/src/brpc/src/butil/iobuf.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | // iobuf - A non-continuous zero-copied buffer |
19 | | |
20 | | // Date: Thu Nov 22 13:57:56 CST 2012 |
21 | | |
22 | | #include "butil/ssl_compat.h" // BIO_fd_non_fatal_error |
23 | | #include <openssl/err.h> |
24 | | #include <openssl/ssl.h> // SSL_* |
25 | | #ifdef USE_MESALINK |
26 | | #include <mesalink/openssl/ssl.h> |
27 | | #include <mesalink/openssl/err.h> |
28 | | #endif |
29 | | #include <sys/syscall.h> // syscall |
30 | | #include <fcntl.h> // O_RDONLY |
31 | | #include <errno.h> // errno |
32 | | #include <limits.h> // CHAR_BIT |
33 | | #include <stdexcept> // std::invalid_argument |
34 | | #include <gflags/gflags.h> // gflags |
35 | | #include "butil/build_config.h" // ARCH_CPU_X86_64 |
36 | | #include "butil/atomicops.h" // butil::atomic |
37 | | #include "butil/thread_local.h" // thread_atexit |
38 | | #include "butil/macros.h" // BAIDU_CASSERT |
39 | | #include "butil/logging.h" // CHECK, LOG |
40 | | #include "butil/fd_guard.h" // butil::fd_guard |
41 | | #include "butil/iobuf.h" |
42 | | #include "butil/iobuf_profiler.h" |
43 | | |
44 | | namespace butil { |
45 | | namespace iobuf { |
46 | | |
47 | | DEFINE_int32(iobuf_aligned_buf_block_size, 0, "iobuf aligned buf block size"); |
48 | | |
49 | | typedef ssize_t (*iov_function)(int fd, const struct iovec *vector, |
50 | | int count, off_t offset); |
51 | | |
52 | | // Userpsace preadv |
53 | | static ssize_t user_preadv(int fd, const struct iovec *vector, |
54 | 0 | int count, off_t offset) { |
55 | 0 | ssize_t total_read = 0; |
56 | 0 | for (int i = 0; i < count; ++i) { |
57 | 0 | const ssize_t rc = ::pread(fd, vector[i].iov_base, vector[i].iov_len, offset); |
58 | 0 | if (rc <= 0) { |
59 | 0 | return total_read > 0 ? total_read : rc; |
60 | 0 | } |
61 | 0 | total_read += rc; |
62 | 0 | offset += rc; |
63 | 0 | if (rc < (ssize_t)vector[i].iov_len) { |
64 | 0 | break; |
65 | 0 | } |
66 | 0 | } |
67 | 0 | return total_read; |
68 | 0 | } |
69 | | |
70 | | static ssize_t user_pwritev(int fd, const struct iovec* vector, |
71 | 0 | int count, off_t offset) { |
72 | 0 | ssize_t total_write = 0; |
73 | 0 | for (int i = 0; i < count; ++i) { |
74 | 0 | const ssize_t rc = ::pwrite(fd, vector[i].iov_base, vector[i].iov_len, offset); |
75 | 0 | if (rc <= 0) { |
76 | 0 | return total_write > 0 ? total_write : rc; |
77 | 0 | } |
78 | 0 | total_write += rc; |
79 | 0 | offset += rc; |
80 | 0 | if (rc < (ssize_t)vector[i].iov_len) { |
81 | 0 | break; |
82 | 0 | } |
83 | 0 | } |
84 | 0 | return total_write; |
85 | 0 | } |
86 | | |
87 | | #if ARCH_CPU_X86_64 |
88 | | |
89 | | #ifndef SYS_preadv |
90 | | #define SYS_preadv 295 |
91 | | #endif // SYS_preadv |
92 | | |
93 | | #ifndef SYS_pwritev |
94 | | #define SYS_pwritev 296 |
95 | | #endif // SYS_pwritev |
96 | | |
97 | | // SYS_preadv/SYS_pwritev is available since Linux 2.6.30 |
98 | | static ssize_t sys_preadv(int fd, const struct iovec *vector, |
99 | 0 | int count, off_t offset) { |
100 | 0 | return syscall(SYS_preadv, fd, vector, count, offset); |
101 | 0 | } |
102 | | |
103 | | static ssize_t sys_pwritev(int fd, const struct iovec *vector, |
104 | 0 | int count, off_t offset) { |
105 | 0 | return syscall(SYS_pwritev, fd, vector, count, offset); |
106 | 0 | } |
107 | | |
108 | 0 | inline iov_function get_preadv_func() { |
109 | | #if defined(OS_MACOSX) |
110 | | return user_preadv; |
111 | | #endif |
112 | 0 | butil::fd_guard fd(open("/dev/zero", O_RDONLY)); |
113 | 0 | if (fd < 0) { |
114 | 0 | PLOG(WARNING) << "Fail to open /dev/zero"; |
115 | 0 | return user_preadv; |
116 | 0 | } |
117 | 0 | char dummy[1]; |
118 | 0 | iovec vec = { dummy, sizeof(dummy) }; |
119 | 0 | const int rc = syscall(SYS_preadv, (int)fd, &vec, 1, 0); |
120 | 0 | if (rc < 0) { |
121 | 0 | PLOG(WARNING) << "The kernel doesn't support SYS_preadv, " |
122 | 0 | " use user_preadv instead"; |
123 | 0 | return user_preadv; |
124 | 0 | } |
125 | 0 | return sys_preadv; |
126 | 0 | } |
127 | | |
128 | 0 | inline iov_function get_pwritev_func() { |
129 | 0 | butil::fd_guard fd(open("/dev/null", O_WRONLY)); |
130 | 0 | if (fd < 0) { |
131 | 0 | PLOG(ERROR) << "Fail to open /dev/null"; |
132 | 0 | return user_pwritev; |
133 | 0 | } |
134 | | #if defined(OS_MACOSX) |
135 | | return user_pwritev; |
136 | | #endif |
137 | 0 | char dummy[1]; |
138 | 0 | iovec vec = { dummy, sizeof(dummy) }; |
139 | 0 | const int rc = syscall(SYS_pwritev, (int)fd, &vec, 1, 0); |
140 | 0 | if (rc < 0) { |
141 | 0 | PLOG(WARNING) << "The kernel doesn't support SYS_pwritev, " |
142 | 0 | " use user_pwritev instead"; |
143 | 0 | return user_pwritev; |
144 | 0 | } |
145 | 0 | return sys_pwritev; |
146 | 0 | } |
147 | | |
148 | | #else // ARCH_CPU_X86_64 |
149 | | |
150 | | #warning "We don't check if the kernel supports SYS_preadv or SYS_pwritev on non-X86_64, use implementation on pread/pwrite directly." |
151 | | |
152 | | inline iov_function get_preadv_func() { |
153 | | return user_preadv; |
154 | | } |
155 | | |
156 | | inline iov_function get_pwritev_func() { |
157 | | return user_pwritev; |
158 | | } |
159 | | |
160 | | #endif // ARCH_CPU_X86_64 |
161 | | |
162 | 0 | void* cp(void *__restrict dest, const void *__restrict src, size_t n) { |
163 | | // memcpy in gcc 4.8 seems to be faster enough. |
164 | 0 | return memcpy(dest, src, n); |
165 | 0 | } |
166 | | |
167 | | // Function pointers to allocate or deallocate memory for a IOBuf::Block |
168 | | void* (*blockmem_allocate)(size_t) = ::malloc; |
169 | | void (*blockmem_deallocate)(void*) = ::free; |
170 | | |
171 | | void remove_tls_block_chain(); |
172 | | |
173 | | // Use default function pointers |
174 | 0 | void reset_blockmem_allocate_and_deallocate() { |
175 | | // There maybe block allocated by previous hooks, it's wrong to free them using |
176 | | // mismatched hook. |
177 | 0 | remove_tls_block_chain(); |
178 | 0 | blockmem_allocate = ::malloc; |
179 | 0 | blockmem_deallocate = ::free; |
180 | 0 | } |
181 | | |
182 | | butil::static_atomic<size_t> g_nblock = BUTIL_STATIC_ATOMIC_INIT(0); |
183 | | butil::static_atomic<size_t> g_blockmem = BUTIL_STATIC_ATOMIC_INIT(0); |
184 | | butil::static_atomic<size_t> g_newbigview = BUTIL_STATIC_ATOMIC_INIT(0); |
185 | | |
186 | 0 | void inc_g_nblock() { |
187 | 0 | g_nblock.fetch_add(1, butil::memory_order_relaxed); |
188 | 0 | } |
189 | 0 | void dec_g_nblock() { |
190 | 0 | g_nblock.fetch_sub(1, butil::memory_order_relaxed); |
191 | 0 | } |
192 | | |
193 | 0 | void inc_g_blockmem() { |
194 | 0 | g_blockmem.fetch_add(1, butil::memory_order_relaxed); |
195 | 0 | } |
196 | 0 | void dec_g_blockmem() { |
197 | 0 | g_blockmem.fetch_sub(1, butil::memory_order_relaxed); |
198 | 0 | } |
199 | | |
200 | | } // namespace iobuf |
201 | | |
202 | 0 | size_t IOBuf::block_count() { |
203 | 0 | return iobuf::g_nblock.load(butil::memory_order_relaxed); |
204 | 0 | } |
205 | | |
206 | 0 | size_t IOBuf::block_memory() { |
207 | 0 | return iobuf::g_blockmem.load(butil::memory_order_relaxed); |
208 | 0 | } |
209 | | |
210 | 0 | size_t IOBuf::new_bigview_count() { |
211 | 0 | return iobuf::g_newbigview.load(butil::memory_order_relaxed); |
212 | 0 | } |
213 | | |
214 | | namespace iobuf { |
215 | | |
216 | | // for unit test |
217 | 0 | int block_shared_count(IOBuf::Block const* b) { return b->ref_count(); } |
218 | | |
219 | 0 | IOBuf::Block* get_portal_next(IOBuf::Block const* b) { |
220 | 0 | return b->u.portal_next; |
221 | 0 | } |
222 | | |
223 | 0 | uint32_t block_cap(IOBuf::Block const* b) { |
224 | 0 | return b->cap; |
225 | 0 | } |
226 | | |
227 | 0 | uint32_t block_size(IOBuf::Block const* b) { |
228 | 0 | return b->size; |
229 | 0 | } |
230 | | |
231 | 0 | inline IOBuf::Block* create_block_aligned(size_t block_size, size_t alignment) { |
232 | 0 | if (block_size > 0xFFFFFFFFULL) { |
233 | 0 | LOG(FATAL) << "block_size=" << block_size << " is too large"; |
234 | 0 | return NULL; |
235 | 0 | } |
236 | 0 | char* mem = (char*)iobuf::blockmem_allocate(block_size); |
237 | 0 | if (mem == NULL) { |
238 | 0 | return NULL; |
239 | 0 | } |
240 | 0 | char* data = mem + sizeof(IOBuf::Block); |
241 | | // change data pointer & data size make align satisfied |
242 | 0 | size_t adder = (-reinterpret_cast<uintptr_t>(data)) & (alignment - 1); |
243 | 0 | size_t size = |
244 | 0 | (block_size - sizeof(IOBuf::Block) - adder) & ~(alignment - 1); |
245 | 0 | return new (mem) IOBuf::Block(data + adder, size); |
246 | 0 | } |
247 | | |
248 | | // === Share TLS blocks between appending operations === |
249 | | |
250 | | static __thread TLSData g_tls_data = { NULL, 0, false }; |
251 | | |
252 | | // Used in release_tls_block() |
253 | 0 | TLSData* get_g_tls_data() { return &g_tls_data; } |
254 | | // Used in UT |
255 | 0 | IOBuf::Block* get_tls_block_head() { return g_tls_data.block_head; } |
256 | 0 | int get_tls_block_count() { return g_tls_data.num_blocks; } |
257 | | |
258 | | // Number of blocks that can't be returned to TLS which has too many block |
259 | | // already. This counter should be 0 in most scenarios, otherwise performance |
260 | | // of appending functions in IOPortal may be lowered. |
261 | | static butil::static_atomic<size_t> g_num_hit_tls_threshold = BUTIL_STATIC_ATOMIC_INIT(0); |
262 | | |
263 | 0 | void inc_g_num_hit_tls_threshold() { |
264 | 0 | g_num_hit_tls_threshold.fetch_add(1, butil::memory_order_relaxed); |
265 | 0 | } |
266 | | |
267 | 0 | void dec_g_num_hit_tls_threshold() { |
268 | 0 | g_num_hit_tls_threshold.fetch_sub(1, butil::memory_order_relaxed); |
269 | 0 | } |
270 | | |
271 | | // Called in UT. |
272 | 0 | void remove_tls_block_chain() { |
273 | 0 | TLSData& tls_data = g_tls_data; |
274 | 0 | IOBuf::Block* b = tls_data.block_head; |
275 | 0 | if (!b) { |
276 | 0 | return; |
277 | 0 | } |
278 | 0 | tls_data.block_head = NULL; |
279 | 0 | int n = 0; |
280 | 0 | do { |
281 | 0 | IOBuf::Block* const saved_next = b->u.portal_next; |
282 | 0 | b->dec_ref(); |
283 | 0 | b = saved_next; |
284 | 0 | ++n; |
285 | 0 | } while (b); |
286 | 0 | CHECK_EQ(n, tls_data.num_blocks); |
287 | 0 | tls_data.num_blocks = 0; |
288 | 0 | } |
289 | | |
290 | | // Get a (non-full) block from TLS. |
291 | | // Notice that the block is not removed from TLS. |
292 | 0 | IOBuf::Block* share_tls_block() { |
293 | 0 | TLSData& tls_data = g_tls_data; |
294 | 0 | IOBuf::Block* const b = tls_data.block_head; |
295 | 0 | if (b != NULL && !b->full()) { |
296 | 0 | return b; |
297 | 0 | } |
298 | 0 | IOBuf::Block* new_block = NULL; |
299 | 0 | if (b) { |
300 | 0 | new_block = b; |
301 | 0 | while (new_block && new_block->full()) { |
302 | 0 | IOBuf::Block* const saved_next = new_block->u.portal_next; |
303 | 0 | new_block->dec_ref(); |
304 | 0 | --tls_data.num_blocks; |
305 | 0 | new_block = saved_next; |
306 | 0 | } |
307 | 0 | } else if (!tls_data.registered) { |
308 | 0 | tls_data.registered = true; |
309 | | // Only register atexit at the first time |
310 | 0 | butil::thread_atexit(remove_tls_block_chain); |
311 | 0 | } |
312 | 0 | if (!new_block) { |
313 | 0 | new_block = create_block(); // may be NULL |
314 | 0 | if (new_block) { |
315 | 0 | ++tls_data.num_blocks; |
316 | 0 | } |
317 | 0 | } |
318 | 0 | tls_data.block_head = new_block; |
319 | 0 | return new_block; |
320 | 0 | } |
321 | | |
322 | | // Return chained blocks to TLS. |
323 | | // NOTE: b MUST be non-NULL and all blocks linked SHOULD not be full. |
324 | 0 | void release_tls_block_chain(IOBuf::Block* b) { |
325 | 0 | TLSData& tls_data = g_tls_data; |
326 | 0 | size_t n = 0; |
327 | 0 | if (tls_data.num_blocks >= max_blocks_per_thread()) { |
328 | 0 | do { |
329 | 0 | ++n; |
330 | 0 | IOBuf::Block* const saved_next = b->u.portal_next; |
331 | 0 | b->dec_ref(); |
332 | 0 | b = saved_next; |
333 | 0 | } while (b); |
334 | 0 | g_num_hit_tls_threshold.fetch_add(n, butil::memory_order_relaxed); |
335 | 0 | return; |
336 | 0 | } |
337 | 0 | IOBuf::Block* first_b = b; |
338 | 0 | IOBuf::Block* last_b = NULL; |
339 | 0 | do { |
340 | 0 | ++n; |
341 | 0 | CHECK(!b->full()); |
342 | 0 | if (b->u.portal_next == NULL) { |
343 | 0 | last_b = b; |
344 | 0 | break; |
345 | 0 | } |
346 | 0 | b = b->u.portal_next; |
347 | 0 | } while (true); |
348 | 0 | last_b->u.portal_next = tls_data.block_head; |
349 | 0 | tls_data.block_head = first_b; |
350 | 0 | tls_data.num_blocks += n; |
351 | 0 | if (!tls_data.registered) { |
352 | 0 | tls_data.registered = true; |
353 | 0 | butil::thread_atexit(remove_tls_block_chain); |
354 | 0 | } |
355 | 0 | } |
356 | | |
357 | | // Get and remove one (non-full) block from TLS. If TLS is empty, create one. |
358 | 0 | IOBuf::Block* acquire_tls_block() { |
359 | 0 | TLSData& tls_data = g_tls_data; |
360 | 0 | IOBuf::Block* b = tls_data.block_head; |
361 | 0 | if (!b) { |
362 | 0 | return create_block(); |
363 | 0 | } |
364 | 0 | while (b->full()) { |
365 | 0 | IOBuf::Block* const saved_next = b->u.portal_next; |
366 | 0 | b->dec_ref(); |
367 | 0 | tls_data.block_head = saved_next; |
368 | 0 | --tls_data.num_blocks; |
369 | 0 | b = saved_next; |
370 | 0 | if (!b) { |
371 | 0 | return create_block(); |
372 | 0 | } |
373 | 0 | } |
374 | 0 | tls_data.block_head = b->u.portal_next; |
375 | 0 | --tls_data.num_blocks; |
376 | 0 | b->u.portal_next = NULL; |
377 | 0 | return b; |
378 | 0 | } |
379 | | |
380 | 0 | inline IOBuf::BlockRef* acquire_blockref_array(size_t cap) { |
381 | 0 | iobuf::g_newbigview.fetch_add(1, butil::memory_order_relaxed); |
382 | 0 | return new IOBuf::BlockRef[cap]; |
383 | 0 | } |
384 | | |
385 | 0 | inline IOBuf::BlockRef* acquire_blockref_array() { |
386 | 0 | return acquire_blockref_array(IOBuf::INITIAL_CAP); |
387 | 0 | } |
388 | | |
389 | 0 | inline void release_blockref_array(IOBuf::BlockRef* refs, size_t cap) { |
390 | 0 | delete[] refs; |
391 | 0 | } |
392 | | |
393 | | } // namespace iobuf |
394 | | |
395 | 0 | size_t IOBuf::block_count_hit_tls_threshold() { |
396 | 0 | return iobuf::g_num_hit_tls_threshold.load(butil::memory_order_relaxed); |
397 | 0 | } |
398 | | |
399 | | BAIDU_CASSERT(sizeof(IOBuf::SmallView) == sizeof(IOBuf::BigView), |
400 | | sizeof_small_and_big_view_should_equal); |
401 | | |
402 | | BAIDU_CASSERT(IOBuf::DEFAULT_BLOCK_SIZE/4096*4096 == IOBuf::DEFAULT_BLOCK_SIZE, |
403 | | sizeof_block_should_be_multiply_of_4096); |
404 | | |
405 | | const IOBuf::Area IOBuf::INVALID_AREA; |
406 | | |
407 | 0 | IOBuf::IOBuf(const IOBuf& rhs) { |
408 | 0 | if (rhs._small()) { |
409 | 0 | _sv = rhs._sv; |
410 | 0 | if (_sv.refs[0].block) { |
411 | 0 | _sv.refs[0].block->inc_ref(); |
412 | 0 | } |
413 | 0 | if (_sv.refs[1].block) { |
414 | 0 | _sv.refs[1].block->inc_ref(); |
415 | 0 | } |
416 | 0 | } else { |
417 | 0 | _bv.magic = -1; |
418 | 0 | _bv.start = 0; |
419 | 0 | _bv.nref = rhs._bv.nref; |
420 | 0 | _bv.cap_mask = rhs._bv.cap_mask; |
421 | 0 | _bv.nbytes = rhs._bv.nbytes; |
422 | 0 | _bv.refs = iobuf::acquire_blockref_array(_bv.capacity()); |
423 | 0 | for (size_t i = 0; i < _bv.nref; ++i) { |
424 | 0 | _bv.refs[i] = rhs._bv.ref_at(i); |
425 | 0 | _bv.refs[i].block->inc_ref(); |
426 | 0 | } |
427 | 0 | } |
428 | 0 | } |
429 | | |
430 | 0 | void IOBuf::operator=(const IOBuf& rhs) { |
431 | 0 | if (this == &rhs) { |
432 | 0 | return; |
433 | 0 | } |
434 | 0 | if (!rhs._small() && !_small() && _bv.cap_mask == rhs._bv.cap_mask) { |
435 | | // Reuse array of refs |
436 | | // Remove references to previous blocks. |
437 | 0 | for (size_t i = 0; i < _bv.nref; ++i) { |
438 | 0 | _bv.ref_at(i).block->dec_ref(); |
439 | 0 | } |
440 | | // References blocks in rhs. |
441 | 0 | _bv.start = 0; |
442 | 0 | _bv.nref = rhs._bv.nref; |
443 | 0 | _bv.nbytes = rhs._bv.nbytes; |
444 | 0 | for (size_t i = 0; i < _bv.nref; ++i) { |
445 | 0 | _bv.refs[i] = rhs._bv.ref_at(i); |
446 | 0 | _bv.refs[i].block->inc_ref(); |
447 | 0 | } |
448 | 0 | } else { |
449 | 0 | this->~IOBuf(); |
450 | 0 | new (this) IOBuf(rhs); |
451 | 0 | } |
452 | 0 | } |
453 | | |
454 | | template <bool MOVE> |
455 | 0 | void IOBuf::_push_or_move_back_ref_to_smallview(const BlockRef& r) { |
456 | 0 | BlockRef* const refs = _sv.refs; |
457 | 0 | if (NULL == refs[0].block) { |
458 | 0 | refs[0] = r; |
459 | 0 | if (!MOVE) { |
460 | 0 | r.block->inc_ref(); |
461 | 0 | } |
462 | 0 | return; |
463 | 0 | } |
464 | 0 | if (NULL == refs[1].block) { |
465 | 0 | if (refs[0].block == r.block && |
466 | 0 | refs[0].offset + refs[0].length == r.offset) { // Merge ref |
467 | 0 | refs[0].length += r.length; |
468 | 0 | if (MOVE) { |
469 | 0 | r.block->dec_ref(); |
470 | 0 | } |
471 | 0 | return; |
472 | 0 | } |
473 | 0 | refs[1] = r; |
474 | 0 | if (!MOVE) { |
475 | 0 | r.block->inc_ref(); |
476 | 0 | } |
477 | 0 | return; |
478 | 0 | } |
479 | 0 | if (refs[1].block == r.block && |
480 | 0 | refs[1].offset + refs[1].length == r.offset) { // Merge ref |
481 | 0 | refs[1].length += r.length; |
482 | 0 | if (MOVE) { |
483 | 0 | r.block->dec_ref(); |
484 | 0 | } |
485 | 0 | return; |
486 | 0 | } |
487 | | // Convert to BigView |
488 | 0 | BlockRef* new_refs = iobuf::acquire_blockref_array(); |
489 | 0 | new_refs[0] = refs[0]; |
490 | 0 | new_refs[1] = refs[1]; |
491 | 0 | new_refs[2] = r; |
492 | 0 | const size_t new_nbytes = refs[0].length + refs[1].length + r.length; |
493 | 0 | if (!MOVE) { |
494 | 0 | r.block->inc_ref(); |
495 | 0 | } |
496 | 0 | _bv.magic = -1; |
497 | 0 | _bv.start = 0; |
498 | 0 | _bv.refs = new_refs; |
499 | 0 | _bv.nref = 3; |
500 | 0 | _bv.cap_mask = INITIAL_CAP - 1; |
501 | 0 | _bv.nbytes = new_nbytes; |
502 | 0 | } Unexecuted instantiation: void butil::IOBuf::_push_or_move_back_ref_to_smallview<true>(butil::IOBuf::BlockRef const&) Unexecuted instantiation: void butil::IOBuf::_push_or_move_back_ref_to_smallview<false>(butil::IOBuf::BlockRef const&) |
503 | | // Explicitly initialize templates. |
504 | | template void IOBuf::_push_or_move_back_ref_to_smallview<true>(const BlockRef&); |
505 | | template void IOBuf::_push_or_move_back_ref_to_smallview<false>(const BlockRef&); |
506 | | |
507 | | template <bool MOVE> |
508 | 0 | void IOBuf::_push_or_move_back_ref_to_bigview(const BlockRef& r) { |
509 | 0 | BlockRef& back = _bv.ref_at(_bv.nref - 1); |
510 | 0 | if (back.block == r.block && back.offset + back.length == r.offset) { |
511 | | // Merge ref |
512 | 0 | back.length += r.length; |
513 | 0 | _bv.nbytes += r.length; |
514 | 0 | if (MOVE) { |
515 | 0 | r.block->dec_ref(); |
516 | 0 | } |
517 | 0 | return; |
518 | 0 | } |
519 | | |
520 | 0 | if (_bv.nref != _bv.capacity()) { |
521 | 0 | _bv.ref_at(_bv.nref++) = r; |
522 | 0 | _bv.nbytes += r.length; |
523 | 0 | if (!MOVE) { |
524 | 0 | r.block->inc_ref(); |
525 | 0 | } |
526 | 0 | return; |
527 | 0 | } |
528 | | // resize, don't modify bv until new_refs is fully assigned |
529 | 0 | const uint32_t new_cap = _bv.capacity() * 2; |
530 | 0 | BlockRef* new_refs = iobuf::acquire_blockref_array(new_cap); |
531 | 0 | for (uint32_t i = 0; i < _bv.nref; ++i) { |
532 | 0 | new_refs[i] = _bv.ref_at(i); |
533 | 0 | } |
534 | 0 | new_refs[_bv.nref++] = r; |
535 | | |
536 | | // Change other variables |
537 | 0 | _bv.start = 0; |
538 | 0 | iobuf::release_blockref_array(_bv.refs, _bv.capacity()); |
539 | 0 | _bv.refs = new_refs; |
540 | 0 | _bv.cap_mask = new_cap - 1; |
541 | 0 | _bv.nbytes += r.length; |
542 | 0 | if (!MOVE) { |
543 | 0 | r.block->inc_ref(); |
544 | 0 | } |
545 | 0 | } Unexecuted instantiation: void butil::IOBuf::_push_or_move_back_ref_to_bigview<true>(butil::IOBuf::BlockRef const&) Unexecuted instantiation: void butil::IOBuf::_push_or_move_back_ref_to_bigview<false>(butil::IOBuf::BlockRef const&) |
546 | | // Explicitly initialize templates. |
547 | | template void IOBuf::_push_or_move_back_ref_to_bigview<true>(const BlockRef&); |
548 | | template void IOBuf::_push_or_move_back_ref_to_bigview<false>(const BlockRef&); |
549 | | |
550 | | template <bool MOVEOUT> |
551 | 0 | int IOBuf::_pop_or_moveout_front_ref() { |
552 | 0 | if (_small()) { |
553 | 0 | if (_sv.refs[0].block != NULL) { |
554 | 0 | if (!MOVEOUT) { |
555 | 0 | _sv.refs[0].block->dec_ref(); |
556 | 0 | } |
557 | 0 | _sv.refs[0] = _sv.refs[1]; |
558 | 0 | reset_block_ref(_sv.refs[1]); |
559 | 0 | return 0; |
560 | 0 | } |
561 | 0 | return -1; |
562 | 0 | } else { |
563 | | // _bv.nref must be greater than 2 |
564 | 0 | const uint32_t start = _bv.start; |
565 | 0 | if (!MOVEOUT) { |
566 | 0 | _bv.refs[start].block->dec_ref(); |
567 | 0 | } |
568 | 0 | if (--_bv.nref > 2) { |
569 | 0 | _bv.start = (start + 1) & _bv.cap_mask; |
570 | 0 | _bv.nbytes -= _bv.refs[start].length; |
571 | 0 | } else { // count==2, fall back to SmallView |
572 | 0 | BlockRef* const saved_refs = _bv.refs; |
573 | 0 | const uint32_t saved_cap_mask = _bv.cap_mask; |
574 | 0 | _sv.refs[0] = saved_refs[(start + 1) & saved_cap_mask]; |
575 | 0 | _sv.refs[1] = saved_refs[(start + 2) & saved_cap_mask]; |
576 | 0 | iobuf::release_blockref_array(saved_refs, saved_cap_mask + 1); |
577 | 0 | } |
578 | 0 | return 0; |
579 | 0 | } |
580 | 0 | } Unexecuted instantiation: int butil::IOBuf::_pop_or_moveout_front_ref<true>() Unexecuted instantiation: int butil::IOBuf::_pop_or_moveout_front_ref<false>() |
581 | | // Explicitly initialize templates. |
582 | | template int IOBuf::_pop_or_moveout_front_ref<true>(); |
583 | | template int IOBuf::_pop_or_moveout_front_ref<false>(); |
584 | | |
585 | 0 | int IOBuf::_pop_back_ref() { |
586 | 0 | if (_small()) { |
587 | 0 | if (_sv.refs[1].block != NULL) { |
588 | 0 | _sv.refs[1].block->dec_ref(); |
589 | 0 | reset_block_ref(_sv.refs[1]); |
590 | 0 | return 0; |
591 | 0 | } else if (_sv.refs[0].block != NULL) { |
592 | 0 | _sv.refs[0].block->dec_ref(); |
593 | 0 | reset_block_ref(_sv.refs[0]); |
594 | 0 | return 0; |
595 | 0 | } |
596 | 0 | return -1; |
597 | 0 | } else { |
598 | | // _bv.nref must be greater than 2 |
599 | 0 | const uint32_t start = _bv.start; |
600 | 0 | IOBuf::BlockRef& back = _bv.refs[(start + _bv.nref - 1) & _bv.cap_mask]; |
601 | 0 | back.block->dec_ref(); |
602 | 0 | if (--_bv.nref > 2) { |
603 | 0 | _bv.nbytes -= back.length; |
604 | 0 | } else { // count==2, fall back to SmallView |
605 | 0 | BlockRef* const saved_refs = _bv.refs; |
606 | 0 | const uint32_t saved_cap_mask = _bv.cap_mask; |
607 | 0 | _sv.refs[0] = saved_refs[start]; |
608 | 0 | _sv.refs[1] = saved_refs[(start + 1) & saved_cap_mask]; |
609 | 0 | iobuf::release_blockref_array(saved_refs, saved_cap_mask + 1); |
610 | 0 | } |
611 | 0 | return 0; |
612 | 0 | } |
613 | 0 | } |
614 | | |
615 | 0 | void IOBuf::clear() { |
616 | 0 | if (_small()) { |
617 | 0 | if (_sv.refs[0].block != NULL) { |
618 | 0 | _sv.refs[0].block->dec_ref(); |
619 | 0 | reset_block_ref(_sv.refs[0]); |
620 | | |
621 | 0 | if (_sv.refs[1].block != NULL) { |
622 | 0 | _sv.refs[1].block->dec_ref(); |
623 | 0 | reset_block_ref(_sv.refs[1]); |
624 | 0 | } |
625 | 0 | } |
626 | 0 | } else { |
627 | 0 | for (uint32_t i = 0; i < _bv.nref; ++i) { |
628 | 0 | _bv.ref_at(i).block->dec_ref(); |
629 | 0 | } |
630 | 0 | iobuf::release_blockref_array(_bv.refs, _bv.capacity()); |
631 | 0 | new (this) IOBuf; |
632 | 0 | } |
633 | 0 | } |
634 | | |
635 | 0 | size_t IOBuf::pop_front(size_t n) { |
636 | 0 | const size_t len = length(); |
637 | 0 | if (n >= len) { |
638 | 0 | clear(); |
639 | 0 | return len; |
640 | 0 | } |
641 | 0 | const size_t saved_n = n; |
642 | 0 | while (n) { // length() == 0 does not enter |
643 | 0 | IOBuf::BlockRef &r = _front_ref(); |
644 | 0 | if (r.length > n) { |
645 | 0 | r.offset += n; |
646 | 0 | r.length -= n; |
647 | 0 | if (!_small()) { |
648 | 0 | _bv.nbytes -= n; |
649 | 0 | } |
650 | 0 | return saved_n; |
651 | 0 | } |
652 | 0 | n -= r.length; |
653 | 0 | _pop_front_ref(); |
654 | 0 | } |
655 | 0 | return saved_n; |
656 | 0 | } |
657 | | |
658 | 0 | bool IOBuf::cut1(void* c) { |
659 | 0 | if (empty()) { |
660 | 0 | return false; |
661 | 0 | } |
662 | 0 | IOBuf::BlockRef &r = _front_ref(); |
663 | 0 | *(char*)c = r.block->data[r.offset]; |
664 | 0 | if (r.length > 1) { |
665 | 0 | ++r.offset; |
666 | 0 | --r.length; |
667 | 0 | if (!_small()) { |
668 | 0 | --_bv.nbytes; |
669 | 0 | } |
670 | 0 | } else { |
671 | 0 | _pop_front_ref(); |
672 | 0 | } |
673 | 0 | return true; |
674 | 0 | } |
675 | | |
676 | 0 | size_t IOBuf::pop_back(size_t n) { |
677 | 0 | const size_t len = length(); |
678 | 0 | if (n >= len) { |
679 | 0 | clear(); |
680 | 0 | return len; |
681 | 0 | } |
682 | 0 | const size_t saved_n = n; |
683 | 0 | while (n) { // length() == 0 does not enter |
684 | 0 | IOBuf::BlockRef &r = _back_ref(); |
685 | 0 | if (r.length > n) { |
686 | 0 | r.length -= n; |
687 | 0 | if (!_small()) { |
688 | 0 | _bv.nbytes -= n; |
689 | 0 | } |
690 | 0 | return saved_n; |
691 | 0 | } |
692 | 0 | n -= r.length; |
693 | 0 | _pop_back_ref(); |
694 | 0 | } |
695 | 0 | return saved_n; |
696 | 0 | } |
697 | | |
698 | 0 | size_t IOBuf::cutn(IOBuf* out, size_t n) { |
699 | 0 | const size_t len = length(); |
700 | 0 | if (n > len) { |
701 | 0 | n = len; |
702 | 0 | } |
703 | 0 | const size_t saved_n = n; |
704 | 0 | while (n) { // length() == 0 does not enter |
705 | 0 | IOBuf::BlockRef &r = _front_ref(); |
706 | 0 | if (r.length <= n) { |
707 | 0 | n -= r.length; |
708 | 0 | out->_move_back_ref(r); |
709 | 0 | _moveout_front_ref(); |
710 | 0 | } else { |
711 | 0 | const IOBuf::BlockRef cr = { r.offset, (uint32_t)n, r.block }; |
712 | 0 | out->_push_back_ref(cr); |
713 | | |
714 | 0 | r.offset += n; |
715 | 0 | r.length -= n; |
716 | 0 | if (!_small()) { |
717 | 0 | _bv.nbytes -= n; |
718 | 0 | } |
719 | 0 | return saved_n; |
720 | 0 | } |
721 | 0 | } |
722 | 0 | return saved_n; |
723 | 0 | } |
724 | | |
725 | 0 | size_t IOBuf::cutn(void* out, size_t n) { |
726 | 0 | const size_t len = length(); |
727 | 0 | if (n > len) { |
728 | 0 | n = len; |
729 | 0 | } |
730 | 0 | const size_t saved_n = n; |
731 | 0 | while (n) { // length() == 0 does not enter |
732 | 0 | IOBuf::BlockRef &r = _front_ref(); |
733 | 0 | if (r.length <= n) { |
734 | 0 | iobuf::cp(out, r.block->data + r.offset, r.length); |
735 | 0 | out = (char*)out + r.length; |
736 | 0 | n -= r.length; |
737 | 0 | _pop_front_ref(); |
738 | 0 | } else { |
739 | 0 | iobuf::cp(out, r.block->data + r.offset, n); |
740 | 0 | out = (char*)out + n; |
741 | 0 | r.offset += n; |
742 | 0 | r.length -= n; |
743 | 0 | if (!_small()) { |
744 | 0 | _bv.nbytes -= n; |
745 | 0 | } |
746 | 0 | return saved_n; |
747 | 0 | } |
748 | 0 | } |
749 | 0 | return saved_n; |
750 | 0 | } |
751 | | |
752 | 0 | size_t IOBuf::cutn(std::string* out, size_t n) { |
753 | 0 | if (n == 0) { |
754 | 0 | return 0; |
755 | 0 | } |
756 | 0 | const size_t len = length(); |
757 | 0 | if (n > len) { |
758 | 0 | n = len; |
759 | 0 | } |
760 | 0 | const size_t old_size = out->size(); |
761 | 0 | out->resize(out->size() + n); |
762 | 0 | return cutn(&(*out)[old_size], n); |
763 | 0 | } |
764 | | |
765 | 0 | int IOBuf::_cut_by_char(IOBuf* out, char d) { |
766 | 0 | const size_t nref = _ref_num(); |
767 | 0 | size_t n = 0; |
768 | | |
769 | 0 | for (size_t i = 0; i < nref; ++i) { |
770 | 0 | IOBuf::BlockRef const& r = _ref_at(i); |
771 | 0 | char const* const s = r.block->data + r.offset; |
772 | 0 | for (uint32_t j = 0; j < r.length; ++j, ++n) { |
773 | 0 | if (s[j] == d) { |
774 | | // There's no way cutn/pop_front fails |
775 | 0 | cutn(out, n); |
776 | 0 | pop_front(1); |
777 | 0 | return 0; |
778 | 0 | } |
779 | 0 | } |
780 | 0 | } |
781 | | |
782 | 0 | return -1; |
783 | 0 | } |
784 | | |
785 | 0 | int IOBuf::_cut_by_delim(IOBuf* out, char const* dbegin, size_t ndelim) { |
786 | 0 | typedef unsigned long SigType; |
787 | 0 | const size_t NMAX = sizeof(SigType); |
788 | | |
789 | 0 | if (ndelim > NMAX || ndelim > length()) { |
790 | 0 | return -1; |
791 | 0 | } |
792 | | |
793 | 0 | SigType dsig = 0; |
794 | 0 | for (size_t i = 0; i < ndelim; ++i) { |
795 | 0 | dsig = (dsig << CHAR_BIT) | static_cast<SigType>(dbegin[i]); |
796 | 0 | } |
797 | | |
798 | 0 | const SigType SIGMASK = |
799 | 0 | (ndelim == NMAX ? (SigType)-1 : (((SigType)1 << (ndelim * CHAR_BIT)) - 1)); |
800 | |
|
801 | 0 | const size_t nref = _ref_num(); |
802 | 0 | SigType sig = 0; |
803 | 0 | size_t n = 0; |
804 | |
|
805 | 0 | for (size_t i = 0; i < nref; ++i) { |
806 | 0 | IOBuf::BlockRef const& r = _ref_at(i); |
807 | 0 | char const* const s = r.block->data + r.offset; |
808 | | |
809 | 0 | for (uint32_t j = 0; j < r.length; ++j, ++n) { |
810 | 0 | sig = ((sig << CHAR_BIT) | static_cast<SigType>(s[j])) & SIGMASK; |
811 | 0 | if (sig == dsig) { |
812 | | // There's no way cutn/pop_front fails |
813 | 0 | cutn(out, n + 1 - ndelim); |
814 | 0 | pop_front(ndelim); |
815 | 0 | return 0; |
816 | 0 | } |
817 | 0 | } |
818 | 0 | } |
819 | | |
820 | 0 | return -1; |
821 | 0 | } |
822 | | |
823 | | // Since cut_into_file_descriptor() allocates iovec on stack, IOV_MAX=1024 |
824 | | // is too large(in the worst case) for bthreads with small stacks. |
825 | | static const size_t IOBUF_IOV_MAX = 256; |
826 | | |
827 | 0 | ssize_t IOBuf::pcut_into_file_descriptor(int fd, off_t offset, size_t size_hint) { |
828 | 0 | if (empty()) { |
829 | 0 | return 0; |
830 | 0 | } |
831 | | |
832 | 0 | const size_t nref = std::min(_ref_num(), IOBUF_IOV_MAX); |
833 | 0 | struct iovec vec[nref]; |
834 | 0 | size_t nvec = 0; |
835 | 0 | size_t cur_len = 0; |
836 | |
|
837 | 0 | do { |
838 | 0 | IOBuf::BlockRef const& r = _ref_at(nvec); |
839 | 0 | vec[nvec].iov_base = r.block->data + r.offset; |
840 | 0 | vec[nvec].iov_len = r.length; |
841 | 0 | ++nvec; |
842 | 0 | cur_len += r.length; |
843 | 0 | } while (nvec < nref && cur_len < size_hint); |
844 | |
|
845 | 0 | ssize_t nw = 0; |
846 | |
|
847 | 0 | if (offset >= 0) { |
848 | 0 | static iobuf::iov_function pwritev_func = iobuf::get_pwritev_func(); |
849 | 0 | nw = pwritev_func(fd, vec, nvec, offset); |
850 | 0 | } else { |
851 | 0 | nw = ::writev(fd, vec, nvec); |
852 | 0 | } |
853 | 0 | if (nw > 0) { |
854 | 0 | pop_front(nw); |
855 | 0 | } |
856 | 0 | return nw; |
857 | 0 | } |
858 | | |
859 | 0 | ssize_t IOBuf::cut_into_writer(IWriter* writer, size_t size_hint) { |
860 | 0 | if (empty()) { |
861 | 0 | return 0; |
862 | 0 | } |
863 | 0 | const size_t nref = std::min(_ref_num(), IOBUF_IOV_MAX); |
864 | 0 | struct iovec vec[nref]; |
865 | 0 | size_t nvec = 0; |
866 | 0 | size_t cur_len = 0; |
867 | |
|
868 | 0 | do { |
869 | 0 | IOBuf::BlockRef const& r = _ref_at(nvec); |
870 | 0 | vec[nvec].iov_base = r.block->data + r.offset; |
871 | 0 | vec[nvec].iov_len = r.length; |
872 | 0 | ++nvec; |
873 | 0 | cur_len += r.length; |
874 | 0 | } while (nvec < nref && cur_len < size_hint); |
875 | |
|
876 | 0 | const ssize_t nw = writer->WriteV(vec, nvec); |
877 | 0 | if (nw > 0) { |
878 | 0 | pop_front(nw); |
879 | 0 | } |
880 | 0 | return nw; |
881 | 0 | } |
882 | | |
// Write the first block ref of this buffer into `ssl' with SSL_write and
// pop the written bytes. Only one ref is attempted per call since SSL
// has no vectored write.
// Returns SSL_write()'s result; *ssl_error is always set from
// SSL_get_error() on that result (SSL_ERROR_NONE when empty).
ssize_t IOBuf::cut_into_SSL_channel(SSL* ssl, int* ssl_error) {
    *ssl_error = SSL_ERROR_NONE;
    if (empty()) {
        return 0;
    }

    IOBuf::BlockRef const& r = _ref_at(0);
    // Clear the thread's error queue so SSL_get_error() below reflects
    // only this SSL_write call.
    ERR_clear_error();
    const int nw = SSL_write(ssl, r.block->data + r.offset, r.length);
    if (nw > 0) {
        pop_front(nw);
    }
    *ssl_error = SSL_get_error(ssl, nw);
    return nw;
}
898 | | |
// Write multiple IOBufs into `ssl' sequentially until all pieces are
// drained or the channel blocks/fails.
// Returns total bytes written (> 0), or the first non-positive result
// when nothing was written at all. Retriable conditions (WANT_WRITE, or
// a non-fatal syscall errno) are normalized to SSL_ERROR_WANT_WRITE so
// the caller simply retries; other errors return the failing rc.
ssize_t IOBuf::cut_multiple_into_SSL_channel(SSL* ssl, IOBuf* const* pieces,
                                             size_t count, int* ssl_error) {
    ssize_t nw = 0;
    *ssl_error = SSL_ERROR_NONE;
    for (size_t i = 0; i < count; ) {
        if (pieces[i]->empty()) {
            // Advance only on empty pieces; a non-empty piece is retried
            // until drained or the write stalls.
            ++i;
            continue;
        }

        ssize_t rc = pieces[i]->cut_into_SSL_channel(ssl, ssl_error);
        if (rc > 0) {
            nw += rc;
        } else {
            if (rc < 0) {
                if (*ssl_error == SSL_ERROR_WANT_WRITE
                    || (*ssl_error == SSL_ERROR_SYSCALL
                        && BIO_fd_non_fatal_error(errno) == 1)) {
                    // Non fatal error, tell caller to write again
                    *ssl_error = SSL_ERROR_WANT_WRITE;
                } else {
                    // Other errors are fatal
                    return rc;
                }
            }
            if (nw == 0) {
                nw = rc;  // Nothing written yet, overwrite nw
            }
            break;
        }
    }

#ifndef USE_MESALINK
    // BIO is disabled for now (see socket.cpp) and the following implementation is
    // NOT correct since it doesn't handle the EAGAIN event of BIO_flush
    // BIO* wbio = SSL_get_wbio(ssl);
    // if (BIO_wpending(wbio) > 0) {
    //     int rc = BIO_flush(wbio);
    //     if (rc <= 0 && BIO_fd_non_fatal_error(errno) == 0) {
    //         // Fatal error during BIO_flush
    //         *ssl_error = SSL_ERROR_SYSCALL;
    //         return rc;
    //     }
    // }
#else
    int rc = SSL_flush(ssl);
    if (rc <= 0) {
        *ssl_error = SSL_ERROR_SYSCALL;
        return rc;
    }
#endif

    return nw;
}
953 | | |
// Write multiple IOBufs to `fd' with one vectored syscall (pwritev when
// `offset' >= 0, writev otherwise), then pop the written bytes from the
// pieces front to back.
// Returns bytes written, or the (<= 0) result of the write call.
ssize_t IOBuf::pcut_multiple_into_file_descriptor(
    int fd, off_t offset, IOBuf* const* pieces, size_t count) {
    if (BAIDU_UNLIKELY(count == 0)) {
        return 0;
    }
    if (1UL == count) {
        // Single piece: delegate to the simpler single-buffer path.
        return pieces[0]->pcut_into_file_descriptor(fd, offset);
    }
    struct iovec vec[IOBUF_IOV_MAX];
    size_t nvec = 0;
    // Gather refs from all pieces; entries beyond IOBUF_IOV_MAX are
    // silently left for later calls.
    for (size_t i = 0; i < count; ++i) {
        const IOBuf* p = pieces[i];
        const size_t nref = p->_ref_num();
        for (size_t j = 0; j < nref && nvec < IOBUF_IOV_MAX; ++j, ++nvec) {
            IOBuf::BlockRef const& r = p->_ref_at(j);
            vec[nvec].iov_base = r.block->data + r.offset;
            vec[nvec].iov_len = r.length;
        }
    }

    ssize_t nw = 0;
    if (offset >= 0) {
        static iobuf::iov_function pwritev_func = iobuf::get_pwritev_func();
        nw = pwritev_func(fd, vec, nvec, offset);
    } else {
        nw = ::writev(fd, vec, nvec);
    }
    if (nw <= 0) {
        return nw;
    }
    // Distribute the written byte count over the pieces in order.
    size_t npop_all = nw;
    for (size_t i = 0; i < count; ++i) {
        npop_all -= pieces[i]->pop_front(npop_all);
        if (npop_all == 0) {
            break;
        }
    }
    return nw;
}
993 | | |
// Write multiple IOBufs through one writer->WriteV() call, then pop the
// written bytes from the pieces front to back.
// Returns bytes written, or writer->WriteV()'s (<= 0) result.
ssize_t IOBuf::cut_multiple_into_writer(
    IWriter* writer, IOBuf* const* pieces, size_t count) {
    if (BAIDU_UNLIKELY(count == 0)) {
        return 0;
    }
    if (1UL == count) {
        // Single piece: delegate to the simpler single-buffer path.
        return pieces[0]->cut_into_writer(writer);
    }
    struct iovec vec[IOBUF_IOV_MAX];
    size_t nvec = 0;
    // Gather refs from all pieces; entries beyond IOBUF_IOV_MAX are
    // silently left for later calls.
    for (size_t i = 0; i < count; ++i) {
        const IOBuf* p = pieces[i];
        const size_t nref = p->_ref_num();
        for (size_t j = 0; j < nref && nvec < IOBUF_IOV_MAX; ++j, ++nvec) {
            IOBuf::BlockRef const& r = p->_ref_at(j);
            vec[nvec].iov_base = r.block->data + r.offset;
            vec[nvec].iov_len = r.length;
        }
    }

    const ssize_t nw = writer->WriteV(vec, nvec);
    if (nw <= 0) {
        return nw;
    }
    // Distribute the written byte count over the pieces in order.
    size_t npop_all = nw;
    for (size_t i = 0; i < count; ++i) {
        npop_all -= pieces[i]->pop_front(npop_all);
        if (npop_all == 0) {
            break;
        }
    }
    return nw;
}
1027 | | |
1028 | | |
1029 | 0 | void IOBuf::append(const IOBuf& other) { |
1030 | 0 | const size_t nref = other._ref_num(); |
1031 | 0 | for (size_t i = 0; i < nref; ++i) { |
1032 | 0 | _push_back_ref(other._ref_at(i)); |
1033 | 0 | } |
1034 | 0 | } |
1035 | | |
// Steal the content of `movable_other'. When this buffer is empty a
// plain swap suffices; otherwise every ref is moved in and the source
// is reset to an empty IOBuf.
void IOBuf::append(const Movable& movable_other) {
    if (empty()) {
        swap(movable_other.value());
    } else {
        butil::IOBuf& other = movable_other.value();
        const size_t nref = other._ref_num();
        for (size_t i = 0; i < nref; ++i) {
            // Transfer ownership of the ref; no refcount churn.
            _move_back_ref(other._ref_at(i));
        }
        if (!other._small()) {
            // Free other's heap-allocated ref array before resetting it.
            iobuf::release_blockref_array(other._bv.refs, other._bv.capacity());
        }
        // Re-construct `other' in place as an empty IOBuf.
        new (&other) IOBuf;
    }
}
1051 | | |
// Append one byte into the shared thread-local block.
// Returns 0 on success, -1 when no block can be obtained.
int IOBuf::push_back(char c) {
    IOBuf::Block* b = iobuf::share_tls_block();
    if (BAIDU_UNLIKELY(!b)) {
        return -1;
    }
    b->data[b->size] = c;
    // The ref is built before b->size is advanced so its offset points
    // at the byte just written.
    const IOBuf::BlockRef r = { b->size, 1, b };
    ++b->size;
    _push_back_ref(r);
    return 0;
}
1063 | | |
1064 | 0 | int IOBuf::append(char const* s) { |
1065 | 0 | if (BAIDU_LIKELY(s != NULL)) { |
1066 | 0 | return append(s, strlen(s)); |
1067 | 0 | } |
1068 | 0 | return -1; |
1069 | 0 | } |
1070 | | |
// Copy `count' bytes from `data' into this buffer, filling successive
// thread-local blocks.
// Returns 0 on success, -1 on NULL data or block allocation failure
// (in which case a partial copy may already have been appended).
int IOBuf::append(void const* data, size_t count) {
    if (BAIDU_UNLIKELY(!data)) {
        return -1;
    }
    if (count == 1) {
        // Fast path for single-byte appends.
        return push_back(*((char const*)data));
    }
    size_t total_nc = 0;
    while (total_nc < count) {  // excluded count == 0
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return -1;
        }
        const size_t nc = std::min(count - total_nc, b->left_space());
        iobuf::cp(b->data + b->size, (char*)data + total_nc, nc);

        const IOBuf::BlockRef r = { (uint32_t)b->size, (uint32_t)nc, b };
        _push_back_ref(r);
        b->size += nc;
        total_nc += nc;
    }
    return 0;
}
1094 | | |
// Copy the memory referenced by `vec[0..n)' into this buffer. Each
// shared TLS block is packed with as many iovec segments as fit before
// one ref covering all of them is pushed.
// Returns 0 on success, -1 on block allocation failure.
int IOBuf::appendv(const const_iovec* vec, size_t n) {
    size_t offset = 0;  // bytes of vec[i] already copied in earlier rounds
    for (size_t i = 0; i < n;) {
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return -1;
        }
        // Bytes copied into `b' during this round.
        uint32_t total_cp = 0;
        for (; i < n; ++i, offset = 0) {
            const const_iovec & vec_i = vec[i];
            const size_t nc = std::min(vec_i.iov_len - offset, b->left_space() - total_cp);
            iobuf::cp(b->data + b->size + total_cp, (char*)vec_i.iov_base + offset, nc);
            total_cp += nc;
            offset += nc;
            if (offset != vec_i.iov_len) {
                // Block is full; the rest of vec[i] continues in the
                // next block (offset keeps the progress).
                break;
            }
        }

        const IOBuf::BlockRef r = { (uint32_t)b->size, total_cp, b };
        b->size += total_cp;
        _push_back_ref(r);
    }
    return 0;
}
1120 | | |
// Wrap user-owned memory `data' (later released by `deleter', defaulting
// to ::free) as a single zero-copy block carrying `meta'.
// Returns 0 on success, -1 when `size' cannot be encoded in a 32-bit
// BlockRef or when allocating the Block header fails.
int IOBuf::append_user_data_with_meta(void* data,
                                      size_t size,
                                      std::function<void(void*)> deleter,
                                      uint64_t meta) {
    if (size > 0xFFFFFFFFULL - 100) {
        LOG(FATAL) << "data_size=" << size << " is too large";
        return -1;
    }
    if (!deleter) {
        deleter = ::free;
    }
    if (!size) {
        // Nothing to append; release the user memory immediately.
        deleter(data);
        return 0;
    }
    // Only the Block header (+ user-data extension) is allocated here;
    // the payload stays in the caller-provided memory.
    char* mem = (char*)malloc(sizeof(IOBuf::Block) + sizeof(UserDataExtension));
    if (mem == NULL) {
        return -1;
    }
    IOBuf::Block* b = new (mem) IOBuf::Block((char*)data, size, std::move(deleter));
    b->u.data_meta = meta;
    const IOBuf::BlockRef r = { 0, b->cap, b };
    // Move (not copy) the ref: the block's creation reference is
    // transferred to this buffer.
    _move_back_ref(r);
    return 0;
}
1146 | | |
1147 | 0 | uint64_t IOBuf::get_first_data_meta() { |
1148 | 0 | if (_ref_num() == 0) { |
1149 | 0 | return 0; |
1150 | 0 | } |
1151 | 0 | IOBuf::BlockRef const& r = _ref_at(0); |
1152 | 0 | if (!(r.block->flags & IOBUF_BLOCK_FLAGS_USER_DATA)) { |
1153 | 0 | return 0; |
1154 | 0 | } |
1155 | 0 | return r.block->u.data_meta; |
1156 | 0 | } |
1157 | | |
// Resize the buffer to exactly `n' bytes: shrink by popping from the
// back, or grow by appending bytes of value `c'.
// Returns 0 on success, -1 when a block cannot be obtained while
// growing (the buffer may already be partially grown then).
int IOBuf::resize(size_t n, char c) {
    const size_t saved_len = length();
    if (n < saved_len) {
        pop_back(saved_len - n);
        return 0;
    }
    const size_t count = n - saved_len;
    size_t total_nc = 0;
    while (total_nc < count) {  // excluded count == 0
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return -1;
        }
        const size_t nc = std::min(count - total_nc, b->left_space());
        memset(b->data + b->size, c, nc);

        const IOBuf::BlockRef r = { (uint32_t)b->size, (uint32_t)nc, b };
        _push_back_ref(r);
        b->size += nc;
        total_nc += nc;
    }
    return 0;
}
1181 | | |
// NOTE: We don't use C++ bitwise fields which make copying slower.
// An IOBuf::Area packs (ref_index, ref_offset, size) into one 64-bit
// value, high bits to low: [ ref_index:19 | ref_offset:15 | size:30 ].
static const int REF_INDEX_BITS = 19;
static const int REF_OFFSET_BITS = 15;
static const int AREA_SIZE_BITS = 30;
static const uint32_t MAX_REF_INDEX = (((uint32_t)1) << REF_INDEX_BITS) - 1;
static const uint32_t MAX_REF_OFFSET = (((uint32_t)1) << REF_OFFSET_BITS) - 1;
static const uint32_t MAX_AREA_SIZE = (((uint32_t)1) << AREA_SIZE_BITS) - 1;
1189 | | |
// Pack (ref_index, ref_offset, size) into an Area (see the bit layout
// above). Returns INVALID_AREA, with an error log, when any field
// exceeds its bit budget.
inline IOBuf::Area make_area(uint32_t ref_index, uint32_t ref_offset,
                             uint32_t size) {
    if (ref_index > MAX_REF_INDEX ||
        ref_offset > MAX_REF_OFFSET ||
        size > MAX_AREA_SIZE) {
        LOG(ERROR) << "Too big parameters!";
        return IOBuf::INVALID_AREA;
    }
    return (((uint64_t)ref_index) << (REF_OFFSET_BITS + AREA_SIZE_BITS))
        | (((uint64_t)ref_offset) << AREA_SIZE_BITS)
        | size;
}
// Extract the ref_index field (top 19 bits) from an Area.
inline uint32_t get_area_ref_index(IOBuf::Area c) {
    return (c >> (REF_OFFSET_BITS + AREA_SIZE_BITS)) & MAX_REF_INDEX;
}
// Extract the ref_offset field (middle 15 bits) from an Area.
inline uint32_t get_area_ref_offset(IOBuf::Area c) {
    return (c >> AREA_SIZE_BITS) & MAX_REF_OFFSET;
}
// Extract the size field (low 30 bits) from an Area.
inline uint32_t get_area_size(IOBuf::Area c) {
    return (c & MAX_AREA_SIZE);
}
1211 | | |
// Reserve `count' bytes at the tail for later filling via
// unsafe_assign(). Returns an encoded Area locating the reserved range,
// or INVALID_AREA when a block cannot be obtained.
IOBuf::Area IOBuf::reserve(size_t count) {
    IOBuf::Area result = INVALID_AREA;
    size_t total_nc = 0;
    while (total_nc < count) {  // excluded count == 0
        IOBuf::Block* b = iobuf::share_tls_block();
        if (BAIDU_UNLIKELY(!b)) {
            return INVALID_AREA;
        }
        const size_t nc = std::min(count - total_nc, b->left_space());
        const IOBuf::BlockRef r = { (uint32_t)b->size, (uint32_t)nc, b };
        _push_back_ref(r);
        if (total_nc == 0) {
            // Encode the area at first time. Notice that the pushed ref may
            // be merged with existing ones.
            result = make_area(_ref_num() - 1, _back_ref().length - nc, count);
        }
        total_nc += nc;
        b->size += nc;
    }
    return result;
}
1233 | | |
// Fill a range previously returned by reserve() with `data'. "Unsafe"
// because the write is unsynchronized and blocks may be referenced by
// several BlockRefs.
// Returns 0 on success, -1 on invalid parameters or when the buffer is
// shorter than the reserved area (which indicates corruption).
int IOBuf::unsafe_assign(Area area, const void* data) {
    if (area == INVALID_AREA || data == NULL) {
        LOG(ERROR) << "Invalid parameters";
        return -1;
    }
    const uint32_t ref_index = get_area_ref_index(area);
    uint32_t ref_offset = get_area_ref_offset(area);
    uint32_t length = get_area_size(area);
    const size_t nref = _ref_num();
    for (size_t i = ref_index; i < nref; ++i) {
        IOBuf::BlockRef& r = _ref_at(i);
        // NOTE: we can't check if the block is shared with another IOBuf or
        // not since even a single IOBuf may reference a block multiple times
        // (by different BlockRef-s)

        const size_t nc = std::min(length, r.length - ref_offset);
        iobuf::cp(r.block->data + r.offset + ref_offset, data, nc);
        if (length == nc) {
            return 0;
        }
        // Only the first ref uses a non-zero intra-ref offset.
        ref_offset = 0;
        length -= nc;
        data = (char*)data + nc;
    }

    // Use check because we need to see the stack here.
    CHECK(false) << "IOBuf(" << size() << ", nref=" << _ref_num()
                 << ") is shorter than what we reserved("
                 << "ref=" << get_area_ref_index(area)
                 << " off=" << get_area_ref_offset(area)
                 << " size=" << get_area_size(area)
                 << "), this assignment probably corrupted something...";
    return -1;
}
1268 | | |
// Append up to `n' bytes starting at position `pos' of this buffer to
// `buf' by sharing refs (zero copy). Returns the number of bytes
// actually appended (0 when pos is past the end).
size_t IOBuf::append_to(IOBuf* buf, size_t n, size_t pos) const {
    const size_t nref = _ref_num();
    // Skip `pos' bytes. `offset' is the starting position in starting BlockRef.
    size_t offset = pos;
    size_t i = 0;
    for (; offset != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        if (offset < (size_t)r.length) {
            break;
        }
        offset -= r.length;
    }
    size_t m = n;  // bytes still to append
    for (; m != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        const size_t nc = std::min(m, (size_t)r.length - offset);
        // Push a sub-range of the ref; only the first ref carries a
        // non-zero intra-ref offset.
        const IOBuf::BlockRef r2 = { (uint32_t)(r.offset + offset),
                                     (uint32_t)nc, r.block };
        buf->_push_back_ref(r2);
        offset = 0;
        m -= nc;
    }
    // If nref == 0, here returns 0 correctly
    return n - m;
}
1294 | | |
// Copy up to `n' bytes starting at position `pos' into the flat buffer
// `d'. Returns the number of bytes copied (0 when pos is past the end).
size_t IOBuf::copy_to(void* d, size_t n, size_t pos) const {
    const size_t nref = _ref_num();
    // Skip `pos' bytes. `offset' is the starting position in starting BlockRef.
    size_t offset = pos;
    size_t i = 0;
    for (; offset != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        if (offset < (size_t)r.length) {
            break;
        }
        offset -= r.length;
    }
    size_t m = n;  // bytes still to copy
    for (; m != 0 && i < nref; ++i) {
        IOBuf::BlockRef const& r = _ref_at(i);
        const size_t nc = std::min(m, (size_t)r.length - offset);
        iobuf::cp(d, r.block->data + r.offset + offset, nc);
        // Only the first ref carries a non-zero intra-ref offset.
        offset = 0;
        d = (char*)d + nc;
        m -= nc;
    }
    // If nref == 0, here returns 0 correctly
    return n - m;
}
1319 | | |
1320 | 0 | size_t IOBuf::copy_to(std::string* s, size_t n, size_t pos) const { |
1321 | 0 | const size_t len = length(); |
1322 | 0 | if (len <= pos) { |
1323 | 0 | return 0; |
1324 | 0 | } |
1325 | 0 | if (n > len - pos) { // note: n + pos may overflow |
1326 | 0 | n = len - pos; |
1327 | 0 | } |
1328 | 0 | s->resize(n); |
1329 | 0 | return copy_to(&(*s)[0], n, pos); |
1330 | 0 | } |
1331 | | |
1332 | 0 | size_t IOBuf::append_to(std::string* s, size_t n, size_t pos) const { |
1333 | 0 | const size_t len = length(); |
1334 | 0 | if (len <= pos) { |
1335 | 0 | return 0; |
1336 | 0 | } |
1337 | 0 | if (n > len - pos) { // note: n + pos may overflow |
1338 | 0 | n = len - pos; |
1339 | 0 | } |
1340 | 0 | const size_t old_size = s->size(); |
1341 | 0 | s->resize(old_size + n); |
1342 | 0 | return copy_to(&(*s)[old_size], n, pos); |
1343 | 0 | } |
1344 | | |
1345 | | |
1346 | 0 | size_t IOBuf::copy_to_cstr(char* s, size_t n, size_t pos) const { |
1347 | 0 | const size_t nc = copy_to(s, n, pos); |
1348 | 0 | s[nc] = '\0'; |
1349 | 0 | return nc; |
1350 | 0 | } |
1351 | | |
// View the first `n' bytes as continuous memory: returns a pointer into
// the first block when it already holds n bytes (zero copy), otherwise
// copies the bytes into caller-provided `d' and returns `d'.
// Returns NULL when the buffer holds fewer than `n' bytes.
void const* IOBuf::fetch(void* d, size_t n) const {
    if (n <= length()) {
        IOBuf::BlockRef const& r0 = _ref_at(0);
        if (n <= r0.length) {
            // Fast path: already continuous inside the first block.
            return r0.block->data + r0.offset;
        }

        iobuf::cp(d, r0.block->data + r0.offset, r0.length);
        size_t total_nc = r0.length;  // bytes copied so far
        const size_t nref = _ref_num();
        for (size_t i = 1; i < nref; ++i) {
            IOBuf::BlockRef const& r = _ref_at(i);
            if (n <= r.length + total_nc) {
                // Last (possibly partial) chunk.
                iobuf::cp((char*)d + total_nc,
                          r.block->data + r.offset, n - total_nc);
                return d;
            }
            iobuf::cp((char*)d + total_nc, r.block->data + r.offset, r.length);
            total_nc += r.length;
        }
    }
    return NULL;
}
1375 | | |
1376 | 0 | const void* IOBuf::fetch1() const { |
1377 | 0 | if (!empty()) { |
1378 | 0 | const IOBuf::BlockRef& r0 = _front_ref(); |
1379 | 0 | return r0.block->data + r0.offset; |
1380 | 0 | } |
1381 | 0 | return NULL; |
1382 | 0 | } |
1383 | | |
1384 | 0 | std::ostream& operator<<(std::ostream& os, const IOBuf& buf) { |
1385 | 0 | const size_t n = buf.backing_block_num(); |
1386 | 0 | for (size_t i = 0; i < n; ++i) { |
1387 | 0 | StringPiece blk = buf.backing_block(i); |
1388 | 0 | os.write(blk.data(), blk.size()); |
1389 | 0 | } |
1390 | 0 | return os; |
1391 | 0 | } |
1392 | | |
1393 | 0 | bool IOBuf::equals(const butil::StringPiece& s) const { |
1394 | 0 | if (size() != s.size()) { |
1395 | 0 | return false; |
1396 | 0 | } |
1397 | 0 | const size_t nref = _ref_num(); |
1398 | 0 | size_t soff = 0; |
1399 | 0 | for (size_t i = 0; i < nref; ++i) { |
1400 | 0 | const BlockRef& r = _ref_at(i); |
1401 | 0 | if (memcmp(r.block->data + r.offset, s.data() + soff, r.length) != 0) { |
1402 | 0 | return false; |
1403 | 0 | } |
1404 | 0 | soff += r.length; |
1405 | 0 | } |
1406 | 0 | return true; |
1407 | 0 | } |
1408 | | |
1409 | 0 | StringPiece IOBuf::backing_block(size_t i) const { |
1410 | 0 | if (i < _ref_num()) { |
1411 | 0 | const BlockRef& r = _ref_at(i); |
1412 | 0 | return StringPiece(r.block->data + r.offset, r.length); |
1413 | 0 | } |
1414 | 0 | return StringPiece(); |
1415 | 0 | } |
1416 | | |
// Byte-wise comparison of two IOBufs whose block layouts may differ:
// walk both ref lists in lock-step, comparing the overlap of the
// current refs each round and advancing whichever side is exhausted.
bool IOBuf::equals(const butil::IOBuf& other) const {
    const size_t sz1 = size();
    if (sz1 != other.size()) {
        return false;
    }
    if (!sz1) {
        // Both empty.
        return true;
    }
    // Current window into each buffer.
    const BlockRef& r1 = _ref_at(0);
    const char* d1 = r1.block->data + r1.offset;
    size_t len1 = r1.length;
    const BlockRef& r2 = other._ref_at(0);
    const char* d2 = r2.block->data + r2.offset;
    size_t len2 = r2.length;
    const size_t nref1 = _ref_num();
    const size_t nref2 = other._ref_num();
    size_t i = 1;  // next ref index of *this
    size_t j = 1;  // next ref index of other
    do {
        const size_t cmplen = std::min(len1, len2);
        if (memcmp(d1, d2, cmplen) != 0) {
            return false;
        }
        len1 -= cmplen;
        if (!len1) {
            if (i >= nref1) {
                // *this exhausted; sizes are equal, so other is too.
                return true;
            }
            const BlockRef& r = _ref_at(i++);
            d1 = r.block->data + r.offset;
            len1 = r.length;
        } else {
            d1 += cmplen;
        }
        len2 -= cmplen;
        if (!len2) {
            if (j >= nref2) {
                return true;
            }
            const BlockRef& r = other._ref_at(j++);
            d2 = r.block->data + r.offset;
            len2 = r.length;
        } else {
            d2 += cmplen;
        }
    } while (true);
    return true;
}
1465 | | |
1466 | | ////////////////////////////// IOPortal ////////////////// |
1467 | 0 | IOPortal::~IOPortal() { return_cached_blocks(); } |
1468 | | |
// Copy-assign only the IOBuf part from `rhs'. This portal's own cached
// block chain is left untouched (presumably to be reused by later
// reads) and rhs's cache is not copied -- NOTE(review): confirm this
// asymmetry is intentional.
IOPortal& IOPortal::operator=(const IOPortal& rhs) {
    IOBuf::operator=(rhs);
    return *this;
}
1473 | | |
// Drop the content and return cached blocks to the TLS cache.
void IOPortal::clear() {
    IOBuf::clear();
    return_cached_blocks();
}
1478 | | |
1479 | | const int MAX_APPEND_IOVEC = 64; |
1480 | | |
// Read up to `max_count' bytes from `fd' (preadv at `offset' when
// offset >= 0, readv otherwise) into the portal's cached block chain,
// appending whatever was read to this buffer.
// Returns the read result; on <= 0 the freshly acquired blocks are
// returned to the TLS cache if the buffer is still empty.
ssize_t IOPortal::pappend_from_file_descriptor(
    int fd, off_t offset, size_t max_count) {
    iovec vec[MAX_APPEND_IOVEC];
    int nvec = 0;
    size_t space = 0;          // total writable bytes gathered in vec
    Block* prev_p = NULL;
    Block* p = _block;         // head of the portal's cached block chain
    // Prepare at most MAX_APPEND_IOVEC blocks or space of blocks >= max_count
    do {
        if (p == NULL) {
            // Extend the chain with a fresh TLS block.
            p = iobuf::acquire_tls_block();
            if (BAIDU_UNLIKELY(!p)) {
                errno = ENOMEM;
                return -1;
            }
            if (prev_p != NULL) {
                prev_p->u.portal_next = p;
            } else {
                _block = p;
            }
        }
        vec[nvec].iov_base = p->data + p->size;
        vec[nvec].iov_len = std::min(p->left_space(), max_count - space);
        space += vec[nvec].iov_len;
        ++nvec;
        if (space >= max_count || nvec >= MAX_APPEND_IOVEC) {
            break;
        }
        prev_p = p;
        p = p->u.portal_next;
    } while (1);

    ssize_t nr = 0;
    if (offset < 0) {
        nr = readv(fd, vec, nvec);
    } else {
        static iobuf::iov_function preadv_func = iobuf::get_preadv_func();
        nr = preadv_func(fd, vec, nvec, offset);
    }
    if (nr <= 0) {  // -1 or 0
        if (empty()) {
            // Nothing buffered at all: give the blocks back to the TLS
            // cache instead of keeping them allocated.
            return_cached_blocks();
        }
        return nr;
    }

    // Convert the read bytes into refs, consuming the chain front-first.
    size_t total_len = nr;
    do {
        const size_t len = std::min(total_len, _block->left_space());
        total_len -= len;
        const IOBuf::BlockRef r = { _block->size, (uint32_t)len, _block };
        _push_back_ref(r);
        _block->size += len;
        if (_block->full()) {
            Block* const saved_next = _block->u.portal_next;
            _block->dec_ref();  // _block may be deleted
            _block = saved_next;
        }
    } while (total_len);
    return nr;
}
1542 | | |
// Same scheme as pappend_from_file_descriptor, but pulls bytes from an
// IReader with one vectored ReadV call into the cached block chain.
// Returns the read result; on <= 0 fresh blocks are returned to the TLS
// cache if the buffer is still empty.
ssize_t IOPortal::append_from_reader(IReader* reader, size_t max_count) {
    iovec vec[MAX_APPEND_IOVEC];
    int nvec = 0;
    size_t space = 0;          // total writable bytes gathered in vec
    Block* prev_p = NULL;
    Block* p = _block;         // head of the portal's cached block chain
    // Prepare at most MAX_APPEND_IOVEC blocks or space of blocks >= max_count
    do {
        if (p == NULL) {
            // Extend the chain with a fresh TLS block.
            p = iobuf::acquire_tls_block();
            if (BAIDU_UNLIKELY(!p)) {
                errno = ENOMEM;
                return -1;
            }
            if (prev_p != NULL) {
                prev_p->u.portal_next = p;
            } else {
                _block = p;
            }
        }
        vec[nvec].iov_base = p->data + p->size;
        vec[nvec].iov_len = std::min(p->left_space(), max_count - space);
        space += vec[nvec].iov_len;
        ++nvec;
        if (space >= max_count || nvec >= MAX_APPEND_IOVEC) {
            break;
        }
        prev_p = p;
        p = p->u.portal_next;
    } while (1);

    const ssize_t nr = reader->ReadV(vec, nvec);
    if (nr <= 0) {  // -1 or 0
        if (empty()) {
            // Nothing buffered: give the blocks back to the TLS cache.
            return_cached_blocks();
        }
        return nr;
    }

    // Convert the read bytes into refs, consuming the chain front-first.
    size_t total_len = nr;
    do {
        const size_t len = std::min(total_len, _block->left_space());
        total_len -= len;
        const IOBuf::BlockRef r = { _block->size, (uint32_t)len, _block };
        _push_back_ref(r);
        _block->size += len;
        if (_block->full()) {
            Block* const saved_next = _block->u.portal_next;
            _block->dec_ref();  // _block may be deleted
            _block = saved_next;
        }
    } while (total_len);
    return nr;
}
1597 | | |
1598 | | |
// Read up to `max_count' bytes from `ssl' into the cached block chain,
// one SSL_read per block iteration.
// Returns bytes read (> 0), or SSL_read's non-positive result when
// nothing was read; *ssl_error carries the (possibly normalized)
// SSL_get_error value -- retriable conditions become
// SSL_ERROR_WANT_READ.
ssize_t IOPortal::append_from_SSL_channel(
    SSL* ssl, int* ssl_error, size_t max_count) {
    size_t nr = 0;  // total bytes read so far
    do {
        if (!_block) {
            _block = iobuf::acquire_tls_block();
            if (BAIDU_UNLIKELY(!_block)) {
                errno = ENOMEM;
                *ssl_error = SSL_ERROR_SYSCALL;
                return -1;
            }
        }

        const size_t read_len = std::min(_block->left_space(), max_count - nr);
        // Clear the error queue so SSL_get_error() reflects only this call.
        ERR_clear_error();
        const int rc = SSL_read(ssl, _block->data + _block->size, read_len);
        *ssl_error = SSL_get_error(ssl, rc);
        if (rc > 0) {
            const IOBuf::BlockRef r = { (uint32_t)_block->size, (uint32_t)rc, _block };
            _push_back_ref(r);
            _block->size += rc;
            if (_block->full()) {
                Block* const saved_next = _block->u.portal_next;
                _block->dec_ref();  // _block may be deleted
                _block = saved_next;
            }
            nr += rc;
        } else {
            if (rc < 0) {
                if (*ssl_error == SSL_ERROR_WANT_READ
                    || (*ssl_error == SSL_ERROR_SYSCALL
                        && BIO_fd_non_fatal_error(errno) == 1)) {
                    // Non fatal error, tell caller to read again
                    *ssl_error = SSL_ERROR_WANT_READ;
                } else {
                    // Other errors are fatal
                    return rc;
                }
            }
            // Report progress if any, otherwise propagate rc.
            return (nr > 0 ? nr : rc);
        }
    } while (nr < max_count);
    return nr;
}
1643 | | |
// Slow path of return_cached_blocks(): hand the whole chained block
// list back to the TLS block cache.
void IOPortal::return_cached_blocks_impl(Block* b) {
    iobuf::release_tls_block_chain(b);
}
1647 | | |
1648 | 0 | IOBuf::Area IOReserveAlignedBuf::reserve(size_t count) { |
1649 | 0 | IOBuf::Area result = INVALID_AREA; |
1650 | 0 | if (_reserved == true) { |
1651 | 0 | LOG(ERROR) << "Already call reserved"; |
1652 | 0 | return result; |
1653 | 0 | } |
1654 | 0 | _reserved = true; |
1655 | 0 | bool is_power_two = _alignment > 0 && (_alignment & (_alignment - 1)); |
1656 | 0 | if (is_power_two != 0) { |
1657 | 0 | LOG(ERROR) << "Invalid alignment, must power of two"; |
1658 | 0 | return INVALID_AREA; |
1659 | 0 | } |
1660 | 0 | count = (count + _alignment - 1) & ~(_alignment - 1); |
1661 | 0 | size_t total_nc = 0; |
1662 | 0 | while (total_nc < count) { |
1663 | 0 | auto block_size = |
1664 | 0 | std::max(_alignment, 4096UL) * 2 + sizeof(IOBuf::Block); |
1665 | 0 | if (iobuf::FLAGS_iobuf_aligned_buf_block_size != 0) { |
1666 | 0 | block_size = iobuf::FLAGS_iobuf_aligned_buf_block_size; |
1667 | 0 | } |
1668 | 0 | auto b = iobuf::create_block_aligned(block_size, _alignment); |
1669 | 0 | if (BAIDU_UNLIKELY(!b)) { |
1670 | 0 | LOG(ERROR) << "Create block failed"; |
1671 | 0 | return result; |
1672 | 0 | } |
1673 | 0 | const size_t nc = std::min(count - total_nc, b->left_space()); |
1674 | 0 | const IOBuf::BlockRef r = {(uint32_t)b->size, (uint32_t)nc, b}; |
1675 | 0 | _push_back_ref(r); |
1676 | | // aligned block is not from tls, release block ref |
1677 | 0 | b->dec_ref(); |
1678 | 0 | if (total_nc == 0) { |
1679 | | // Encode the area at first time. Notice that the pushed ref may |
1680 | | // be merged with existing ones. |
1681 | 0 | result = make_area(_ref_num() - 1, _back_ref().length - nc, count); |
1682 | 0 | } |
1683 | | // add total nc |
1684 | 0 | total_nc += nc; |
1685 | 0 | b->size += nc; |
1686 | 0 | }; |
1687 | 0 | return result; |
1688 | 0 | } |
1689 | | |
1690 | | //////////////// IOBufCutter //////////////// |
1691 | | |
// Construct a cutter over `buf'. The _data/_data_end window is lazily
// initialized on first use (see load_next_ref()).
IOBufCutter::IOBufCutter(butil::IOBuf* buf)
    : _data(NULL)
    , _data_end(NULL)
    , _block(NULL)
    , _buf(buf) {
}
1698 | | |
// Give the partially-consumed front ref back to *_buf: shrink it to
// the unread range, or pop it entirely when fully consumed.
IOBufCutter::~IOBufCutter() {
    if (_block) {
        if (_data != _data_end) {
            IOBuf::BlockRef& fr = _buf->_front_ref();
            CHECK_EQ(fr.block, _block);
            // Re-point the front ref at the remaining bytes.
            fr.offset = (uint32_t)((char*)_data - _block->data);
            fr.length = (uint32_t)((char*)_data_end - (char*)_data);
        } else {
            _buf->_pop_front_ref();
        }
    }
}
// Pop the consumed front ref (if one was loaded) and cache the next
// ref's byte range into _data/_data_end/_block.
// Returns false (and clears the window) when the buffer is exhausted.
bool IOBufCutter::load_next_ref() {
    if (_block) {
        _buf->_pop_front_ref();
    }
    if (!_buf->_ref_num()) {
        _data = NULL;
        _data_end = NULL;
        _block = NULL;
        return false;
    } else {
        const IOBuf::BlockRef& r = _buf->_front_ref();
        _data = r.block->data + r.offset;
        _data_end = (char*)_data + r.length;
        _block = r.block;
        return true;
    }
}
1728 | | |
// Slow path of copy_to(): the current window does not hold all `n'
// bytes, so copy the window and then continue from subsequent refs.
// Returns the number of bytes copied (may be < n near the buffer end).
size_t IOBufCutter::slower_copy_to(void* dst, size_t n) {
    size_t size = (char*)_data_end - (char*)_data;
    if (size == 0) {
        // Window empty: advance to the next ref first.
        if (!load_next_ref()) {
            return 0;
        }
        size = (char*)_data_end - (char*)_data;
        if (n <= size) {
            memcpy(dst, _data, n);
            return n;
        }
    }
    void* const saved_dst = dst;
    memcpy(dst, _data, size);
    dst = (char*)dst + size;
    n -= size;
    // In both branches above the window maps the buffer's front ref
    // (index 0), so the remaining bytes start at ref index 1.
    const size_t nref = _buf->_ref_num();
    for (size_t i = 1; i < nref; ++i) {
        const IOBuf::BlockRef& r = _buf->_ref_at(i);
        const size_t nc = std::min(n, (size_t)r.length);
        memcpy(dst, r.block->data + r.offset, nc);
        dst = (char*)dst + nc;
        n -= nc;
        if (n == 0) {
            break;
        }
    }
    return (char*)dst - (char*)saved_dst;
}
1758 | | |
// Cut `n' bytes into `out' (zero copy, by pushing refs).
// Returns the number of bytes actually cut (limited by buffer length).
size_t IOBufCutter::cutn(butil::IOBuf* out, size_t n) {
    if (n == 0) {
        return 0;
    }
    const size_t size = (char*)_data_end - (char*)_data;
    if (n <= size) {
        // Fully satisfied from the cached window: share a sub-ref.
        const IOBuf::BlockRef r = { (uint32_t)((char*)_data - _block->data),
                                    (uint32_t)n,
                                    _block };
        out->_push_back_ref(r);
        _data = (char*)_data + n;
        return n;
    } else if (size != 0) {
        // Take the rest of the window, drop the consumed front ref, and
        // let IOBuf::cutn handle the remainder.
        const IOBuf::BlockRef r = { (uint32_t)((char*)_data - _block->data),
                                    (uint32_t)size,
                                    _block };
        out->_push_back_ref(r);
        _buf->_pop_front_ref();
        _data = NULL;
        _data_end = NULL;
        _block = NULL;
        return _buf->cutn(out, n - size) + size;
    } else {
        // Empty window: synchronize with the buffer and delegate.
        if (_block) {
            _data = NULL;
            _data_end = NULL;
            _block = NULL;
            _buf->_pop_front_ref();
        }
        return _buf->cutn(out, n);
    }
}
1791 | | |
1792 | 0 | size_t IOBufCutter::cutn(void* out, size_t n) { |
1793 | 0 | if (n == 0) { |
1794 | 0 | return 0; |
1795 | 0 | } |
1796 | 0 | const size_t size = (char*)_data_end - (char*)_data; |
1797 | 0 | if (n <= size) { |
1798 | 0 | memcpy(out, _data, n); |
1799 | 0 | _data = (char*)_data + n; |
1800 | 0 | return n; |
1801 | 0 | } else if (size != 0) { |
1802 | 0 | memcpy(out, _data, size); |
1803 | 0 | _buf->_pop_front_ref(); |
1804 | 0 | _data = NULL; |
1805 | 0 | _data_end = NULL; |
1806 | 0 | _block = NULL; |
1807 | 0 | return _buf->cutn((char*)out + size, n - size) + size; |
1808 | 0 | } else { |
1809 | 0 | if (_block) { |
1810 | 0 | _data = NULL; |
1811 | 0 | _data_end = NULL; |
1812 | 0 | _block = NULL; |
1813 | 0 | _buf->_pop_front_ref(); |
1814 | 0 | } |
1815 | 0 | return _buf->cutn(out, n); |
1816 | 0 | } |
1817 | 0 | } |
1818 | | |
1819 | | IOBufAsZeroCopyInputStream::IOBufAsZeroCopyInputStream(const IOBuf& buf) |
1820 | 0 | : _ref_index(0) |
1821 | 0 | , _add_offset(0) |
1822 | 0 | , _byte_count(0) |
1823 | 0 | , _buf(&buf) { |
1824 | 0 | } |
1825 | | |
1826 | 0 | bool IOBufAsZeroCopyInputStream::Next(const void** data, int* size) { |
1827 | 0 | const IOBuf::BlockRef* cur_ref = _buf->_pref_at(_ref_index); |
1828 | 0 | if (cur_ref == NULL) { |
1829 | 0 | return false; |
1830 | 0 | } |
1831 | 0 | *data = cur_ref->block->data + cur_ref->offset + _add_offset; |
1832 | | // Impl. of Backup/Skip guarantees that _add_offset < cur_ref->length. |
1833 | 0 | *size = cur_ref->length - _add_offset; |
1834 | 0 | _byte_count += cur_ref->length - _add_offset; |
1835 | 0 | _add_offset = 0; |
1836 | 0 | ++_ref_index; |
1837 | 0 | return true; |
1838 | 0 | } |
1839 | | |
1840 | 0 | void IOBufAsZeroCopyInputStream::BackUp(int count) { |
1841 | 0 | if (_ref_index > 0) { |
1842 | 0 | const IOBuf::BlockRef* cur_ref = _buf->_pref_at(--_ref_index); |
1843 | 0 | CHECK(_add_offset == 0 && cur_ref->length >= (uint32_t)count) |
1844 | 0 | << "BackUp() is not after a Next()"; |
1845 | 0 | _add_offset = cur_ref->length - count; |
1846 | 0 | _byte_count -= count; |
1847 | 0 | } else { |
1848 | 0 | LOG(FATAL) << "BackUp an empty ZeroCopyInputStream"; |
1849 | 0 | } |
1850 | 0 | } |
1851 | | |
1852 | | // Skips a number of bytes. Returns false if the end of the stream is |
1853 | | // reached or some input error occurred. In the end-of-stream case, the |
1854 | | // stream is advanced to the end of the stream (so ByteCount() will return |
1855 | | // the total size of the stream). |
1856 | 0 | bool IOBufAsZeroCopyInputStream::Skip(int count) { |
1857 | 0 | const IOBuf::BlockRef* cur_ref = _buf->_pref_at(_ref_index); |
1858 | 0 | while (cur_ref) { |
1859 | 0 | const int left_bytes = cur_ref->length - _add_offset; |
1860 | 0 | if (count < left_bytes) { |
1861 | 0 | _add_offset += count; |
1862 | 0 | _byte_count += count; |
1863 | 0 | return true; |
1864 | 0 | } |
1865 | 0 | count -= left_bytes; |
1866 | 0 | _add_offset = 0; |
1867 | 0 | _byte_count += left_bytes; |
1868 | 0 | cur_ref = _buf->_pref_at(++_ref_index); |
1869 | 0 | } |
1870 | 0 | return false; |
1871 | 0 | } |
1872 | | |
// Total number of bytes consumed so far (Next() minus BackUp(), plus Skip()).
int64_t IOBufAsZeroCopyInputStream::ByteCount() const {
    return _byte_count;
}
1876 | | |
// Wraps `buf` (not owned).  _block_size == 0 selects the default block
// source (thread-local blocks, see Next()/_release_block()).
IOBufAsZeroCopyOutputStream::IOBufAsZeroCopyOutputStream(IOBuf* buf)
    : _buf(buf), _block_size(0), _cur_block(NULL), _byte_count(0) {
}
1880 | | |
// Like the one-argument constructor, but every block is allocated with the
// given size via iobuf::create_block().  Throws std::invalid_argument when
// `block_size` is not even large enough to reach the Block's data payload.
IOBufAsZeroCopyOutputStream::IOBufAsZeroCopyOutputStream(
    IOBuf *buf, uint32_t block_size)
    : _buf(buf)
    , _block_size(block_size)
    , _cur_block(NULL)
    , _byte_count(0) {

    if (_block_size <= offsetof(IOBuf::Block, data)) {
        throw std::invalid_argument("block_size is too small");
    }
}
1892 | | |
// Returns the partially-filled current block (if any) to its owner:
// the TLS cache when _block_size == 0, otherwise drops our reference.
IOBufAsZeroCopyOutputStream::~IOBufAsZeroCopyOutputStream() {
    _release_block();
}
1896 | | |
// Hands out all remaining writable space of the current block (allocating a
// fresh block when the current one is absent or full) and appends a ref
// covering that space to _buf.  The block's fill level is bumped to its
// capacity up-front; bytes the caller does not write are expected to be
// returned via BackUp().
bool IOBufAsZeroCopyOutputStream::Next(void** data, int* size) {
    if (_cur_block == NULL || _cur_block->full()) {
        _release_block();
        if (_block_size > 0) {
            // Custom-sized stream: allocate a dedicated block.
            _cur_block = iobuf::create_block(_block_size);
        } else {
            // Default: reuse the thread-local block.
            _cur_block = iobuf::acquire_tls_block();
        }
        if (_cur_block == NULL) {
            return false;  // allocation failed.
        }
    }
    // The new ref starts at the current fill level and spans all free space.
    const IOBuf::BlockRef r = { _cur_block->size,
                                (uint32_t)_cur_block->left_space(),
                                _cur_block };
    *data = _cur_block->data + r.offset;
    *size = r.length;
    // Pre-claim the whole block; BackUp() shrinks it if the caller writes
    // fewer than r.length bytes.
    _cur_block->size = _cur_block->cap;
    _buf->_push_back_ref(r);
    _byte_count += r.length;
    return true;
}
1919 | | |
// Gives back the last `count` bytes obtained from Next().  Besides the
// ordinary ZeroCopyOutputStream contract (count <= size of the last Next()),
// this implementation also supports an extended BackUp spanning multiple
// trailing refs, popping them off _buf one by one.
void IOBufAsZeroCopyOutputStream::BackUp(int count) {
    while (!_buf->empty()) {
        IOBuf::BlockRef& r = _buf->_back_ref();
        if (_cur_block) {
            // An ordinary BackUp that should be supported by all
            // ZeroCopyOutputStream implementations:
            // _cur_block must match end of the IOBuf.
            if (r.block != _cur_block) {
                LOG(FATAL) << "r.block=" << r.block
                           << " does not match _cur_block=" << _cur_block;
                return;
            }
            if (r.offset + r.length != _cur_block->size) {
                LOG(FATAL) << "r.offset(" << r.offset << ") + r.length("
                           << r.length << ") != _cur_block->size("
                           << _cur_block->size << ")";
                return;
            }
        } else {
            // An extended BackUp which is undefined in regular
            // ZeroCopyOutputStream. The `count' given by user is larger than
            // size of last _cur_block (already released in last iteration).
            if (r.block->ref_count() == 1) {
                // A special case: the block is only referenced by last
                // BlockRef of _buf. Safe to allocate more on the block.
                if (r.offset + r.length != r.block->size) {
                    LOG(FATAL) << "r.offset(" << r.offset << ") + r.length("
                               << r.length << ") != r.block->size("
                               << r.block->size << ")";
                    return;
                }
            } else if (r.offset + r.length != r.block->size) {
                // Last BlockRef does not match end of the block (which is
                // used by other IOBuf already). Unsafe to re-reference
                // the block and allocate more, just pop the bytes.
                _byte_count -= _buf->pop_back(count);
                return;
            } // else Last BlockRef matches end of the block. Even if the
              // block is shared by other IOBuf, it's safe to allocate bytes
              // after block->size.
            // Re-adopt the block as the current one so the code below can
            // shrink it uniformly.
            _cur_block = r.block;
            _cur_block->inc_ref();
        }
        if (BAIDU_LIKELY(r.length > (uint32_t)count)) {
            // The remaining count fits inside the last ref: shrink it,
            // fix up byte accounting, and finish.
            r.length -= count;
            if (!_buf->_small()) {
                _buf->_bv.nbytes -= count;
            }
            _cur_block->size -= count;
            _byte_count -= count;
            // Release block for TLS before quiting BackUp() for other
            // code to reuse the block even if this wrapper object is
            // not destructed. Example:
            //    IOBufAsZeroCopyOutputStream wrapper(...);
            //    ParseFromZeroCopyStream(&wrapper, ...);  // Calls BackUp
            //    IOBuf buf;
            //    buf.append("foobar");  // can reuse the TLS block.
            if (_block_size == 0) {
                iobuf::release_tls_block(_cur_block);
                _cur_block = NULL;
            }
            return;
        }
        // The whole last ref is given back; drop it and keep looping with
        // the remaining count.
        _cur_block->size -= r.length;
        _byte_count -= r.length;
        count -= r.length;
        _buf->_pop_back_ref();
        _release_block();
        if (count == 0) {
            return;
        }
    }
    LOG_IF(FATAL, count != 0) << "BackUp an empty IOBuf";
}
1993 | | |
// Total number of bytes handed out via Next(), net of BackUp().
int64_t IOBufAsZeroCopyOutputStream::ByteCount() const {
    return _byte_count;
}
1997 | | |
1998 | 0 | void IOBufAsZeroCopyOutputStream::_release_block() { |
1999 | 0 | if (_block_size > 0) { |
2000 | 0 | if (_cur_block) { |
2001 | 0 | _cur_block->dec_ref(); |
2002 | 0 | } |
2003 | 0 | } else { |
2004 | 0 | iobuf::release_tls_block(_cur_block); |
2005 | 0 | } |
2006 | 0 | _cur_block = NULL; |
2007 | 0 | } |
2008 | | |
// Wraps `buf` (not owned) as a snappy Sink.  _cur_buf/_cur_len track the
// last buffer handed out by GetAppendBuffer() so that Append() can commit
// those bytes in place instead of copying.
IOBufAsSnappySink::IOBufAsSnappySink(butil::IOBuf& buf)
    : _cur_buf(NULL), _cur_len(0), _buf(&buf), _buf_stream(&buf) {
}
2012 | | |
2013 | 0 | void IOBufAsSnappySink::Append(const char* bytes, size_t n) { |
2014 | 0 | if (_cur_len > 0) { |
2015 | 0 | CHECK(bytes == _cur_buf && static_cast<int>(n) <= _cur_len) |
2016 | 0 | << "bytes must be _cur_buf"; |
2017 | 0 | _buf_stream.BackUp(_cur_len - n); |
2018 | 0 | _cur_len = 0; |
2019 | 0 | } else { |
2020 | 0 | _buf->append(bytes, n); |
2021 | 0 | } |
2022 | 0 | } |
2023 | | |
// Returns a writable buffer of at least `length` bytes.  For small requests
// it tries to hand out space directly inside the IOBuf (recorded in
// _cur_buf/_cur_len so the following Append() can commit it zero-copy);
// otherwise, or on failure, it falls back to the caller-provided `scratch`.
char* IOBufAsSnappySink::GetAppendBuffer(size_t length, char* scratch) {
    // TODO: butil::IOBuf supports dynamic sized blocks.
    if (length <= 8000/*just a hint*/) {
        if (_buf_stream.Next(reinterpret_cast<void**>(&_cur_buf), &_cur_len)) {
            if (_cur_len >= static_cast<int>(length)) {
                return _cur_buf;
            } else {
                // The block has too little room; undo the Next().
                _buf_stream.BackUp(_cur_len);
            }
        } else {
            LOG(FATAL) << "Fail to alloc buffer";
        }
    } // else no need to try.
    // Fall back to scratch; clear the record so Append() copies.
    _cur_buf = NULL;
    _cur_len = 0;
    return scratch;
}
2041 | | |
// Bytes not yet consumed: total buffer length minus what the stream read.
size_t IOBufAsSnappySource::Available() const {
    return _buf->length() - _stream.ByteCount();
}
2045 | | |
// Skips `n` bytes by advancing the underlying zero-copy input stream.
void IOBufAsSnappySource::Skip(size_t n) {
    _stream.Skip(n);
}
2049 | | |
2050 | 0 | const char* IOBufAsSnappySource::Peek(size_t* len) { |
2051 | 0 | const char* buffer = NULL; |
2052 | 0 | int res = 0; |
2053 | 0 | if (_stream.Next((const void**)&buffer, &res)) { |
2054 | 0 | *len = res; |
2055 | | // Source::Peek requires no reposition. |
2056 | 0 | _stream.BackUp(*len); |
2057 | 0 | return buffer; |
2058 | 0 | } else { |
2059 | 0 | *len = 0; |
2060 | 0 | return NULL; |
2061 | 0 | } |
2062 | 0 | } |
2063 | | |
// The appender writes into its own _buf through _zc_stream; _data/_data_end
// delimit the currently acquired writable span (initially empty).
IOBufAppender::IOBufAppender()
    : _data(NULL)
    , _data_end(NULL)
    , _zc_stream(&_buf) {
}
2069 | | |
// Appends up to `n` bytes, starting at the iterator's current position, to
// `buf` by reference (sharing blocks instead of copying bytes) and advances
// the iterator past them.  Returns the number of bytes appended
// (min of `n` and the bytes left).
size_t IOBufBytesIterator::append_and_forward(butil::IOBuf* buf, size_t n) {
    size_t nc = 0;
    while (nc < n && _bytes_left != 0) {
        // The ref being walked is at index _block_count - 1; presumably
        // _block_count was already advanced past it by try_next_block()
        // (TODO confirm against the iterator's definition).
        const IOBuf::BlockRef& r = _buf->_ref_at(_block_count - 1);
        const size_t block_size = _block_end - _block_begin;
        const size_t to_copy = std::min(block_size, n - nc);
        // Build a ref covering [_block_begin, _block_begin + to_copy).
        IOBuf::BlockRef r2 = { (uint32_t)(_block_begin - r.block->data),
                               (uint32_t)to_copy, r.block };
        buf->_push_back_ref(r2);
        _block_begin += to_copy;
        _bytes_left -= to_copy;
        nc += to_copy;
        if (_block_begin == _block_end) {
            // Current block exhausted; step to the next one.
            try_next_block();
        }
    }
    return nc;
}
2088 | | |
2089 | 0 | bool IOBufBytesIterator::forward_one_block(const void** data, size_t* size) { |
2090 | 0 | if (_bytes_left == 0) { |
2091 | 0 | return false; |
2092 | 0 | } |
2093 | 0 | const size_t block_size = _block_end - _block_begin; |
2094 | 0 | *data = _block_begin; |
2095 | 0 | *size = block_size; |
2096 | 0 | _bytes_left -= block_size; |
2097 | 0 | try_next_block(); |
2098 | 0 | return true; |
2099 | 0 | } |
2100 | | |
2101 | | } // namespace butil |
2102 | | |
// Global-scope (non-namespaced) wrapper that forwards to butil's memcpy
// implementation.  Note: this function is defined after the butil namespace
// is closed; the previous trailing "// namespace butil" comment was wrong.
void* fast_memcpy(void *__restrict dest, const void *__restrict src, size_t n) {
    return butil::iobuf::cp(dest, src, n);
}