Coverage Report

Created: 2023-11-12 09:30

/proc/self/cwd/source/common/buffer/buffer_impl.h

 Line|   Count|Source
    1|        |#pragma once
    2|        |
    3|        |#include <algorithm>
    4|        |#include <cstdint>
    5|        |#include <deque>
    6|        |#include <memory>
    7|        |#include <string>
    8|        |
    9|        |#include "envoy/buffer/buffer.h"
   10|        |#include "envoy/http/stream_reset_handler.h"
   11|        |
   12|        |#include "source/common/common/assert.h"
   13|        |#include "source/common/common/non_copyable.h"
   14|        |#include "source/common/common/utility.h"
   15|        |#include "source/common/event/libevent.h"
   16|        |
   17|        |namespace Envoy {
   18|        |namespace Buffer {
   19|        |
   20|        |/**
   21|        | * A Slice manages a contiguous block of bytes.
   22|        | * The block is arranged like this:
   23|        | *                   |<- dataSize() ->|<- reservableSize() ->|
   24|        | * +-----------------+----------------+----------------------+
   25|        | * | Drained         | Data           | Reservable           |
   26|        | * | Unused space    | Usable content | New content can be   |
   27|        | * | that formerly   |                | added here with      |
   28|        | * | was in the Data |                | reserve()/commit()   |
   29|        | * | section         |                | or append()          |
   30|        | * +-----------------+----------------+----------------------+
   31|        | * ^                 ^                ^                      ^
   32|        | * |                 |                |                      |
   33|        | * base_             data()           base_ + reservable_    base_ + capacity_
   34|        | */
   35|        |class Slice {
   36|        |public:
   37|        |  using Reservation = RawSlice;
   38|        |  using StoragePtr = std::unique_ptr<uint8_t[]>;
   39|        |
   40|        |  struct SizedStorage {
   41|        |    StoragePtr mem_{};
   42|        |    size_t len_{};
   43|        |  };
   44|        |
   45|        |  /**
   46|        |   * Create an empty Slice with 0 capacity.
   47|        |   */
   48|   42.0M|  Slice() = default;
   49|        |
   50|        |  /**
   51|        |   * Create an empty mutable Slice that owns its storage, which it charges to the provided account,
   52|        |   * if any.
   53|        |   * @param min_capacity number of bytes of space the slice should have. Actual capacity is rounded
   54|        |   * up to the next multiple of 4KiB.
   55|        |   * @param account the account to charge.
   56|        |   */
   57|        |  Slice(uint64_t min_capacity, const BufferMemoryAccountSharedPtr& account)
   58|        |      : capacity_(sliceSize(min_capacity)), storage_(new uint8_t[capacity_]),
   59|   3.68M|        base_(storage_.get()) {
   60|   3.68M|    if (account) {
   61|       0|      account->charge(capacity_);
   62|       0|      account_ = account;
   63|       0|    }
   64|   3.68M|  }
   65|        |
   66|        |  /**
   67|        |   * Create a mutable Slice that takes ownership of the provided storage, which it charges to the
   68|        |   * provided account, if any.
   69|        |   * @param storage backend storage for the slice.
   70|        |   * @param used_size the size already used in storage.
   71|        |   * @param account the account to charge.
   72|        |   */
   73|        |  Slice(SizedStorage storage, uint64_t used_size, const BufferMemoryAccountSharedPtr& account)
   74|        |      : capacity_(storage.len_), storage_(std::move(storage.mem_)), base_(storage_.get()),
   75|   56.4k|        reservable_(used_size) {
   76|   56.4k|    ASSERT(sliceSize(capacity_) == capacity_);
   77|   56.4k|    ASSERT(reservable_ <= capacity_);
   78|        |
   79|   56.4k|    if (account) {
   80|       0|      account->charge(capacity_);
   81|       0|      account_ = account;
   82|       0|    }
   83|   56.4k|  }
   84|        |
   85|        |  /**
   86|        |   * Create an immutable Slice that refers to an external buffer fragment.
   87|        |   * @param fragment provides externally owned immutable data.
   88|        |   */
   89|        |  Slice(BufferFragment& fragment)
   90|        |      : capacity_(fragment.size()), storage_(nullptr),
   91|        |        base_(static_cast<uint8_t*>(const_cast<void*>(fragment.data()))),
   92|   59.1k|        reservable_(fragment.size()) {
   93|   59.1k|    releasor_ = [&fragment]() { fragment.done(); };
   94|   59.1k|  }
   95|        |
   96|       0|  Slice(Slice&& rhs) noexcept {
   97|       0|    capacity_ = rhs.capacity_;
   98|       0|    storage_ = std::move(rhs.storage_);
   99|       0|    base_ = rhs.base_;
  100|       0|    data_ = rhs.data_;
  101|       0|    reservable_ = rhs.reservable_;
  102|       0|    drain_trackers_ = std::move(rhs.drain_trackers_);
  103|       0|    account_ = std::move(rhs.account_);
  104|       0|    releasor_.swap(rhs.releasor_);
  105|        |
  106|       0|    rhs.capacity_ = 0;
  107|       0|    rhs.base_ = nullptr;
  108|       0|    rhs.data_ = 0;
  109|       0|    rhs.reservable_ = 0;
  110|       0|  }
  111|        |
  112|   11.1M|  Slice& operator=(Slice&& rhs) noexcept {
  113|   11.1M|    if (this != &rhs) {
  114|   11.1M|      callAndClearDrainTrackersAndCharges();
  115|        |
  116|   11.1M|      capacity_ = rhs.capacity_;
  117|   11.1M|      storage_ = std::move(rhs.storage_);
  118|   11.1M|      base_ = rhs.base_;
  119|   11.1M|      data_ = rhs.data_;
  120|   11.1M|      reservable_ = rhs.reservable_;
  121|   11.1M|      drain_trackers_ = std::move(rhs.drain_trackers_);
  122|   11.1M|      account_ = std::move(rhs.account_);
  123|   11.1M|      if (releasor_) {
  124|   6.71k|        releasor_();
  125|   6.71k|      }
  126|   11.1M|      releasor_ = rhs.releasor_;
  127|   11.1M|      rhs.releasor_ = nullptr;
  128|        |
  129|   11.1M|      rhs.capacity_ = 0;
  130|   11.1M|      rhs.base_ = nullptr;
  131|   11.1M|      rhs.data_ = 0;
  132|   11.1M|      rhs.reservable_ = 0;
  133|   11.1M|    }
  134|        |
  135|   11.1M|    return *this;
  136|   11.1M|  }
  137|        |
  138|   45.8M|  ~Slice() {
  139|   45.8M|    callAndClearDrainTrackersAndCharges();
  140|   45.8M|    if (releasor_) {
  141|   52.4k|      releasor_();
  142|   52.4k|    }
  143|   45.8M|  }
  144|        |
  145|        |  /**
  146|        |   * @return true if the data in the slice is mutable.
  147|        |   */
  148|       0|  bool isMutable() const { return storage_ != nullptr; }
  149|        |
  150|        |  /**
  151|        |   * @return true if content in this Slice can be coalesced into another Slice.
  152|        |   */
  153|   2.40M|  bool canCoalesce() const { return storage_ != nullptr; }
  154|        |
  155|        |  /**
  156|        |   * @return a pointer to the start of the usable content.
  157|        |   */
  158|   3.34M|  const uint8_t* data() const { return base_ + data_; }
  159|        |
  160|        |  /**
  161|        |   * @return a pointer to the start of the usable content.
  162|        |   */
  163|    972k|  uint8_t* data() { return base_ + data_; }
  164|        |
  165|        |  /**
  166|        |   * @return the size in bytes of the usable content.
  167|        |   */
  168|    372M|  uint64_t dataSize() const { return reservable_ - data_; }
  169|        |
  170|        |  /**
  171|        |   * Remove the first `size` bytes of usable content. Runs in O(1) time.
  172|        |   * @param size number of bytes to remove. If greater than dataSize(), the result is undefined.
  173|        |   */
  174|    757k|  void drain(uint64_t size) {
  175|    757k|    ASSERT(data_ + size <= reservable_);
  176|    757k|    data_ += size;
  177|    757k|    if (data_ == reservable_) {
  178|        |      // All the data in the slice has been drained. Reset the offsets so all
  179|        |      // the space can be reused.
  180|       0|      data_ = 0;
  181|       0|      reservable_ = 0;
  182|       0|    }
  183|    757k|  }
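
Note (not part of the generated report): a minimal sketch of drain()'s offset bookkeeping, assuming a freshly constructed 4096-byte slice and the Slice API shown above.

    Buffer::Slice slice(4096, nullptr); // min_capacity rounded up to one 4 KiB page
    slice.append("abcdef", 6);
    slice.drain(2);                     // data() now points at "cdef"
    ASSERT(slice.dataSize() == 4);
    slice.drain(4);                     // fully drained: offsets reset, space is reusable
    ASSERT(slice.dataSize() == 0 && slice.reservableSize() == 4096);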
  184|        |
  185|        |  /**
  186|        |   * @return the number of bytes available to be reserved.
  187|        |   * @note Read-only implementations of Slice should return zero from this method.
  188|        |   */
  189|   5.85M|  uint64_t reservableSize() const {
  190|   5.85M|    ASSERT(capacity_ >= reservable_);
  191|   5.85M|    return capacity_ - reservable_;
  192|   5.85M|  }
  193|        |
  194|        |  /**
  195|        |   * Reserve `size` bytes that the caller can populate with content. The caller SHOULD then
  196|        |   * call commit() to add the newly populated content from the Reserved section to the Data
  197|        |   * section.
  198|        |   * @note If there is already an outstanding reservation (i.e., a reservation obtained
  199|        |   *       from reserve() that has not been released by calling commit()), this method will
  200|        |   *       return a new reservation that replaces it.
  201|        |   * @param size the number of bytes to reserve. The Slice implementation MAY reserve
  202|        |   *        fewer bytes than requested (for example, if it doesn't have enough room in the
  203|        |   *        Reservable section to fulfill the whole request).
  204|        |   * @return a tuple containing the address of the start of the resulting reservation and the
  205|        |   *         reservation size in bytes. If the address is null, the reservation failed.
  206|        |   * @note Read-only implementations of Slice should return {nullptr, 0} from this method.
  207|        |   */
  208|    150k|  Reservation reserve(uint64_t size) {
  209|    150k|    if (size == 0) {
  210|       0|      return {nullptr, 0};
  211|       0|    }
  212|        |    // Verify the semantics that drain() enforces: if the slice is empty, either because
  213|        |    // no data has been added or because all the added data has been drained, the data
  214|        |    // section is at the very start of the slice.
  215|    150k|    ASSERT(!(dataSize() == 0 && data_ > 0));
  216|    150k|    uint64_t available_size = capacity_ - reservable_;
  217|    150k|    if (available_size == 0) {
  218|      42|      return {nullptr, 0};
  219|      42|    }
  220|    150k|    uint64_t reservation_size = std::min(size, available_size);
  221|    150k|    void* reservation = &(base_[reservable_]);
  222|    150k|    return {reservation, static_cast<size_t>(reservation_size)};
  223|    150k|  }
  224|        |
  225|        |  /**
  226|        |   * Commit a Reservation that was previously obtained from a call to reserve().
  227|        |   * The Reservation's size is added to the Data section.
  228|        |   * @param reservation a reservation obtained from a previous call to reserve().
  229|        |   *        If the reservation is not from this Slice, commit() will return false.
  230|        |   *        If the caller is committing fewer bytes than provided by reserve(), it
  231|        |   *        should change the len_ field of the reservation before calling commit().
  232|        |   *        For example, if a caller reserve()s 4KB to do a nonblocking socket read,
  233|        |   *        and the read only returns two bytes, the caller should set
  234|        |   *        reservation.len_ = 2 and then call `commit(reservation)`.
  235|        |   * @return whether the Reservation was successfully committed to the Slice.
  236|        |   * @note the template parameter `SafeCommit` can be used to disable the memory range check.
  237|        |   */
  238|    125k|  template <bool SafeCommit = true> bool commit(const Reservation& reservation) {
  239|    125k|    if constexpr (SafeCommit) {
  240|    123k|      if (static_cast<const uint8_t*>(reservation.mem_) != base_ + reservable_ ||
  241|   2.08k|          reservable_ + reservation.len_ > capacity_ || reservable_ >= capacity_) {
  242|        |        // The reservation is not from this Slice.
  243|       0|        return false;
  244|       0|      }
  245|    123k|    } else {
  246|    123k|      ASSERT(static_cast<const uint8_t*>(reservation.mem_) == base_ + reservable_ &&
  247|    123k|             reservable_ + reservation.len_ <= capacity_);
  248|    123k|    }
  249|    125k|    reservable_ += reservation.len_;
  250|    125k|    return true;
  251|    125k|  }

bool Envoy::Buffer::Slice::commit<true>(Envoy::Buffer::RawSlice const&):
 Line|   Count|Source
  238|   2.08k|  template <bool SafeCommit = true> bool commit(const Reservation& reservation) {
  239|   2.08k|    if constexpr (SafeCommit) {
  240|   2.08k|      if (static_cast<const uint8_t*>(reservation.mem_) != base_ + reservable_ ||
  241|   2.08k|          reservable_ + reservation.len_ > capacity_ || reservable_ >= capacity_) {
  242|        |        // The reservation is not from this Slice.
  243|       0|        return false;
  244|       0|      }
  245|   2.08k|    } else {
  246|   2.08k|      ASSERT(static_cast<const uint8_t*>(reservation.mem_) == base_ + reservable_ &&
  247|   2.08k|             reservable_ + reservation.len_ <= capacity_);
  248|   2.08k|    }
  249|   2.08k|    reservable_ += reservation.len_;
  250|   2.08k|    return true;
  251|   2.08k|  }

bool Envoy::Buffer::Slice::commit<false>(Envoy::Buffer::RawSlice const&):
 Line|   Count|Source
  238|    123k|  template <bool SafeCommit = true> bool commit(const Reservation& reservation) {
  239|    123k|    if constexpr (SafeCommit) {
  240|    123k|      if (static_cast<const uint8_t*>(reservation.mem_) != base_ + reservable_ ||
  241|    123k|          reservable_ + reservation.len_ > capacity_ || reservable_ >= capacity_) {
  242|        |        // The reservation is not from this Slice.
  243|    123k|        return false;
  244|    123k|      }
  245|    123k|    } else {
  246|    123k|      ASSERT(static_cast<const uint8_t*>(reservation.mem_) == base_ + reservable_ &&
  247|    123k|             reservable_ + reservation.len_ <= capacity_);
  248|    123k|    }
  249|    123k|    reservable_ += reservation.len_;
  250|    123k|    return true;
  251|    123k|  }
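
Note (not part of the generated report): the doc comment above describes the reserve()/commit() pattern; a minimal sketch of it, assuming a freshly constructed 4096-byte slice and a hypothetical read that returns two bytes.

    Buffer::Slice slice(4096, nullptr);
    Buffer::Slice::Reservation reservation = slice.reserve(1024);
    ASSERT(reservation.mem_ != nullptr);
    // ... populate reservation.mem_ with up to reservation.len_ bytes,
    // e.g. via a nonblocking socket read that returns 2 bytes ...
    reservation.len_ = 2;              // commit only the bytes actually written
    ASSERT(slice.commit(reservation)); // moves those bytes into the Data section
    ASSERT(slice.dataSize() == 2);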
  252|        |
  253|        |  /**
  254|        |   * Copy as much of the supplied data as possible to the end of the slice.
  255|        |   * @param data start of the data to copy.
  256|        |   * @param size number of bytes to copy.
  257|        |   * @return number of bytes copied (may be smaller than size, may even be zero).
  258|        |   */
  259|   5.33M|  uint64_t append(const void* data, uint64_t size) {
  260|   5.33M|    uint64_t copy_size = std::min(size, reservableSize());
  261|   5.33M|    if (copy_size == 0) {
  262|   36.4k|      return 0;
  263|   36.4k|    }
  264|   5.29M|    uint8_t* dest = base_ + reservable_;
  265|   5.29M|    reservable_ += copy_size;
  266|        |    // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
  267|   5.29M|    memcpy(dest, data, copy_size); // NOLINT(safe-memcpy)
  268|   5.29M|    return copy_size;
  269|   5.33M|  }
  270|        |
  271|        |  /**
  272|        |   * Copy as much of the supplied data as possible to the front of the slice.
  273|        |   * If only part of the data will fit in the slice, the bytes from the _end_ are
  274|        |   * copied.
  275|        |   * @param data start of the data to copy.
  276|        |   * @param size number of bytes to copy.
  277|        |   * @return number of bytes copied (may be smaller than size, may even be zero).
  278|        |   */
  279|   6.24k|  uint64_t prepend(const void* data, uint64_t size) {
  280|   6.24k|    const uint8_t* src = static_cast<const uint8_t*>(data);
  281|   6.24k|    uint64_t copy_size;
  282|   6.24k|    if (dataSize() == 0) {
  283|        |      // There is nothing in the slice, so put the data at the very end in case the caller
  284|        |      // later tries to prepend anything else in front of it.
  285|   3.12k|      copy_size = std::min(size, reservableSize());
  286|   3.12k|      reservable_ = capacity_;
  287|   3.12k|      data_ = capacity_ - copy_size;
  288|   3.12k|    } else {
  289|   3.12k|      if (data_ == 0) {
  290|        |        // There is content in the slice, and no space in front of it to write anything.
  291|   2.66k|        return 0;
  292|   2.66k|      }
  293|        |      // Write into the space in front of the slice's current content.
  294|     458|      copy_size = std::min(size, data_);
  295|     458|      data_ -= copy_size;
  296|     458|    }
  297|   3.58k|    memcpy(base_ + data_, src + size - copy_size, copy_size); // NOLINT(safe-memcpy)
  298|   3.58k|    return copy_size;
  299|   6.24k|  }
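
Note (not part of the generated report): a minimal sketch of the two prepend() paths, assuming a 4096-byte slice and absl::string_view for the comparison.

    Buffer::Slice slice(4096, nullptr);
    slice.prepend("world", 5);            // empty slice: bytes land at the very end
    uint64_t copied = slice.prepend("hello ", 6);
    ASSERT(copied == 6);                  // fits in the space in front of "world"
    ASSERT(absl::string_view(reinterpret_cast<const char*>(slice.data()),
                             slice.dataSize()) == "hello world");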
  300|        |
  301|        |  /**
  302|        |   * Describe the in-memory representation of the slice. For use
  303|        |   * in tests that want to make assertions about the specific arrangement of
  304|        |   * bytes in a slice.
  305|        |   */
  306|        |  struct SliceRepresentation {
  307|        |    uint64_t data;
  308|        |    uint64_t reservable;
  309|        |    uint64_t capacity;
  310|        |  };
  311|       0|  SliceRepresentation describeSliceForTest() const {
  312|       0|    return SliceRepresentation{dataSize(), reservableSize(), capacity_};
  313|       0|  }
  314|        |
  315|        |  /**
  316|        |   * Move all drain trackers and charges from the current slice to the destination slice.
  317|        |   */
  318|    480k|  void transferDrainTrackersTo(Slice& destination) {
  319|    480k|    destination.drain_trackers_.splice(destination.drain_trackers_.end(), drain_trackers_);
  320|    480k|    ASSERT(drain_trackers_.empty());
  321|        |    // The releasor needn't be transferred; in fact, a slice with a releasor can't
  322|        |    // coalesce, so there is never an occasion to call this method on one.
  323|    480k|    ASSERT(releasor_ == nullptr);
  324|    480k|  }
  325|        |
  326|        |  /**
  327|        |   * Add a drain tracker to the slice.
  328|        |   */
  329|   1.05M|  void addDrainTracker(std::function<void()> drain_tracker) {
  330|   1.05M|    drain_trackers_.emplace_back(std::move(drain_tracker));
  331|   1.05M|  }
  332|        |
  333|        |  /**
  334|        |   * Call all drain trackers associated with the slice, then clear
  335|        |   * the drain tracker list.
  336|        |   */
  337|   56.9M|  void callAndClearDrainTrackersAndCharges() {
  338|   56.9M|    for (const auto& drain_tracker : drain_trackers_) {
  339|   1.05M|      drain_tracker();
  340|   1.05M|    }
  341|   56.9M|    drain_trackers_.clear();
  342|        |
  343|   56.9M|    if (account_) {
  344|       0|      account_->credit(capacity_);
  345|       0|      account_.reset();
  346|       0|    }
  347|   56.9M|  }
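
Note (not part of the generated report): a minimal sketch of drain tracker semantics; the destructor runs callAndClearDrainTrackersAndCharges(), so trackers fire when the slice is destroyed (or its contents fully drained at the buffer level).

    bool drained = false;
    {
      Buffer::Slice slice(4096, nullptr);
      slice.addDrainTracker([&drained] { drained = true; });
      ASSERT(!drained);
    } // ~Slice() invokes callAndClearDrainTrackersAndCharges()
    ASSERT(drained);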
  348|        |
  349|        |  /**
  350|        |   * Charges the provided account for the resources if these conditions hold:
  351|        |   * - we're not already charging for this slice
  352|        |   * - the given account is non-null
  353|        |   * - the slice owns backing memory
  354|        |   */
  355|   1.92M|  void maybeChargeAccount(const BufferMemoryAccountSharedPtr& account) {
  356|   1.92M|    if (account_ != nullptr || storage_ == nullptr || account == nullptr) {
  357|   1.92M|      return;
  358|   1.92M|    }
  359|       1|    account->charge(capacity_);
  360|       1|    account_ = account;
  361|       1|  }
  362|        |
  363|        |  static constexpr uint32_t default_slice_size_ = 16384;
  364|        |
  365|        |public:
  366|        |  /**
  367|        |   * Compute a slice size big enough to hold a specified amount of data.
  368|        |   * @param data_size the minimum amount of data the slice must be able to store, in bytes.
  369|        |   * @return a recommended slice size, in bytes.
  370|        |   */
  371|   4.17M|  static uint64_t sliceSize(uint64_t data_size) {
  372|   4.17M|    static constexpr uint64_t PageSize = 4096;
  373|   4.17M|    const uint64_t num_pages = (data_size + PageSize - 1) / PageSize;
  374|   4.17M|    return num_pages * PageSize;
  375|   4.17M|  }
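
Note (not part of the generated report): the rounding arithmetic above works out as follows.

    ASSERT(Buffer::Slice::sliceSize(0) == 0);
    ASSERT(Buffer::Slice::sliceSize(1) == 4096);    // any data needs at least one page
    ASSERT(Buffer::Slice::sliceSize(4096) == 4096); // exact multiples are unchanged
    ASSERT(Buffer::Slice::sliceSize(4097) == 8192); // otherwise round up to the next 4 KiB page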
  376|        |
  377|        |  /**
  378|        |   * Create new backend storage with a minimum capacity. This method computes a recommended
  379|        |   * capacity that is greater than or equal to the minimum capacity, and creates new backend
  380|        |   * storage of that recommended capacity.
  381|        |   * @param min_capacity the minimum capacity of the newly created backend storage.
  382|        |   * @return a backend storage for a slice.
  383|        |   */
  384|   24.8k|  static inline SizedStorage newStorage(uint64_t min_capacity) {
  385|   24.8k|    const uint64_t slice_size = sliceSize(min_capacity);
  386|   24.8k|    return {StoragePtr{new uint8_t[slice_size]}, static_cast<size_t>(slice_size)};
  387|   24.8k|  }
  388|        |
  389|        |protected:
  390|        |  /** Length of the byte array that base_ points to. This is also the offset in bytes from the start
  391|        |   * of the slice to the end of the Reservable section. */
  392|        |  uint64_t capacity_ = 0;
  393|        |
  394|        |  /** Backing storage for mutable slices which own their own storage. This storage should never be
  395|        |   * accessed directly; access base_ instead. */
  396|        |  StoragePtr storage_;
  397|        |
  398|        |  /** Start of the slice. Points to storage_ iff the slice owns its own storage. */
  399|        |  uint8_t* base_{nullptr};
  400|        |
  401|        |  /** Offset in bytes from the start of the slice to the start of the Data section. */
  402|        |  uint64_t data_ = 0;
  403|        |
  404|        |  /** Offset in bytes from the start of the slice to the start of the Reservable section, which is
  405|        |   * also the end of the Data section. */
  406|        |  uint64_t reservable_ = 0;
  407|        |
  408|        |  /** Hooks to execute when the slice is destroyed. */
  409|        |  std::list<std::function<void()>> drain_trackers_;
  410|        |
  411|        |  /** Account associated with this slice. This may be null. When
  412|        |   * coalescing with another slice, we do not transfer over their account. */
  413|        |  BufferMemoryAccountSharedPtr account_;
  414|        |
  415|        |  /** The releasor for the BufferFragment. */
  416|        |  std::function<void()> releasor_;
  417|        |};
  418|        |
  419|        |class OwnedImpl;
  420|        |
  421|        |class SliceDataImpl : public SliceData {
  422|        |public:
  423|       0|  explicit SliceDataImpl(Slice&& slice) : slice_(std::move(slice)) {}
  424|        |
  425|        |  // SliceData
  426|       0|  absl::Span<uint8_t> getMutableData() override {
  427|       0|    RELEASE_ASSERT(slice_.isMutable(), "Not allowed to call getMutableData if slice is immutable");
  428|       0|    return {slice_.data(), static_cast<absl::Span<uint8_t>::size_type>(slice_.dataSize())};
  429|       0|  }
  430|        |
  431|        |private:
  432|        |  friend OwnedImpl;
  433|        |  Slice slice_;
  434|        |};
  435|        |
  436|        |/**
  437|        | * Queue of Slice that supports efficient read and write access to both
  438|        | * the front and the back of the queue.
  439|        | * @note This class has similar properties to std::deque<T>. The reason for using
  440|        | *       a custom deque implementation is that benchmark testing during development
  441|        | *       revealed that std::deque was too slow to reach performance parity with the
  442|        | *       prior evbuffer-based buffer implementation.
  443|        | */
  444|        |class SliceDeque {
  445|        |public:
  446|   4.54M|  SliceDeque() : ring_(inline_ring_), capacity_(InlineRingCapacity) {}
  447|        |
  448|   17.4k|  SliceDeque(SliceDeque&& rhs) noexcept {
  449|        |    // This custom move constructor is needed so that ring_ will be updated properly.
  450|   17.4k|    std::move(rhs.inline_ring_, rhs.inline_ring_ + InlineRingCapacity, inline_ring_);
  451|   17.4k|    external_ring_ = std::move(rhs.external_ring_);
  452|   17.4k|    ring_ = (external_ring_ != nullptr) ? external_ring_.get() : inline_ring_;
  453|   17.4k|    start_ = rhs.start_;
  454|   17.4k|    size_ = rhs.size_;
  455|   17.4k|    capacity_ = rhs.capacity_;
  456|   17.4k|  }
  457|        |
  458|       0|  SliceDeque& operator=(SliceDeque&& rhs) noexcept {
  459|       0|    // This custom move assignment operator is needed so that ring_ will be updated properly.
  460|       0|    std::move(rhs.inline_ring_, rhs.inline_ring_ + InlineRingCapacity, inline_ring_);
  461|       0|    external_ring_ = std::move(rhs.external_ring_);
  462|       0|    ring_ = (external_ring_ != nullptr) ? external_ring_.get() : inline_ring_;
  463|       0|    start_ = rhs.start_;
  464|       0|    size_ = rhs.size_;
  465|       0|    capacity_ = rhs.capacity_;
  466|       0|    return *this;
  467|       0|  }
  468|        |
  469|   5.71M|  void emplace_back(Slice&& slice) { // NOLINT(readability-identifier-naming)
  470|   5.71M|    growRing();
  471|   5.71M|    size_t index = internalIndex(size_);
  472|   5.71M|    ring_[index] = std::move(slice);
  473|   5.71M|    size_++;
  474|   5.71M|  }
  475|        |
  476|   10.9k|  void emplace_front(Slice&& slice) { // NOLINT(readability-identifier-naming)
  477|   10.9k|    growRing();
  478|   10.9k|    start_ = (start_ == 0) ? capacity_ - 1 : start_ - 1;
  479|   10.9k|    ring_[start_] = std::move(slice);
  480|   10.9k|    size_++;
  481|   10.9k|  }
  482|        |
  483|    379M|  bool empty() const { return size() == 0; }
  484|    386M|  size_t size() const { return size_; }
  485|        |
  486|   14.7M|  Slice& front() { return ring_[start_]; }
  487|       0|  const Slice& front() const { return ring_[start_]; }
  488|   7.57M|  Slice& back() { return ring_[internalIndex(size_ - 1)]; }
  489|       0|  const Slice& back() const { return ring_[internalIndex(size_ - 1)]; }
  490|        |
  491|   98.5k|  Slice& operator[](size_t i) {
  492|   98.5k|    ASSERT(!empty());
  493|   98.5k|    return ring_[internalIndex(i)];
  494|   98.5k|  }
  495|    360M|  const Slice& operator[](size_t i) const {
  496|    360M|    ASSERT(!empty());
  497|    360M|    return ring_[internalIndex(i)];
  498|    360M|  }
  499|        |
  500|   4.98M|  void pop_front() { // NOLINT(readability-identifier-naming)
  501|   4.98M|    if (size() == 0) {
  502|       0|      return;
  503|       0|    }
  504|   4.98M|    front() = Slice();
  505|   4.98M|    size_--;
  506|   4.98M|    start_++;
  507|   4.98M|    if (start_ == capacity_) {
  508|    120k|      start_ = 0;
  509|    120k|    }
  510|   4.98M|  }
  511|        |
  512|   5.98k|  void pop_back() { // NOLINT(readability-identifier-naming)
  513|   5.98k|    if (size() == 0) {
  514|       0|      return;
  515|       0|    }
  516|   5.98k|    back() = Slice();
  517|   5.98k|    size_--;
  518|   5.98k|  }
  519|        |
  520|        |  /**
  521|        |   * Forward const iterator for SliceDeque.
  522|        |   * @note this implementation currently supports the minimum functionality needed to support
  523|        |   *       the `for (const auto& slice : slice_deque)` idiom.
  524|        |   */
  525|        |  class ConstIterator {
  526|        |  public:
  527|    360M|    const Slice& operator*() { return deque_[index_]; }
  528|        |
  529|    360M|    ConstIterator operator++() {
  530|    360M|      index_++;
  531|    360M|      return *this;
  532|    360M|    }
  533|        |
  534|    376M|    bool operator!=(const ConstIterator& rhs) const {
  535|    376M|      return &deque_ != &rhs.deque_ || index_ != rhs.index_;
  536|    376M|    }
  537|        |
  538|        |    friend class SliceDeque;
  539|        |
  540|        |  private:
  541|   32.3M|    ConstIterator(const SliceDeque& deque, size_t index) : deque_(deque), index_(index) {}
  542|        |    const SliceDeque& deque_;
  543|        |    size_t index_;
  544|        |  };
  545|        |
  546|   16.1M|  ConstIterator begin() const noexcept { return {*this, 0}; }
  547|        |
  548|   16.1M|  ConstIterator end() const noexcept { return {*this, size_}; }
  549|        |
  550|        |private:
  551|        |  constexpr static size_t InlineRingCapacity = 8;
  552|        |
  553|    374M|  size_t internalIndex(size_t index) const {
  554|    374M|    size_t internal_index = start_ + index;
  555|    374M|    if (internal_index >= capacity_) {
  556|    494k|      internal_index -= capacity_;
  557|    494k|      ASSERT(internal_index < capacity_);
  558|    494k|    }
  559|    374M|    return internal_index;
  560|    374M|  }
  561|        |
  562|   5.72M|  void growRing() {
  563|   5.72M|    if (size_ < capacity_) {
  564|   5.71M|      return;
  565|   5.71M|    }
  566|   4.71k|    const size_t new_capacity = capacity_ * 2;
  567|   4.71k|    auto new_ring = std::make_unique<Slice[]>(new_capacity);
  568|   4.71k|    size_t src = start_;
  569|   4.71k|    size_t dst = 0;
  570|    257k|    for (size_t i = 0; i < size_; i++) {
  571|    252k|      new_ring[dst++] = std::move(ring_[src++]);
  572|    252k|      if (src == capacity_) {
  573|   4.71k|        src = 0;
  574|   4.71k|      }
  575|    252k|    }
  576|   4.71k|    external_ring_.swap(new_ring);
  577|   4.71k|    ring_ = external_ring_.get();
  578|   4.71k|    start_ = 0;
  579|   4.71k|    capacity_ = new_capacity;
  580|   4.71k|  }
  581|        |
  582|        |  Slice inline_ring_[InlineRingCapacity];
  583|        |  std::unique_ptr<Slice[]> external_ring_;
  584|        |  Slice* ring_; // points to start of either inline or external ring.
  585|        |  size_t start_{0};
  586|        |  size_t size_{0};
  587|        |  size_t capacity_;
  588|        |};
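
Note (not part of the generated report): a minimal sketch of the ring-buffer behavior, assuming the InlineRingCapacity of 8 shown above; pushing a ninth slice makes growRing() switch from the inline ring to a doubled external ring.

    Buffer::SliceDeque deque;
    for (int i = 0; i < 10; i++) {
      deque.emplace_back(Buffer::Slice(4096, nullptr)); // 9th push triggers growRing()
    }
    ASSERT(deque.size() == 10); // ring grew from 8 to 16 entries transparently
    deque.pop_front();          // O(1): clears the front slot and advances start_
    for (const auto& slice : deque) { // ConstIterator supports range-for
      ASSERT(slice.dataSize() == 0);
    }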
  589|        |
  590|        |/**
  591|        | * An implementation of BufferFragment where a releasor callback is called when the data is
  592|        | * no longer needed.
  593|        | */
  594|        |class BufferFragmentImpl : NonCopyable, public BufferFragment {
  595|        |public:
  596|        |  /**
  597|        |   * Creates a new wrapper around the externally owned <data> of size <size>.
  598|        |   * The caller must ensure <data> is valid until releasor() is called, or for the lifetime of the
  599|        |   * fragment. releasor() is called with <data>, <size> and <this> to allow the caller to delete
  600|        |   * the fragment object.
  601|        |   * @param data external data to reference.
  602|        |   * @param size size of data.
  603|        |   * @param releasor a callback function to be called when data is no longer needed.
  604|        |   */
  605|        |  BufferFragmentImpl(
  606|        |      const void* data, size_t size,
  607|        |      const std::function<void(const void*, size_t, const BufferFragmentImpl*)>& releasor)
  608|   56.6k|      : data_(data), size_(size), releasor_(releasor) {}
  609|        |
  610|        |  // Buffer::BufferFragment
  611|    105k|  const void* data() const override { return data_; }
  612|    211k|  size_t size() const override { return size_; }
  613|   56.6k|  void done() override {
  614|   56.6k|    if (releasor_) {
  615|   56.6k|      releasor_(data_, size_, this);
  616|   56.6k|    }
  617|   56.6k|  }
  618|        |
  619|        |private:
  620|        |  const void* const data_;
  621|        |  const size_t size_;
  622|        |  const std::function<void(const void*, size_t, const BufferFragmentImpl*)> releasor_;
  623|        |};
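
Note (not part of the generated report): a minimal zero-copy sketch using the constructor above; the fragment object and `payload` must stay valid until the releasor fires.

    static const char payload[] = "externally owned bytes";
    Buffer::BufferFragmentImpl fragment(
        payload, sizeof(payload) - 1,
        [](const void*, size_t, const Buffer::BufferFragmentImpl*) {
          // The buffer no longer references `payload`; it may now be freed or reused.
        });
    Buffer::OwnedImpl buffer;
    buffer.addBufferFragment(fragment); // zero-copy: the buffer references `payload` directly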
  624|        |
  625|        |class LibEventInstance : public Instance {
  626|        |public:
  627|        |  // Called after accessing the memory in buffer() directly to allow any post-processing.
  628|        |  virtual void postProcess() PURE;
  629|        |};
  630|        |
  631|        |/**
  632|        | * Wrapper for uint64_t that asserts upon integer overflow and underflow.
  633|        | */
  634|        |class OverflowDetectingUInt64 {
  635|        |public:
  636|   26.7M|  operator uint64_t() const { return value_; }
  637|        |
  638|   7.50M|  OverflowDetectingUInt64& operator+=(uint64_t size) {
  639|   7.50M|    uint64_t new_value = value_ + size;
  640|   7.50M|    RELEASE_ASSERT(new_value >= value_, "64-bit unsigned integer overflowed");
  641|   7.50M|    value_ = new_value;
  642|   7.50M|    return *this;
  643|   7.50M|  }
  644|        |
  645|   5.74M|  OverflowDetectingUInt64& operator-=(uint64_t size) {
  646|   5.74M|    RELEASE_ASSERT(value_ >= size, "unsigned integer underflowed");
  647|   5.74M|    value_ -= size;
  648|   5.74M|    return *this;
  649|   5.74M|  }
  650|        |
  651|        |private:
  652|        |  uint64_t value_{0};
  653|        |};
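
Note (not part of the generated report): a minimal sketch of the wrapper's behavior as defined above.

    Buffer::OverflowDetectingUInt64 length;
    length += 100;
    length -= 40;
    uint64_t value = length; // implicit conversion back to uint64_t
    ASSERT(value == 60);
    // length -= 100;        // would RELEASE_ASSERT: "unsigned integer underflowed"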
  654|        |
  655|        |/**
  656|        | * Wraps an allocated and owned buffer.
  657|        | *
  658|        | * Note that due to the internals of move(), OwnedImpl is not
  659|        | * compatible with non-OwnedImpl buffers.
  660|        | */
  661|        |class OwnedImpl : public LibEventInstance {
  662|        |public:
  663|        |  OwnedImpl();
  664|        |  OwnedImpl(absl::string_view data);
  665|        |  OwnedImpl(const Instance& data);
  666|        |  OwnedImpl(const void* data, uint64_t size);
  667|        |  OwnedImpl(BufferMemoryAccountSharedPtr account);
  668|        |
  669|        |  // Buffer::Instance
  670|        |  void addDrainTracker(std::function<void()> drain_tracker) override;
  671|        |  void bindAccount(BufferMemoryAccountSharedPtr account) override;
  672|        |  void add(const void* data, uint64_t size) override;
  673|        |  void addBufferFragment(BufferFragment& fragment) override;
  674|        |  void add(absl::string_view data) override;
  675|        |  void add(const Instance& data) override;
  676|        |  void prepend(absl::string_view data) override;
  677|        |  void prepend(Instance& data) override;
  678|        |  void copyOut(size_t start, uint64_t size, void* data) const override;
  679|        |  uint64_t copyOutToSlices(uint64_t size, Buffer::RawSlice* slices,
  680|        |                           uint64_t num_slice) const override;
  681|        |  void drain(uint64_t size) override;
  682|        |  RawSliceVector getRawSlices(absl::optional<uint64_t> max_slices = absl::nullopt) const override;
  683|        |  RawSlice frontSlice() const override;
  684|        |  SliceDataPtr extractMutableFrontSlice() override;
  685|        |  uint64_t length() const override;
  686|        |  void* linearize(uint32_t size) override;
  687|        |  void move(Instance& rhs) override;
  688|        |  void move(Instance& rhs, uint64_t length) override;
  689|        |  void move(Instance& rhs, uint64_t length, bool reset_drain_trackers_and_accounting) override;
  690|        |  Reservation reserveForRead() override;
  691|        |  ReservationSingleSlice reserveSingleSlice(uint64_t length, bool separate_slice = false) override;
  692|        |  ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override;
  693|        |  bool startsWith(absl::string_view data) const override;
  694|        |  std::string toString() const override;
  695|        |
  696|        |  // LibEventInstance
  697|        |  void postProcess() override;
  698|        |
  699|        |  /**
  700|        |   * Create a new slice at the end of the buffer, and copy the supplied content into it.
  701|        |   * @param data start of the content to copy.
  702|        |   * @param size number of bytes to copy.
  703|        |   */
  704|        |  virtual void appendSliceForTest(const void* data, uint64_t size);
  705|        |
  706|        |  /**
  707|        |   * Create a new slice at the end of the buffer, and copy the supplied string into it.
  708|        |   * @param data the string to append to the buffer.
  709|        |   */
  710|        |  virtual void appendSliceForTest(absl::string_view data);
  711|        |
  712|        |  /**
  713|        |   * @return the BufferMemoryAccount bound to this buffer, if any.
  714|        |   */
  715|        |  BufferMemoryAccountSharedPtr getAccountForTest();
  716|        |
  717|        |  // Does not implement watermarking.
  718|        |  // TODO(antoniovicente) Implement watermarks by merging the OwnedImpl and WatermarkBuffer
  719|        |  // implementations. Also, make high-watermark config a constructor argument.
  720|       0|  void setWatermarks(uint32_t, uint32_t) override { ASSERT(false, "watermarks not implemented."); }
  721|       0|  uint32_t highWatermark() const override { return 0; }
  722|       0|  bool highWatermarkTriggered() const override { return false; }
  723|        |
  724|        |  /**
  725|        |   * Describe the in-memory representation of the slices in the buffer. For use
  726|        |   * in tests that want to make assertions about the specific arrangement of
  727|        |   * bytes in the buffer.
  728|        |   */
  729|        |  std::vector<Slice::SliceRepresentation> describeSlicesForTest() const;
  730|        |
  731|        |  /**
  732|        |   * Create a reservation for reading with a non-default length. Used in benchmark tests.
  733|        |   */
  734|       0|  Reservation reserveForReadWithLengthForTest(uint64_t length) {
  735|       0|    return reserveWithMaxLength(length);
  736|       0|  }
  737|        |
  738|        |  size_t addFragments(absl::Span<const absl::string_view> fragments) override;
  739|        |
  740|        |protected:
  741|        |  static constexpr uint64_t default_read_reservation_size_ =
  742|        |      Reservation::MAX_SLICES_ * Slice::default_slice_size_;
  743|        |
  744|        |  /**
  745|        |   * Create a reservation with a maximum length.
  746|        |   */
  747|        |  Reservation reserveWithMaxLength(uint64_t max_length);
  748|        |
  749|        |  void commit(uint64_t length, absl::Span<RawSlice> slices,
  750|        |              ReservationSlicesOwnerPtr slices_owner) override;
  751|        |
  752|        |private:
  753|        |  /**
  754|        |   * @param rhs another buffer
  755|        |   * @return whether the rhs buffer is also an instance of OwnedImpl (or a subclass) that
  756|        |   *         uses the same internal implementation as this buffer.
  757|        |   */
  758|        |  bool isSameBufferImpl(const Instance& rhs) const;
  759|        |
  760|        |  void addImpl(const void* data, uint64_t size);
  761|        |  void drainImpl(uint64_t size);
  762|        |
  763|        |  /**
  764|        |   * Moves the contents of `other_slice` by either taking ownership of it or coalescing it
  765|        |   * into an existing slice.
  766|        |   * NOTE: the caller is responsible for draining the buffer that contains `other_slice`.
  767|        |   */
  768|        |  void coalesceOrAddSlice(Slice&& other_slice);
  769|        |
  770|        |  /** Ring buffer of slices. */
  771|        |  SliceDeque slices_;
  772|        |
  773|        |  /** Sum of the dataSize of all slices. */
  774|        |  OverflowDetectingUInt64 length_;
  775|        |
  776|        |  BufferMemoryAccountSharedPtr account_;
  777|        |
  778|        |  struct OwnedImplReservationSlicesOwner : public ReservationSlicesOwner {
  779|        |    virtual absl::Span<Slice::SizedStorage> ownedStorages() PURE;
  780|        |  };
  781|        |
  782|        |  struct OwnedImplReservationSlicesOwnerMultiple : public OwnedImplReservationSlicesOwner {
  783|        |  public:
  784|        |    static constexpr uint32_t free_list_max_ = Buffer::Reservation::MAX_SLICES_;
  785|        |
  786|   56.1k|    OwnedImplReservationSlicesOwnerMultiple() : free_list_ref_(free_list_) {}
  787|   56.1k|    ~OwnedImplReservationSlicesOwnerMultiple() override {
  788|    495k|      for (auto r = owned_storages_.rbegin(); r != owned_storages_.rend(); r++) {
  789|    439k|        if (r->mem_ != nullptr) {
  790|    385k|          ASSERT(r->len_ == Slice::default_slice_size_);
  791|    385k|          if (free_list_ref_.size() < free_list_max_) {
  792|    385k|            free_list_ref_.push_back(std::move(r->mem_));
  793|    385k|          }
  794|    385k|        }
  795|    439k|      }
  796|   56.1k|    }
  797|        |
  798|    417k|    Slice::SizedStorage newStorage() {
  799|    417k|      ASSERT(Slice::sliceSize(Slice::default_slice_size_) == Slice::default_slice_size_);
  800|        |
  801|    417k|      Slice::SizedStorage storage{nullptr, Slice::default_slice_size_};
  802|    417k|      if (!free_list_ref_.empty()) {
  803|    339k|        storage.mem_ = std::move(free_list_ref_.back());
  804|    339k|        free_list_ref_.pop_back();
  805|    339k|      } else {
  806|   77.6k|        storage.mem_.reset(new uint8_t[Slice::default_slice_size_]);
  807|   77.6k|      }
  808|        |
  809|    417k|      return storage;
  810|    417k|    }
  811|        |
  812|   27.2k|    absl::Span<Slice::SizedStorage> ownedStorages() override {
  813|   27.2k|      return absl::MakeSpan(owned_storages_);
  814|   27.2k|    }
  815|        |
  816|        |    absl::InlinedVector<Slice::SizedStorage, Buffer::Reservation::MAX_SLICES_> owned_storages_;
  817|        |
  818|        |  private:
  819|        |    // Thread local resolving introduces additional overhead. Initialize this reference once when
  820|        |    // constructing the owner to reduce thread local lookups and improve performance.
  821|        |    absl::InlinedVector<Slice::StoragePtr, free_list_max_>& free_list_ref_;
  822|        |
  823|        |    // Simple thread local cache to reduce unnecessary memory allocation and release. This cache
  824|        |    // is currently only used for multiple-slice reservations because of the additional overhead
  825|        |    // that thread local resolving would introduce.
  826|        |    static thread_local absl::InlinedVector<Slice::StoragePtr, free_list_max_> free_list_;
  827|        |  };
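
Note (not part of the generated report): the struct above recycles default-size storages through a bounded thread_local free list. A standalone sketch of the same idea with hypothetical names (StorageCache, kMaxCached; not Envoy API), assuming <memory> and <vector>:

    struct StorageCache {
      static constexpr size_t kMaxCached = 8; // plays the role of free_list_max_
      static thread_local std::vector<std::unique_ptr<uint8_t[]>> free_list;

      static std::unique_ptr<uint8_t[]> acquire(size_t size) {
        if (!free_list.empty()) { // reuse a previously released block
          auto mem = std::move(free_list.back());
          free_list.pop_back();
          return mem;
        }
        return std::unique_ptr<uint8_t[]>(new uint8_t[size]); // cache miss: allocate
      }

      static void release(std::unique_ptr<uint8_t[]> mem) {
        if (free_list.size() < kMaxCached) { // keep the cache bounded; excess is freed
          free_list.push_back(std::move(mem));
        }
      }
    };
    thread_local std::vector<std::unique_ptr<uint8_t[]>> StorageCache::free_list;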
  828|        |
  829|        |  struct OwnedImplReservationSlicesOwnerSingle : public OwnedImplReservationSlicesOwner {
  830|   24.7k|    absl::Span<Slice::SizedStorage> ownedStorages() override {
  831|   24.7k|      return absl::MakeSpan(&owned_storage_, 1);
  832|   24.7k|    }
  833|        |
  834|        |    Slice::SizedStorage owned_storage_;
  835|        |  };
  836|        |};
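
Note (not part of the generated report): a minimal end-to-end sketch using only methods declared in OwnedImpl above.

    Buffer::OwnedImpl buffer;
    buffer.add("hello ");
    buffer.add("world");
    ASSERT(buffer.length() == 11);
    ASSERT(buffer.startsWith("hello"));
    buffer.drain(6);                      // drop "hello " from the front
    ASSERT(buffer.toString() == "world");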
  837|        |
  838|        |using BufferFragmentPtr = std::unique_ptr<BufferFragment>;
  839|        |
  840|        |/**
  841|        | * An implementation of BufferFragment where a releasor callback is called when the data is
  842|        | * no longer needed. Copies data into an internal buffer.
  843|        | */
  844|        |class OwnedBufferFragmentImpl final : public BufferFragment, public InlineStorage {
  845|        |public:
  846|        |  using Releasor = std::function<void(const OwnedBufferFragmentImpl*)>;
  847|        |
  848|        |  /**
  849|        |   * Copies the data into an internal buffer. The releasor is called when the data has been
  850|        |   * fully drained or the buffer that contains this fragment is destroyed.
  851|        |   * @param data external data to copy.
  852|        |   * @param releasor a callback function to be called when the data is no longer needed.
  853|        |   */
  854|        |
  855|   6.25k|  static BufferFragmentPtr create(absl::string_view data, const Releasor& releasor) {
  856|   6.25k|    return BufferFragmentPtr(new (sizeof(OwnedBufferFragmentImpl) + data.size())
  857|   6.25k|                                 OwnedBufferFragmentImpl(data, releasor));
  858|   6.25k|  }
  859|        |
  860|        |  // Buffer::BufferFragment
  861|   6.25k|  const void* data() const override { return data_; }
  862|   18.7k|  size_t size() const override { return size_; }
  863|   6.25k|  void done() override { releasor_(this); }
  864|        |
  865|        |private:
  866|        |  OwnedBufferFragmentImpl(absl::string_view data, const Releasor& releasor)
  867|   6.25k|      : releasor_(releasor), size_(data.size()) {
  868|   6.25k|    ASSERT(releasor != nullptr);
  869|   6.25k|    memcpy(data_, data.data(), data.size()); // NOLINT(safe-memcpy)
  870|   6.25k|  }
  871|        |
  872|        |  const Releasor releasor_;
  873|        |  const size_t size_;
  874|        |  uint8_t data_[];
  875|        |};
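
Note (not part of the generated report): a minimal sketch of create(); unlike BufferFragmentImpl, the input bytes are copied into the fragment's inline storage, so the caller's string need not outlive the fragment.

    auto fragment = Buffer::OwnedBufferFragmentImpl::create(
        "copied payload", [](const Buffer::OwnedBufferFragmentImpl*) {
          // Called once the data has been fully drained or the owning buffer is
          // destroyed; a typical releasor reclaims the fragment object here.
        });
    Buffer::OwnedImpl buffer;
    buffer.addBufferFragment(*fragment);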
  876|        |
  877|        |using OwnedBufferFragmentImplPtr = std::unique_ptr<OwnedBufferFragmentImpl>;
  878|        |
  879|        |} // namespace Buffer
  880|        |} // namespace Envoy