Coverage Report

Created: 2024-09-19 09:45

/proc/self/cwd/source/common/buffer/buffer_impl.cc
  Line|  Count|Source
     1|       |#include "source/common/buffer/buffer_impl.h"
     2|       |
     3|       |#include <cstdint>
     4|       |#include <memory>
     5|       |#include <string>
     6|       |
     7|       |#include "source/common/common/assert.h"
     8|       |
     9|       |#include "absl/container/fixed_array.h"
    10|       |#include "event2/buffer.h"
    11|       |
    12|       |namespace Envoy {
    13|       |namespace Buffer {
    14|       |namespace {
    15|       |// This size has been determined to be optimal from running the
    16|       |// //test/integration:http_benchmark benchmark tests.
    17|       |// TODO(yanavlasov): This may not be optimal for all hardware configurations or traffic patterns and
    18|       |// may need to be configurable in the future.
    19|       |constexpr uint64_t CopyThreshold = 512;
    20|       |} // namespace
    21|       |
    22|       |thread_local absl::InlinedVector<Slice::StoragePtr,
    23|       |                                 OwnedImpl::OwnedImplReservationSlicesOwnerMultiple::free_list_max_>
    24|       |    OwnedImpl::OwnedImplReservationSlicesOwnerMultiple::free_list_;
    25|       |
    26|  8.83M|void OwnedImpl::addImpl(const void* data, uint64_t size) {
    27|  8.83M|  const char* src = static_cast<const char*>(data);
    28|  8.83M|  bool new_slice_needed = slices_.empty();
    29|  16.9M|  while (size != 0) {
    30|  8.13M|    if (new_slice_needed) {
    31|  5.86M|      slices_.emplace_back(Slice(size, account_));
    32|  5.86M|    }
    33|  8.13M|    uint64_t copy_size = slices_.back().append(src, size);
    34|  8.13M|    src += copy_size;
    35|  8.13M|    size -= copy_size;
    36|  8.13M|    length_ += copy_size;
    37|  8.13M|    new_slice_needed = true;
    38|  8.13M|  }
    39|  8.83M|}
    40|       |
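The loop above (lines 26-39) fills spare capacity in the tail slice before allocating a new one, so a large add() may span several slices while small adds reuse existing space. A minimal standalone sketch of the same strategy; MiniBuffer and kSliceSize are illustrative stand-ins, not Envoy types:

```cpp
#include <algorithm>
#include <cstdint>
#include <deque>
#include <vector>

// Sketch of the addImpl() strategy: append into the tail slice's remaining
// capacity, allocating additional fixed-size slices until all bytes fit.
class MiniBuffer {
public:
  void add(const void* data, uint64_t size) {
    const char* src = static_cast<const char*>(data);
    bool new_slice_needed = slices_.empty();
    while (size != 0) {
      if (new_slice_needed) {
        slices_.emplace_back();
        slices_.back().reserve(kSliceSize);
      }
      auto& back = slices_.back();
      // Copy whatever fits into the tail slice; the loop handles the rest.
      const uint64_t copy_size = std::min<uint64_t>(size, kSliceSize - back.size());
      back.insert(back.end(), src, src + copy_size);
      src += copy_size;
      size -= copy_size;
      length_ += copy_size;
      new_slice_needed = true;
    }
  }
  uint64_t length() const { return length_; }

private:
  static constexpr uint64_t kSliceSize = 4096;
  std::deque<std::vector<char>> slices_;
  uint64_t length_ = 0;
};
```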
    41|   861k|void OwnedImpl::addDrainTracker(std::function<void()> drain_tracker) {
    42|   861k|  ASSERT(!slices_.empty());
    43|   861k|  slices_.back().addDrainTracker(std::move(drain_tracker));
    44|   861k|}
    45|       |
    46|  6.77k|void OwnedImpl::bindAccount(BufferMemoryAccountSharedPtr account) {
    47|  6.77k|  ASSERT(slices_.empty());
    48|  6.77k|  account_ = std::move(account);
    49|  6.77k|}
    50|       |
    51|      0|BufferMemoryAccountSharedPtr OwnedImpl::getAccountForTest() { return account_; }
    52|       |
    53|  7.52M|void OwnedImpl::add(const void* data, uint64_t size) { addImpl(data, size); }
    54|       |
    55|  7.28k|void OwnedImpl::addBufferFragment(BufferFragment& fragment) {
    56|  7.28k|  length_ += fragment.size();
    57|  7.28k|  slices_.emplace_back(fragment);
    58|  7.28k|}
    59|       |
    60|  3.97M|void OwnedImpl::add(absl::string_view data) { add(data.data(), data.size()); }
    61|       |
    62|  40.1k|void OwnedImpl::add(const Instance& data) {
    63|  40.1k|  ASSERT(&data != this);
    64|  40.1k|  for (const RawSlice& slice : data.getRawSlices()) {
    65|  14.8k|    add(slice.mem_, slice.len_);
    66|  14.8k|  }
    67|  40.1k|}
    68|       |
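A usage sketch for the add() overloads and addDrainTracker() above, assuming an Envoy build environment; the tracker registered here fires once its slice is drained:

```cpp
#include "source/common/buffer/buffer_impl.h"

void addExample() {
  Envoy::Buffer::OwnedImpl buffer;
  buffer.add("hello ");          // absl::string_view overload
  buffer.add("world", 5);        // raw pointer + size overload
  buffer.addDrainTracker([] {
    // Runs when the slice holding the bytes above is drained.
  });
  buffer.drain(buffer.length()); // invokes the tracker
}
```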
    69|  3.17k|void OwnedImpl::prepend(absl::string_view data) {
    70|  3.17k|  uint64_t size = data.size();
    71|  3.17k|  bool new_slice_needed = slices_.empty();
    72|  8.26k|  while (size != 0) {
    73|  5.09k|    if (new_slice_needed) {
    74|  2.53k|      slices_.emplace_front(Slice(size, account_));
    75|  2.53k|    }
    76|  5.09k|    uint64_t copy_size = slices_.front().prepend(data.data(), size);
    77|  5.09k|    size -= copy_size;
    78|  5.09k|    length_ += copy_size;
    79|  5.09k|    new_slice_needed = true;
    80|  5.09k|  }
    81|  3.17k|}
    82|       |
    83|  7.19k|void OwnedImpl::prepend(Instance& data) {
    84|  7.19k|  ASSERT(&data != this);
    85|  7.19k|  OwnedImpl& other = static_cast<OwnedImpl&>(data);
    86|  16.2k|  while (!other.slices_.empty()) {
    87|  9.07k|    uint64_t slice_size = other.slices_.back().dataSize();
    88|  9.07k|    length_ += slice_size;
    89|  9.07k|    slices_.emplace_front(std::move(other.slices_.back()));
    90|  9.07k|    slices_.front().maybeChargeAccount(account_);
    91|  9.07k|    other.slices_.pop_back();
    92|  9.07k|    other.length_ -= slice_size;
    93|  9.07k|  }
    94|  7.19k|  other.postProcess();
    95|  7.19k|}
    96|       |
    97|   255k|void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const {
    98|   255k|  uint64_t bytes_to_skip = start;
    99|   255k|  uint8_t* dest = static_cast<uint8_t*>(data);
   100|   305k|  for (const auto& slice : slices_) {
   101|   305k|    if (size == 0) {
   102|  42.9k|      break;
   103|  42.9k|    }
   104|   262k|    uint64_t data_size = slice.dataSize();
   105|   262k|    if (data_size <= bytes_to_skip) {
   106|       |      // The offset where the caller wants to start copying is after the end of this slice,
   107|       |      // so just skip over this slice completely.
   108|    995|      bytes_to_skip -= data_size;
   109|    995|      continue;
   110|    995|    }
   111|   261k|    uint64_t copy_size = std::min(size, data_size - bytes_to_skip);
   112|   261k|    memcpy(dest, slice.data() + bytes_to_skip, copy_size); // NOLINT(safe-memcpy)
   113|   261k|    size -= copy_size;
   114|   261k|    dest += copy_size;
   115|       |    // Now that we've started copying, there are no bytes left to skip over. If there
   116|       |    // is any more data to be copied, the next iteration can start copying from the very
   117|       |    // beginning of the next slice.
   118|   261k|    bytes_to_skip = 0;
   119|   261k|  }
   120|   255k|  ASSERT(size == 0);
   121|   255k|}
   122|       |
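The copyOut() loop is the canonical skip-then-copy walk over a slice chain. A standalone sketch using std::deque<std::vector<char>> for the slices (illustrative, not Envoy's Slice type):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <deque>
#include <vector>

// Skip whole slices that lie before `start`, then copy until `size` bytes
// have been written. Mirrors the structure of OwnedImpl::copyOut().
void copyOut(const std::deque<std::vector<char>>& slices, size_t start,
             uint64_t size, void* data) {
  uint64_t bytes_to_skip = start;
  auto* dest = static_cast<char*>(data);
  for (const auto& slice : slices) {
    if (size == 0) {
      break;
    }
    const uint64_t data_size = slice.size();
    if (data_size <= bytes_to_skip) {
      bytes_to_skip -= data_size; // this slice lies entirely before `start`
      continue;
    }
    const uint64_t copy_size = std::min(size, data_size - bytes_to_skip);
    std::memcpy(dest, slice.data() + bytes_to_skip, copy_size);
    size -= copy_size;
    dest += copy_size;
    bytes_to_skip = 0; // later slices are copied from their beginning
  }
  assert(size == 0 && "start + size exceeded the buffered data");
}
```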
   123|       |uint64_t OwnedImpl::copyOutToSlices(uint64_t size, Buffer::RawSlice* dest_slices,
   124|  1.52k|                                    uint64_t num_slice) const {
   125|  1.52k|  uint64_t total_length_to_read = std::min(size, this->length());
   126|  1.52k|  uint64_t num_bytes_read = 0;
   127|  1.52k|  uint64_t num_dest_slices_read = 0;
   128|  1.52k|  uint64_t num_src_slices_read = 0;
   129|  1.52k|  uint64_t dest_slice_offset = 0;
   130|  1.52k|  uint64_t src_slice_offset = 0;
   131|  3.14k|  while (num_dest_slices_read < num_slice && num_bytes_read < total_length_to_read) {
   132|  1.62k|    const Slice& src_slice = slices_[num_src_slices_read];
   133|  1.62k|    const Buffer::RawSlice& dest_slice = dest_slices[num_dest_slices_read];
   134|  1.62k|    uint64_t left_to_read = total_length_to_read - num_bytes_read;
   135|  1.62k|    uint64_t left_data_size_in_dst_slice = dest_slice.len_ - dest_slice_offset;
   136|  1.62k|    uint64_t left_data_size_in_src_slice = src_slice.dataSize() - src_slice_offset;
   137|       |    // The length to copy is the smallest of the remaining source-slice bytes, the
   138|       |    // remaining dest-slice bytes, and the bytes left to read.
   139|  1.62k|    uint64_t length_to_copy =
   140|  1.62k|        std::min(left_data_size_in_src_slice, std::min(left_data_size_in_dst_slice, left_to_read));
   141|  1.62k|    memcpy(static_cast<uint8_t*>(dest_slice.mem_) + dest_slice_offset, // NOLINT(safe-memcpy)
   142|  1.62k|           src_slice.data() + src_slice_offset, length_to_copy);
   143|  1.62k|    src_slice_offset = src_slice_offset + length_to_copy;
   144|  1.62k|    dest_slice_offset = dest_slice_offset + length_to_copy;
   145|  1.62k|    if (src_slice_offset == src_slice.dataSize()) {
   146|  1.22k|      num_src_slices_read++;
   147|  1.22k|      src_slice_offset = 0;
   148|  1.22k|    }
   149|  1.62k|    if (dest_slice_offset == dest_slice.len_) {
   150|    395|      num_dest_slices_read++;
   151|    395|      dest_slice_offset = 0;
   152|    395|    }
   153|  1.62k|    ASSERT(src_slice_offset <= src_slice.dataSize());
   154|  1.62k|    ASSERT(dest_slice_offset <= dest_slice.len_);
   155|  1.62k|    num_bytes_read += length_to_copy;
   156|  1.62k|  }
   157|  1.52k|  return num_bytes_read;
   158|  1.52k|}
   159|       |
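copyOutToSlices() is a scatter copy driven by two independent cursors. The same two-cursor structure in a standalone sketch (OutSlice is a hypothetical stand-in for Buffer::RawSlice):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

struct OutSlice {
  void* mem_;
  size_t len_;
};

// Copy from a chain of source slices into caller-provided destination slices,
// advancing whichever cursor is exhausted after each memcpy.
uint64_t copyOutToSlices(const std::vector<std::string>& src, OutSlice* dst,
                         uint64_t num_dst) {
  uint64_t read = 0, si = 0, di = 0, soff = 0, doff = 0;
  while (di < num_dst && si < src.size()) {
    const uint64_t n =
        std::min<uint64_t>(src[si].size() - soff, dst[di].len_ - doff);
    std::memcpy(static_cast<char*>(dst[di].mem_) + doff, src[si].data() + soff, n);
    soff += n;
    doff += n;
    read += n;
    if (soff == src[si].size()) { // source slice exhausted
      si++;
      soff = 0;
    }
    if (doff == dst[di].len_) { // destination slice full
      di++;
      doff = 0;
    }
  }
  return read;
}
```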
   160|  3.74M|void OwnedImpl::drain(uint64_t size) { drainImpl(size); }
   161|       |
   162|  3.74M|void OwnedImpl::drainImpl(uint64_t size) {
   163|  7.79M|  while (size != 0) {
   164|  4.04M|    if (slices_.empty()) {
   165|      0|      break;
   166|      0|    }
   167|  4.04M|    uint64_t slice_size = slices_.front().dataSize();
   168|  4.04M|    if (slice_size <= size) {
   169|  3.86M|      slices_.pop_front();
   170|  3.86M|      length_ -= slice_size;
   171|  3.86M|      size -= slice_size;
   172|  3.86M|    } else {
   173|   185k|      slices_.front().drain(size);
   174|   185k|      length_ -= size;
   175|   185k|      size = 0;
   176|   185k|    }
   177|  4.04M|  }
   178|       |  // Make sure to drain any zero byte fragments that might have been added as
   179|       |  // sentinels for flushed data.
   180|  3.74M|  while (!slices_.empty() && slices_.front().dataSize() == 0) {
   181|  3.19k|    slices_.pop_front();
   182|  3.19k|  }
   183|  3.74M|}
   184|       |
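drainImpl() pops whole slices while they fit, partially drains the front slice for the remainder, and finally discards zero-byte sentinel fragments. The same shape in miniature:

```cpp
#include <cstdint>
#include <deque>
#include <string>

void drain(std::deque<std::string>& slices, uint64_t& length, uint64_t size) {
  while (size != 0 && !slices.empty()) {
    const uint64_t slice_size = slices.front().size();
    if (slice_size <= size) {
      slices.pop_front(); // whole slice consumed
      length -= slice_size;
      size -= slice_size;
    } else {
      slices.front().erase(0, size); // partial drain of the front slice
      length -= size;
      size = 0;
    }
  }
  while (!slices.empty() && slices.front().empty()) {
    slices.pop_front(); // drop zero-byte sentinel fragments
  }
}
```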
   185|  3.67M|RawSliceVector OwnedImpl::getRawSlices(absl::optional<uint64_t> max_slices) const {
   186|  3.67M|  uint64_t max_out = slices_.size();
   187|  3.67M|  if (max_slices.has_value()) {
   188|  20.8k|    max_out = std::min(max_out, max_slices.value());
   189|  20.8k|  }
   190|       |
   191|  3.67M|  RawSliceVector raw_slices;
   192|  3.67M|  raw_slices.reserve(max_out);
   193|  4.05M|  for (const auto& slice : slices_) {
   194|  4.05M|    if (raw_slices.size() >= max_out) {
   195|     83|      break;
   196|     83|    }
   197|       |
   198|  4.05M|    if (slice.dataSize() == 0) {
   199|  42.0k|      continue;
   200|  42.0k|    }
   201|       |
   202|       |    // Temporary cast to fix 32-bit Envoy mobile builds, where sizeof(uint64_t) != sizeof(size_t).
   203|       |    // dataSize represents the size of a buffer so size_t should always be large enough to hold its
   204|       |    // size regardless of architecture. Buffer slices should in practice be relatively small, but
   205|       |    // there is currently no max size validation.
   206|       |    // TODO(antoniovicente) Set realistic limits on the max size of BufferSlice and consider use of
   207|       |    // size_t instead of uint64_t in the Slice interface.
   208|  4.01M|    raw_slices.emplace_back(
   209|  4.01M|        RawSlice{const_cast<uint8_t*>(slice.data()), static_cast<size_t>(slice.dataSize())});
   210|  4.01M|  }
   211|  3.67M|  return raw_slices;
   212|  3.67M|}
   213|       |
   214|   123k|RawSlice OwnedImpl::frontSlice() const {
   215|       |  // Ignore zero-size slices and return the first slice with data.
   216|   123k|  for (const auto& slice : slices_) {
   217|   119k|    if (slice.dataSize() > 0) {
   218|   119k|      return RawSlice{const_cast<uint8_t*>(slice.data()),
   219|   119k|                      static_cast<absl::Span<uint8_t>::size_type>(slice.dataSize())};
   220|   119k|    }
   221|   119k|  }
   222|       |
   223|  3.56k|  return {nullptr, 0};
   224|   123k|}
   225|       |
   226|      0|SliceDataPtr OwnedImpl::extractMutableFrontSlice() {
   227|      0|  RELEASE_ASSERT(length_ > 0, "Extract called on empty buffer");
   228|       |  // Remove zero byte fragments from the front of the queue to ensure
   229|       |  // that the extracted slice has data.
   230|      0|  while (!slices_.empty() && slices_.front().dataSize() == 0) {
   231|      0|    slices_.pop_front();
   232|      0|  }
   233|      0|  ASSERT(!slices_.empty());
   234|      0|  auto slice = std::move(slices_.front());
   235|      0|  auto size = slice.dataSize();
   236|      0|  length_ -= size;
   237|      0|  slices_.pop_front();
   238|      0|  if (!slice.isMutable()) {
   239|       |    // Create a mutable copy of the immutable slice data.
   240|      0|    Slice mutable_slice{size, nullptr};
   241|      0|    auto copy_size = mutable_slice.append(slice.data(), size);
   242|      0|    ASSERT(copy_size == size);
   243|       |    // Drain trackers for the immutable slice will be called as part of the slice destructor.
   244|      0|    return std::make_unique<SliceDataImpl>(std::move(mutable_slice));
   245|      0|  } else {
   246|       |    // Make sure drain trackers are called before ownership of the slice is transferred from
   247|       |    // the buffer to the caller.
   248|      0|    slice.callAndClearDrainTrackersAndCharges();
   249|      0|    return std::make_unique<SliceDataImpl>(std::move(slice));
   250|      0|  }
   251|      0|}
   252|       |
   253|  19.6M|uint64_t OwnedImpl::length() const {
   254|  19.6M|#ifndef NDEBUG
   255|       |  // When running in debug mode, verify that the precomputed length matches the sum
   256|       |  // of the lengths of the slices.
   257|  19.6M|  uint64_t length = 0;
   258|   324M|  for (const auto& slice : slices_) {
   259|   324M|    length += slice.dataSize();
   260|   324M|  }
   261|  19.6M|  ASSERT(length == length_);
   262|  19.6M|#endif
   263|       |
   264|  19.6M|  return length_;
   265|  19.6M|}
   266|       |
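length() returns a cached count, and the #ifndef NDEBUG block re-derives it from the slices so debug builds catch any bookkeeping drift. The pattern in isolation (a generic sketch, not Envoy code):

```cpp
#include <cassert>
#include <cstdint>
#include <deque>
#include <string>

uint64_t checkedLength(const std::deque<std::string>& slices, uint64_t cached) {
#ifndef NDEBUG
  // Debug builds recompute the value from first principles and compare.
  uint64_t length = 0;
  for (const auto& slice : slices) {
    length += slice.size();
  }
  assert(length == cached);
#endif
  return cached;
}
```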
   267|  85.9k|void* OwnedImpl::linearize(uint32_t size) {
   268|  85.9k|  RELEASE_ASSERT(size <= length(), "Linearize size exceeds buffer size");
   269|  85.9k|  if (slices_.empty()) {
   270|    481|    return nullptr;
   271|    481|  }
   272|  85.4k|  if (slices_[0].dataSize() < size) {
   273|  2.83k|    Slice new_slice{size, account_};
   274|  2.83k|    Slice::Reservation reservation = new_slice.reserve(size);
   275|  2.83k|    ASSERT(reservation.mem_ != nullptr);
   276|  2.83k|    ASSERT(reservation.len_ == size);
   277|  2.83k|    copyOut(0, size, reservation.mem_);
   278|  2.83k|    new_slice.commit(reservation);
   279|       |
   280|       |    // Replace the first 'size' bytes in the buffer with the new slice. Since new_slice re-adds the
   281|       |    // drained bytes, avoid use of the overridable 'drain' method to avoid incorrectly checking if
   282|       |    // we dipped below low-watermark.
   283|  2.83k|    drainImpl(size);
   284|  2.83k|    slices_.emplace_front(std::move(new_slice));
   285|  2.83k|    length_ += size;
   286|  2.83k|  }
   287|  85.4k|  return slices_.front().data();
   288|  85.4k|}
   289|       |
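linearize() is a no-op when the front slice already holds `size` contiguous bytes; otherwise it copies the first `size` bytes into one new slice that replaces them. A standalone sketch (precondition: size is at most the total buffered bytes):

```cpp
#include <algorithm>
#include <cstdint>
#include <deque>
#include <string>

void* linearize(std::deque<std::string>& slices, uint32_t size) {
  if (slices.empty()) {
    return nullptr;
  }
  if (slices.front().size() < size) {
    std::string contiguous;
    contiguous.reserve(size);
    uint32_t remaining = size;
    while (remaining > 0) {
      // Move bytes out of the front slices into the new contiguous slice.
      std::string& front = slices.front();
      const uint64_t n = std::min<uint64_t>(remaining, front.size());
      contiguous.append(front, 0, n);
      front.erase(0, n);
      if (front.empty()) {
        slices.pop_front();
      }
      remaining -= n;
    }
    slices.push_front(std::move(contiguous));
  }
  return slices.front().data();
}
```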
   290|  5.39M|void OwnedImpl::coalesceOrAddSlice(Slice&& other_slice) {
   291|  5.39M|  const uint64_t slice_size = other_slice.dataSize();
   292|       |  // The `other_slice` content can be coalesced into the existing slice IFF:
   293|       |  // 1. The `other_slice` can be coalesced. Immutable slices cannot be safely coalesced
   294|       |  //    because their destructors can have arbitrary global side effects;
   295|       |  // 2. There are existing slices;
   296|       |  // 3. The `other_slice` content length is under the CopyThreshold;
   297|       |  // 4. There is enough unused space in the existing slice to accommodate the `other_slice` content.
   298|  5.39M|  if (other_slice.canCoalesce() && !slices_.empty() && slice_size < CopyThreshold &&
   299|  5.39M|      slices_.back().reservableSize() >= slice_size) {
   300|       |    // Copy the content of the `other_slice`. The `move` methods which call this method
   301|       |    // effectively drain the source buffer.
   302|  1.29M|    addImpl(other_slice.data(), slice_size);
   303|  1.29M|    other_slice.transferDrainTrackersTo(slices_.back());
   304|  4.09M|  } else {
   305|       |    // Take ownership of the slice.
   306|  4.09M|    other_slice.maybeChargeAccount(account_);
   307|  4.09M|    slices_.emplace_back(std::move(other_slice));
   308|  4.09M|    length_ += slice_size;
   309|  4.09M|  }
   310|  5.39M|}
   311|       |
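The branch above is a copy-vs-move tradeoff: slices smaller than CopyThreshold are copied into the tail slice's spare capacity (fewer, denser slices), while larger or non-coalescable slices are moved wholesale. The decision in miniature (kCopyThreshold is illustrative):

```cpp
#include <cstdint>
#include <deque>
#include <string>

constexpr uint64_t kCopyThreshold = 512;

void coalesceOrAddSlice(std::deque<std::string>& slices, std::string&& other) {
  if (!slices.empty() && other.size() < kCopyThreshold &&
      slices.back().capacity() - slices.back().size() >= other.size()) {
    slices.back().append(other); // small: copy into existing spare capacity
  } else {
    slices.push_back(std::move(other)); // large: take ownership of the slice
  }
}
```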
   312|  3.56M|void OwnedImpl::move(Instance& rhs) {
   313|  3.56M|  ASSERT(&rhs != this);
   314|       |  // We do the static cast here because in practice we only have one buffer implementation right
   315|       |  // now and this is safe. This is a reasonable compromise in a high performance path where we
   316|       |  // want to maintain an abstraction.
   317|  3.56M|  OwnedImpl& other = static_cast<OwnedImpl&>(rhs);
   318|  7.30M|  while (!other.slices_.empty()) {
   319|  3.74M|    const uint64_t slice_size = other.slices_.front().dataSize();
   320|  3.74M|    coalesceOrAddSlice(std::move(other.slices_.front()));
   321|  3.74M|    other.length_ -= slice_size;
   322|  3.74M|    other.slices_.pop_front();
   323|  3.74M|  }
   324|  3.56M|  other.postProcess();
   325|  3.56M|}
   326|       |
   327|  1.86M|void OwnedImpl::move(Instance& rhs, uint64_t length) {
   328|  1.86M|  move(rhs, length, /*reset_drain_trackers_and_accounting=*/false);
   329|  1.86M|}
   330|       |
   331|  1.86M|void OwnedImpl::move(Instance& rhs, uint64_t length, bool reset_drain_trackers_and_accounting) {
   332|  1.86M|  ASSERT(&rhs != this);
   333|       |  // See move() above for why we do the static cast.
   334|  1.86M|  OwnedImpl& other = static_cast<OwnedImpl&>(rhs);
   335|  3.73M|  while (length != 0 && !other.slices_.empty()) {
   336|  1.86M|    const uint64_t slice_size = other.slices_.front().dataSize();
   337|  1.86M|    const uint64_t copy_size = std::min(slice_size, length);
   338|  1.86M|    if (copy_size == 0) {
   339|    106|      other.slices_.pop_front();
   340|  1.86M|    } else if (copy_size < slice_size) {
   341|       |      // TODO(brian-pane) add reference-counting to allow slices to share their storage
   342|       |      // and eliminate the copy for this partial-slice case?
   343|   213k|      add(other.slices_.front().data(), copy_size);
   344|   213k|      other.slices_.front().drain(copy_size);
   345|   213k|      other.length_ -= copy_size;
   346|  1.65M|    } else {
   347|  1.65M|      if (reset_drain_trackers_and_accounting) {
   348|       |        // The other slice is owned by a user-space IO handle and its drain trackers may refer to a
   349|       |        // connection that can die (and be freed) at any time. Call and clear the drain trackers to
   350|       |        // avoid potential use-after-free.
   351|      0|        other.slices_.front().callAndClearDrainTrackersAndCharges();
   352|      0|      }
   353|  1.65M|      coalesceOrAddSlice(std::move(other.slices_.front()));
   354|  1.65M|      other.slices_.pop_front();
   355|  1.65M|      other.length_ -= slice_size;
   356|  1.65M|    }
   357|  1.86M|    length -= copy_size;
   358|  1.86M|  }
   359|  1.86M|  other.postProcess();
   360|  1.86M|}
   361|       |
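A usage sketch for the move() overloads above (assumes an Envoy build environment). Whole slices are transferred without copying payload bytes; only a partially-consumed slice is copied:

```cpp
#include "source/common/buffer/buffer_impl.h"

void moveExample() {
  Envoy::Buffer::OwnedImpl source("some payload");
  Envoy::Buffer::OwnedImpl sink;
  sink.move(source, 4); // sink now holds "some", source holds " payload";
                        // the partial slice is copied, whole slices would move
  sink.move(source);    // drains everything that remains in source
}
```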
   362|  8.17k|Reservation OwnedImpl::reserveForRead() {
   363|  8.17k|  return reserveWithMaxLength(default_read_reservation_size_);
   364|  8.17k|}
   365|       |
   366|  34.5k|Reservation OwnedImpl::reserveWithMaxLength(uint64_t max_length) {
   367|  34.5k|  Reservation reservation = Reservation::bufferImplUseOnlyConstruct(*this);
   368|  34.5k|  if (max_length == 0) {
   369|      0|    return reservation;
   370|      0|  }
   371|       |
   372|       |  // Remove any empty slices at the end.
   373|  34.6k|  while (!slices_.empty() && slices_.back().dataSize() == 0) {
   374|     78|    slices_.pop_back();
   375|     78|  }
   376|       |
   377|  34.5k|  uint64_t bytes_remaining = max_length;
   378|  34.5k|  uint64_t reserved = 0;
   379|  34.5k|  auto& reservation_slices = reservation.bufferImplUseOnlySlices();
   380|  34.5k|  auto slices_owner = std::make_unique<OwnedImplReservationSlicesOwnerMultiple>();
   381|       |
   382|       |  // Check whether there are any empty slices with reservable space at the end of the buffer.
   383|  34.5k|  uint64_t reservable_size = slices_.empty() ? 0 : slices_.back().reservableSize();
   384|  34.5k|  if (reservable_size >= max_length || reservable_size >= (Slice::default_slice_size_ / 8)) {
   385|  12.9k|    uint64_t reserve_size = std::min(reservable_size, bytes_remaining);
   386|  12.9k|    RawSlice slice = slices_.back().reserve(reserve_size);
   387|  12.9k|    reservation_slices.push_back(slice);
   388|  12.9k|    slices_owner->owned_storages_.push_back({});
   389|  12.9k|    bytes_remaining -= slice.len_;
   390|  12.9k|    reserved += slice.len_;
   391|  12.9k|  }
   392|       |
   393|   291k|  while (bytes_remaining != 0 && reservation_slices.size() < reservation.MAX_SLICES_) {
   394|   259k|    constexpr uint64_t size = Slice::default_slice_size_;
   395|       |
   396|       |    // If the next slice would go over the desired size, and the amount already reserved is already
   397|       |    // at least one full slice in size, stop allocating slices. This prevents returning a
   398|       |    // reservation larger than requested, which could go above the watermark limits for a watermark
   399|       |    // buffer, unless the size would be very small (less than 1 full slice).
   400|   259k|    if (size > bytes_remaining && reserved >= size) {
   401|  2.75k|      break;
   402|  2.75k|    }
   403|       |
   404|   256k|    Slice::SizedStorage storage = slices_owner->newStorage();
   405|   256k|    ASSERT(storage.len_ == size);
   406|   256k|    const RawSlice raw_slice{storage.mem_.get(), size};
   407|   256k|    slices_owner->owned_storages_.emplace_back(std::move(storage));
   408|   256k|    reservation_slices.push_back(raw_slice);
   409|   256k|    bytes_remaining -= std::min<uint64_t>(raw_slice.len_, bytes_remaining);
   410|   256k|    reserved += raw_slice.len_;
   411|   256k|  }
   412|       |
   413|  34.5k|  ASSERT(reservation_slices.size() == slices_owner->owned_storages_.size());
   414|  34.5k|  reservation.bufferImplUseOnlySlicesOwner() = std::move(slices_owner);
   415|  34.5k|  reservation.bufferImplUseOnlySetLength(reserved);
   416|       |
   417|  34.5k|  return reservation;
   418|  34.5k|}
   419|       |
   420|  30.5k|ReservationSingleSlice OwnedImpl::reserveSingleSlice(uint64_t length, bool separate_slice) {
   421|  30.5k|  ReservationSingleSlice reservation = ReservationSingleSlice::bufferImplUseOnlyConstruct(*this);
   422|  30.5k|  if (length == 0) {
   423|      0|    return reservation;
   424|      0|  }
   425|       |
   426|       |  // Remove any empty slices at the end.
   427|  30.7k|  while (!slices_.empty() && slices_.back().dataSize() == 0) {
   428|    132|    slices_.pop_back();
   429|    132|  }
   430|       |
   431|  30.5k|  auto& reservation_slice = reservation.bufferImplUseOnlySlice();
   432|  30.5k|  auto slice_owner = std::make_unique<OwnedImplReservationSlicesOwnerSingle>();
   433|       |
   434|       |  // Check whether there are any empty slices with reservable space at the end of the buffer.
   435|  30.5k|  uint64_t reservable_size =
   436|  30.5k|      (separate_slice || slices_.empty()) ? 0 : slices_.back().reservableSize();
   437|  30.5k|  if (reservable_size >= length) {
   438|    362|    reservation_slice = slices_.back().reserve(length);
   439|  30.2k|  } else {
   440|  30.2k|    slice_owner->owned_storage_ = Slice::newStorage(length);
   441|  30.2k|    ASSERT(slice_owner->owned_storage_.len_ >= length);
   442|  30.2k|    reservation_slice = {slice_owner->owned_storage_.mem_.get(), static_cast<size_t>(length)};
   443|  30.2k|  }
   444|       |
   445|  30.5k|  reservation.bufferImplUseOnlySliceOwner() = std::move(slice_owner);
   446|       |
   447|  30.5k|  return reservation;
   448|  30.5k|}
   449|       |
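A usage sketch for the reservation API above (assumes an Envoy build environment; the reservation accessors are taken from Envoy's Buffer::Instance interface). A reservation exposes writable memory up front, and commit() attaches only the bytes actually produced:

```cpp
#include "source/common/buffer/buffer_impl.h"

#include <cstring>

void reserveExample() {
  Envoy::Buffer::OwnedImpl buffer;
  auto reservation = buffer.reserveSingleSlice(1024);
  auto slice = reservation.slice();
  std::memcpy(slice.mem_, "data", 4); // e.g. the result of a read() syscall
  reservation.commit(4);              // only 4 bytes become buffer content
}
```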
   450|       |void OwnedImpl::commit(uint64_t length, absl::Span<RawSlice> slices,
   451|  65.1k|                       ReservationSlicesOwnerPtr slices_owner_base) {
   452|  65.1k|  if (length == 0) {
   453|  16.7k|    return;
   454|  16.7k|  }
   455|       |
   456|  48.4k|  ASSERT(dynamic_cast<OwnedImplReservationSlicesOwner*>(slices_owner_base.get()) != nullptr);
   457|  48.4k|  std::unique_ptr<OwnedImplReservationSlicesOwner> slices_owner(
   458|  48.4k|      static_cast<OwnedImplReservationSlicesOwner*>(slices_owner_base.release()));
   459|       |
   460|  48.4k|  absl::Span<Slice::SizedStorage> owned_storages = slices_owner->ownedStorages();
   461|  48.4k|  ASSERT(slices.size() == owned_storages.size());
   462|       |
   463|  48.4k|  uint64_t bytes_remaining = length;
   464|  98.5k|  for (uint32_t i = 0; i < slices.size() && bytes_remaining > 0; i++) {
   465|  50.1k|    slices[i].len_ = std::min<uint64_t>(slices[i].len_, bytes_remaining);
   466|       |
   467|  50.1k|    if (auto& owned_storage = owned_storages[i]; owned_storage.mem_ != nullptr) {
   468|  48.7k|      ASSERT(slices[i].len_ <= owned_storage.len_);
   469|  48.7k|      slices_.emplace_back(Slice(std::move(owned_storage), slices[i].len_, account_));
   470|  48.7k|    } else {
   471|  1.43k|      bool success = slices_.back().commit<false>(slices[i]);
   472|  1.43k|      ASSERT(success);
   473|  1.43k|    }
   474|       |
   475|  50.1k|    length_ += slices[i].len_;
   476|  50.1k|    bytes_remaining -= slices[i].len_;
   477|  50.1k|  }
   478|  48.4k|}
   479|       |
   480|  42.5k|ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start, size_t length) const {
   481|       |  // This implementation uses the same search algorithm as evbuffer_search(), a naive
   482|       |  // scan that requires O(M*N) comparisons in the worst case.
   483|       |  // TODO(brian-pane): replace this with a more efficient search if it shows up
   484|       |  // prominently in CPU profiling.
   485|  42.5k|  if (size == 0) {
   486|    503|    return (start <= length_) ? start : -1;
   487|    503|  }
   488|       |
   489|       |  // A length equal to zero means that the entire buffer must be searched.
   490|       |  // Adjust the length to the buffer length, taking the starting index into account.
   491|  42.0k|  size_t left_to_search = length;
   492|  42.0k|  if (0 == length) {
   493|  30.3k|    left_to_search = length_ - start;
   494|  30.3k|  }
   495|  42.0k|  ssize_t offset = 0;
   496|  42.0k|  const uint8_t* needle = static_cast<const uint8_t*>(data);
   497|  56.4k|  for (size_t slice_index = 0; slice_index < slices_.size() && (left_to_search > 0);
   498|  43.8k|       slice_index++) {
   499|  43.8k|    const auto& slice = slices_[slice_index];
   500|  43.8k|    uint64_t slice_size = slice.dataSize();
   501|  43.8k|    if (slice_size <= start) {
   502|    267|      start -= slice_size;
   503|    267|      offset += slice_size;
   504|    267|      continue;
   505|    267|    }
   506|  43.5k|    const uint8_t* slice_start = slice.data();
   507|  43.5k|    const uint8_t* haystack = slice_start;
   508|  43.5k|    const uint8_t* haystack_end = haystack + slice_size;
   509|  43.5k|    haystack += start;
   510|   809k|    while (haystack < haystack_end) {
   511|   808k|      const size_t slice_search_limit =
   512|   808k|          std::min(static_cast<size_t>(haystack_end - haystack), left_to_search);
   513|       |      // Search within this slice for the first byte of the needle.
   514|   808k|      const uint8_t* first_byte_match =
   515|   808k|          static_cast<const uint8_t*>(memchr(haystack, needle[0], slice_search_limit));
   516|   808k|      if (first_byte_match == nullptr) {
   517|  13.2k|        left_to_search -= slice_search_limit;
   518|  13.2k|        break;
   519|  13.2k|      }
   520|       |      // After finding a match for the first byte of the needle, check whether the following
   521|       |      // bytes in the buffer match the remainder of the needle. Note that the match can span
   522|       |      // two or more slices.
   523|   795k|      left_to_search -= static_cast<size_t>(first_byte_match - haystack + 1);
   524|       |      // Save the current number of bytes left to search.
   525|       |      // If the pattern is not found, the search will resume from the next byte
   526|       |      // and the left_to_search value must be restored.
   527|   795k|      const size_t saved_left_to_search = left_to_search;
   528|   795k|      size_t i = 1;
   529|   795k|      size_t match_index = slice_index;
   530|   795k|      const uint8_t* match_next = first_byte_match + 1;
   531|   795k|      const uint8_t* match_end = haystack_end;
   532|   447M|      while ((i < size) && (0 < left_to_search)) {
   533|   447M|        if (match_next >= match_end) {
   534|       |          // We've hit the end of this slice, so continue checking against the next slice.
   535|  2.04k|          match_index++;
   536|  2.04k|          if (match_index == slices_.size()) {
   537|       |            // We've hit the end of the entire buffer.
   538|    544|            break;
   539|    544|          }
   540|  1.50k|          const auto& match_slice = slices_[match_index];
   541|  1.50k|          match_next = match_slice.data();
   542|  1.50k|          match_end = match_next + match_slice.dataSize();
   543|  1.50k|          continue;
   544|  2.04k|        }
   545|   447M|        left_to_search--;
   546|   447M|        if (*match_next++ != needle[i]) {
   547|   697k|          break;
   548|   697k|        }
   549|   446M|        i++;
   550|   446M|      }
   551|   795k|      if (i == size) {
   552|       |        // Successful match of the entire needle.
   553|  29.4k|        return offset + (first_byte_match - slice_start);
   554|  29.4k|      }
   555|       |      // If this wasn't a successful match, start scanning again at the next byte.
   556|   765k|      haystack = first_byte_match + 1;
   557|   765k|      left_to_search = saved_left_to_search;
   558|   765k|    }
   559|  14.1k|    start = 0;
   560|  14.1k|    offset += slice_size;
   561|  14.1k|  }
   562|  12.5k|  return -1;
   563|  42.0k|}
   564|       |
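The scan above finds candidate positions for the needle's first byte with memchr(), verifies the rest, and additionally lets a match straddle slice boundaries. The single-contiguous-haystack version of the same algorithm:

```cpp
#include <cstddef>
#include <cstring>

std::ptrdiff_t search(const unsigned char* hay, size_t hay_len,
                      const unsigned char* needle, size_t needle_len) {
  if (needle_len == 0) {
    return 0;
  }
  if (needle_len > hay_len) {
    return -1;
  }
  const unsigned char* p = hay;
  const unsigned char* end = hay + hay_len - needle_len + 1;
  while (p < end) {
    // memchr() narrows the candidates to positions matching the first byte.
    p = static_cast<const unsigned char*>(
        std::memchr(p, needle[0], static_cast<size_t>(end - p)));
    if (p == nullptr) {
      return -1;
    }
    if (std::memcmp(p, needle, needle_len) == 0) {
      return p - hay; // full match
    }
    ++p; // resume scanning at the byte after the false match
  }
  return -1;
}
```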
   565|  2.07k|bool OwnedImpl::startsWith(absl::string_view data) const {
   566|  2.07k|  if (length() < data.length()) {
   567|       |    // Buffer is too short to contain data.
   568|    426|    return false;
   569|    426|  }
   570|       |
   571|  1.64k|  if (data.length() == 0) {
   572|    211|    return true;
   573|    211|  }
   574|       |
   575|  1.43k|  const uint8_t* prefix = reinterpret_cast<const uint8_t*>(data.data());
   576|  1.43k|  size_t size = data.length();
   577|  1.72k|  for (const auto& slice : slices_) {
   578|  1.72k|    uint64_t slice_size = slice.dataSize();
   579|  1.72k|    const uint8_t* slice_start = slice.data();
   580|       |
   581|  1.72k|    if (slice_size >= size) {
   582|       |      // The remaining size bytes of data are in this slice.
   583|  1.33k|      return memcmp(prefix, slice_start, size) == 0;
   584|  1.33k|    }
   585|       |
   586|       |    // Slice is smaller than data, see if the prefix matches.
   587|    394|    if (memcmp(prefix, slice_start, slice_size) != 0) {
   588|    107|      return false;
   589|    107|    }
   590|       |
   591|       |    // Prefix matched. Continue looking at the next slice.
   592|    287|    prefix += slice_size;
   593|    287|    size -= slice_size;
   594|    287|  }
   595|       |
   596|       |  // Less data in slices than length() reported.
   597|      0|  IS_ENVOY_BUG("unexpected data in slices");
   598|      0|  return false;
   599|      0|}
   600|       |
   601|  7.59M|OwnedImpl::OwnedImpl() = default;
   602|       |
   603|  3.25M|OwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); }
   604|       |
   605|    172|OwnedImpl::OwnedImpl(const Instance& data) : OwnedImpl() { add(data); }
   606|       |
   607|   227k|OwnedImpl::OwnedImpl(const void* data, uint64_t size) : OwnedImpl() { add(data, size); }
   608|       |
   609|      0|OwnedImpl::OwnedImpl(BufferMemoryAccountSharedPtr account) : account_(std::move(account)) {}
   610|       |
   611|  2.42M|std::string OwnedImpl::toString() const {
   612|  2.42M|  std::string output;
   613|  2.42M|  output.reserve(length());
   614|  2.60M|  for (const RawSlice& slice : getRawSlices()) {
   615|  2.60M|    output.append(static_cast<const char*>(slice.mem_), slice.len_);
   616|  2.60M|  }
   617|       |
   618|  2.42M|  return output;
   619|  2.42M|}
   620|       |
   621|  5.21M|void OwnedImpl::postProcess() {}
   622|       |
   623|      0|void OwnedImpl::appendSliceForTest(const void* data, uint64_t size) {
   624|      0|  slices_.emplace_back(Slice(size, account_));
   625|      0|  slices_.back().append(data, size);
   626|      0|  length_ += size;
   627|      0|}
   628|       |
   629|      0|void OwnedImpl::appendSliceForTest(absl::string_view data) {
   630|      0|  appendSliceForTest(data.data(), data.size());
   631|      0|}
   632|       |
   633|      0|std::vector<Slice::SliceRepresentation> OwnedImpl::describeSlicesForTest() const {
   634|      0|  std::vector<Slice::SliceRepresentation> slices;
   635|      0|  for (const auto& slice : slices_) {
   636|      0|    slices.push_back(slice.describeSliceForTest());
   637|      0|  }
   638|      0|  return slices;
   639|      0|}
   640|       |
   641|  93.9k|size_t OwnedImpl::addFragments(absl::Span<const absl::string_view> fragments) {
   642|  93.9k|  size_t total_size_to_copy = 0;
   643|       |
   644|   382k|  for (const auto& fragment : fragments) {
   645|   382k|    total_size_to_copy += fragment.size();
   646|   382k|  }
   647|       |
   648|  93.9k|  if (slices_.empty()) {
   649|  22.9k|    slices_.emplace_back(Slice(total_size_to_copy, account_));
   650|  22.9k|  }
   651|       |
   652|  93.9k|  Slice& back = slices_.back();
   653|  93.9k|  Slice::Reservation reservation = back.reserve(total_size_to_copy);
   654|  93.9k|  uint8_t* mem = static_cast<uint8_t*>(reservation.mem_);
   655|  93.9k|  if (reservation.len_ == total_size_to_copy) {
   656|       |    // The back slice has enough contiguous memory for all fragments, so copy them
   657|       |    // directly into it for better performance.
   658|   370k|    for (const auto& fragment : fragments) {
   659|   370k|      memcpy(mem, fragment.data(), fragment.size()); // NOLINT(safe-memcpy)
   660|   370k|      mem += fragment.size();
   661|   370k|    }
   662|  90.7k|    back.commit<false>(reservation);
   663|  90.7k|    length_ += total_size_to_copy;
   664|  90.7k|  } else {
   665|       |    // Fall back to `addImpl` if the back slice does not have enough contiguous memory.
   666|       |    // TODO(wbpcode): Fill the remaining memory space in the back slice, then
   667|       |    // allocate enough contiguous memory for the remaining unwritten fragments
   668|       |    // and copy them directly. This may result in better performance.
   669|  12.5k|    for (const auto& fragment : fragments) {
   670|  12.5k|      addImpl(fragment.data(), fragment.size());
   671|  12.5k|    }
   672|  3.14k|  }
   673|       |
   674|  93.9k|  return total_size_to_copy;
   675|  93.9k|}
   676|       |
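A usage sketch for addFragments() (assumes an Envoy build environment): it sizes a single reservation for all fragments up front, which is why it typically beats a sequence of add() calls for small pieces:

```cpp
#include "source/common/buffer/buffer_impl.h"

void fragmentsExample() {
  Envoy::Buffer::OwnedImpl buffer;
  buffer.addFragments({"HTTP/1.1 200 OK\r\n", "content-length: 0\r\n", "\r\n"});
  // One reservation, three memcpys -- usually cheaper than three add() calls.
}
```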
   677|       |} // namespace Buffer
   678|       |} // namespace Envoy