1
#include "source/common/buffer/buffer_impl.h"
2

            
3
#include <cstdint>
4
#include <memory>
5
#include <string>
6

            
7
#include "source/common/common/assert.h"
8

            
9
#include "absl/container/fixed_array.h"
10
#include "event2/buffer.h"
11

            
12
namespace Envoy {
13
namespace Buffer {
14
namespace {
15
// This size has been determined to be optimal from running the
16
// //test/integration:http_benchmark benchmark tests.
17
// TODO(yanavlasov): This may not be optimal for all hardware configurations or traffic patterns and
18
// may need to be configurable in the future.
19
constexpr uint64_t CopyThreshold = 512;
20
} // namespace
21

            
22
thread_local absl::InlinedVector<Slice::StoragePtr,
23
                                 OwnedImpl::OwnedImplReservationSlicesOwnerMultiple::free_list_max_>
24
    OwnedImpl::OwnedImplReservationSlicesOwnerMultiple::free_list_;
25

            
26
139272
uint64_t Slice::prepend(const void* data, uint64_t size) {
27
139272
  const uint8_t* src = static_cast<const uint8_t*>(data);
28
139272
  uint64_t copy_size;
29
139272
  if (dataSize() == 0) {
30
    // There is nothing in the slice, so put the data at the very end in case the caller
31
    // later tries to prepend anything else in front of it.
32
69651
    copy_size = std::min(size, reservableSize());
33
69651
    reservable_ = capacity_;
34
69651
    data_ = capacity_ - copy_size;
35
69659
  } else {
36
69621
    if (data_ == 0) {
37
      // There is content in the slice, and no space in front of it to write anything.
38
69605
      return 0;
39
69605
    }
40
    // Write into the space in front of the slice's current content.
41
16
    copy_size = std::min(size, data_);
42
16
    data_ -= copy_size;
43
16
  }
44
69667
  memcpy(base_ + data_, src + size - copy_size, copy_size); // NOLINT(safe-memcpy)
45
69667
  return copy_size;
46
139272
}
47

            
48
9210741
void OwnedImpl::addImpl(const void* data, uint64_t size) {
49
9210741
  const char* src = static_cast<const char*>(data);
50
9210741
  bool new_slice_needed = slices_.empty();
51
18834116
  while (size != 0) {
52
9623375
    if (new_slice_needed) {
53
4021445
      slices_.emplace_back(Slice(size, account_));
54
4021445
    }
55
9623375
    uint64_t copy_size = slices_.back().append(src, size);
56
9623375
    src += copy_size;
57
9623375
    size -= copy_size;
58
9623375
    length_ += copy_size;
59
9623375
    new_slice_needed = true;
60
9623375
  }
61
9210741
}
62

            
63
786395
void OwnedImpl::addDrainTracker(std::function<void()> drain_tracker) {
64
786395
  ASSERT(!slices_.empty());
65
786395
  slices_.back().addDrainTracker(std::move(drain_tracker));
66
786395
}
67

            
68
188358
// Binds a memory account used to charge for slices added later. Only legal while the
// buffer is still empty, so all of its contents are charged consistently.
void OwnedImpl::bindAccount(BufferMemoryAccountSharedPtr account) {
  ASSERT(slices_.empty());
  account_ = std::move(account);
}
72

            
73
1233
// Test-only accessor for the memory account currently bound to this buffer.
BufferMemoryAccountSharedPtr OwnedImpl::getAccountForTest() { return account_; }
74

            
75
7983860
// Appends a copy of a raw byte region; delegates to the shared append implementation.
void OwnedImpl::add(const void* data, uint64_t size) { addImpl(data, size); }
76

            
77
810284
// Appends an externally owned fragment as a new slice. No bytes are copied; the
// fragment remains owned by the caller until the slice releases it.
void OwnedImpl::addBufferFragment(BufferFragment& fragment) {
  const uint64_t fragment_size = fragment.size();
  slices_.emplace_back(fragment);
  length_ += fragment_size;
}
81

            
82
1230325
// Appends a copy of the string view's bytes.
void OwnedImpl::add(absl::string_view data) { add(data.data(), data.size()); }
83

            
84
357366
void OwnedImpl::add(const Instance& data) {
85
357366
  ASSERT(&data != this);
86
421078
  for (const RawSlice& slice : data.getRawSlices()) {
87
418192
    add(slice.mem_, slice.len_);
88
418192
  }
89
357366
}
90

            
91
69673
// Copies `data` in front of the buffer's current content, creating front slices as
// needed until every byte has been placed.
void OwnedImpl::prepend(absl::string_view data) {
  uint64_t remaining = data.size();
  bool need_front_slice = slices_.empty();
  while (remaining != 0) {
    if (need_front_slice) {
      slices_.emplace_front(Slice(remaining, account_));
    }
    // Slice::prepend copies the trailing `copied` bytes of the remaining range, so the
    // data lands in order even when it spans several slices.
    const uint64_t copied = slices_.front().prepend(data.data(), remaining);
    remaining -= copied;
    length_ += copied;
    // Any leftover bytes need a fresh slice in front on the next pass.
    need_front_slice = true;
  }
}
104

            
105
82
// Moves the entire content of `data` in front of this buffer's content, preserving the
// source's slice order. The source buffer is left empty.
void OwnedImpl::prepend(Instance& data) {
  ASSERT(&data != this);
  OwnedImpl& source = static_cast<OwnedImpl&>(data);
  // Transferring from the back of `source` to the front of `this` keeps ordering intact.
  while (!source.slices_.empty()) {
    const uint64_t moved_bytes = source.slices_.back().dataSize();
    slices_.emplace_front(std::move(source.slices_.back()));
    // Re-charge the moved slice against this buffer's account, if any.
    slices_.front().maybeChargeAccount(account_);
    source.slices_.pop_back();
    length_ += moved_bytes;
    source.length_ -= moved_bytes;
  }
  source.postProcess();
}
118

            
119
188951
void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const {
120
188951
  uint64_t bytes_to_skip = start;
121
188951
  uint8_t* dest = static_cast<uint8_t*>(data);
122
274883
  for (const auto& slice : slices_) {
123
274727
    if (size == 0) {
124
75550
      break;
125
75550
    }
126
199177
    uint64_t data_size = slice.dataSize();
127
199177
    if (data_size <= bytes_to_skip) {
128
      // The offset where the caller wants to start copying is after the end of this slice,
129
      // so just skip over this slice completely.
130
63
      bytes_to_skip -= data_size;
131
63
      continue;
132
63
    }
133
199114
    uint64_t copy_size = std::min(size, data_size - bytes_to_skip);
134
199114
    memcpy(dest, slice.data() + bytes_to_skip, copy_size); // NOLINT(safe-memcpy)
135
199114
    size -= copy_size;
136
199114
    dest += copy_size;
137
    // Now that we've started copying, there are no bytes left to skip over. If there
138
    // is any more data to be copied, the next iteration can start copying from the very
139
    // beginning of the next slice.
140
199114
    bytes_to_skip = 0;
141
199114
  }
142
188951
  ASSERT(size == 0);
143
188951
}
144

            
145
// Copies up to `size` bytes from the front of the buffer into the caller-provided
// `dest_slices` array (of `num_slice` entries), packing destination slices完fully before
// moving to the next. Returns the number of bytes copied; the buffer is not modified.
uint64_t OwnedImpl::copyOutToSlices(uint64_t size, Buffer::RawSlice* dest_slices,
                                    uint64_t num_slice) const {
  uint64_t total_length_to_read = std::min(size, this->length());
  uint64_t num_bytes_read = 0;
  uint64_t num_dest_slices_read = 0;
  uint64_t num_src_slices_read = 0;
  uint64_t dest_slice_offset = 0;
  uint64_t src_slice_offset = 0;
  while (num_dest_slices_read < num_slice && num_bytes_read < total_length_to_read) {
    const Slice& src_slice = slices_[num_src_slices_read];
    const Buffer::RawSlice& dest_slice = dest_slices[num_dest_slices_read];
    uint64_t left_to_read = total_length_to_read - num_bytes_read;
    uint64_t left_data_size_in_dst_slice = dest_slice.len_ - dest_slice_offset;
    uint64_t left_data_size_in_src_slice = src_slice.dataSize() - src_slice_offset;
    // The length to copy should be size of smallest in the source slice available size and
    // the dest slice available size.
    uint64_t length_to_copy =
        std::min(left_data_size_in_src_slice, std::min(left_data_size_in_dst_slice, left_to_read));
    memcpy(static_cast<uint8_t*>(dest_slice.mem_) + dest_slice_offset, // NOLINT(safe-memcpy)
           src_slice.data() + src_slice_offset, length_to_copy);
    src_slice_offset = src_slice_offset + length_to_copy;
    dest_slice_offset = dest_slice_offset + length_to_copy;
    // Advance to the next source slice once this one is exhausted.
    if (src_slice_offset == src_slice.dataSize()) {
      num_src_slices_read++;
      src_slice_offset = 0;
    }
    // Advance to the next destination slice once this one is full.
    if (dest_slice_offset == dest_slice.len_) {
      num_dest_slices_read++;
      dest_slice_offset = 0;
    }
    ASSERT(src_slice_offset <= src_slice.dataSize());
    ASSERT(dest_slice_offset <= dest_slice.len_);
    num_bytes_read += length_to_copy;
  }
  return num_bytes_read;
}
181

            
182
2258594
// Removes `size` bytes from the front of the buffer; see drainImpl() for details.
void OwnedImpl::drain(uint64_t size) { drainImpl(size); }
183

            
184
2268085
void OwnedImpl::drainImpl(uint64_t size) {
185
5539786
  while (size != 0) {
186
3271806
    if (slices_.empty()) {
187
105
      break;
188
105
    }
189
3271701
    uint64_t slice_size = slices_.front().dataSize();
190
3271702
    if (slice_size <= size) {
191
3197377
      slices_.pop_front();
192
3197377
      length_ -= slice_size;
193
3197377
      size -= slice_size;
194
3225025
    } else {
195
74325
      slices_.front().drain(size);
196
74325
      length_ -= size;
197
74325
      size = 0;
198
74325
    }
199
3271701
  }
200
  // Make sure to drain any zero byte fragments that might have been added as
201
  // sentinels for flushed data.
202
2305038
  while (!slices_.empty() && slices_.front().dataSize() == 0) {
203
36953
    slices_.pop_front();
204
36953
  }
205
2268085
}
206

            
207
3100509
// Returns a vector describing up to `max_slices` of the buffer's slices (all of them
// when unset). Zero-length slices are omitted from the result.
RawSliceVector OwnedImpl::getRawSlices(absl::optional<uint64_t> max_slices) const {
  uint64_t limit = slices_.size();
  if (max_slices.has_value()) {
    limit = std::min(limit, max_slices.value());
  }

  RawSliceVector result;
  result.reserve(limit);
  for (const auto& slice : slices_) {
    if (result.size() >= limit) {
      break;
    }

    if (slice.dataSize() == 0) {
      continue;
    }

    // Temporary cast to fix 32-bit Envoy mobile builds, where sizeof(uint64_t) != sizeof(size_t).
    // dataSize represents the size of a buffer so size_t should always be large enough to hold its
    // size regardless of architecture. Buffer slices should in practice be relatively small, but
    // there is currently no max size validation.
    // TODO(antoniovicente) Set realistic limits on the max size of BufferSlice and consider use of
    // size_t instead of uint64_t in the Slice interface.
    result.emplace_back(
        RawSlice{const_cast<uint8_t*>(slice.data()), static_cast<size_t>(slice.dataSize())});
  }
  return result;
}
235

            
236
1920446
// Returns the first slice that actually contains data, skipping zero-size slices.
// Returns {nullptr, 0} when the buffer holds no data.
RawSlice OwnedImpl::frontSlice() const {
  for (const auto& slice : slices_) {
    const uint64_t slice_len = slice.dataSize();
    if (slice_len > 0) {
      return RawSlice{const_cast<uint8_t*>(slice.data()),
                      static_cast<absl::Span<uint8_t>::size_type>(slice_len)};
    }
  }

  return {nullptr, 0};
}
247

            
248
10
// Detaches the first non-empty slice from the buffer and returns it as owned, mutable
// slice data. Immutable slices are copied into freshly allocated mutable storage.
// The buffer must be non-empty.
SliceDataPtr OwnedImpl::extractMutableFrontSlice() {
  RELEASE_ASSERT(length_ > 0, "Extract called on empty buffer");
  // Remove zero byte fragments from the front of the queue to ensure
  // that the extracted slice has data.
  while (!slices_.empty() && slices_.front().dataSize() == 0) {
    slices_.pop_front();
  }
  ASSERT(!slices_.empty());
  auto slice = std::move(slices_.front());
  auto size = slice.dataSize();
  length_ -= size;
  slices_.pop_front();
  if (!slice.isMutable()) {
    // Create a mutable copy of the immutable slice data.
    Slice mutable_slice{size, nullptr};
    auto copy_size = mutable_slice.append(slice.data(), size);
    ASSERT(copy_size == size);
    // Drain trackers for the immutable slice will be called as part of the slice destructor.
    return std::make_unique<SliceDataImpl>(std::move(mutable_slice));
  } else {
    // Make sure drain trackers are called before ownership of the slice is transferred from
    // the buffer to the caller.
    slice.callAndClearDrainTrackersAndCharges();
    return std::make_unique<SliceDataImpl>(std::move(slice));
  }
}
274

            
275
23370758
// Returns the total number of buffered bytes. O(1) via the cached length_ member; debug
// builds re-verify the cache against the per-slice sizes.
uint64_t OwnedImpl::length() const {
#ifndef NDEBUG
  // When running in debug mode, verify that the precomputed length matches the sum
  // of the lengths of the slices.
  uint64_t length = 0;
  for (const auto& slice : slices_) {
    length += slice.dataSize();
  }
  ASSERT(length == length_);
#endif

  return length_;
}
288

            
289
27713
// Makes the first `size` bytes of the buffer contiguous and returns a pointer to them.
// If the front slice already holds at least `size` bytes this is a no-op; otherwise the
// bytes are gathered into a newly allocated front slice. Returns nullptr on an empty
// buffer (only reachable when size == 0, given the assert).
void* OwnedImpl::linearize(uint32_t size) {
  RELEASE_ASSERT(size <= length(), "Linearize size exceeds buffer size");
  if (slices_.empty()) {
    return nullptr;
  }
  if (slices_[0].dataSize() < size) {
    // Gather the first `size` bytes into one new slice.
    Slice new_slice{size, account_};
    Slice::Reservation reservation = new_slice.reserve(size);
    ASSERT(reservation.mem_ != nullptr);
    ASSERT(reservation.len_ == size);
    copyOut(0, size, reservation.mem_);
    new_slice.commit(reservation);

    // Replace the first 'size' bytes in the buffer with the new slice. Since new_slice re-adds the
    // drained bytes, avoid use of the overridable 'drain' method to avoid incorrectly checking if
    // we dipped below low-watermark.
    drainImpl(size);
    slices_.emplace_front(std::move(new_slice));
    length_ += size;
  }
  return slices_.front().data();
}
311

            
312
3910440
// Takes ownership of `other_slice`: either copies its content into the existing tail
// slice (coalescing small slices to keep the slice count down) or appends the slice
// itself to the buffer.
void OwnedImpl::coalesceOrAddSlice(Slice&& other_slice) {
  const uint64_t slice_size = other_slice.dataSize();
  // The `other_slice` content can be coalesced into the existing slice IFF:
  // 1. The `other_slice` can be coalesced. Immutable slices can not be safely coalesced because
  // their destructors can be arbitrary global side effects.
  // 2. There are existing slices;
  // 3. The `other_slice` content length is under the CopyThreshold;
  // 4. There is enough unused space in the existing slice to accommodate the `other_slice` content.
  if (other_slice.canCoalesce() && !slices_.empty() && slice_size < CopyThreshold &&
      slices_.back().reservableSize() >= slice_size) {
    // Copy content of the `other_slice`. The `move` methods which call this method effectively
    // drain the source buffer.
    addImpl(other_slice.data(), slice_size);
    // Keep the drain trackers alive by moving them onto the slice that now holds the data.
    other_slice.transferDrainTrackersTo(slices_.back());
  } else {
    // Take ownership of the slice.
    other_slice.maybeChargeAccount(account_);
    slices_.emplace_back(std::move(other_slice));
    length_ += slice_size;
  }
}
333

            
334
2582674
// Moves the entire content of `rhs` into this buffer, leaving `rhs` empty. Small slices
// may be coalesced into this buffer's tail slice.
void OwnedImpl::move(Instance& rhs) {
  ASSERT(&rhs != this);
  // We do the static cast here because in practice we only have one buffer implementation right
  // now and this is safe. This is a reasonable compromise in a high performance path where we
  // want to maintain an abstraction.
  OwnedImpl& source = static_cast<OwnedImpl&>(rhs);
  while (!source.slices_.empty()) {
    const uint64_t moved_bytes = source.slices_.front().dataSize();
    coalesceOrAddSlice(std::move(source.slices_.front()));
    source.slices_.pop_front();
    source.length_ -= moved_bytes;
  }
  source.postProcess();
}
348

            
349
1273691
// Moves up to `length` bytes from `rhs` without resetting drain trackers or accounting.
void OwnedImpl::move(Instance& rhs, uint64_t length) {
  move(rhs, length, /*reset_drain_trackers_and_accounting=*/false);
}
352

            
353
1273815
// Moves up to `length` bytes from `rhs` into this buffer. Whole source slices are moved
// (and possibly coalesced); a slice needed only partially is copied and drained in place.
// When `reset_drain_trackers_and_accounting` is true, drain trackers on fully moved
// slices are fired and cleared before the transfer (see comment below for why).
void OwnedImpl::move(Instance& rhs, uint64_t length, bool reset_drain_trackers_and_accounting) {
  ASSERT(&rhs != this);
  // See move() above for why we do the static cast.
  OwnedImpl& other = static_cast<OwnedImpl&>(rhs);
  while (length != 0 && !other.slices_.empty()) {
    const uint64_t slice_size = other.slices_.front().dataSize();
    const uint64_t copy_size = std::min(slice_size, length);
    if (copy_size == 0) {
      // Zero-size slice: nothing to move, just discard it.
      other.slices_.pop_front();
    } else if (copy_size < slice_size) {
      // TODO(brian-pane) add reference-counting to allow slices to share their storage
      // and eliminate the copy for this partial-slice case?
      add(other.slices_.front().data(), copy_size);
      other.slices_.front().drain(copy_size);
      other.length_ -= copy_size;
    } else {
      if (reset_drain_trackers_and_accounting) {
        // The other slice is owned by a user-space IO handle and its drain trackers may refer to a
        // connection that can die (and be freed) at any time. Call and clear the drain trackers to
        // avoid potential use-after-free.
        other.slices_.front().callAndClearDrainTrackersAndCharges();
      }
      coalesceOrAddSlice(std::move(other.slices_.front()));
      other.slices_.pop_front();
      other.length_ -= slice_size;
    }
    length -= copy_size;
  }
  other.postProcess();
}
383

            
384
1384
// Reserves space for an incoming read using the default read reservation size.
Reservation OwnedImpl::reserveForRead() {
  return reserveWithMaxLength(default_read_reservation_size_);
}
387

            
388
1118601
// Builds a (possibly multi-slice) reservation of up to `max_length` bytes. Reservable
// space at the tail of the buffer is reused when it is large enough to be worthwhile;
// the remainder is satisfied with default-size storage obtained from the reservation's
// slices owner. The returned reservation may be smaller or slightly larger than
// requested (see the watermark comment below).
Reservation OwnedImpl::reserveWithMaxLength(uint64_t max_length) {
  Reservation reservation = Reservation::bufferImplUseOnlyConstruct(*this);
  if (max_length == 0) {
    return reservation;
  }

  // Remove any empty slices at the end.
  while (!slices_.empty() && slices_.back().dataSize() == 0) {
    slices_.pop_back();
  }

  uint64_t bytes_remaining = max_length;
  uint64_t reserved = 0;
  auto& reservation_slices = reservation.bufferImplUseOnlySlices();
  auto slices_owner = std::make_unique<OwnedImplReservationSlicesOwnerMultiple>();

  // Check whether there are any empty slices with reservable space at the end of the buffer.
  uint64_t reservable_size = slices_.empty() ? 0 : slices_.back().reservableSize();
  if (reservable_size >= max_length || reservable_size >= (Slice::default_slice_size_ / 8)) {
    // Reuse the tail slice's spare capacity; an empty entry in owned_storages_ marks
    // this reservation slice as not owning its storage.
    uint64_t reserve_size = std::min(reservable_size, bytes_remaining);
    RawSlice slice = slices_.back().reserve(reserve_size);
    reservation_slices.push_back(slice);
    slices_owner->owned_storages_.push_back({});
    bytes_remaining -= slice.len_;
    reserved += slice.len_;
  }

  while (bytes_remaining != 0 && reservation_slices.size() < reservation.MAX_SLICES_) {
    constexpr uint64_t size = Slice::default_slice_size_;

    // If the next slice would go over the desired size, and the amount already reserved is already
    // at least one full slice in size, stop allocating slices. This prevents returning a
    // reservation larger than requested, which could go above the watermark limits for a watermark
    // buffer, unless the size would be very small (less than 1 full slice).
    if (size > bytes_remaining && reserved >= size) {
      break;
    }

    Slice::SizedStorage storage = slices_owner->newStorage();
    ASSERT(storage.len_ == size);
    const RawSlice raw_slice{storage.mem_.get(), size};
    slices_owner->owned_storages_.emplace_back(std::move(storage));
    reservation_slices.push_back(raw_slice);
    bytes_remaining -= std::min<uint64_t>(raw_slice.len_, bytes_remaining);
    reserved += raw_slice.len_;
  }

  ASSERT(reservation_slices.size() == slices_owner->owned_storages_.size());
  reservation.bufferImplUseOnlySlicesOwner() = std::move(slices_owner);
  reservation.bufferImplUseOnlySetLength(reserved);

  return reservation;
}
441

            
442
2688103
// Builds a single-slice reservation of exactly `length` bytes. When `separate_slice` is
// true (or the tail slice lacks room) new storage is allocated instead of reusing the
// tail slice's reservable space.
ReservationSingleSlice OwnedImpl::reserveSingleSlice(uint64_t length, bool separate_slice) {
  ReservationSingleSlice reservation = ReservationSingleSlice::bufferImplUseOnlyConstruct(*this);
  if (length == 0) {
    return reservation;
  }

  // Remove any empty slices at the end.
  while (!slices_.empty() && slices_.back().dataSize() == 0) {
    slices_.pop_back();
  }

  auto& reservation_slice = reservation.bufferImplUseOnlySlice();
  auto slice_owner = std::make_unique<OwnedImplReservationSlicesOwnerSingle>();

  // Check whether there are any empty slices with reservable space at the end of the buffer.
  uint64_t reservable_size =
      (separate_slice || slices_.empty()) ? 0 : slices_.back().reservableSize();
  if (reservable_size >= length) {
    reservation_slice = slices_.back().reserve(length);
  } else {
    // Allocate dedicated storage owned by the reservation until commit.
    slice_owner->owned_storage_ = Slice::newStorage(length);
    ASSERT(slice_owner->owned_storage_.len_ >= length);
    reservation_slice = {slice_owner->owned_storage_.mem_.get(), static_cast<size_t>(length)};
  }

  reservation.bufferImplUseOnlySliceOwner() = std::move(slice_owner);

  return reservation;
}
471

            
472
// Commits `length` bytes of a previously obtained reservation into the buffer. Each
// reservation slice either carries its own storage (which becomes a new buffer slice) or
// reused the tail slice's spare capacity (committed in place on the tail slice).
void OwnedImpl::commit(uint64_t length, absl::Span<RawSlice> slices,
                       ReservationSlicesOwnerPtr slices_owner_base) {
  if (length == 0) {
    return;
  }

  ASSERT(dynamic_cast<OwnedImplReservationSlicesOwner*>(slices_owner_base.get()) != nullptr);
  std::unique_ptr<OwnedImplReservationSlicesOwner> slices_owner(
      static_cast<OwnedImplReservationSlicesOwner*>(slices_owner_base.release()));

  absl::Span<Slice::SizedStorage> owned_storages = slices_owner->ownedStorages();
  ASSERT(slices.size() == owned_storages.size());

  uint64_t bytes_remaining = length;
  for (uint32_t i = 0; i < slices.size() && bytes_remaining > 0; i++) {
    // Clamp the slice to the number of committed bytes it actually holds.
    slices[i].len_ = std::min<uint64_t>(slices[i].len_, bytes_remaining);

    if (auto& owned_storage = owned_storages[i]; owned_storage.mem_ != nullptr) {
      // The reservation owned this storage: adopt it as a new buffer slice.
      ASSERT(slices[i].len_ <= owned_storage.len_);
      slices_.emplace_back(Slice(std::move(owned_storage), slices[i].len_, account_));
    } else {
      // No owned storage: this reservation slice reused the tail slice's capacity.
      bool success = slices_.back().commit<false>(slices[i]);
      ASSERT(success);
    }

    length_ += slices[i].len_;
    bytes_remaining -= slices[i].len_;
  }
}
501

            
502
964
// Finds the first occurrence of the `size`-byte needle `data` at or after buffer offset
// `start`, examining at most `length` bytes (0 means search to the end of the buffer).
// Returns the byte offset of the match, or -1 if not found. A match may span multiple
// slices.
ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start, size_t length) const {
  // This implementation uses the same search algorithm as evbuffer_search(), a naive
  // scan that requires O(M*N) comparisons in the worst case.
  // TODO(brian-pane): replace this with a more efficient search if it shows up
  // prominently in CPU profiling.
  if (size == 0) {
    // An empty needle matches at `start` as long as it is within the buffer.
    return (start <= length_) ? start : -1;
  }

  // length equal to zero means that entire buffer must be searched.
  // Adjust the length to buffer length taking the starting index into account.
  size_t left_to_search = length;
  if (0 == length) {
    left_to_search = length_ - start;
  }
  ssize_t offset = 0;
  const uint8_t* needle = static_cast<const uint8_t*>(data);
  for (size_t slice_index = 0; slice_index < slices_.size() && (left_to_search > 0);
       slice_index++) {
    const auto& slice = slices_[slice_index];
    uint64_t slice_size = slice.dataSize();
    if (slice_size <= start) {
      // The search start lies beyond this slice; skip it entirely.
      start -= slice_size;
      offset += slice_size;
      continue;
    }
    const uint8_t* slice_start = slice.data();
    const uint8_t* haystack = slice_start;
    const uint8_t* haystack_end = haystack + slice_size;
    haystack += start;
    while (haystack < haystack_end) {
      const size_t slice_search_limit =
          std::min(static_cast<size_t>(haystack_end - haystack), left_to_search);
      // Search within this slice for the first byte of the needle.
      const uint8_t* first_byte_match =
          static_cast<const uint8_t*>(memchr(haystack, needle[0], slice_search_limit));
      if (first_byte_match == nullptr) {
        left_to_search -= slice_search_limit;
        break;
      }
      // After finding a match for the first byte of the needle, check whether the following
      // bytes in the buffer match the remainder of the needle. Note that the match can span
      // two or more slices.
      left_to_search -= static_cast<size_t>(first_byte_match - haystack + 1);
      // Save the current number of bytes left to search.
      // If the pattern is not found, the search will resume from the next byte
      // and left_to_search value must be restored.
      const size_t saved_left_to_search = left_to_search;
      size_t i = 1;
      size_t match_index = slice_index;
      const uint8_t* match_next = first_byte_match + 1;
      const uint8_t* match_end = haystack_end;
      while ((i < size) && (0 < left_to_search)) {
        if (match_next >= match_end) {
          // We've hit the end of this slice, so continue checking against the next slice.
          match_index++;
          if (match_index == slices_.size()) {
            // We've hit the end of the entire buffer.
            break;
          }
          const auto& match_slice = slices_[match_index];
          match_next = match_slice.data();
          match_end = match_next + match_slice.dataSize();
          continue;
        }
        left_to_search--;
        if (*match_next++ != needle[i]) {
          break;
        }
        i++;
      }
      if (i == size) {
        // Successful match of the entire needle.
        return offset + (first_byte_match - slice_start);
      }
      // If this wasn't a successful match, start scanning again at the next byte.
      haystack = first_byte_match + 1;
      left_to_search = saved_left_to_search;
    }
    start = 0;
    offset += slice_size;
  }
  return -1;
}
586

            
587
714
// Returns true if the buffer's content begins with the bytes of `data`. The comparison
// may span several slices.
bool OwnedImpl::startsWith(absl::string_view data) const {
  if (length() < data.length()) {
    // Buffer is too short to contain data.
    return false;
  }

  if (data.length() == 0) {
    // The empty string is a prefix of every buffer.
    return true;
  }

  const uint8_t* remaining_prefix = reinterpret_cast<const uint8_t*>(data.data());
  size_t remaining_size = data.length();
  for (const auto& slice : slices_) {
    const uint64_t slice_size = slice.dataSize();
    const uint8_t* slice_start = slice.data();

    if (slice_size >= remaining_size) {
      // This slice contains all remaining prefix bytes; final comparison.
      return memcmp(remaining_prefix, slice_start, remaining_size) == 0;
    }

    // The slice covers only part of the prefix; check that part and keep going.
    if (memcmp(remaining_prefix, slice_start, slice_size) != 0) {
      return false;
    }

    remaining_prefix += slice_size;
    remaining_size -= slice_size;
  }

  // Less data in slices than length() reported.
  IS_ENVOY_BUG("unexpected data in slices");
  return false;
}
622

            
623
9687973
// Default constructor: an empty buffer with no bound memory account.
OwnedImpl::OwnedImpl() = default;
624

            
625
845967
// Constructs a buffer pre-populated with a copy of the string view's bytes.
OwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); }
626

            
627
15985
// Constructs a buffer holding a copy of another buffer's contents.
OwnedImpl::OwnedImpl(const Instance& data) : OwnedImpl() { add(data); }
628

            
629
5008
// Constructs a buffer holding a copy of the given raw byte region.
OwnedImpl::OwnedImpl(const void* data, uint64_t size) : OwnedImpl() { add(data, size); }
630

            
631
245
// Constructs an empty buffer bound to the given memory account.
OwnedImpl::OwnedImpl(BufferMemoryAccountSharedPtr account) : account_(std::move(account)) {}
632

            
633
518311
std::string OwnedImpl::toString() const {
634
518311
  std::string output;
635
518311
  output.reserve(length());
636
698658
  for (const RawSlice& slice : getRawSlices()) {
637
683904
    output.append(static_cast<const char*>(slice.mem_), slice.len_);
638
683904
  }
639

            
640
518311
  return output;
641
518311
}
642

            
643
2545301
// Hook invoked after operations that drain another buffer (see move()/prepend());
// intentionally a no-op in this implementation.
void OwnedImpl::postProcess() {}
644

            
645
193
void OwnedImpl::appendSliceForTest(const void* data, uint64_t size) {
646
193
  slices_.emplace_back(Slice(size, account_));
647
193
  slices_.back().append(data, size);
648
193
  length_ += size;
649
193
}
650

            
651
182
// Test helper: appends the string contents as a dedicated slice.
void OwnedImpl::appendSliceForTest(absl::string_view data) {
  appendSliceForTest(data.data(), data.size());
}
654

            
655
31
std::vector<Slice::SliceRepresentation> OwnedImpl::describeSlicesForTest() const {
656
31
  std::vector<Slice::SliceRepresentation> slices;
657
63
  for (const auto& slice : slices_) {
658
63
    slices.push_back(slice.describeSliceForTest());
659
63
  }
660
31
  return slices;
661
31
}
662

            
663
513829
// Appends all fragments, minimizing the number of slices: if the tail slice can hold
// everything contiguously, all fragments are copied there in one pass; otherwise whole
// fragments are packed into the tail's remaining space and the rest go into one new
// contiguous slice. Returns the total number of bytes appended.
size_t OwnedImpl::addFragments(absl::Span<const absl::string_view> fragments) {
  size_t total_size_to_copy = 0;

  for (const auto& fragment : fragments) {
    total_size_to_copy += fragment.size();
  }

  if (slices_.empty()) {
    slices_.emplace_back(Slice(total_size_to_copy, account_));
  }

  Slice& back = slices_.back();
  Slice::Reservation reservation = back.reserve(total_size_to_copy);
  uint8_t* mem = static_cast<uint8_t*>(reservation.mem_);
  if (reservation.len_ == total_size_to_copy) {
    // Enough continuous memory for all fragments in the back slice then copy
    // all fragments directly for performance improvement.
    for (const auto& fragment : fragments) {
      memcpy(mem, fragment.data(), fragment.size()); // NOLINT(safe-memcpy)
      mem += fragment.size();
    }
    back.commit<false>(reservation);
    length_ += total_size_to_copy;
  } else {
    // Fill the remaining space in the back slice first, then allocate one contiguous
    // slice for all remaining fragments. This reduces the number of slices created and
    // improves memory locality.
    size_t fragment_index = 0;
    uint64_t bytes_written_to_reservation = 0;

    // Fill as many complete fragments as possible into the existing reservation.
    while (fragment_index < fragments.size() &&
           bytes_written_to_reservation + fragments[fragment_index].size() <= reservation.len_) {
      const auto& fragment = fragments[fragment_index];
      memcpy(mem, fragment.data(), fragment.size()); // NOLINT(safe-memcpy)
      mem += fragment.size();
      bytes_written_to_reservation += fragment.size();
      fragment_index++;
    }

    // Commit what we've written to the existing reservation.
    if (bytes_written_to_reservation > 0) {
      back.commit<false>({reservation.mem_, bytes_written_to_reservation});
      length_ += bytes_written_to_reservation;
    }

    // If there are remaining fragments, allocate one contiguous slice for all of them.
    if (fragment_index < fragments.size()) {
      size_t remaining_size = 0;
      for (size_t i = fragment_index; i < fragments.size(); i++) {
        remaining_size += fragments[i].size();
      }

      slices_.emplace_back(Slice(remaining_size, account_));
      Slice& new_slice = slices_.back();
      Slice::Reservation new_reservation = new_slice.reserve(remaining_size);
      ASSERT(new_reservation.len_ == remaining_size);
      uint8_t* new_mem = static_cast<uint8_t*>(new_reservation.mem_);

      for (size_t i = fragment_index; i < fragments.size(); i++) {
        memcpy(new_mem, fragments[i].data(), fragments[i].size()); // NOLINT(safe-memcpy)
        new_mem += fragments[i].size();
      }

      new_slice.commit<false>(new_reservation);
      length_ += remaining_size;
    }
  }

  return total_size_to_copy;
}
734

            
735
} // namespace Buffer
736
} // namespace Envoy