#include "source/common/buffer/buffer_impl.h"

#include <cstdint>
#include <memory>
#include <string>

#include "source/common/common/assert.h"

#include "absl/container/fixed_array.h"
#include "event2/buffer.h"

namespace Envoy {
namespace Buffer {
namespace {
// This size has been determined to be optimal from running the
// //test/integration:http_benchmark benchmark tests.
// TODO(yanavlasov): This may not be optimal for all hardware configurations or traffic patterns and
// may need to be configurable in the future.
constexpr uint64_t CopyThreshold = 512;
} // namespace

thread_local absl::InlinedVector<Slice::StoragePtr,
                                 OwnedImpl::OwnedImplReservationSlicesOwnerMultiple::free_list_max_>
    OwnedImpl::OwnedImplReservationSlicesOwnerMultiple::free_list_;

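// Shared implementation behind the add() overloads: copies `size` bytes from `data`
// into the buffer, filling the tail slice first and appending new slices until all
// bytes have been copied.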
void OwnedImpl::addImpl(const void* data, uint64_t size) {
  const char* src = static_cast<const char*>(data);
  bool new_slice_needed = slices_.empty();
  while (size != 0) {
    if (new_slice_needed) {
      slices_.emplace_back(Slice(size, account_));
    }
    uint64_t copy_size = slices_.back().append(src, size);
    src += copy_size;
    size -= copy_size;
    length_ += copy_size;
    new_slice_needed = true;
  }
}

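// Registers a callback on the current last slice; drain trackers are invoked when
// their slice is drained or destroyed (see callAndClearDrainTrackersAndCharges below).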
void OwnedImpl::addDrainTracker(std::function<void()> drain_tracker) {
  ASSERT(!slices_.empty());
  slices_.back().addDrainTracker(std::move(drain_tracker));
}

void OwnedImpl::bindAccount(BufferMemoryAccountSharedPtr account) {
  ASSERT(slices_.empty());
  account_ = std::move(account);
}

BufferMemoryAccountSharedPtr OwnedImpl::getAccountForTest() { return account_; }

void OwnedImpl::add(const void* data, uint64_t size) { addImpl(data, size); }

void OwnedImpl::addBufferFragment(BufferFragment& fragment) {
  length_ += fragment.size();
  slices_.emplace_back(fragment);
}

void OwnedImpl::add(absl::string_view data) { add(data.data(), data.size()); }

void OwnedImpl::add(const Instance& data) {
  ASSERT(&data != this);
  for (const RawSlice& slice : data.getRawSlices()) {
    add(slice.mem_, slice.len_);
  }
}

void OwnedImpl::prepend(absl::string_view data) {
  uint64_t size = data.size();
  bool new_slice_needed = slices_.empty();
  while (size != 0) {
    if (new_slice_needed) {
      slices_.emplace_front(Slice(size, account_));
    }
    uint64_t copy_size = slices_.front().prepend(data.data(), size);
    size -= copy_size;
    length_ += copy_size;
    new_slice_needed = true;
  }
}

void OwnedImpl::prepend(Instance& data) {
  ASSERT(&data != this);
  OwnedImpl& other = static_cast<OwnedImpl&>(data);
  while (!other.slices_.empty()) {
    uint64_t slice_size = other.slices_.back().dataSize();
    length_ += slice_size;
    slices_.emplace_front(std::move(other.slices_.back()));
    slices_.front().maybeChargeAccount(account_);
    other.slices_.pop_back();
    other.length_ -= slice_size;
  }
  other.postProcess();
}

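// Copies `size` bytes starting at buffer offset `start` into the contiguous region
// pointed to by `data`, walking the slices and skipping the first `start` bytes.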
void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const {
  uint64_t bytes_to_skip = start;
  uint8_t* dest = static_cast<uint8_t*>(data);
  for (const auto& slice : slices_) {
    if (size == 0) {
      break;
    }
    uint64_t data_size = slice.dataSize();
    if (data_size <= bytes_to_skip) {
      // The offset where the caller wants to start copying is after the end of this slice,
      // so just skip over this slice completely.
      bytes_to_skip -= data_size;
      continue;
    }
    uint64_t copy_size = std::min(size, data_size - bytes_to_skip);
    memcpy(dest, slice.data() + bytes_to_skip, copy_size); // NOLINT(safe-memcpy)
    size -= copy_size;
    dest += copy_size;
    // Now that we've started copying, there are no bytes left to skip over. If there
    // is any more data to be copied, the next iteration can start copying from the very
    // beginning of the next slice.
    bytes_to_skip = 0;
  }
  ASSERT(size == 0);
}

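// Copies up to `size` bytes into the caller-provided `dest_slices`, advancing through
// source and destination slices in lockstep. Returns the number of bytes actually
// copied, bounded by both `size` and the buffer length.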
uint64_t OwnedImpl::copyOutToSlices(uint64_t size, Buffer::RawSlice* dest_slices,
                                    uint64_t num_slice) const {
  uint64_t total_length_to_read = std::min(size, this->length());
  uint64_t num_bytes_read = 0;
  uint64_t num_dest_slices_read = 0;
  uint64_t num_src_slices_read = 0;
  uint64_t dest_slice_offset = 0;
  uint64_t src_slice_offset = 0;
  while (num_dest_slices_read < num_slice && num_bytes_read < total_length_to_read) {
    const Slice& src_slice = slices_[num_src_slices_read];
    const Buffer::RawSlice& dest_slice = dest_slices[num_dest_slices_read];
    uint64_t left_to_read = total_length_to_read - num_bytes_read;
    uint64_t left_data_size_in_dst_slice = dest_slice.len_ - dest_slice_offset;
    uint64_t left_data_size_in_src_slice = src_slice.dataSize() - src_slice_offset;
    // The length to copy is the smallest of: the bytes left to read, the bytes available
    // in the source slice, and the space available in the destination slice.
    uint64_t length_to_copy =
        std::min(left_data_size_in_src_slice, std::min(left_data_size_in_dst_slice, left_to_read));
    memcpy(static_cast<uint8_t*>(dest_slice.mem_) + dest_slice_offset, // NOLINT(safe-memcpy)
           src_slice.data() + src_slice_offset, length_to_copy);
    src_slice_offset = src_slice_offset + length_to_copy;
    dest_slice_offset = dest_slice_offset + length_to_copy;
    if (src_slice_offset == src_slice.dataSize()) {
      num_src_slices_read++;
      src_slice_offset = 0;
    }
    if (dest_slice_offset == dest_slice.len_) {
      num_dest_slices_read++;
      dest_slice_offset = 0;
    }
    ASSERT(src_slice_offset <= src_slice.dataSize());
    ASSERT(dest_slice_offset <= dest_slice.len_);
    num_bytes_read += length_to_copy;
  }
  return num_bytes_read;
}

void OwnedImpl::drain(uint64_t size) { drainImpl(size); }

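// Non-virtual drain shared by drain() and linearize(): removes `size` bytes from the
// front of the buffer, popping whole slices where possible.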
void OwnedImpl::drainImpl(uint64_t size) {
  while (size != 0) {
    if (slices_.empty()) {
      break;
    }
    uint64_t slice_size = slices_.front().dataSize();
    if (slice_size <= size) {
      slices_.pop_front();
      length_ -= slice_size;
      size -= slice_size;
    } else {
      slices_.front().drain(size);
      length_ -= size;
      size = 0;
    }
  }
  // Make sure to drain any zero byte fragments that might have been added as
  // sentinels for flushed data.
  while (!slices_.empty() && slices_.front().dataSize() == 0) {
    slices_.pop_front();
  }
}

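// Returns up to `max_slices` {pointer, length} views of the buffer's non-empty slices;
// with no limit, every non-empty slice is described.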
RawSliceVector OwnedImpl::getRawSlices(absl::optional<uint64_t> max_slices) const {
  uint64_t max_out = slices_.size();
  if (max_slices.has_value()) {
    max_out = std::min(max_out, max_slices.value());
  }

  RawSliceVector raw_slices;
  raw_slices.reserve(max_out);
  for (const auto& slice : slices_) {
    if (raw_slices.size() >= max_out) {
      break;
    }

    if (slice.dataSize() == 0) {
      continue;
    }

    // Temporary cast to fix 32-bit Envoy mobile builds, where sizeof(uint64_t) != sizeof(size_t).
    // dataSize represents the size of a buffer so size_t should always be large enough to hold its
    // size regardless of architecture. Buffer slices should in practice be relatively small, but
    // there is currently no max size validation.
    // TODO(antoniovicente) Set realistic limits on the max size of BufferSlice and consider use of
    // size_t instead of uint64_t in the Slice interface.
    raw_slices.emplace_back(
        RawSlice{const_cast<uint8_t*>(slice.data()), static_cast<size_t>(slice.dataSize())});
  }
  return raw_slices;
}

RawSlice OwnedImpl::frontSlice() const {
  // Ignore zero-size slices and return the first slice with data.
  for (const auto& slice : slices_) {
    if (slice.dataSize() > 0) {
      return RawSlice{const_cast<uint8_t*>(slice.data()),
                      static_cast<absl::Span<uint8_t>::size_type>(slice.dataSize())};
    }
  }

  return {nullptr, 0};
}

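// Detaches the first non-empty slice from the buffer and hands it to the caller,
// copying it first if the slice's storage is not mutable.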
SliceDataPtr OwnedImpl::extractMutableFrontSlice() {
  RELEASE_ASSERT(length_ > 0, "Extract called on empty buffer");
  // Remove zero byte fragments from the front of the queue to ensure
  // that the extracted slice has data.
  while (!slices_.empty() && slices_.front().dataSize() == 0) {
    slices_.pop_front();
  }
  ASSERT(!slices_.empty());
  auto slice = std::move(slices_.front());
  auto size = slice.dataSize();
  length_ -= size;
  slices_.pop_front();
  if (!slice.isMutable()) {
    // Create a mutable copy of the immutable slice data.
    Slice mutable_slice{size, nullptr};
    auto copy_size = mutable_slice.append(slice.data(), size);
    ASSERT(copy_size == size);
    // Drain trackers for the immutable slice will be called as part of the slice destructor.
    return std::make_unique<SliceDataImpl>(std::move(mutable_slice));
  } else {
    // Make sure drain trackers are called before ownership of the slice is transferred from
    // the buffer to the caller.
    slice.callAndClearDrainTrackersAndCharges();
    return std::make_unique<SliceDataImpl>(std::move(slice));
  }
}

uint64_t OwnedImpl::length() const {
#ifndef NDEBUG
  // When running in debug mode, verify that the precomputed length matches the sum
  // of the lengths of the slices.
  uint64_t length = 0;
  for (const auto& slice : slices_) {
    length += slice.dataSize();
  }
  ASSERT(length == length_);
#endif

  return length_;
}

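// Ensures the first `size` bytes of the buffer are contiguous, coalescing the front
// slices into a freshly reserved slice when necessary, and returns a pointer to them
// (or nullptr for an empty buffer).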
void* OwnedImpl::linearize(uint32_t size) {
  RELEASE_ASSERT(size <= length(), "Linearize size exceeds buffer size");
  if (slices_.empty()) {
    return nullptr;
  }
  if (slices_[0].dataSize() < size) {
    Slice new_slice{size, account_};
    Slice::Reservation reservation = new_slice.reserve(size);
    ASSERT(reservation.mem_ != nullptr);
    ASSERT(reservation.len_ == size);
    copyOut(0, size, reservation.mem_);
    new_slice.commit(reservation);

    // Replace the first 'size' bytes in the buffer with the new slice. Since new_slice re-adds
    // the drained bytes, bypass the overridable 'drain' method so we don't incorrectly check
    // whether we dipped below the low watermark.
    drainImpl(size);
    slices_.emplace_front(std::move(new_slice));
    length_ += size;
  }
  return slices_.front().data();
}

void OwnedImpl::coalesceOrAddSlice(Slice&& other_slice) {
  const uint64_t slice_size = other_slice.dataSize();
  // The `other_slice` content can be coalesced into the existing slice IFF:
  // 1. The `other_slice` can be coalesced. Immutable slices cannot be safely coalesced because
  //    their destructors can have arbitrary global side effects;
  // 2. There are existing slices;
  // 3. The `other_slice` content length is under the CopyThreshold;
  // 4. There is enough unused space in the existing slice to accommodate the `other_slice` content.
  if (other_slice.canCoalesce() && !slices_.empty() && slice_size < CopyThreshold &&
      slices_.back().reservableSize() >= slice_size) {
    // Copy the content of the `other_slice`. The `move` methods that call this method effectively
    // drain the source buffer.
    addImpl(other_slice.data(), slice_size);
    other_slice.transferDrainTrackersTo(slices_.back());
  } else {
    // Take ownership of the slice.
    other_slice.maybeChargeAccount(account_);
    slices_.emplace_back(std::move(other_slice));
    length_ += slice_size;
  }
}

void OwnedImpl::move(Instance& rhs) {
  ASSERT(&rhs != this);
  // We do the static cast here because in practice we only have one buffer implementation right
  // now and this is safe. This is a reasonable compromise in a high performance path where we
  // want to maintain an abstraction.
  OwnedImpl& other = static_cast<OwnedImpl&>(rhs);
  while (!other.slices_.empty()) {
    const uint64_t slice_size = other.slices_.front().dataSize();
    coalesceOrAddSlice(std::move(other.slices_.front()));
    other.length_ -= slice_size;
    other.slices_.pop_front();
  }
  other.postProcess();
}

void OwnedImpl::move(Instance& rhs, uint64_t length) {
  move(rhs, length, /*reset_drain_trackers_and_accounting=*/false);
}

void OwnedImpl::move(Instance& rhs, uint64_t length, bool reset_drain_trackers_and_accounting) {
  ASSERT(&rhs != this);
  // See move() above for why we do the static cast.
  OwnedImpl& other = static_cast<OwnedImpl&>(rhs);
  while (length != 0 && !other.slices_.empty()) {
    const uint64_t slice_size = other.slices_.front().dataSize();
    const uint64_t copy_size = std::min(slice_size, length);
    if (copy_size == 0) {
      other.slices_.pop_front();
    } else if (copy_size < slice_size) {
      // TODO(brian-pane) add reference-counting to allow slices to share their storage
      // and eliminate the copy for this partial-slice case?
      add(other.slices_.front().data(), copy_size);
      other.slices_.front().drain(copy_size);
      other.length_ -= copy_size;
    } else {
      if (reset_drain_trackers_and_accounting) {
        // The other slice is owned by a user-space IO handle and its drain trackers may refer to a
        // connection that can die (and be freed) at any time. Call and clear the drain trackers to
        // avoid potential use-after-free.
        other.slices_.front().callAndClearDrainTrackersAndCharges();
      }
      coalesceOrAddSlice(std::move(other.slices_.front()));
      other.slices_.pop_front();
      other.length_ -= slice_size;
    }
    length -= copy_size;
  }
  other.postProcess();
}

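// A minimal sketch of the reserve/commit cycle as used by read paths (illustrative
// only; `fillSlices` is a hypothetical stand-in for whatever produces the data, e.g.
// a socket read):
//
//   Buffer::OwnedImpl buffer;
//   Reservation reservation = buffer.reserveForRead();
//   uint64_t bytes_written = fillSlices(reservation.slices(), reservation.numSlices());
//   reservation.commit(bytes_written);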
Reservation OwnedImpl::reserveForRead() {
  return reserveWithMaxLength(default_read_reservation_size_);
}

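// Builds a multi-slice reservation of roughly `max_length` bytes: first reuses
// reservable space at the tail of the buffer, then allocates default-sized slices
// until the request (or the slice cap) is reached.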
Reservation OwnedImpl::reserveWithMaxLength(uint64_t max_length) {
  Reservation reservation = Reservation::bufferImplUseOnlyConstruct(*this);
  if (max_length == 0) {
    return reservation;
  }

  // Remove any empty slices at the end.
  while (!slices_.empty() && slices_.back().dataSize() == 0) {
    slices_.pop_back();
  }

  uint64_t bytes_remaining = max_length;
  uint64_t reserved = 0;
  auto& reservation_slices = reservation.bufferImplUseOnlySlices();
  auto slices_owner = std::make_unique<OwnedImplReservationSlicesOwnerMultiple>();

  // Check whether the slice at the end of the buffer has enough reservable space to be
  // worth reusing.
  uint64_t reservable_size = slices_.empty() ? 0 : slices_.back().reservableSize();
  if (reservable_size >= max_length || reservable_size >= (Slice::default_slice_size_ / 8)) {
    uint64_t reserve_size = std::min(reservable_size, bytes_remaining);
    RawSlice slice = slices_.back().reserve(reserve_size);
    reservation_slices.push_back(slice);
    slices_owner->owned_storages_.push_back({});
    bytes_remaining -= slice.len_;
    reserved += slice.len_;
  }

  while (bytes_remaining != 0 && reservation_slices.size() < reservation.MAX_SLICES_) {
    constexpr uint64_t size = Slice::default_slice_size_;

    // If the next slice would go over the desired size, and the amount already reserved is
    // at least one full slice in size, stop allocating slices. This prevents returning a
    // reservation larger than requested, which could go above the watermark limits for a
    // watermark buffer, unless the requested size is very small (less than one full slice).
    if (size > bytes_remaining && reserved >= size) {
      break;
    }

    Slice::SizedStorage storage = slices_owner->newStorage();
    ASSERT(storage.len_ == size);
    const RawSlice raw_slice{storage.mem_.get(), size};
    slices_owner->owned_storages_.emplace_back(std::move(storage));
    reservation_slices.push_back(raw_slice);
    bytes_remaining -= std::min<uint64_t>(raw_slice.len_, bytes_remaining);
    reserved += raw_slice.len_;
  }

  ASSERT(reservation_slices.size() == slices_owner->owned_storages_.size());
  reservation.bufferImplUseOnlySlicesOwner() = std::move(slices_owner);
  reservation.bufferImplUseOnlySetLength(reserved);

  return reservation;
}

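// Reserves `length` contiguous bytes, reusing tail space unless the caller requests a
// separate slice or the tail cannot fit the request.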
ReservationSingleSlice OwnedImpl::reserveSingleSlice(uint64_t length, bool separate_slice) {
  ReservationSingleSlice reservation = ReservationSingleSlice::bufferImplUseOnlyConstruct(*this);
  if (length == 0) {
    return reservation;
  }

  // Remove any empty slices at the end.
  while (!slices_.empty() && slices_.back().dataSize() == 0) {
    slices_.pop_back();
  }

  auto& reservation_slice = reservation.bufferImplUseOnlySlice();
  auto slice_owner = std::make_unique<OwnedImplReservationSlicesOwnerSingle>();

  // Check whether there is enough reservable space at the end of the buffer.
  uint64_t reservable_size =
      (separate_slice || slices_.empty()) ? 0 : slices_.back().reservableSize();
  if (reservable_size >= length) {
    reservation_slice = slices_.back().reserve(length);
  } else {
    slice_owner->owned_storage_ = Slice::newStorage(length);
    ASSERT(slice_owner->owned_storage_.len_ >= length);
    reservation_slice = {slice_owner->owned_storage_.mem_.get(), static_cast<size_t>(length)};
  }

  reservation.bufferImplUseOnlySliceOwner() = std::move(slice_owner);

  return reservation;
}

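// Completes a reservation: appends the filled portion of each reserved slice to the
// buffer, taking ownership of freshly allocated storage and committing in-place
// reservations that were made on the existing tail slice.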
void OwnedImpl::commit(uint64_t length, absl::Span<RawSlice> slices,
                       ReservationSlicesOwnerPtr slices_owner_base) {
  if (length == 0) {
    return;
  }

  ASSERT(dynamic_cast<OwnedImplReservationSlicesOwner*>(slices_owner_base.get()) != nullptr);
  std::unique_ptr<OwnedImplReservationSlicesOwner> slices_owner(
      static_cast<OwnedImplReservationSlicesOwner*>(slices_owner_base.release()));

  absl::Span<Slice::SizedStorage> owned_storages = slices_owner->ownedStorages();
  ASSERT(slices.size() == owned_storages.size());

  uint64_t bytes_remaining = length;
  for (uint32_t i = 0; i < slices.size() && bytes_remaining > 0; i++) {
    slices[i].len_ = std::min<uint64_t>(slices[i].len_, bytes_remaining);

    if (auto& owned_storage = owned_storages[i]; owned_storage.mem_ != nullptr) {
      ASSERT(slices[i].len_ <= owned_storage.len_);
      slices_.emplace_back(Slice(std::move(owned_storage), slices[i].len_, account_));
    } else {
      bool success = slices_.back().commit<false>(slices[i]);
      ASSERT(success);
    }

    length_ += slices[i].len_;
    bytes_remaining -= slices[i].len_;
  }
}

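// Returns the offset of the first occurrence of the `size`-byte needle at or after
// `start` (searching at most `length` bytes, or the rest of the buffer when `length`
// is zero), or -1 if the needle is not found.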
ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start, size_t length) const {
  // This implementation uses the same search algorithm as evbuffer_search(), a naive
  // scan that requires O(M*N) comparisons in the worst case.
  // TODO(brian-pane): replace this with a more efficient search if it shows up
  // prominently in CPU profiling.
  if (size == 0) {
    return (start <= length_) ? start : -1;
  }

  // A length of zero means that the entire buffer must be searched.
  // Adjust the length to the buffer length, taking the starting index into account.
  size_t left_to_search = length;
  if (0 == length) {
    left_to_search = length_ - start;
  }
  ssize_t offset = 0;
  const uint8_t* needle = static_cast<const uint8_t*>(data);
  for (size_t slice_index = 0; slice_index < slices_.size() && (left_to_search > 0);
       slice_index++) {
    const auto& slice = slices_[slice_index];
    uint64_t slice_size = slice.dataSize();
    if (slice_size <= start) {
      start -= slice_size;
      offset += slice_size;
      continue;
    }
    const uint8_t* slice_start = slice.data();
    const uint8_t* haystack = slice_start;
    const uint8_t* haystack_end = haystack + slice_size;
    haystack += start;
    while (haystack < haystack_end) {
      const size_t slice_search_limit =
          std::min(static_cast<size_t>(haystack_end - haystack), left_to_search);
      // Search within this slice for the first byte of the needle.
      const uint8_t* first_byte_match =
          static_cast<const uint8_t*>(memchr(haystack, needle[0], slice_search_limit));
      if (first_byte_match == nullptr) {
        left_to_search -= slice_search_limit;
        break;
      }
      // After finding a match for the first byte of the needle, check whether the following
      // bytes in the buffer match the remainder of the needle. Note that the match can span
      // two or more slices.
      left_to_search -= static_cast<size_t>(first_byte_match - haystack + 1);
      // Save the current number of bytes left to search.
      // If the pattern is not found, the search will resume from the next byte
      // and the left_to_search value must be restored.
      const size_t saved_left_to_search = left_to_search;
      size_t i = 1;
      size_t match_index = slice_index;
      const uint8_t* match_next = first_byte_match + 1;
      const uint8_t* match_end = haystack_end;
      while ((i < size) && (0 < left_to_search)) {
        if (match_next >= match_end) {
          // We've hit the end of this slice, so continue checking against the next slice.
          match_index++;
          if (match_index == slices_.size()) {
            // We've hit the end of the entire buffer.
            break;
          }
          const auto& match_slice = slices_[match_index];
          match_next = match_slice.data();
          match_end = match_next + match_slice.dataSize();
          continue;
        }
        left_to_search--;
        if (*match_next++ != needle[i]) {
          break;
        }
        i++;
      }
      if (i == size) {
        // Successful match of the entire needle.
        return offset + (first_byte_match - slice_start);
      }
      // If this wasn't a successful match, start scanning again at the next byte.
      haystack = first_byte_match + 1;
      left_to_search = saved_left_to_search;
    }
    start = 0;
    offset += slice_size;
  }
  return -1;
}

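// True if the first data.length() bytes of the buffer equal `data`; the comparison may
// span multiple slices.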
bool OwnedImpl::startsWith(absl::string_view data) const {
  if (length() < data.length()) {
    // Buffer is too short to contain data.
    return false;
  }

  if (data.length() == 0) {
    return true;
  }

  const uint8_t* prefix = reinterpret_cast<const uint8_t*>(data.data());
  size_t size = data.length();
  for (const auto& slice : slices_) {
    uint64_t slice_size = slice.dataSize();
    const uint8_t* slice_start = slice.data();

    if (slice_size >= size) {
      // The remaining size bytes of data are in this slice.
      return memcmp(prefix, slice_start, size) == 0;
    }

    // Slice is smaller than data; see if the prefix matches.
    if (memcmp(prefix, slice_start, slice_size) != 0) {
      return false;
    }

    // Prefix matched. Continue looking at the next slice.
    prefix += slice_size;
    size -= slice_size;
  }

  // Less data in slices than length() reported.
  IS_ENVOY_BUG("unexpected data in slices");
  return false;
}

OwnedImpl::OwnedImpl() = default;

OwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); }

OwnedImpl::OwnedImpl(const Instance& data) : OwnedImpl() { add(data); }

OwnedImpl::OwnedImpl(const void* data, uint64_t size) : OwnedImpl() { add(data, size); }

OwnedImpl::OwnedImpl(BufferMemoryAccountSharedPtr account) : account_(std::move(account)) {}

std::string OwnedImpl::toString() const {
  std::string output;
  output.reserve(length());
  for (const RawSlice& slice : getRawSlices()) {
    output.append(static_cast<const char*>(slice.mem_), slice.len_);
  }

  return output;
}

void OwnedImpl::postProcess() {}

void OwnedImpl::appendSliceForTest(const void* data, uint64_t size) {
  slices_.emplace_back(Slice(size, account_));
  slices_.back().append(data, size);
  length_ += size;
}

void OwnedImpl::appendSliceForTest(absl::string_view data) {
  appendSliceForTest(data.data(), data.size());
}

std::vector<Slice::SliceRepresentation> OwnedImpl::describeSlicesForTest() const {
  std::vector<Slice::SliceRepresentation> slices;
  for (const auto& slice : slices_) {
    slices.push_back(slice.describeSliceForTest());
  }
  return slices;
}

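// Appends all fragments in order, copying them in a single pass into the back slice
// when it can hold them contiguously; returns the total number of bytes added.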
size_t OwnedImpl::addFragments(absl::Span<const absl::string_view> fragments) {
  size_t total_size_to_copy = 0;

  for (const auto& fragment : fragments) {
    total_size_to_copy += fragment.size();
  }

  if (slices_.empty()) {
    slices_.emplace_back(Slice(total_size_to_copy, account_));
  }

  Slice& back = slices_.back();
  Slice::Reservation reservation = back.reserve(total_size_to_copy);
  uint8_t* mem = static_cast<uint8_t*>(reservation.mem_);
  if (reservation.len_ == total_size_to_copy) {
    // The back slice has enough contiguous memory for all fragments, so copy them
    // directly into it for better performance.
    for (const auto& fragment : fragments) {
      memcpy(mem, fragment.data(), fragment.size()); // NOLINT(safe-memcpy)
      mem += fragment.size();
    }
    back.commit<false>(reservation);
    length_ += total_size_to_copy;
  } else {
    // Fall back to `addImpl` if the back slice does not have enough contiguous memory.
    // TODO(wbpcode): Fill the remaining memory space in the back slice, then
    // allocate enough contiguous memory for the remaining unwritten fragments
    // and copy them directly. This may result in better performance.
    for (const auto& fragment : fragments) {
      addImpl(fragment.data(), fragment.size());
    }
  }

  return total_size_to_copy;
}

} // namespace Buffer
} // namespace Envoy
