Coverage Report

Created: 2023-11-12 09:30

/proc/self/cwd/test/common/buffer/buffer_fuzz.cc

#include "test/common/buffer/buffer_fuzz.h"

#include <fcntl.h>

#include "envoy/common/platform.h"

#include "source/common/api/os_sys_calls_impl.h"
#include "source/common/buffer/buffer_impl.h"
#include "source/common/common/assert.h"
#include "source/common/common/logger.h"
#include "source/common/memory/stats.h"
#include "source/common/network/io_socket_handle_impl.h"

#include "test/fuzz/utility.h"

#include "absl/container/fixed_array.h"
#include "absl/strings/match.h"
#include "gtest/gtest.h"

namespace Envoy {

namespace {

// The number of buffers tracked. Each buffer fuzzer action references one or
// more of these. We don't need many buffers to capture the range of possible
// behaviors; at least two are needed to properly model move operations, so we
// use three for now.
constexpr uint32_t BufferCount = 3;

// These data are exogenous to the buffer; we don't need to worry about their
// deallocation, just keep them around until the fuzz run is over.
struct Context {
  std::vector<std::unique_ptr<Buffer::BufferFragmentImpl>> fragments_;
};

// Bound the maximum allocation size per action. We want this to be able to at
// least cover the span of multiple internal chunks. It looks like both the new
// OwnedImpl and libevent have minimum chunk sizes on the order of a few
// kilobytes. This makes sense in general, since you need to minimize data
// structure overhead. If we make this number too big, we risk spending a lot
// of time in memcpy/memcmp and slowing down the fuzzer execution rate. The
// number below is our current best compromise.
constexpr uint32_t MaxAllocation = 16 * 1024;

// Hard bound on the total bytes allocated across the trace.
constexpr uint32_t TotalMaxAllocation = 4 * MaxAllocation;
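
To make the budget concrete, here is a small sanity sketch of how the
constants compose (illustrative only; the values follow directly from the
definitions above):

// 16 KiB may be allocated per action and 64 KiB across a whole trace; the
// StringBuffer shadow below therefore backs itself with a 128 KiB flat array.
static_assert(MaxAllocation == 16 * 1024, "per-action cap");
static_assert(TotalMaxAllocation == 64 * 1024, "per-trace cap");
static_assert(2 * TotalMaxAllocation == 128 * 1024, "StringBuffer backing array size");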

uint32_t clampSize(uint32_t size, uint32_t max_alloc) {
  return std::min(size, std::min(MaxAllocation, max_alloc));
}

void releaseFragmentAllocation(const void* p, size_t, const Buffer::BufferFragmentImpl*) {
  ::free(const_cast<void*>(p));
}
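
releaseFragmentAllocation() is the releasor each externally owned fragment
carries: once the consuming buffer is done with the fragment, done() fires and
the malloc'd block is freed. A minimal sketch of that lifecycle (hypothetical
function and size; the BufferFragmentImpl constructor is the same one used in
the kAddBufferFragment case below):

void fragmentLifecycleSketch() {
  constexpr size_t size = 128; // illustrative size
  void* p = ::malloc(size);
  ::memset(p, 'a', size);
  // The fragment references `p` without copying; the releasor owns cleanup.
  Buffer::BufferFragmentImpl fragment(p, size, releaseFragmentAllocation);
  Buffer::OwnedImpl buffer;
  buffer.addBufferFragment(fragment); // zero-copy append
  buffer.drain(buffer.length());      // done() fires -> releaseFragmentAllocation -> ::free(p)
}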

// Test implementation of Buffer. Conceptually, this is just a string that we
// can append/prepend to and consume bytes from the front of. However, naive
// implementations with std::string involve lots of copying to support this,
// and even std::stringbuf doesn't support cheap linearization. Instead we use
// a flat array that takes advantage of the fact that the total number of bytes
// allocated during fuzzing will be bounded by TotalMaxAllocation.
//
// The data structure is built around the concept of a large flat array of size
// 2 * TotalMaxAllocation, with the initial start position set to the middle.
// The goal is to make every mutating operation linear time, including
// add() and prepend(), as well as supporting O(1) linearization (critical to
// making it cheaper to compare results with the real buffer implementation).
// We maintain a (start, length) pair and ensure via assertions that we never
// walk off the edge; the caller is expected to guarantee this.
class StringBuffer : public Buffer::Instance {
public:
  void addDrainTracker(std::function<void()> drain_tracker) override {
    // Not implemented well.
    ASSERT(false);
    drain_tracker();
  }

  void bindAccount(Buffer::BufferMemoryAccountSharedPtr) override {
    // Not implemented.
    ASSERT(false);
  }

  void add(const void* data, uint64_t size) override {
    FUZZ_ASSERT(start_ + size_ + size <= data_.size());
    ::memcpy(mutableEnd(), data, size);
    size_ += size;
  }

  void addBufferFragment(Buffer::BufferFragment& fragment) override {
    add(fragment.data(), fragment.size());
    fragment.done();
  }

  void add(absl::string_view data) override { add(data.data(), data.size()); }

  void add(const Buffer::Instance& data) override {
    const StringBuffer& src = dynamic_cast<const StringBuffer&>(data);
    add(src.start(), src.size_);
  }

  void prepend(absl::string_view data) override {
    FUZZ_ASSERT(start_ >= data.size());
    start_ -= data.size();
    size_ += data.size();
    ::memcpy(mutableStart(), data.data(), data.size());
  }

  void prepend(Instance& data) override {
    StringBuffer& src = dynamic_cast<StringBuffer&>(data);
    prepend(src.asStringView());
    src.size_ = 0;
  }

  void copyOut(size_t start, uint64_t size, void* data) const override {
    ::memcpy(data, this->start() + start, size);
  }

  uint64_t copyOutToSlices(uint64_t length, Buffer::RawSlice* slices,
                           uint64_t num_slices) const override {
    uint64_t size_copied = 0;
    uint64_t num_slices_copied = 0;
    while (size_copied < length && num_slices_copied < num_slices) {
      auto copy_length =
          std::min((length - size_copied), static_cast<uint64_t>(slices[num_slices_copied].len_));
      ::memcpy(slices[num_slices_copied].mem_, this->start(), copy_length);
      size_copied += copy_length;
      if (copy_length == slices[num_slices_copied].len_) {
        num_slices_copied++;
      }
    }
    return size_copied;
  }

  void drain(uint64_t size) override {
    FUZZ_ASSERT(size <= size_);
    start_ += size;
    size_ -= size;
  }

  Buffer::RawSliceVector
  getRawSlices(absl::optional<uint64_t> max_slices = absl::nullopt) const override {
    ASSERT(!max_slices.has_value() || max_slices.value() >= 1);
    return {{const_cast<char*>(start()), size_}};
  }

  Buffer::RawSlice frontSlice() const override { return {const_cast<char*>(start()), size_}; }

  uint64_t length() const override { return size_; }

  void* linearize(uint32_t /*size*/) override {
    // Sketchy, but probably will work for test purposes.
    return mutableStart();
  }

  Buffer::SliceDataPtr extractMutableFrontSlice() override { PANIC("not implemented"); }

  void move(Buffer::Instance& rhs) override { move(rhs, rhs.length()); }

  void move(Buffer::Instance& rhs, uint64_t length) override { move(rhs, length, false); }

  void move(Buffer::Instance& rhs, uint64_t length, bool) override {
    StringBuffer& src = dynamic_cast<StringBuffer&>(rhs);
    add(src.start(), length);
    src.start_ += length;
    src.size_ -= length;
  }

  Buffer::Reservation reserveForRead() override {
    auto reservation = Buffer::Reservation::bufferImplUseOnlyConstruct(*this);
    Buffer::RawSlice slice;
    slice.mem_ = mutableEnd();
    slice.len_ = data_.size() - (start_ + size_);
    reservation.bufferImplUseOnlySlices().push_back(slice);
    reservation.bufferImplUseOnlySetLength(slice.len_);

    return reservation;
  }

  Buffer::ReservationSingleSlice reserveSingleSlice(uint64_t length, bool separate_slice) override {
    ASSERT(!separate_slice);
    FUZZ_ASSERT(start_ + size_ + length <= data_.size());

    auto reservation = Buffer::ReservationSingleSlice::bufferImplUseOnlyConstruct(*this);
    Buffer::RawSlice slice;
    slice.mem_ = mutableEnd();
    slice.len_ = length;
    reservation.bufferImplUseOnlySlice() = slice;

    return reservation;
  }

  void commit(uint64_t length, absl::Span<Buffer::RawSlice>,
              Buffer::ReservationSlicesOwnerPtr) override {
    FUZZ_ASSERT(start_ + size_ + length <= data_.size());
    size_ += length;
  }

  ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override {
    UNREFERENCED_PARAMETER(length);
    return asStringView().find({static_cast<const char*>(data), size}, start);
  }

  bool startsWith(absl::string_view data) const override {
    return absl::StartsWith(asStringView(), data);
  }

  std::string toString() const override { return {data_.data() + start_, size_}; }

  size_t addFragments(absl::Span<const absl::string_view> fragments) override {
    size_t total_size_to_write = 0;

    for (const auto& fragment : fragments) {
      total_size_to_write += fragment.size();
      add(fragment.data(), fragment.size());
    }
    return total_size_to_write;
  }

  void setWatermarks(uint32_t, uint32_t) override {
    // Not implemented.
    // TODO(antoniovicente) Implement and add fuzz coverage as we merge the Buffer::OwnedImpl and
    // WatermarkBuffer implementations.
    ASSERT(false);
  }

  uint32_t highWatermark() const override { return 0; }
  bool highWatermarkTriggered() const override { return false; }

  absl::string_view asStringView() const { return {start(), size_}; }

  char* mutableStart() { return data_.data() + start_; }

  const char* start() const { return data_.data() + start_; }

  char* mutableEnd() { return mutableStart() + size_; }

  const char* end() const { return start() + size_; }

  std::array<char, 2 * TotalMaxAllocation> data_;
  uint32_t start_{TotalMaxAllocation};
  uint32_t size_{0};
};
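
The midpoint-cursor layout above is easy to demonstrate in isolation. Here is
a minimal standalone sketch (plain C++, illustrative names, no Envoy types) of
why add(), prepend(), and drain() are all a bounded memcpy or pointer bump,
and why linearization is free:

#include <array>
#include <cassert>
#include <cstring>
#include <string_view>

// Flat array of 2 * Capacity with the cursor starting at the midpoint, so the
// front can grow Capacity bytes leftward and the back Capacity bytes rightward.
struct MiniFlatBuffer {
  static constexpr size_t Capacity = 64;
  std::array<char, 2 * Capacity> data_{};
  size_t start_{Capacity};
  size_t size_{0};

  void add(std::string_view s) { // append at the end
    assert(start_ + size_ + s.size() <= data_.size());
    std::memcpy(data_.data() + start_ + size_, s.data(), s.size());
    size_ += s.size();
  }
  void prepend(std::string_view s) { // grow toward the front
    assert(start_ >= s.size());
    start_ -= s.size();
    std::memcpy(data_.data() + start_, s.data(), s.size());
    size_ += s.size();
  }
  void drain(size_t n) { // consume from the front: just move the cursor
    assert(n <= size_);
    start_ += n;
    size_ -= n;
  }
  // "Linearization" is free: the contents are already contiguous.
  std::string_view view() const { return {data_.data() + start_, size_}; }
};

// Usage: add("world"), then prepend("hello "), then drain(6) leaves
// view() == "world".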

using BufferList = std::vector<std::unique_ptr<Buffer::Instance>>;

// Process a single buffer operation.
uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, BufferList& buffers,
                      const test::common::buffer::Action& action) {
  const uint32_t target_index = action.target_index() % BufferCount;
  Buffer::Instance& target_buffer = *buffers[target_index];
  uint32_t allocated = 0;

  switch (action.action_selector_case()) {
  case test::common::buffer::Action::kAddBufferFragment: {
    const uint32_t size = clampSize(action.add_buffer_fragment(), max_alloc);
    allocated += size;
    void* p = ::malloc(size);
    FUZZ_ASSERT(p != nullptr);
    ::memset(p, insert_value, size);
    auto fragment =
        std::make_unique<Buffer::BufferFragmentImpl>(p, size, releaseFragmentAllocation);
    ctxt.fragments_.emplace_back(std::move(fragment));
    target_buffer.addBufferFragment(*ctxt.fragments_.back());
    break;
  }
  case test::common::buffer::Action::kAddString: {
    const uint32_t size = clampSize(action.add_string(), max_alloc);
    allocated += size;
    const std::string data(size, insert_value);
    target_buffer.add(data);
    break;
  }
  case test::common::buffer::Action::kAddBuffer: {
    const uint32_t source_index = action.add_buffer() % BufferCount;
    if (target_index == source_index) {
      break;
    }
    Buffer::Instance& source_buffer = *buffers[source_index];
    if (source_buffer.length() > max_alloc) {
      break;
    }
    allocated += source_buffer.length();
    target_buffer.add(source_buffer);
    break;
  }
  case test::common::buffer::Action::kPrependString: {
    const uint32_t size = clampSize(action.prepend_string(), max_alloc);
    allocated += size;
    const std::string data(size, insert_value);
    target_buffer.prepend(data);
    break;
  }
  case test::common::buffer::Action::kPrependBuffer: {
    const uint32_t source_index = action.prepend_buffer() % BufferCount;
    if (target_index == source_index) {
      break;
    }
    Buffer::Instance& source_buffer = *buffers[source_index];
    if (source_buffer.length() > max_alloc) {
      break;
    }
    allocated += source_buffer.length();
    target_buffer.prepend(source_buffer);
    break;
  }
  case test::common::buffer::Action::kReserveCommit: {
    const uint32_t reserve_length = clampSize(action.reserve_commit().reserve_length(), max_alloc);
    allocated += reserve_length;
    if (reserve_length == 0) {
      break;
    }
    if (reserve_length < 16384) {
      auto reservation = target_buffer.reserveSingleSlice(reserve_length);
      ::memset(reservation.slice().mem_, insert_value, reservation.slice().len_);
      reservation.commit(
          std::min<uint64_t>(action.reserve_commit().commit_length(), reservation.length()));
    } else {
      Buffer::Reservation reservation = target_buffer.reserveForRead();
      for (uint32_t i = 0; i < reservation.numSlices(); ++i) {
        ::memset(reservation.slices()[i].mem_, insert_value, reservation.slices()[i].len_);
      }
      const uint32_t target_length = clampSize(
          std::min<uint32_t>(reservation.length(), action.reserve_commit().commit_length()),
          reserve_length);
      reservation.commit(target_length);
    }
    break;
  }
  case test::common::buffer::Action::kCopyOut: {
    const uint32_t start =
        std::min(action.copy_out().start(), static_cast<uint32_t>(target_buffer.length()));
    const uint32_t length =
        std::min(static_cast<uint32_t>(target_buffer.length() - start), action.copy_out().length());
    // Make this static to avoid potential continuous ASAN-inspired allocation.
    static uint8_t copy_buffer[TotalMaxAllocation];
    target_buffer.copyOut(start, length, copy_buffer);
    const std::string data = target_buffer.toString();
    FUZZ_ASSERT(::memcmp(copy_buffer, data.data() + start, length) == 0);
    break;
  }
  case test::common::buffer::Action::kCopyOutToSlices: {
    const uint32_t length =
        std::min(static_cast<uint32_t>(target_buffer.length()), action.copy_out_to_slices());
    Buffer::OwnedImpl buffer;
    auto reservation = buffer.reserveForRead();
    auto rc = target_buffer.copyOutToSlices(length, reservation.slices(), reservation.numSlices());
    reservation.commit(rc);
    const std::string data = buffer.toString();
    const std::string target_data = target_buffer.toString();
    FUZZ_ASSERT(::memcmp(data.data(), target_data.data(), reservation.length()) == 0);
    break;
  }
  case test::common::buffer::Action::kDrain: {
    const uint32_t previous_length = target_buffer.length();
    const uint32_t drain_length =
        std::min(static_cast<uint32_t>(target_buffer.length()), action.drain());
    target_buffer.drain(drain_length);
    FUZZ_ASSERT(previous_length - drain_length == target_buffer.length());
    break;
  }
  case test::common::buffer::Action::kLinearize: {
    const uint32_t linearize_size =
        std::min(static_cast<uint32_t>(target_buffer.length()), action.linearize());
    target_buffer.linearize(linearize_size);
    break;
  }
  case test::common::buffer::Action::kMove: {
    const uint32_t source_index = action.move().source_index() % BufferCount;
    if (target_index == source_index) {
      break;
    }
    Buffer::Instance& source_buffer = *buffers[source_index];
    if (action.move().length() == 0) {
      if (source_buffer.length() > max_alloc) {
        break;
      }
      allocated += source_buffer.length();
      target_buffer.move(source_buffer);
    } else {
      const uint32_t source_length =
          std::min(static_cast<uint32_t>(source_buffer.length()), action.move().length());
      const uint32_t move_length = clampSize(max_alloc, source_length);
      if (move_length == 0) {
        break;
      }
      target_buffer.move(source_buffer, move_length);
      allocated += move_length;
    }
    break;
  }
  case test::common::buffer::Action::kRead: {
    const uint32_t max_length = clampSize(action.read(), max_alloc);
    allocated += max_length;
    if (max_length == 0) {
      break;
    }
    int fds[2] = {0, 0};
    auto& os_sys_calls = Api::OsSysCallsSingleton::get();
    FUZZ_ASSERT(os_sys_calls.socketpair(AF_UNIX, SOCK_STREAM, 0, fds).return_value_ == 0);
    Network::IoSocketHandleImpl io_handle(fds[0]);
    FUZZ_ASSERT(::fcntl(fds[0], F_SETFL, O_NONBLOCK) == 0);
    FUZZ_ASSERT(::fcntl(fds[1], F_SETFL, O_NONBLOCK) == 0);
    std::string data(max_length, insert_value);
    const ssize_t rc = ::write(fds[1], data.data(), max_length);
    FUZZ_ASSERT(rc > 0);
    Api::IoCallUint64Result result = io_handle.read(target_buffer, max_length);
    FUZZ_ASSERT(result.return_value_ == static_cast<uint64_t>(rc));
    FUZZ_ASSERT(::close(fds[1]) == 0);
    break;
  }
  case test::common::buffer::Action::kWrite: {
    int fds[2] = {0, 0};
    auto& os_sys_calls = Api::OsSysCallsSingleton::get();
    FUZZ_ASSERT(os_sys_calls.socketpair(AF_UNIX, SOCK_STREAM, 0, fds).return_value_ == 0);
    Network::IoSocketHandleImpl io_handle(fds[1]);
    FUZZ_ASSERT(::fcntl(fds[0], F_SETFL, O_NONBLOCK) == 0);
    FUZZ_ASSERT(::fcntl(fds[1], F_SETFL, O_NONBLOCK) == 0);
    uint64_t return_value;
    do {
      const bool empty = target_buffer.length() == 0;
      const std::string previous_data = target_buffer.toString();
      const auto result = io_handle.write(target_buffer);
      FUZZ_ASSERT(result.ok());
      return_value = result.return_value_;
      ENVOY_LOG_MISC(trace, "Write return_value: {} errno: {}", return_value,
                     result.err_ != nullptr ? result.err_->getErrorDetails() : "-");
      if (empty) {
        FUZZ_ASSERT(return_value == 0);
      } else {
        auto buf = std::make_unique<char[]>(return_value);
        FUZZ_ASSERT(static_cast<uint64_t>(::read(fds[0], buf.get(), return_value)) == return_value);
        FUZZ_ASSERT(::memcmp(buf.get(), previous_data.data(), return_value) == 0);
      }
    } while (return_value > 0);
    FUZZ_ASSERT(::close(fds[0]) == 0);
    break;
  }
  case test::common::buffer::Action::kGetRawSlices: {
    const uint64_t slices_needed = target_buffer.getRawSlices().size();
    const uint64_t slices_tested =
        std::min(slices_needed, static_cast<uint64_t>(action.get_raw_slices()));
    if (slices_tested == 0) {
      break;
    }
    Buffer::RawSliceVector raw_slices = target_buffer.getRawSlices(/*max_slices=*/slices_tested);
    const uint64_t slices_obtained = raw_slices.size();
    FUZZ_ASSERT(slices_obtained <= slices_needed);
    uint64_t offset = 0;
    const std::string data = target_buffer.toString();
    for (const auto& raw_slice : raw_slices) {
      FUZZ_ASSERT(::memcmp(raw_slice.mem_, data.data() + offset, raw_slice.len_) == 0);
      offset += raw_slice.len_;
    }
    FUZZ_ASSERT(slices_needed != slices_tested || offset == target_buffer.length());
    break;
  }
  case test::common::buffer::Action::kSearch: {
    const std::string& content = action.search().content();
    const uint32_t offset = action.search().offset();
    const std::string data = target_buffer.toString();
    FUZZ_ASSERT(target_buffer.search(content.data(), content.size(), offset) ==
                static_cast<ssize_t>(target_buffer.toString().find(content, offset)));
    break;
  }
  case test::common::buffer::Action::kStartsWith: {
    const std::string data = target_buffer.toString();
    FUZZ_ASSERT(target_buffer.startsWith(action.starts_with()) ==
                (data.find(action.starts_with()) == 0));
    break;
  }
  default:
    // Maybe nothing is set?
    break;
  }

  return allocated;
}
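
The kRead and kWrite cases above exercise the buffer through real socket I/O:
a non-blocking AF_UNIX socketpair, with known bytes written into one end and
read back (or written out and compared) on the other. A plain-POSIX sketch of
that round trip, stripped of the Envoy IoHandle wrapper (illustrative only):

#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cassert>
#include <cstring>

void socketpairRoundTripSketch() {
  int fds[2] = {0, 0};
  assert(::socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
  // Non-blocking, so a fuzzer action can never hang on a full or empty pipe.
  assert(::fcntl(fds[0], F_SETFL, O_NONBLOCK) == 0);
  assert(::fcntl(fds[1], F_SETFL, O_NONBLOCK) == 0);
  const char data[] = "payload";
  assert(::write(fds[1], data, sizeof data) == static_cast<ssize_t>(sizeof data));
  char out[sizeof data];
  assert(::read(fds[0], out, sizeof out) == static_cast<ssize_t>(sizeof out));
  assert(::memcmp(data, out, sizeof data) == 0); // bytes survive the round trip
  assert(::close(fds[0]) == 0);
  assert(::close(fds[1]) == 0);
}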

} // namespace

void executeActions(const test::common::buffer::BufferFuzzTestCase& input, BufferList& buffers,
                    BufferList& linear_buffers, Context& ctxt) {
  // Soft bound on the available memory for allocation to avoid OOMs and
  // timeouts.
  uint32_t available_alloc = 2 * MaxAllocation;
  constexpr auto max_actions = 128;
  for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) {
    const char insert_value = 'a' + i % 26;
    const auto& action = input.actions(i);
    ENVOY_LOG_MISC(debug, "Action {}", action.DebugString());
    const uint32_t allocated = bufferAction(ctxt, insert_value, available_alloc, buffers, action);
    const uint32_t linear_allocated =
        bufferAction(ctxt, insert_value, available_alloc, linear_buffers, action);
    FUZZ_ASSERT(allocated == linear_allocated);
    FUZZ_ASSERT(allocated <= available_alloc);
    available_alloc -= allocated;
    // When tracing, dump everything.
    for (uint32_t j = 0; j < BufferCount; ++j) {
      ENVOY_LOG_MISC(trace, "Buffer at index {}", j);
      ENVOY_LOG_MISC(trace, "B: {}", buffers[j]->toString());
      ENVOY_LOG_MISC(trace, "L: {}", linear_buffers[j]->toString());
    }
    // Verification pass; use only non-mutating methods on the buffers.
    uint64_t current_allocated_bytes = 0;
    for (uint32_t j = 0; j < BufferCount; ++j) {
      // As an optimization, since we know that StringBuffer is just going to
      // return the pointer to its internal flat array, we can avoid the
      // toString() copy here.
      const uint64_t linear_buffer_length = linear_buffers[j]->length();
      // We may have spilled over TotalMaxAllocation at this point. Only compare up to
      // TotalMaxAllocation.
      if (absl::string_view(
              static_cast<const char*>(linear_buffers[j]->linearize(linear_buffer_length)),
              linear_buffer_length)
              .compare(buffers[j]->toString().substr(0, TotalMaxAllocation)) != 0) {
        ENVOY_LOG_MISC(debug, "Mismatched buffers at index {}", j);
        ENVOY_LOG_MISC(debug, "B: {}", buffers[j]->toString());
        ENVOY_LOG_MISC(debug, "L: {}", linear_buffers[j]->toString());
        FUZZ_ASSERT(false);
      }
      FUZZ_ASSERT(std::min(TotalMaxAllocation, static_cast<uint32_t>(buffers[j]->length())) ==
                  linear_buffer_length);
      current_allocated_bytes += linear_buffer_length;
    }
    ENVOY_LOG_MISC(debug, "[{} MB allocated total]", current_allocated_bytes / (1024.0 * 1024));
    // Bail out if the buffers get too big; otherwise we will OOM the sanitizer.
    // We can't use Memory::Stats::totalCurrentlyAllocated() here as we don't
    // have tcmalloc in ASAN builds, so just do a simple count.
    if (current_allocated_bytes >= TotalMaxAllocation) {
      ENVOY_LOG_MISC(debug, "Terminating early with total buffer length {} to avoid OOM",
                     current_allocated_bytes);
      break;
    }
  }
}
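
Boiled down, executeActions() is a differential-fuzzing loop with an
allocation budget: every action is applied to both the real OwnedImpl and the
StringBuffer shadow, the two must report identical allocations within the
remaining budget, and the trace stops early once the buffers hold
TotalMaxAllocation bytes. A condensed sketch of just that control flow
(applyBoth() and totalBufferedBytes() are hypothetical stand-ins for the calls
above):

// Hypothetical helpers: run one action on both buffer lists and assert the
// states match; report the bytes currently held across all shadow buffers.
uint32_t applyBoth(const test::common::buffer::Action& action, uint32_t budget);
uint64_t totalBufferedBytes();

void budgetLoopSketch(const test::common::buffer::BufferFuzzTestCase& input) {
  uint32_t budget = 2 * MaxAllocation; // soft per-trace allocation budget
  for (const auto& action : input.actions()) {
    const uint32_t allocated = applyBoth(action, budget);
    FUZZ_ASSERT(allocated <= budget); // every action must respect the budget
    budget -= allocated;
    if (totalBufferedBytes() >= TotalMaxAllocation) {
      break; // bail before the sanitizer OOMs
    }
  }
}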

void BufferFuzz::bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input) {
  Context ctxt;
  // Fuzzed buffers.
  BufferList buffers;
  // Shadow buffers based on StringBuffer.
  BufferList linear_buffers;
  for (uint32_t i = 0; i < BufferCount; ++i) {
    buffers.emplace_back(new Buffer::OwnedImpl());
    linear_buffers.emplace_back(new StringBuffer());
  }
  executeActions(input, buffers, linear_buffers, ctxt);
}

} // namespace Envoy
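
For context, bufferFuzz() is the library entry point; the fuzz engine reaches
it through a small proto-fuzzer driver that, in Envoy's usual layout, lives in
a separate test file. A sketch of that wiring (assumed to follow the standard
DEFINE_PROTO_FUZZER pattern; file names are illustrative):

#include "test/common/buffer/buffer_fuzz.h"
#include "test/fuzz/fuzz_runner.h"

namespace Envoy {

// Each fuzz input is a BufferFuzzTestCase proto: a sequence of Actions that
// the harness replays against the real and shadow buffers.
DEFINE_PROTO_FUZZER(const test::common::buffer::BufferFuzzTestCase& input) {
  BufferFuzz::bufferFuzz(input);
}

} // namespace Envoy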