Coverage Report

Created: 2024-09-19 09:45

/proc/self/cwd/test/common/buffer/buffer_fuzz.cc

 Line|  Count| Source
    1|       | #include "test/common/buffer/buffer_fuzz.h"
    2|       |
    3|       | #include <fcntl.h>
    4|       |
    5|       | #include "envoy/common/platform.h"
    6|       |
    7|       | #include "source/common/api/os_sys_calls_impl.h"
    8|       | #include "source/common/buffer/buffer_impl.h"
    9|       | #include "source/common/common/assert.h"
   10|       | #include "source/common/common/logger.h"
   11|       | #include "source/common/memory/stats.h"
   12|       | #include "source/common/network/io_socket_handle_impl.h"
   13|       |
   14|       | #include "test/fuzz/utility.h"
   15|       |
   16|       | #include "absl/container/fixed_array.h"
   17|       | #include "absl/strings/match.h"
   18|       | #include "gtest/gtest.h"
   19|       |
   20|       | namespace Envoy {
   21|       |
   22|       | namespace {
   23|       |
   24|       | // The number of buffers tracked. Each buffer fuzzer action references one or
   25|       | // more of these. We don't need many buffers to capture the range of possible
   26|       | // behaviors, but we do need at least two to properly model move operations;
   27|       | // assume 3 for now.
   28|       | constexpr uint32_t BufferCount = 3;
   29|       |
   30|       | // These data are exogenous to the buffer; we don't need to worry about their
   31|       | // deallocation, just keep them around until the fuzz run is over.
   32|       | struct Context {
   33|       |   std::vector<std::unique_ptr<Buffer::BufferFragmentImpl>> fragments_;
   34|       | };
   35|       |
   36|       | // Bound the maximum allocation size per action. We want this to be able to at
   37|       | // least cover the span of multiple internal chunks. It looks like both
   38|       | // the new OwnedImpl and libevent have minimum chunks on the order of a few
   39|       | // kilobytes. This makes sense in general, since you need to minimize data
   40|       | // structure overhead. If we make this number too big, we risk spending a lot
   41|       | // of time in memcpy/memcmp and slowing down the fuzzer execution rate. The
   42|       | // number below is our current best compromise.
   43|       | constexpr uint32_t MaxAllocation = 16 * 1024;
   44|       |
   45|       | // Hard bound on total bytes allocated across the trace.
   46|       | constexpr uint32_t TotalMaxAllocation = 4 * MaxAllocation;
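
Note: the budget arithmetic these two constants imply, restated as static_asserts (illustration only; the values are already fixed above):

    // With MaxAllocation = 16 KiB per action, the hard cap across a trace is
    // 4 * 16 KiB = 64 KiB, and StringBuffer's backing array (below) is twice
    // that, so content can grow toward either edge from the midpoint.
    static_assert(MaxAllocation == 16 * 1024, "per-action bound: 16 KiB");
    static_assert(TotalMaxAllocation == 64 * 1024, "trace-wide bound: 64 KiB");
    static_assert(2 * TotalMaxAllocation == 128 * 1024, "StringBuffer capacity");
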
   47|       |
   48|  24.5k| uint32_t clampSize(uint32_t size, uint32_t max_alloc) {
   49|  24.5k|   return std::min(size, std::min(MaxAllocation, max_alloc));
   50|  24.5k| }
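
Note: a standalone worked example of the clamp above (the hypothetical clampSizeSketch mirrors clampSize(); illustration only):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Standalone copy of clampSize(): min(size, MaxAllocation, max_alloc).
    uint32_t clampSizeSketch(uint32_t size, uint32_t max_alloc) {
      constexpr uint32_t kMaxAllocation = 16 * 1024;
      return std::min(size, std::min(kMaxAllocation, max_alloc));
    }

    int main() {
      assert(clampSizeSketch(1024, 64 * 1024) == 1024);           // requested size wins
      assert(clampSizeSketch(32 * 1024, 64 * 1024) == 16 * 1024); // MaxAllocation wins
      assert(clampSizeSketch(32 * 1024, 4 * 1024) == 4 * 1024);   // remaining budget wins
    }
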
   51|       |
   52|  7.98k| void releaseFragmentAllocation(const void* p, size_t, const Buffer::BufferFragmentImpl*) {
   53|  7.98k|   ::free(const_cast<void*>(p));
   54|  7.98k| }
   55|       |
   56|       | // Test implementation of Buffer. Conceptually, this is just a string that we
   57|       | // can append/prepend to and consume bytes from the front of. However, naive
   58|       | // implementations with std::string involve lots of copying to support this, and
   59|       | // even std::stringbuf doesn't support cheap linearization. Instead we use a
   60|       | // flat array that takes advantage of the fact that the total number of bytes
   61|       | // allocated during fuzzing will be bounded by TotalMaxAllocation.
   62|       | //
   63|       | // The data structure is built around the concept of a large flat array of size
   64|       | // 2 * TotalMaxAllocation, with the initial start position set to the middle.
   65|       | // The goal is to make every mutating operation linear time, including
   66|       | // add() and prepend(), as well as supporting O(1) linearization (critical to
   67|       | // making it cheaper to compare results with the real buffer implementation).
   68|       | // We maintain a (start, length) pair and ensure via assertions that we never
   69|       | // walk off the edge; the caller is expected to guarantee this.
   70|       | class StringBuffer : public Buffer::Instance {
   71|       | public:
   72|      0|   void addDrainTracker(std::function<void()> drain_tracker) override {
   73|       |     // Not implemented well.
   74|      0|     ASSERT(false);
   75|      0|     drain_tracker();
   76|      0|   }
   77|       |
   78|      0|   void bindAccount(Buffer::BufferMemoryAccountSharedPtr) override {
   79|       |     // Not implemented.
   80|      0|     ASSERT(false);
   81|      0|   }
   82|       |
   83|  6.86k|   void add(const void* data, uint64_t size) override {
   84|  6.86k|     FUZZ_ASSERT(start_ + size_ + size <= data_.size());
   85|  6.86k|     ::memcpy(mutableEnd(), data, size);
   86|  6.86k|     size_ += size;
   87|  6.86k|   }
   88|       |
   89|  3.99k|   void addBufferFragment(Buffer::BufferFragment& fragment) override {
   90|  3.99k|     add(fragment.data(), fragment.size());
   91|  3.99k|     fragment.done();
   92|  3.99k|   }
   93|       |
   94|    927|   void add(absl::string_view data) override { add(data.data(), data.size()); }
   95|       |
   96|  1.10k|   void add(const Buffer::Instance& data) override {
   97|  1.10k|     const StringBuffer& src = dynamic_cast<const StringBuffer&>(data);
   98|  1.10k|     add(src.start(), src.size_);
   99|  1.10k|   }
  100|       |
  101|  1.90k|   void prepend(absl::string_view data) override {
  102|  1.90k|     FUZZ_ASSERT(start_ >= data.size());
  103|  1.90k|     start_ -= data.size();
  104|  1.90k|     size_ += data.size();
  105|  1.90k|     ::memcpy(mutableStart(), data.data(), data.size());
  106|  1.90k|   }
  107|       |
  108|    490|   void prepend(Instance& data) override {
  109|    490|     StringBuffer& src = dynamic_cast<StringBuffer&>(data);
  110|    490|     prepend(src.asStringView());
  111|    490|     src.size_ = 0;
  112|    490|   }
  113|       |
  114|    350|   void copyOut(size_t start, uint64_t size, void* data) const override {
  115|    350|     ::memcpy(data, this->start() + start, size);
  116|    350|   }
  117|       |
  118|       |   uint64_t copyOutToSlices(uint64_t length, Buffer::RawSlice* slices,
  119|  1.52k|                            uint64_t num_slices) const override {
  120|  1.52k|     uint64_t size_copied = 0;
  121|  1.52k|     uint64_t num_slices_copied = 0;
  122|  2.36k|     while (size_copied < length && num_slices_copied < num_slices) {
  123|    845|       auto copy_length =
  124|    845|           std::min((length - size_copied), static_cast<uint64_t>(slices[num_slices_copied].len_));
  125|    845|       ::memcpy(slices[num_slices_copied].mem_, this->start() + size_copied, copy_length);
  126|    845|       size_copied += copy_length;
  127|    845|       if (copy_length == slices[num_slices_copied].len_) {
  128|    395|         num_slices_copied++;
  129|    395|       }
  130|    845|     }
  131|  1.52k|     return size_copied;
  132|  1.52k|   }
  133|       |
  134|  1.01k|   void drain(uint64_t size) override {
  135|  1.01k|     FUZZ_ASSERT(size <= size_);
  136|  1.01k|     start_ += size;
  137|  1.01k|     size_ -= size;
  138|  1.01k|   }
  139|       |
  140|       |   Buffer::RawSliceVector
  141|  1.36k|   getRawSlices(absl::optional<uint64_t> max_slices = absl::nullopt) const override {
  142|  1.36k|     ASSERT(!max_slices.has_value() || max_slices.value() >= 1);
  143|  1.36k|     return {{const_cast<char*>(start()), size_}};
  144|  1.36k|   }
  145|       |
  146|      0|   Buffer::RawSlice frontSlice() const override { return {const_cast<char*>(start()), size_}; }
  147|       |
  148|  75.9k|   uint64_t length() const override { return size_; }
  149|       |
  150|  65.3k|   void* linearize(uint32_t /*size*/) override {
  151|       |     // Sketchy, but probably will work for test purposes.
  152|  65.3k|     return mutableStart();
  153|  65.3k|   }
  154|       |
  155|      0|   Buffer::SliceDataPtr extractMutableFrontSlice() override { PANIC("not implemented"); }
  156|       |
  157|    627|   void move(Buffer::Instance& rhs) override { move(rhs, rhs.length()); }
  158|       |
  159|    841|   void move(Buffer::Instance& rhs, uint64_t length) override { move(rhs, length, false); }
  160|       |
  161|    841|   void move(Buffer::Instance& rhs, uint64_t length, bool) override {
  162|    841|     StringBuffer& src = dynamic_cast<StringBuffer&>(rhs);
  163|    841|     add(src.start(), length);
  164|    841|     src.start_ += length;
  165|    841|     src.size_ -= length;
  166|    841|   }
  167|       |
  168|  5.12k|   Buffer::Reservation reserveForRead() override {
  169|  5.12k|     auto reservation = Buffer::Reservation::bufferImplUseOnlyConstruct(*this);
  170|  5.12k|     Buffer::RawSlice slice;
  171|  5.12k|     slice.mem_ = mutableEnd();
  172|  5.12k|     slice.len_ = data_.size() - (start_ + size_);
  173|  5.12k|     reservation.bufferImplUseOnlySlices().push_back(slice);
  174|  5.12k|     reservation.bufferImplUseOnlySetLength(slice.len_);
  175|       |
  176|  5.12k|     return reservation;
  177|  5.12k|   }
  178|       |
  179|    606|   Buffer::ReservationSingleSlice reserveSingleSlice(uint64_t length, bool separate_slice) override {
  180|    606|     ASSERT(!separate_slice);
  181|    606|     FUZZ_ASSERT(start_ + size_ + length <= data_.size());
  182|       |
  183|    606|     auto reservation = Buffer::ReservationSingleSlice::bufferImplUseOnlyConstruct(*this);
  184|    606|     Buffer::RawSlice slice;
  185|    606|     slice.mem_ = mutableEnd();
  186|    606|     slice.len_ = length;
  187|    606|     reservation.bufferImplUseOnlySlice() = slice;
  188|       |
  189|    606|     return reservation;
  190|    606|   }
  191|       |
  192|       |   void commit(uint64_t length, absl::Span<Buffer::RawSlice>,
  193|  5.72k|               Buffer::ReservationSlicesOwnerPtr) override {
  194|  5.72k|     FUZZ_ASSERT(start_ + size_ + length <= data_.size());
  195|  5.72k|     size_ += length;
  196|  5.72k|   }
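
Note: reserveForRead(), reserveSingleSlice(), and commit() above mirror Envoy's two-phase reservation API. A minimal usage sketch against the real Buffer::OwnedImpl, using the same calls the kReserveCommit action below makes (hypothetical reservationSketch name; illustration only):

    #include <cassert>
    #include <cstring>

    #include "source/common/buffer/buffer_impl.h"

    // Reserve space, fill part of it, then commit only the bytes written.
    void reservationSketch() {
      Envoy::Buffer::OwnedImpl buf;
      auto reservation = buf.reserveSingleSlice(1024); // one contiguous slice
      ::memset(reservation.slice().mem_, 'x', 128);    // write the first 128 bytes
      reservation.commit(128);                         // only those become readable
      assert(buf.length() == 128);
    }
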
  197|       |
  198|    872|   ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override {
  199|    872|     UNREFERENCED_PARAMETER(length);
  200|    872|     return asStringView().find({static_cast<const char*>(data), size}, start);
  201|    872|   }
  202|       |
  203|    796|   bool startsWith(absl::string_view data) const override {
  204|    796|     return absl::StartsWith(asStringView(), data);
  205|    796|   }
  206|       |
  207|  5.64k|   std::string toString() const override { return {data_.data() + start_, size_}; }
  208|       |
  209|      0|   size_t addFragments(absl::Span<const absl::string_view> fragments) override {
  210|      0|     size_t total_size_to_write = 0;
  211|       |
  212|      0|     for (const auto& fragment : fragments) {
  213|      0|       total_size_to_write += fragment.size();
  214|      0|       add(fragment.data(), fragment.size());
  215|      0|     }
  216|      0|     return total_size_to_write;
  217|      0|   }
  218|       |
  219|      0|   void setWatermarks(uint32_t, uint32_t) override {
  220|       |     // Not implemented.
  221|       |     // TODO(antoniovicente) Implement and add fuzz coverage as we merge the Buffer::OwnedImpl and
  222|       |     // WatermarkBuffer implementations.
  223|      0|     ASSERT(false);
  224|      0|   }
  225|       |
  226|      0|   uint32_t highWatermark() const override { return 0; }
  227|      0|   bool highWatermarkTriggered() const override { return false; }
  228|       |
  229|  2.15k|   absl::string_view asStringView() const { return {start(), size_}; }
  230|       |
  231|  79.8k|   char* mutableStart() { return data_.data() + start_; }
  232|       |
  233|  6.66k|   const char* start() const { return data_.data() + start_; }
  234|       |
  235|  12.5k|   char* mutableEnd() { return mutableStart() + size_; }
  236|       |
  237|      0|   const char* end() const { return start() + size_; }
  238|       |
  239|       |   std::array<char, 2 * TotalMaxAllocation> data_;
  240|       |   uint32_t start_{TotalMaxAllocation};
  241|       |   uint32_t size_{0};
  242|       | };
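
Note: a stripped-down, standalone sketch of the (start, length) window idea described in the class comment above (hypothetical FlatWindow type, no Envoy dependencies; illustration only):

    #include <array>
    #include <cassert>
    #include <cstring>
    #include <string>

    // A fixed array with the cursor starting at the midpoint, so prepend()
    // (move start left) and append() (grow at start + size) are simple memcpys
    // and the content stays contiguous, i.e. linearization is free.
    struct FlatWindow {
      static constexpr size_t kCapacity = 128;
      std::array<char, 2 * kCapacity> data{};
      size_t start = kCapacity;
      size_t size = 0;

      void append(const std::string& s) {
        assert(start + size + s.size() <= data.size());
        std::memcpy(data.data() + start + size, s.data(), s.size());
        size += s.size();
      }
      void prepend(const std::string& s) {
        assert(start >= s.size());
        start -= s.size();
        std::memcpy(data.data() + start, s.data(), s.size());
        size += s.size();
      }
      void drain(size_t n) { assert(n <= size); start += n; size -= n; }
      std::string str() const { return {data.data() + start, size}; }
    };

    int main() {
      FlatWindow w;
      w.append("world");
      w.prepend("hello ");
      w.drain(6); // consume "hello "
      assert(w.str() == "world");
    }
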
  243|       |
  244|       | using BufferList = std::vector<std::unique_ptr<Buffer::Instance>>;
  245|       |
  246|       | // Process a single buffer operation.
  247|       | uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, BufferList& buffers,
  248|  43.1k|                       const test::common::buffer::Action& action) {
  249|  43.1k|   const uint32_t target_index = action.target_index() % BufferCount;
  250|  43.1k|   Buffer::Instance& target_buffer = *buffers[target_index];
  251|  43.1k|   uint32_t allocated = 0;
  252|       |
  253|  43.1k|   switch (action.action_selector_case()) {
  254|  7.98k|   case test::common::buffer::Action::kAddBufferFragment: {
  255|  7.98k|     const uint32_t size = clampSize(action.add_buffer_fragment(), max_alloc);
  256|  7.98k|     allocated += size;
  257|  7.98k|     void* p = ::malloc(size);
  258|  7.98k|     FUZZ_ASSERT(p != nullptr);
  259|  7.98k|     ::memset(p, insert_value, size);
  260|  7.98k|     auto fragment =
  261|  7.98k|         std::make_unique<Buffer::BufferFragmentImpl>(p, size, releaseFragmentAllocation);
  262|  7.98k|     ctxt.fragments_.emplace_back(std::move(fragment));
  263|  7.98k|     target_buffer.addBufferFragment(*ctxt.fragments_.back());
  264|  7.98k|     break;
  265|  7.98k|   }
  266|  1.85k|   case test::common::buffer::Action::kAddString: {
  267|  1.85k|     const uint32_t size = clampSize(action.add_string(), max_alloc);
  268|  1.85k|     allocated += size;
  269|  1.85k|     const std::string data(size, insert_value);
  270|  1.85k|     target_buffer.add(data);
  271|  1.85k|     break;
  272|  7.98k|   }
  273|  2.21k|   case test::common::buffer::Action::kAddBuffer: {
  274|  2.21k|     const uint32_t source_index = action.add_buffer() % BufferCount;
  275|  2.21k|     if (target_index == source_index) {
  276|      2|       break;
  277|      2|     }
  278|  2.21k|     Buffer::Instance& source_buffer = *buffers[source_index];
  279|  2.21k|     if (source_buffer.length() > max_alloc) {
  280|      0|       break;
  281|      0|     }
  282|  2.21k|     allocated += source_buffer.length();
  283|  2.21k|     target_buffer.add(source_buffer);
  284|  2.21k|     break;
  285|  2.21k|   }
  286|  2.82k|   case test::common::buffer::Action::kPrependString: {
  287|  2.82k|     const uint32_t size = clampSize(action.prepend_string(), max_alloc);
  288|  2.82k|     allocated += size;
  289|  2.82k|     const std::string data(size, insert_value);
  290|  2.82k|     target_buffer.prepend(data);
  291|  2.82k|     break;
  292|  2.21k|   }
  293|    986|   case test::common::buffer::Action::kPrependBuffer: {
  294|    986|     const uint32_t source_index = action.prepend_buffer() % BufferCount;
  295|    986|     if (target_index == source_index) {
  296|      0|       break;
  297|      0|     }
  298|    986|     Buffer::Instance& source_buffer = *buffers[source_index];
  299|    986|     if (source_buffer.length() > max_alloc) {
  300|      6|       break;
  301|      6|     }
  302|    980|     allocated += source_buffer.length();
  303|    980|     target_buffer.prepend(source_buffer);
  304|    980|     break;
  305|    986|   }
  306|  1.23k|   case test::common::buffer::Action::kReserveCommit: {
  307|  1.23k|     const uint32_t reserve_length = clampSize(action.reserve_commit().reserve_length(), max_alloc);
  308|  1.23k|     allocated += reserve_length;
  309|  1.23k|     if (reserve_length == 0) {
  310|      2|       break;
  311|      2|     }
  312|  1.23k|     if (reserve_length < 16384) {
  313|  1.21k|       auto reservation = target_buffer.reserveSingleSlice(reserve_length);
  314|  1.21k|       ::memset(reservation.slice().mem_, insert_value, reservation.slice().len_);
  315|  1.21k|       reservation.commit(
  316|  1.21k|           std::min<uint64_t>(action.reserve_commit().commit_length(), reservation.length()));
  317|  1.21k|     } else {
  318|     20|       Buffer::Reservation reservation = target_buffer.reserveForRead();
  319|    110|       for (uint32_t i = 0; i < reservation.numSlices(); ++i) {
  320|     90|         ::memset(reservation.slices()[i].mem_, insert_value, reservation.slices()[i].len_);
  321|     90|       }
  322|     20|       const uint32_t target_length = clampSize(
  323|     20|           std::min<uint32_t>(reservation.length(), action.reserve_commit().commit_length()),
  324|     20|           reserve_length);
  325|     20|       reservation.commit(target_length);
  326|     20|     }
  327|  1.23k|     break;
  328|  1.23k|   }
  329|    700|   case test::common::buffer::Action::kCopyOut: {
  330|    700|     const uint32_t start =
  331|    700|         std::min(action.copy_out().start(), static_cast<uint32_t>(target_buffer.length()));
  332|    700|     const uint32_t length =
  333|    700|         std::min(static_cast<uint32_t>(target_buffer.length() - start), action.copy_out().length());
  334|       |     // Make this static to avoid potential continuous ASAN-inspired allocation.
  335|    700|     static uint8_t copy_buffer[TotalMaxAllocation];
  336|    700|     target_buffer.copyOut(start, length, copy_buffer);
  337|    700|     const std::string data = target_buffer.toString();
  338|    700|     FUZZ_ASSERT(::memcmp(copy_buffer, data.data() + start, length) == 0);
  339|    700|     break;
  340|    700|   }
  341|  3.04k|   case test::common::buffer::Action::kCopyOutToSlices: {
  342|  3.04k|     const uint32_t length =
  343|  3.04k|         std::min(static_cast<uint32_t>(target_buffer.length()), action.copy_out_to_slices());
  344|  3.04k|     Buffer::OwnedImpl buffer;
  345|  3.04k|     auto reservation = buffer.reserveForRead();
  346|  3.04k|     auto rc = target_buffer.copyOutToSlices(length, reservation.slices(), reservation.numSlices());
  347|  3.04k|     reservation.commit(rc);
  348|  3.04k|     const std::string data = buffer.toString();
  349|  3.04k|     const std::string target_data = target_buffer.toString();
  350|  3.04k|     FUZZ_ASSERT(::memcmp(data.data(), target_data.data(), reservation.length()) == 0);
  351|  3.04k|     break;
  352|  3.04k|   }
  353|  3.04k|   case test::common::buffer::Action::kDrain: {
  354|  1.22k|     const uint32_t previous_length = target_buffer.length();
  355|  1.22k|     const uint32_t drain_length =
  356|  1.22k|         std::min(static_cast<uint32_t>(target_buffer.length()), action.drain());
  357|  1.22k|     target_buffer.drain(drain_length);
  358|  1.22k|     FUZZ_ASSERT(previous_length - drain_length == target_buffer.length());
  359|  1.22k|     break;
  360|  1.22k|   }
  361|  1.28k|   case test::common::buffer::Action::kLinearize: {
  362|  1.28k|     const uint32_t linearize_size =
  363|  1.28k|         std::min(static_cast<uint32_t>(target_buffer.length()), action.linearize());
  364|  1.28k|     target_buffer.linearize(linearize_size);
  365|  1.28k|     break;
  366|  1.22k|   }
  367|  1.69k|   case test::common::buffer::Action::kMove: {
  368|  1.69k|     const uint32_t source_index = action.move().source_index() % BufferCount;
  369|  1.69k|     if (target_index == source_index) {
  370|      4|       break;
  371|      4|     }
  372|  1.68k|     Buffer::Instance& source_buffer = *buffers[source_index];
  373|  1.68k|     if (action.move().length() == 0) {
  374|  1.25k|       if (source_buffer.length() > max_alloc) {
  375|      4|         break;
  376|      4|       }
  377|  1.25k|       allocated += source_buffer.length();
  378|  1.25k|       target_buffer.move(source_buffer);
  379|  1.25k|     } else {
  380|    428|       const uint32_t source_length =
  381|    428|           std::min(static_cast<uint32_t>(source_buffer.length()), action.move().length());
  382|    428|       const uint32_t move_length = clampSize(max_alloc, source_length);
  383|    428|       if (move_length == 0) {
  384|      0|         break;
  385|      0|       }
  386|    428|       target_buffer.move(source_buffer, move_length);
  387|    428|       allocated += move_length;
  388|    428|     }
  389|  1.68k|     break;
  390|  1.68k|   }
  391|  10.2k|   case test::common::buffer::Action::kRead: {
  392|  10.2k|     const uint32_t max_length = clampSize(action.read(), max_alloc);
  393|  10.2k|     allocated += max_length;
  394|  10.2k|     if (max_length == 0) {
  395|      2|       break;
  396|      2|     }
  397|  10.2k|     int fds[2] = {0, 0};
  398|  10.2k|     auto& os_sys_calls = Api::OsSysCallsSingleton::get();
  399|  10.2k|     FUZZ_ASSERT(os_sys_calls.socketpair(AF_UNIX, SOCK_STREAM, 0, fds).return_value_ == 0);
  400|  10.2k|     Network::IoSocketHandleImpl io_handle(fds[0]);
  401|  10.2k|     FUZZ_ASSERT(::fcntl(fds[0], F_SETFL, O_NONBLOCK) == 0);
  402|  10.2k|     FUZZ_ASSERT(::fcntl(fds[1], F_SETFL, O_NONBLOCK) == 0);
  403|  10.2k|     std::string data(max_length, insert_value);
  404|  10.2k|     const ssize_t rc = ::write(fds[1], data.data(), max_length);
  405|  10.2k|     FUZZ_ASSERT(rc > 0);
  406|  10.2k|     Api::IoCallUint64Result result = io_handle.read(target_buffer, max_length);
  407|  10.2k|     FUZZ_ASSERT(result.return_value_ == static_cast<uint64_t>(rc));
  408|  10.2k|     FUZZ_ASSERT(::close(fds[1]) == 0);
  409|  10.2k|     break;
  410|  10.2k|   }
  411|  10.2k|   case test::common::buffer::Action::kWrite: {
  412|  1.38k|     int fds[2] = {0, 0};
  413|  1.38k|     auto& os_sys_calls = Api::OsSysCallsSingleton::get();
  414|  1.38k|     FUZZ_ASSERT(os_sys_calls.socketpair(AF_UNIX, SOCK_STREAM, 0, fds).return_value_ == 0);
  415|  1.38k|     Network::IoSocketHandleImpl io_handle(fds[1]);
  416|  1.38k|     FUZZ_ASSERT(::fcntl(fds[0], F_SETFL, O_NONBLOCK) == 0);
  417|  1.38k|     FUZZ_ASSERT(::fcntl(fds[1], F_SETFL, O_NONBLOCK) == 0);
  418|  1.38k|     uint64_t return_value;
  419|  2.18k|     do {
  420|  2.18k|       const bool empty = target_buffer.length() == 0;
  421|  2.18k|       const std::string previous_data = target_buffer.toString();
  422|  2.18k|       const auto result = io_handle.write(target_buffer);
  423|  2.18k|       FUZZ_ASSERT(result.ok());
  424|  2.18k|       return_value = result.return_value_;
  425|  2.18k|       ENVOY_LOG_MISC(trace, "Write return_value: {} errno: {}", return_value,
  426|  2.18k|                      result.err_ != nullptr ? result.err_->getErrorDetails() : "-");
  427|  2.18k|       if (empty) {
  428|  1.38k|         FUZZ_ASSERT(return_value == 0);
  429|  1.38k|       } else {
  430|    805|         auto buf = std::make_unique<char[]>(return_value);
  431|    805|         FUZZ_ASSERT(static_cast<uint64_t>(::read(fds[0], buf.get(), return_value)) == return_value);
  432|    805|         FUZZ_ASSERT(::memcmp(buf.get(), previous_data.data(), return_value) == 0);
  433|    805|       }
  434|  2.18k|     } while (return_value > 0);
  435|  1.38k|     FUZZ_ASSERT(::close(fds[0]) == 0);
  436|  1.38k|     break;
  437|  1.38k|   }
  438|  1.38k|   case test::common::buffer::Action::kGetRawSlices: {
  439|    274|     const uint64_t slices_needed = target_buffer.getRawSlices().size();
  440|    274|     const uint64_t slices_tested =
  441|    274|         std::min(slices_needed, static_cast<uint64_t>(action.get_raw_slices()));
  442|    274|     if (slices_tested == 0) {
  443|      0|       break;
  444|      0|     }
  445|    274|     Buffer::RawSliceVector raw_slices = target_buffer.getRawSlices(/*max_slices=*/slices_tested);
  446|    274|     const uint64_t slices_obtained = raw_slices.size();
  447|    274|     FUZZ_ASSERT(slices_obtained <= slices_needed);
  448|    274|     uint64_t offset = 0;
  449|    274|     const std::string data = target_buffer.toString();
  450|  1.76k|     for (const auto& raw_slice : raw_slices) {
  451|  1.76k|       FUZZ_ASSERT(::memcmp(raw_slice.mem_, data.data() + offset, raw_slice.len_) == 0);
  452|  1.76k|       offset += raw_slice.len_;
  453|  1.76k|     }
  454|    274|     FUZZ_ASSERT(slices_needed != slices_tested || offset == target_buffer.length());
  455|    274|     break;
  456|    274|   }
  457|  1.74k|   case test::common::buffer::Action::kSearch: {
  458|  1.74k|     const std::string& content = action.search().content();
  459|  1.74k|     const uint32_t offset = action.search().offset();
  460|  1.74k|     const std::string data = target_buffer.toString();
  461|  1.74k|     FUZZ_ASSERT(target_buffer.search(content.data(), content.size(), offset) ==
  462|  1.74k|                 static_cast<ssize_t>(target_buffer.toString().find(content, offset)));
  463|  1.74k|     break;
  464|  1.74k|   }
  465|  1.74k|   case test::common::buffer::Action::kStartsWith: {
  466|  1.59k|     const std::string data = target_buffer.toString();
  467|  1.59k|     FUZZ_ASSERT(target_buffer.startsWith(action.starts_with()) ==
  468|  1.59k|                 (data.find(action.starts_with()) == 0));
  469|  1.59k|     break;
  470|  1.59k|   }
  471|  2.89k|   default:
  472|       |     // Nothing is set in the action oneof; treat as a no-op.
  473|  2.89k|     break;
  474|  43.1k|   }
  475|       |
  476|  43.1k|   return allocated;
  477|  43.1k| }
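
Note: the kRead and kWrite cases above push bytes through a real socketpair so that IoSocketHandleImpl's syscall paths are exercised. The plumbing in isolation (plain POSIX, no Envoy types; illustration only):

    #include <cassert>
    #include <cstring>
    #include <fcntl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Inject bytes into one end of a non-blocking socketpair, read them back
    // from the other end, and check they round-tripped intact.
    int main() {
      int fds[2] = {0, 0};
      int rc = ::socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
      assert(rc == 0);
      rc = ::fcntl(fds[0], F_SETFL, O_NONBLOCK);
      assert(rc == 0);
      rc = ::fcntl(fds[1], F_SETFL, O_NONBLOCK);
      assert(rc == 0);
      const char data[5] = "aaaa"; // 4 payload bytes plus NUL
      const ssize_t written = ::write(fds[1], data, sizeof data);
      assert(written == static_cast<ssize_t>(sizeof data));
      char out[sizeof data] = {};
      const ssize_t got = ::read(fds[0], out, sizeof out);
      assert(got == written);
      assert(::memcmp(out, data, sizeof data) == 0);
      ::close(fds[0]);
      ::close(fds[1]);
    }
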
  478|       |
  479|       | } // namespace
  480|       |
  481|       | void executeActions(const test::common::buffer::BufferFuzzTestCase& input, BufferList& buffers,
  482|  1.98k|                     BufferList& linear_buffers, Context& ctxt) {
  483|       |   // Soft bound on the available memory for allocation to avoid OOMs and
  484|       |   // timeouts.
  485|  1.98k|   uint32_t available_alloc = 2 * MaxAllocation;
  486|  1.98k|   constexpr auto max_actions = 128;
  487|  23.5k|   for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) {
  488|  21.5k|     const char insert_value = 'a' + i % 26;
  489|  21.5k|     const auto& action = input.actions(i);
  490|  21.5k|     ENVOY_LOG_MISC(debug, "Action {}", action.DebugString());
  491|  21.5k|     const uint32_t allocated = bufferAction(ctxt, insert_value, available_alloc, buffers, action);
  492|  21.5k|     const uint32_t linear_allocated =
  493|  21.5k|         bufferAction(ctxt, insert_value, available_alloc, linear_buffers, action);
  494|  21.5k|     FUZZ_ASSERT(allocated == linear_allocated);
  495|  21.5k|     FUZZ_ASSERT(allocated <= available_alloc);
  496|  21.5k|     available_alloc -= allocated;
  497|       |     // When tracing, dump everything.
  498|  86.3k|     for (uint32_t j = 0; j < BufferCount; ++j) {
  499|  64.7k|       ENVOY_LOG_MISC(trace, "Buffer at index {}", j);
  500|  64.7k|       ENVOY_LOG_MISC(trace, "B: {}", buffers[j]->toString());
  501|  64.7k|       ENVOY_LOG_MISC(trace, "L: {}", linear_buffers[j]->toString());
  502|  64.7k|     }
  503|       |     // Verification pass, using only non-mutating methods on the buffers.
  504|  21.5k|     uint64_t current_allocated_bytes = 0;
  505|  86.3k|     for (uint32_t j = 0; j < BufferCount; ++j) {
  506|       |       // As an optimization, since we know that StringBuffer is just going to
  507|       |       // return the pointer into its flat array, we can avoid the
  508|       |       // toString() copy here.
  509|  64.7k|       const uint64_t linear_buffer_length = linear_buffers[j]->length();
  510|       |       // We may have spilled over TotalMaxAllocation at this point. Only compare up to
  511|       |       // TotalMaxAllocation.
  512|  64.7k|       if (absl::string_view(
  513|  64.7k|               static_cast<const char*>(linear_buffers[j]->linearize(linear_buffer_length)),
  514|  64.7k|               linear_buffer_length)
  515|  64.7k|               .compare(buffers[j]->toString().substr(0, TotalMaxAllocation)) != 0) {
  516|      0|         ENVOY_LOG_MISC(debug, "Mismatched buffers at index {}", j);
  517|      0|         ENVOY_LOG_MISC(debug, "B: {}", buffers[j]->toString());
  518|      0|         ENVOY_LOG_MISC(debug, "L: {}", linear_buffers[j]->toString());
  519|      0|         FUZZ_ASSERT(false);
  520|      0|       }
  521|  64.7k|       FUZZ_ASSERT(std::min(TotalMaxAllocation, static_cast<uint32_t>(buffers[j]->length())) ==
  522|  64.7k|                   linear_buffer_length);
  523|  64.7k|       current_allocated_bytes += linear_buffer_length;
  524|  64.7k|     }
  525|  21.5k|     ENVOY_LOG_MISC(debug, "[{} MB allocated total]", current_allocated_bytes / (1024.0 * 1024));
  526|       |     // We bail out if buffers get too big; otherwise we will OOM the sanitizer.
  527|       |     // We can't use Memory::Stats::totalCurrentlyAllocated() here as we don't
  528|       |     // have tcmalloc in ASAN builds, so just do a simple count.
  529|  21.5k|     if (current_allocated_bytes >= TotalMaxAllocation) {
  530|      0|       ENVOY_LOG_MISC(debug, "Terminating early with total buffer length {} to avoid OOM",
  531|      0|                      current_allocated_bytes);
  532|      0|       break;
  533|      0|     }
  534|  21.5k|   }
  535|  1.98k| }
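
Note: executeActions() is a differential-fuzzing loop: the same action is applied to the real implementation and to the simple shadow model, and their observable state is compared after every step. The pattern distilled to a standalone toy (std::string as the "real" type, std::deque<char> as the model; hypothetical names, illustration only):

    #include <cassert>
    #include <deque>
    #include <string>
    #include <vector>

    // Each op mutates both the implementation under test and the trivially
    // correct model; any divergence in observable state is a bug in one of them.
    int main() {
      std::string real;
      std::deque<char> model;
      using Op = void (*)(std::string&, std::deque<char>&);
      const std::vector<Op> trace = {
          [](std::string& r, std::deque<char>& m) { r += "abc"; m.insert(m.end(), {'a', 'b', 'c'}); },
          [](std::string& r, std::deque<char>& m) { r.erase(0, 1); m.pop_front(); },
      };
      for (const Op& op : trace) {
        op(real, model);
        assert(real == std::string(model.begin(), model.end()));
      }
    }
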
  536|       |
  537|  1.98k| void BufferFuzz::bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input) {
  538|  1.98k|   Context ctxt;
  539|       |   // Fuzzed buffers.
  540|  1.98k|   BufferList buffers;
  541|       |   // Shadow buffers based on StringBuffer.
  542|  1.98k|   BufferList linear_buffers;
  543|  7.92k|   for (uint32_t i = 0; i < BufferCount; ++i) {
  544|  5.94k|     buffers.emplace_back(new Buffer::OwnedImpl());
  545|  5.94k|     linear_buffers.emplace_back(new StringBuffer());
  546|  5.94k|   }
  547|  1.98k|   executeActions(input, buffers, linear_buffers, ctxt);
  548|  1.98k| }
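
Note: bufferFuzz() is the harness entry point, driven by a BufferFuzzTestCase proto. Assuming the standard protobuf C++ codegen for that message (setter names inferred from the accessors used in bufferAction(), e.g. set_add_string() alongside add_string(); the generated header path and a static bufferFuzz entry point are also assumptions), a hand-written reproducer could look like:

    #include "test/common/buffer/buffer_fuzz.h"
    #include "test/common/buffer/buffer_fuzz.pb.h" // assumed generated header path

    // Hypothetical reproducer: build a two-action trace and hand it to the
    // harness. Action 0 appends 1 KiB of 'a's to buffer 0; action 1 drains
    // half of it again.
    void runRepro() {
      test::common::buffer::BufferFuzzTestCase input;
      input.add_actions()->set_add_string(1024); // assumed codegen setter
      auto* drain = input.add_actions();
      drain->set_target_index(0);
      drain->set_drain(512);
      Envoy::BufferFuzz::bufferFuzz(input); // assumes a static entry point
    }
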
  549|       |
  550|       | } // namespace Envoy