/src/LPM/external.protobuf/include/google/protobuf/serial_arena.h
Line | Count | Source |
1 | | // Protocol Buffers - Google's data interchange format |
2 | | // Copyright 2022 Google Inc. All rights reserved. |
3 | | // |
4 | | // Use of this source code is governed by a BSD-style |
5 | | // license that can be found in the LICENSE file or at |
6 | | // https://developers.google.com/open-source/licenses/bsd |
7 | | // |
8 | | // This file defines the internal class SerialArena |
9 | | |
10 | | #ifndef GOOGLE_PROTOBUF_SERIAL_ARENA_H__ |
11 | | #define GOOGLE_PROTOBUF_SERIAL_ARENA_H__ |
12 | | |
13 | | #include <algorithm> |
14 | | #include <atomic> |
15 | | #include <cstddef> |
16 | | #include <cstdint> |
17 | | #include <string> |
18 | | #include <vector> |
19 | | |
20 | | #include "absl/base/attributes.h" |
21 | | #include "absl/base/optimization.h" |
22 | | #include "absl/base/prefetch.h" |
23 | | #include "absl/log/absl_check.h" |
24 | | #include "absl/numeric/bits.h" |
25 | | #include "google/protobuf/arena_align.h" |
26 | | #include "google/protobuf/arena_cleanup.h" |
27 | | #include "google/protobuf/port.h" |
28 | | #include "google/protobuf/string_block.h" |
29 | | |
30 | | // Must be included last. |
31 | | #include "google/protobuf/port_def.inc" |
32 | | |
33 | | namespace google { |
34 | | namespace protobuf { |
35 | | namespace internal { |
36 | | |
37 | | // Arena blocks are variable-length malloc-ed objects. The following structure |
38 | | // describes the common header for all blocks. |
39 | | struct ArenaBlock { |
40 | | // Constructor for the zero-size sentry block, where ptr_, limit_, and |
41 | | // cleanup_nodes all point to "this". |
42 | | constexpr ArenaBlock() |
43 | 0 | : next(nullptr), cleanup_nodes(this), size(0) {} |
44 | | |
45 | | ArenaBlock(ArenaBlock* next, size_t size) |
46 | 0 | : next(next), cleanup_nodes(nullptr), size(size) { |
47 | 0 | ABSL_DCHECK_GT(size, sizeof(ArenaBlock)); |
48 | 0 | } |
49 | | |
50 | 0 | char* Pointer(size_t n) { |
51 | 0 | ABSL_DCHECK_LE(n, size); |
52 | 0 | return reinterpret_cast<char*>(this) + n; |
53 | 0 | } |
54 | 0 | char* Limit() { return Pointer(size & static_cast<size_t>(-8)); } |
55 | | |
56 | 0 | bool IsSentry() const { return size == 0; } |
57 | | |
58 | | ArenaBlock* const next; |
59 | | void* cleanup_nodes; |
60 | | const size_t size; |
61 | | // data follows |
62 | | }; |
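
Limit() above masks the block size down to a multiple of 8: `size & static_cast<size_t>(-8)` clears the low three bits, so the usable range never extends past the last 8-byte-aligned offset inside the block. A minimal standalone check of that mask; RoundDown8 is a hypothetical helper, not part of this header.

#include <cstddef>

// Round n down to a multiple of 8, mirroring the mask in ArenaBlock::Limit().
constexpr std::size_t RoundDown8(std::size_t n) {
  return n & static_cast<std::size_t>(-8);
}

static_assert(RoundDown8(64) == 64);  // already a multiple of 8
static_assert(RoundDown8(71) == 64);  // low three bits cleared
static_assert(RoundDown8(72) == 72);  // next multiple of 8
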
63 | | |
64 | | enum class AllocationClient { kDefault, kArray }; |
65 | | |
66 | | class ThreadSafeArena; |
67 | | |
68 | | // Tag type used to invoke the constructor of the first SerialArena. |
69 | | struct FirstSerialArena { |
70 | | explicit FirstSerialArena() = default; |
71 | | }; |
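
FirstSerialArena is an empty tag type: passing a value of it selects the dedicated constructor overload for the first SerialArena (declared further down) without carrying any runtime data. A generic sketch of the same tag-dispatch idiom, with hypothetical FirstTag and Widget names that are not part of protobuf.

#include <iostream>

struct FirstTag {
  explicit FirstTag() = default;
};

class Widget {
 public:
  Widget() { std::cout << "ordinary widget\n"; }
  explicit Widget(FirstTag) { std::cout << "first widget, special setup\n"; }
};

int main() {
  Widget ordinary;           // default constructor
  Widget first(FirstTag{});  // tag argument selects the special overload
  return 0;
}
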
72 | | |
73 | | // A simple arena allocator. Calls to allocate functions must be properly |
74 | | // serialized by the caller, hence this class cannot be used as a general |
75 | | // purpose allocator in a multi-threaded program. It serves as a building block |
76 | | // for ThreadSafeArena, which provides a thread-safe arena allocator. |
77 | | // |
78 | | // This class manages |
79 | | // 1) Arena bump allocation + owning memory blocks. |
80 | | // 2) Maintaining a cleanup list. |
81 | | // It delegates the actual memory allocation back to ThreadSafeArena, which |
82 | | // holds the information on the block growth policy and the backing memory |
83 | | // allocator used. |
84 | | class PROTOBUF_EXPORT SerialArena { |
85 | | public: |
86 | | static constexpr size_t kBlockHeaderSize = |
87 | | ArenaAlignDefault::Ceil(sizeof(ArenaBlock)); |
88 | | |
89 | | void CleanupList(); |
90 | 0 | uint64_t SpaceAllocated() const { |
91 | 0 | return space_allocated_.load(std::memory_order_relaxed); |
92 | 0 | } |
93 | | uint64_t SpaceUsed() const; |
94 | | |
95 | | // See comments on `cached_blocks_` member for details. |
96 | 0 | PROTOBUF_ALWAYS_INLINE void* TryAllocateFromCachedBlock(size_t size) { |
97 | 0 | if (PROTOBUF_PREDICT_FALSE(size < 16)) return nullptr; |
98 | 0 | // We round up to the next larger block in case the memory doesn't match |
99 | 0 | // the pattern we are looking for. |
100 | 0 | const size_t index = absl::bit_width(size - 1) - 4; |
101 | 0 | |
102 | 0 | if (PROTOBUF_PREDICT_FALSE(index >= cached_block_length_)) return nullptr; |
103 | 0 | auto& cached_head = cached_blocks_[index]; |
104 | 0 | if (cached_head == nullptr) return nullptr; |
105 | 0 | |
106 | 0 | void* ret = cached_head; |
107 | 0 | PROTOBUF_UNPOISON_MEMORY_REGION(ret, size); |
108 | 0 | cached_head = cached_head->next; |
109 | 0 | return ret; |
110 | 0 | } |
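
The index computation above rounds a request up to a power-of-two size class: a 16-byte request maps to slot 0, sizes in (16, 32] to slot 1, (32, 64] to slot 2, and so on, because `absl::bit_width(size - 1) - 4` is the number of bits needed for size - 1, minus four. A standalone check of that mapping; RoundUpIndex is a hypothetical helper, and C++20 std::bit_width stands in for absl::bit_width.

#include <bit>
#include <cstddef>

// Size-class index used when requesting a cached block: round `size` up to
// the next power of two >= 16 and number the classes 16 -> 0, 32 -> 1, ...
constexpr std::size_t RoundUpIndex(std::size_t size) {
  return static_cast<std::size_t>(std::bit_width(size - 1)) - 4;
}

static_assert(RoundUpIndex(16) == 0);
static_assert(RoundUpIndex(17) == 1);  // rounds up to 32
static_assert(RoundUpIndex(32) == 1);
static_assert(RoundUpIndex(33) == 2);  // rounds up to 64
static_assert(RoundUpIndex(64) == 2);
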
111 | | |
112 | | // In kArray mode we look through cached blocks. |
113 | | // We do not do this by default because most non-array allocations will not |
114 | | // have the right size and will fail to find an appropriate cached block. |
115 | | // |
116 | | // TODO: Evaluate if we should use cached blocks for message types of |
117 | | // the right size. We can statically know if the allocation size can benefit |
118 | | // from it. |
119 | | template <AllocationClient alloc_client = AllocationClient::kDefault> |
120 | | void* AllocateAligned(size_t n) { |
121 | | ABSL_DCHECK(internal::ArenaAlignDefault::IsAligned(n)); |
122 | | ABSL_DCHECK_GE(limit_, ptr()); |
123 | | |
124 | | if (alloc_client == AllocationClient::kArray) { |
125 | | if (void* res = TryAllocateFromCachedBlock(n)) { |
126 | | return res; |
127 | | } |
128 | | } |
129 | | |
130 | | void* ptr; |
131 | | if (PROTOBUF_PREDICT_TRUE(MaybeAllocateAligned(n, &ptr))) { |
132 | | return ptr; |
133 | | } |
134 | | return AllocateAlignedFallback(n); |
135 | | } |
136 | | |
137 | | private: |
138 | | static inline PROTOBUF_ALWAYS_INLINE constexpr size_t AlignUpTo(size_t n, |
139 | 0 | size_t a) { |
140 | 0 | // We waste up to align - 8 bytes by over-allocating, compared to a |
141 | 0 | // dedicated function that takes the current alignment into consideration. |
142 | 0 | // Such a scheme would only waste (align - 8)/2 bytes on average, but it |
143 | 0 | // requires a dedicated entry point in the out-of-line arena allocation |
144 | 0 | // functions. Possibly re-evaluate the tradeoff later. |
145 | 0 | return a <= 8 ? ArenaAlignDefault::Ceil(n) : ArenaAlignAs(a).Padded(n); |
146 | 0 | } |
147 | | |
148 | 0 | static inline PROTOBUF_ALWAYS_INLINE void* AlignTo(void* p, size_t a) { |
149 | 0 | return (a <= ArenaAlignDefault::align) |
150 | 0 | ? ArenaAlignDefault::CeilDefaultAligned(p) |
151 | 0 | : ArenaAlignAs(a).CeilDefaultAligned(p); |
152 | 0 | } |
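
For alignments above the default 8, AlignUpTo reserves a padded size so that an 8-aligned bump pointer can always be advanced to an `a`-aligned address inside the reservation. The sketch below assumes, as the "align - 8 bytes" comment above suggests, that the padded size is n + a - 8; PaddedSize and AlignUp are hypothetical helpers, not the arena_align.h API.

#include <cstddef>
#include <cstdint>

// Reserve n + a - 8 bytes for an allocation of n bytes at alignment a
// (a power of two greater than 8), then round the 8-aligned bump pointer up.
constexpr std::size_t PaddedSize(std::size_t n, std::size_t a) {
  return n + a - 8;
}
constexpr std::uintptr_t AlignUp(std::uintptr_t p, std::size_t a) {
  return (p + a - 1) & ~(static_cast<std::uintptr_t>(a) - 1);
}

// Worst case: an 8-aligned address needs a - 8 bytes of padding to reach the
// next a-aligned address, so the padded reservation always fits the object.
static_assert(PaddedSize(24, 64) == 80);
static_assert(AlignUp(8, 64) == 64);                           // 56 bytes of padding
static_assert(AlignUp(64, 64) == 64);                          // no padding needed
static_assert(AlignUp(8, 64) + 24 <= 8 + PaddedSize(24, 64));  // still in bounds
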
153 | | |
154 | | // See comments on `cached_blocks_` member for details. |
155 | 0 | void ReturnArrayMemory(void* p, size_t size) { |
156 | 0 | // We only need to check this on 32-bit platforms. |
157 | 0 | // On 64-bit platforms the minimum allocation size from Repeated*Field is |
158 | 0 | // guaranteed to be 16. |
159 | 0 | if (sizeof(void*) < 8) { |
160 | 0 | if (PROTOBUF_PREDICT_FALSE(size < 16)) return; |
161 | 0 | } else { |
162 | 0 | PROTOBUF_ASSUME(size >= 16); |
163 | 0 | } |
164 | 0 | |
165 | 0 | // We round down to the next smaller block in case the memory doesn't match |
166 | 0 | // the pattern we are looking for, e.g., someone might have called Reserve() |
167 | 0 | // on the repeated field. |
168 | 0 | const size_t index = absl::bit_width(size) - 5; |
169 | 0 | |
170 | 0 | if (PROTOBUF_PREDICT_FALSE(index >= cached_block_length_)) { |
171 | 0 | // We can't put this object on the freelist, so make this object the new |
172 | 0 | // freelist array. It is guaranteed to be larger than the one we have, and |
173 | 0 | // large enough to hold another allocation of `size`. |
174 | 0 | CachedBlock** new_list = static_cast<CachedBlock**>(p); |
175 | 0 | size_t new_size = size / sizeof(CachedBlock*); |
176 | 0 | |
177 | 0 | std::copy(cached_blocks_, cached_blocks_ + cached_block_length_, |
178 | 0 | new_list); |
179 | 0 | |
180 | 0 | // We need to unpoison this memory before filling it in case it has been |
181 | 0 | // poisoned by another sanitizer client. |
182 | 0 | PROTOBUF_UNPOISON_MEMORY_REGION( |
183 | 0 | new_list + cached_block_length_, |
184 | 0 | (new_size - cached_block_length_) * sizeof(CachedBlock*)); |
185 | 0 | |
186 | 0 | std::fill(new_list + cached_block_length_, new_list + new_size, nullptr); |
187 | 0 | |
188 | 0 | cached_blocks_ = new_list; |
189 | 0 | // Clamp the length so it fits in uint8_t. The entries are indexed by |
190 | 0 | // power of two, so we don't need anything larger than 64. |
191 | 0 | cached_block_length_ = |
192 | 0 | static_cast<uint8_t>(std::min(size_t{64}, new_size)); |
193 | 0 | |
194 | 0 | return; |
195 | 0 | } |
196 | 0 | |
197 | 0 | auto& cached_head = cached_blocks_[index]; |
198 | 0 | auto* new_node = static_cast<CachedBlock*>(p); |
199 | 0 | new_node->next = cached_head; |
200 | 0 | cached_head = new_node; |
201 | 0 | PROTOBUF_POISON_MEMORY_REGION(p, size); |
202 | 0 | } |
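
ReturnArrayMemory files a returned block under `absl::bit_width(size) - 5`, i.e. it rounds down to the largest size class whose requests the block can fully satisfy, while TryAllocateFromCachedBlock above rounds up, so a cached block handed back out is never smaller than the request it serves. A standalone consistency check with hypothetical ReturnIndex/RequestIndex helpers, again substituting C++20 std::bit_width for absl::bit_width.

#include <bit>
#include <cstddef>

// ReturnIndex mirrors ReturnArrayMemory (round down); RequestIndex mirrors
// TryAllocateFromCachedBlock (round up).
constexpr std::size_t ReturnIndex(std::size_t size) {
  return static_cast<std::size_t>(std::bit_width(size)) - 5;
}
constexpr std::size_t RequestIndex(std::size_t size) {
  return static_cast<std::size_t>(std::bit_width(size - 1)) - 4;
}

// A 24-byte block returned by a repeated field lands in class 0, which serves
// requests of up to 16 bytes; a 40-byte block lands in class 1, which serves
// requests of up to 32 bytes.
static_assert(ReturnIndex(24) == 0);
static_assert(RequestIndex(16) == 0);
static_assert(ReturnIndex(40) == 1);
static_assert(RequestIndex(24) == 1);
static_assert(RequestIndex(32) == 1);
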
203 | | |
204 | | public: |
205 | | // Allocate space if the current region provides enough space. |
206 | 0 | bool MaybeAllocateAligned(size_t n, void** out) { |
207 | 0 | ABSL_DCHECK(internal::ArenaAlignDefault::IsAligned(n)); |
208 | 0 | ABSL_DCHECK_GE(limit_, ptr()); |
209 | 0 | char* ret = ptr(); |
210 | 0 | // ret + n may point out of the block bounds, or ret may be nullptr. |
211 | 0 | // Both computations have undefined behavior when done on pointers, |
212 | 0 | // so do them on uintptr_t instead. |
213 | 0 | if (PROTOBUF_PREDICT_FALSE(reinterpret_cast<uintptr_t>(ret) + n > |
214 | 0 | reinterpret_cast<uintptr_t>(limit_))) { |
215 | 0 | return false; |
216 | 0 | } |
217 | 0 | PROTOBUF_UNPOISON_MEMORY_REGION(ret, n); |
218 | 0 | *out = ret; |
219 | 0 | char* next = ret + n; |
220 | 0 | set_ptr(next); |
221 | 0 | MaybePrefetchForwards(next); |
222 | 0 | return true; |
223 | 0 | } |
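
As the comment notes, the bounds check is done on uintptr_t because forming `ptr() + n` past the end of the block (or from a null pointer) is undefined behavior for pointer arithmetic, while the unsigned-integer comparison is always well defined. A simplified bump-allocation sketch using the same pattern; BumpRegion is a hypothetical type, not part of protobuf.

#include <cstddef>
#include <cstdint>

// Simplified bump region illustrating the overflow-safe capacity check
// performed on integers rather than on pointers.
struct BumpRegion {
  char* ptr = nullptr;    // next free byte (8-byte aligned)
  char* limit = nullptr;  // one past the usable range

  // Returns nullptr when the region cannot hold n more bytes.
  void* TryAllocate(std::size_t n) {
    // Comparing ptr + n against limit directly could form an out-of-bounds
    // (or null-based) pointer, so do the arithmetic on uintptr_t.
    if (reinterpret_cast<std::uintptr_t>(ptr) + n >
        reinterpret_cast<std::uintptr_t>(limit)) {
      return nullptr;
    }
    void* result = ptr;
    ptr += n;
    return result;
  }
};

The real MaybeAllocateAligned additionally unpoisons the returned bytes and advances the prefetch window before returning.
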
224 | | |
225 | | // If there is enough space in the current block, allocate space for one |
226 | | // std::string object and register it for destruction. The object has not |
227 | | // been constructed and the memory returned is uninitialized. |
228 | 0 | PROTOBUF_ALWAYS_INLINE void* MaybeAllocateStringWithCleanup() { |
229 | 0 | void* p; |
230 | 0 | return MaybeAllocateString(p) ? p : nullptr; |
231 | 0 | } |
232 | | |
233 | | PROTOBUF_ALWAYS_INLINE |
234 | | void* AllocateAlignedWithCleanup(size_t n, size_t align, |
235 | 0 | void (*destructor)(void*)) { |
236 | 0 | n = ArenaAlignDefault::Ceil(n); |
237 | 0 | char* ret = ArenaAlignAs(align).CeilDefaultAligned(ptr()); |
238 | 0 | // See the comment in MaybeAllocateAligned re uintptr_t. |
239 | 0 | if (PROTOBUF_PREDICT_FALSE(reinterpret_cast<uintptr_t>(ret) + n + |
240 | 0 | cleanup::Size() > |
241 | 0 | reinterpret_cast<uintptr_t>(limit_))) { |
242 | 0 | return AllocateAlignedWithCleanupFallback(n, align, destructor); |
243 | 0 | } |
244 | 0 | PROTOBUF_UNPOISON_MEMORY_REGION(ret, n); |
245 | 0 | char* next = ret + n; |
246 | 0 | set_ptr(next); |
247 | 0 | AddCleanupFromExisting(ret, destructor); |
248 | 0 | ABSL_DCHECK_GE(limit_, ptr()); |
249 | 0 | MaybePrefetchForwards(next); |
250 | 0 | return ret; |
251 | 0 | } |
252 | | |
253 | | PROTOBUF_ALWAYS_INLINE |
254 | 0 | void AddCleanup(void* elem, void (*destructor)(void*)) { |
255 | 0 | size_t has = static_cast<size_t>(limit_ - ptr()); |
256 | 0 | if (PROTOBUF_PREDICT_FALSE(cleanup::Size() > has)) { |
257 | 0 | return AddCleanupFallback(elem, destructor); |
258 | 0 | } |
259 | 0 | AddCleanupFromExisting(elem, destructor); |
260 | 0 | } |
261 | | |
262 | | ABSL_ATTRIBUTE_RETURNS_NONNULL void* AllocateFromStringBlock(); |
263 | | |
264 | | std::vector<void*> PeekCleanupListForTesting(); |
265 | | |
266 | | private: |
267 | | friend class ThreadSafeArena; |
268 | | |
269 | | // See comments for cached_blocks_. |
270 | | struct CachedBlock { |
271 | | // Simple linked list. |
272 | | CachedBlock* next; |
273 | | }; |
274 | | |
275 | | static constexpr ptrdiff_t kPrefetchForwardsDegree = ABSL_CACHELINE_SIZE * 16; |
276 | | static constexpr ptrdiff_t kPrefetchBackwardsDegree = ABSL_CACHELINE_SIZE * 6; |
277 | | |
278 | | // Constructor is private as only New() should be used. |
279 | | inline SerialArena(ArenaBlock* b, ThreadSafeArena& parent); |
280 | | |
281 | | // Constructors to handle the first SerialArena. |
282 | | inline explicit SerialArena(ThreadSafeArena& parent); |
283 | | inline SerialArena(FirstSerialArena, ArenaBlock* b, ThreadSafeArena& parent); |
284 | | |
285 | | bool MaybeAllocateString(void*& p); |
286 | | ABSL_ATTRIBUTE_RETURNS_NONNULL void* AllocateFromStringBlockFallback(); |
287 | | |
288 | | PROTOBUF_ALWAYS_INLINE |
289 | 0 | void AddCleanupFromExisting(void* elem, void (*destructor)(void*)) { |
290 | 0 | const size_t cleanup_size = cleanup::Size(); |
291 | 0 | |
292 | 0 | PROTOBUF_UNPOISON_MEMORY_REGION(limit_ - cleanup_size, cleanup_size); |
293 | 0 | limit_ -= cleanup_size; |
294 | 0 | MaybePrefetchBackwards(limit_); |
295 | 0 | ABSL_DCHECK_GE(limit_, ptr()); |
296 | 0 | cleanup::CreateNode(limit_, elem, destructor); |
297 | 0 | } |
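
Allocation and cleanup registration share one block: objects are bump-allocated upward from ptr_ while AddCleanupFromExisting carves cleanup records downward from limit_, so the two ends converge and no separate cleanup storage is needed. A reduced sketch of that dual-ended layout with hypothetical DualEndedRegion and CleanupNode types; the real node layout lives in arena_cleanup.h.

#include <cstddef>
#include <cstdint>
#include <new>

// Hypothetical fixed-size cleanup record.
struct CleanupNode {
  void* object;
  void (*destructor)(void*);
};

// Hypothetical dual-ended region: objects grow up from ptr, cleanup records
// grow down from limit, and the two ends converge inside one block.
struct DualEndedRegion {
  char* ptr;
  char* limit;  // cleanup records already occupy [limit, end of block)

  void* AllocateWithCleanup(std::size_t n, void (*dtor)(void*)) {
    if (reinterpret_cast<std::uintptr_t>(ptr) + n + sizeof(CleanupNode) >
        reinterpret_cast<std::uintptr_t>(limit)) {
      return nullptr;  // the real code falls back to allocating a new block
    }
    void* object = ptr;
    ptr += n;
    limit -= sizeof(CleanupNode);
    new (limit) CleanupNode{object, dtor};  // register at the shrinking limit
    return object;
  }
};
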
298 | | |
299 | | // Prefetch the next kPrefetchForwardsDegree bytes after `prefetch_ptr_` and |
300 | | // up to `prefetch_limit_`, if `next` is within kPrefetchForwardsDegree bytes |
301 | | // of `prefetch_ptr_`. |
302 | | PROTOBUF_ALWAYS_INLINE |
303 | 0 | void MaybePrefetchForwards(const char* next) { |
304 | 0 | ABSL_DCHECK(static_cast<const void*>(prefetch_ptr_) == nullptr || |
305 | 0 | static_cast<const void*>(prefetch_ptr_) >= head()); |
306 | 0 | if (PROTOBUF_PREDICT_TRUE(prefetch_ptr_ - next > kPrefetchForwardsDegree)) |
307 | 0 | return; |
308 | 0 | if (PROTOBUF_PREDICT_TRUE(prefetch_ptr_ < prefetch_limit_)) { |
309 | 0 | const char* prefetch_ptr = std::max(next, prefetch_ptr_); |
310 | 0 | ABSL_DCHECK(prefetch_ptr != nullptr); |
311 | 0 | const char* end = |
312 | 0 | std::min(prefetch_limit_, prefetch_ptr + ABSL_CACHELINE_SIZE * 16); |
313 | 0 | for (; prefetch_ptr < end; prefetch_ptr += ABSL_CACHELINE_SIZE) { |
314 | 0 | absl::PrefetchToLocalCacheForWrite(prefetch_ptr); |
315 | 0 | } |
316 | 0 | prefetch_ptr_ = prefetch_ptr; |
317 | 0 | } |
318 | 0 | } |
319 | | |
320 | | PROTOBUF_ALWAYS_INLINE |
321 | | // Prefetch up to kPrefetchBackwardsDegree before `prefetch_limit_` and after |
322 | | // `prefetch_ptr_`, if `limit` is within kPrefetchBackwardsDegree of |
323 | | // `prefetch_limit_`. |
324 | 0 | void MaybePrefetchBackwards(const char* limit) { |
325 | 0 | ABSL_DCHECK(prefetch_limit_ == nullptr || |
326 | 0 | static_cast<const void*>(prefetch_limit_) <= |
327 | 0 | static_cast<const void*>(head()->Limit())); |
328 | 0 | if (PROTOBUF_PREDICT_TRUE(limit - prefetch_limit_ > |
329 | 0 | kPrefetchBackwardsDegree)) |
330 | 0 | return; |
331 | 0 | if (PROTOBUF_PREDICT_TRUE(prefetch_limit_ > prefetch_ptr_)) { |
332 | 0 | const char* prefetch_limit = std::min(limit, prefetch_limit_); |
333 | 0 | ABSL_DCHECK_NE(prefetch_limit, nullptr); |
334 | 0 | const char* end = |
335 | 0 | std::max(prefetch_ptr_, prefetch_limit - kPrefetchBackwardsDegree); |
336 | 0 | for (; prefetch_limit > end; prefetch_limit -= ABSL_CACHELINE_SIZE) { |
337 | 0 | absl::PrefetchToLocalCacheForWrite(prefetch_limit); |
338 | 0 | } |
339 | 0 | prefetch_limit_ = prefetch_limit; |
340 | 0 | } |
341 | 0 | } |
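
Both prefetch helpers maintain a moving frontier of already-prefetched bytes: the forward frontier stays at most kPrefetchForwardsDegree ahead of the bump pointer, the backward frontier at most kPrefetchBackwardsDegree below the limit, so each cache line is touched once, shortly before allocation reaches it. A minimal sketch of the forward direction only, written as a hypothetical free function rather than the member above.

#include <algorithm>
#include <cstddef>

#include "absl/base/optimization.h"  // ABSL_CACHELINE_SIZE
#include "absl/base/prefetch.h"      // absl::PrefetchToLocalCacheForWrite

// Once the bump pointer `next` gets within `degree` bytes of the prefetch
// frontier, push the frontier forward by up to `degree` bytes (never past
// `prefetch_limit`), one cache line at a time, and return the new frontier.
inline const char* PrefetchForward(const char* next, const char* prefetch_ptr,
                                   const char* prefetch_limit,
                                   std::ptrdiff_t degree) {
  if (prefetch_ptr - next > degree) return prefetch_ptr;  // still far ahead
  const char* p = std::max(next, prefetch_ptr);
  const char* end = std::min(prefetch_limit, p + degree);
  for (; p < end; p += ABSL_CACHELINE_SIZE) {
    absl::PrefetchToLocalCacheForWrite(p);
  }
  return p;  // caller stores this back as the new prefetch frontier
}
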
342 | | |
343 | | // Creates a new SerialArena inside mem, using the remaining memory for |
344 | | // future allocations. |
345 | | // The `parent` arena must outlive the serial arena, which is guaranteed |
346 | | // because the parent manages the lifetime of the serial arenas. |
347 | | static SerialArena* New(SizedPtr mem, ThreadSafeArena& parent); |
348 | | // Free SerialArena returning the memory passed in to New. |
349 | | template <typename Deallocator> |
350 | | SizedPtr Free(Deallocator deallocator); |
351 | | |
352 | 0 | size_t FreeStringBlocks() { |
353 | 0 | // On the active block, delete all strings, skipping the unused instances. |
354 | 0 | size_t unused_bytes = string_block_unused_.load(std::memory_order_relaxed); |
355 | 0 | if (StringBlock* sb = string_block_.load(std::memory_order_relaxed)) { |
356 | 0 | return FreeStringBlocks(sb, unused_bytes); |
357 | 0 | } |
358 | 0 | return 0; |
359 | 0 | } |
360 | | static size_t FreeStringBlocks(StringBlock* string_block, size_t unused); |
361 | | |
362 | | // Adds `space_used` to space_used_ in relaxed atomic order. |
363 | 0 | void AddSpaceUsed(size_t space_used) { |
364 | 0 | space_used_.store(space_used_.load(std::memory_order_relaxed) + space_used, |
365 | 0 | std::memory_order_relaxed); |
366 | 0 | } |
367 | | |
368 | | // Adds `space_allocated` to space_allocated_ in relaxed atomic order. |
369 | 0 | void AddSpaceAllocated(size_t space_allocated) { |
370 | 0 | space_allocated_.store( |
371 | 0 | space_allocated_.load(std::memory_order_relaxed) + space_allocated, |
372 | 0 | std::memory_order_relaxed); |
373 | 0 | } |
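
Both statistics counters use a relaxed load followed by a relaxed store instead of fetch_add. That is safe here because, per the class comment, all allocating calls on a SerialArena are serialized by the caller, so the single writer cannot lose updates, while concurrent readers only need an approximate snapshot. A reduced sketch of the same single-writer counter pattern, with a hypothetical class name.

#include <atomic>
#include <cstdint>

// Single-writer statistics counter: one (externally serialized) thread adds,
// any thread may read an approximate snapshot.
class SingleWriterCounter {
 public:
  // Only the owning thread calls Add, so a plain relaxed load + store cannot
  // lose an update and no read-modify-write (fetch_add) is required.
  void Add(std::uint64_t delta) {
    value_.store(value_.load(std::memory_order_relaxed) + delta,
                 std::memory_order_relaxed);
  }
  std::uint64_t Get() const { return value_.load(std::memory_order_relaxed); }

 private:
  std::atomic<std::uint64_t> value_{0};
};
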
374 | | |
375 | | // Helper getters/setters to handle relaxed operations on atomic variables. |
376 | 0 | ArenaBlock* head() { return head_.load(std::memory_order_relaxed); } |
377 | 0 | const ArenaBlock* head() const { |
378 | 0 | return head_.load(std::memory_order_relaxed); |
379 | 0 | } |
380 | | |
381 | 0 | char* ptr() { return ptr_.load(std::memory_order_relaxed); } |
382 | 0 | const char* ptr() const { return ptr_.load(std::memory_order_relaxed); } |
383 | 0 | void set_ptr(char* ptr) { return ptr_.store(ptr, std::memory_order_relaxed); } |
384 | 0 | PROTOBUF_ALWAYS_INLINE void set_range(char* ptr, char* limit) { |
385 | 0 | set_ptr(ptr); |
386 | 0 | prefetch_ptr_ = ptr; |
387 | 0 | limit_ = limit; |
388 | 0 | prefetch_limit_ = limit; |
389 | 0 | } |
390 | | |
391 | | void* AllocateAlignedFallback(size_t n); |
392 | | void* AllocateAlignedWithCleanupFallback(size_t n, size_t align, |
393 | | void (*destructor)(void*)); |
394 | | void AddCleanupFallback(void* elem, void (*destructor)(void*)); |
395 | | inline void AllocateNewBlock(size_t n); |
396 | | inline void Init(ArenaBlock* b, size_t offset); |
397 | | |
398 | | // Members are declared here to track sizeof(SerialArena) and hotness |
399 | | // centrally. They are (roughly) laid out in descending order of hotness. |
400 | | |
401 | | // Next pointer to allocate from. Always 8-byte aligned. Points inside |
402 | | // head_ (and head_->pos will always be non-canonical). We keep these |
403 | | // here to reduce indirection. |
404 | | std::atomic<char*> ptr_{nullptr}; |
405 | | // Limiting address up to which memory can be allocated from the head block. |
406 | | char* limit_ = nullptr; |
407 | | // Current prefetch positions. Data from `ptr_` up to but not including |
408 | | // `prefetch_ptr_` is software prefetched. Similarly, data from `limit_` down |
409 | | // to but not including `prefetch_limit_` is software prefetched. |
410 | | const char* prefetch_ptr_ = nullptr; |
411 | | const char* prefetch_limit_ = nullptr; |
412 | | |
413 | | // The active string block. |
414 | | std::atomic<StringBlock*> string_block_{nullptr}; |
415 | | |
416 | | // The number of unused bytes in string_block_. |
417 | | // We allocate from `effective_size()` down to 0 inside `string_block_`. |
419 | | // `unused == 0` means that `string_block_` is exhausted (or null). |
419 | | std::atomic<size_t> string_block_unused_{0}; |
420 | | |
421 | | std::atomic<ArenaBlock*> head_{nullptr}; // Head of linked list of blocks. |
422 | | std::atomic<size_t> space_used_{0}; // Necessary for metrics. |
423 | | std::atomic<size_t> space_allocated_{0}; |
424 | | ThreadSafeArena& parent_; |
425 | | |
426 | | // Repeated*Field and Arena play together to reduce memory consumption by |
427 | | // reusing blocks. Currently, natural growth of the repeated field types makes |
428 | | // them allocate blocks of size `8 + 2^N, N>=3`. |
429 | | // When the repeated field grows, it returns the previous block and we put |
430 | | // it in this free list. |
431 | | // `cached_blocks_[i]` points to the free list for blocks of size `8+2^(i+3)`. |
432 | | // The array of freelists is grown when needed in `ReturnArrayMemory()`. |
433 | | uint8_t cached_block_length_ = 0; |
434 | | CachedBlock** cached_blocks_ = nullptr; |
435 | | }; |
436 | | |
437 | 0 | inline PROTOBUF_ALWAYS_INLINE bool SerialArena::MaybeAllocateString(void*& p) { |
438 | 0 | // Check how many unused instances are in the current block. |
439 | 0 | size_t unused_bytes = string_block_unused_.load(std::memory_order_relaxed); |
440 | 0 | if (PROTOBUF_PREDICT_TRUE(unused_bytes != 0)) { |
441 | 0 | unused_bytes -= sizeof(std::string); |
442 | 0 | string_block_unused_.store(unused_bytes, std::memory_order_relaxed); |
443 | 0 | p = string_block_.load(std::memory_order_relaxed)->AtOffset(unused_bytes); |
444 | 0 | return true; |
445 | 0 | } |
446 | 0 | return false; |
447 | 0 | } |
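
String slots are handed out from the back of the active StringBlock: string_block_unused_ holds the bytes still free, and each allocation subtracts sizeof(std::string) and returns the slot at that offset, so unused == 0 means the block is exhausted. A simplified sketch of the same bookkeeping over a plain byte buffer; SimpleStringBlock and MakeString are hypothetical names, not protobuf APIs.

#include <cstddef>
#include <new>
#include <string>

// Raw storage carved into std::string slots from the back toward the front,
// mirroring the unused-byte bookkeeping above.
struct SimpleStringBlock {
  alignas(std::string) unsigned char storage[16 * sizeof(std::string)];
  std::size_t unused_bytes = sizeof(storage);  // 0 means exhausted

  // Returns uninitialized, suitably sized memory for one std::string,
  // or nullptr when the block is full.
  void* TryAllocateStringSlot() {
    if (unused_bytes == 0) return nullptr;
    unused_bytes -= sizeof(std::string);
    return storage + unused_bytes;
  }
};

// The caller placement-news a std::string into the slot; the arena later
// destroys it during its cleanup pass.
inline std::string* MakeString(SimpleStringBlock& block) {
  void* slot = block.TryAllocateStringSlot();
  return slot ? new (slot) std::string() : nullptr;
}
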
448 | | |
449 | | ABSL_ATTRIBUTE_RETURNS_NONNULL inline PROTOBUF_ALWAYS_INLINE void* |
450 | 0 | SerialArena::AllocateFromStringBlock() { |
451 | 0 | void* p; |
452 | 0 | if (ABSL_PREDICT_TRUE(MaybeAllocateString(p))) return p; |
453 | 0 | return AllocateFromStringBlockFallback(); |
454 | 0 | } |
455 | | |
456 | | } // namespace internal |
457 | | } // namespace protobuf |
458 | | } // namespace google |
459 | | |
460 | | #include "google/protobuf/port_undef.inc" |
461 | | |
462 | | #endif // GOOGLE_PROTOBUF_SERIAL_ARENA_H__ |