/rust/registry/src/index.crates.io-1949cf8c6b5b557f/bumpalo-3.19.1/src/lib.rs
Line | Count | Source |
1 | | #![doc = include_str!("../README.md")] |
2 | | #![deny(missing_debug_implementations)] |
3 | | #![deny(missing_docs)] |
4 | | #![cfg_attr(not(feature = "std"), no_std)] |
5 | | #![cfg_attr(feature = "allocator_api", feature(allocator_api))] |
6 | | |
7 | | #[doc(hidden)] |
8 | | pub extern crate alloc as core_alloc; |
9 | | |
10 | | #[cfg(feature = "boxed")] |
11 | | pub mod boxed; |
12 | | #[cfg(feature = "collections")] |
13 | | pub mod collections; |
14 | | |
15 | | mod alloc; |
16 | | |
17 | | use core::cell::Cell; |
18 | | use core::cmp::Ordering; |
19 | | use core::fmt::Display; |
20 | | use core::iter; |
21 | | use core::marker::PhantomData; |
22 | | use core::mem; |
23 | | use core::ptr::{self, NonNull}; |
24 | | use core::slice; |
25 | | use core::str; |
26 | | use core_alloc::alloc::{alloc, dealloc, Layout}; |
27 | | |
28 | | #[cfg(feature = "allocator_api")] |
29 | | use core_alloc::alloc::{AllocError, Allocator}; |
30 | | |
31 | | #[cfg(all(feature = "allocator-api2", not(feature = "allocator_api")))] |
32 | | use allocator_api2::alloc::{AllocError, Allocator}; |
33 | | |
34 | | pub use alloc::AllocErr; |
35 | | |
36 | | /// An error returned from [`Bump::try_alloc_try_with`]. |
37 | | #[derive(Clone, PartialEq, Eq, Debug)] |
38 | | pub enum AllocOrInitError<E> { |
39 | | /// Indicates that the initial allocation failed. |
40 | | Alloc(AllocErr), |
41 | | /// Indicates that the initializer failed with the contained error after |
42 | | /// allocation. |
43 | | /// |
44 | | /// It is possible but not guaranteed that the allocated memory has been |
45 | | /// released back to the allocator at this point. |
46 | | Init(E), |
47 | | } |
48 | | impl<E> From<AllocErr> for AllocOrInitError<E> { |
49 | 0 | fn from(e: AllocErr) -> Self { |
50 | 0 | Self::Alloc(e) |
51 | 0 | } |
52 | | } |
53 | | impl<E: Display> Display for AllocOrInitError<E> { |
54 | 0 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { |
55 | 0 | match self { |
56 | 0 | AllocOrInitError::Alloc(err) => err.fmt(f), |
57 | 0 | AllocOrInitError::Init(err) => write!(f, "initialization failed: {}", err), |
58 | | } |
59 | 0 | } |
60 | | } |
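For orientation, a minimal hedged sketch of where this error type surfaces, via `Bump::try_alloc_try_with`; the closure and its `&'static str` payload are invented for the example.

```rust
use bumpalo::{AllocOrInitError, Bump};

fn main() {
    let bump = Bump::new();

    // The initializer closure can fail; its error is wrapped in `Init`.
    // Allocation failure itself is reported as `Alloc(AllocErr)`.
    let result: Result<&mut u64, AllocOrInitError<&'static str>> =
        bump.try_alloc_try_with(|| Err("initializer refused"));

    match result {
        Ok(value) => println!("allocated {value}"),
        Err(AllocOrInitError::Alloc(err)) => println!("allocation failed: {err}"),
        Err(AllocOrInitError::Init(err)) => println!("initialization failed: {err}"),
    }
}
```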
61 | | |
62 | | /// An arena to bump allocate into. |
63 | | /// |
64 | | /// ## No `Drop`s |
65 | | /// |
66 | | /// Objects that are bump-allocated will never have their [`Drop`] implementation |
67 | | /// called — unless you do it manually yourself. This makes it relatively |
68 | | /// easy to leak memory or other resources. |
69 | | /// |
70 | | /// If you have a type which internally manages |
71 | | /// |
72 | | /// * an allocation from the global heap (e.g. [`Vec<T>`]), |
73 | | /// * open file descriptors (e.g. [`std::fs::File`]), or |
74 | | /// * any other resource that must be cleaned up (e.g. an `mmap`) |
75 | | /// |
76 | | /// and relies on its `Drop` implementation to clean up the internal resource, |
77 | | /// then if you allocate that type with a `Bump`, you need to find a new way to |
78 | | /// clean up after it yourself. |
79 | | /// |
80 | | /// Potential solutions are: |
81 | | /// |
82 | | /// * Using [`bumpalo::boxed::Box::new_in`] instead of [`Bump::alloc`], which |
83 | | /// will drop wrapped values similarly to [`std::boxed::Box`]. Note that this |
84 | | /// requires enabling the `"boxed"` Cargo feature for this crate. **This is |
85 | | /// often the easiest solution.** |
86 | | /// |
87 | | /// * Calling [`drop_in_place`][drop_in_place] or using |
88 | | /// [`std::mem::ManuallyDrop`][manuallydrop] to manually drop these types. |
89 | | /// |
90 | | /// * Using [`bumpalo::collections::Vec`] instead of [`std::vec::Vec`]. |
91 | | /// |
92 | | /// * Avoiding allocating these problematic types within a `Bump`. |
93 | | /// |
94 | | /// Note that not calling `Drop` is memory safe! Destructors are never |
95 | | /// guaranteed to run in Rust, so you can't rely on them for enforcing memory |
96 | | /// safety. |
97 | | /// |
98 | | /// [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html |
99 | | /// [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html |
100 | | /// [`std::fs::File`]: https://doc.rust-lang.org/std/fs/struct.File.html |
101 | | /// [drop_in_place]: https://doc.rust-lang.org/std/ptr/fn.drop_in_place.html |
102 | | /// [manuallydrop]: https://doc.rust-lang.org/std/mem/struct.ManuallyDrop.html |
103 | | /// [`bumpalo::collections::Vec`]: collections/vec/struct.Vec.html |
104 | | /// [`std::vec::Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html |
105 | | /// [`bumpalo::boxed::Box::new_in`]: boxed/struct.Box.html#method.new_in |
106 | | /// [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html |
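To make the first of those suggestions concrete, here is a small, hedged sketch of getting `Drop` to run via `bumpalo::boxed::Box::new_in` (requires the `"boxed"` cargo feature; the `Loud` type is invented for the example).

```rust
// Requires the "boxed" cargo feature.
use bumpalo::{boxed::Box, Bump};

struct Loud(&'static str);

impl Drop for Loud {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let bump = Bump::new();

    // Plain `alloc` never runs `Drop`: this value is simply leaked when the
    // arena is reset or dropped.
    let _leaked = bump.alloc(Loud("leaked"));

    // `bumpalo::boxed::Box::new_in` wraps the allocation so that `Drop` runs
    // when the box goes out of scope, like `std::boxed::Box`.
    let dropped = Box::new_in(Loud("dropped"), &bump);
    drop(dropped); // prints "dropping dropped"
}
```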
107 | | /// |
108 | | /// ## Example |
109 | | /// |
110 | | /// ``` |
111 | | /// use bumpalo::Bump; |
112 | | /// |
113 | | /// // Create a new bump arena. |
114 | | /// let bump = Bump::new(); |
115 | | /// |
116 | | /// // Allocate values into the arena. |
117 | | /// let forty_two = bump.alloc(42); |
118 | | /// assert_eq!(*forty_two, 42); |
119 | | /// |
120 | | /// // Mutable references are returned from allocation. |
121 | | /// let mut s = bump.alloc("bumpalo"); |
122 | | /// *s = "the bump allocator; and also is a buffalo"; |
123 | | /// ``` |
124 | | /// |
125 | | /// ## Allocation Methods Come in Many Flavors |
126 | | /// |
127 | | /// There are various allocation methods on `Bump`, the simplest being |
128 | | /// [`alloc`][Bump::alloc]. The others exist to satisfy some combination of |
129 | | /// fallible allocation and initialization. The allocation methods are |
130 | | /// summarized in the following table: |
131 | | /// |
132 | | /// <table> |
133 | | /// <thead> |
134 | | /// <tr> |
135 | | /// <th></th> |
136 | | /// <th>Infallible Allocation</th> |
137 | | /// <th>Fallible Allocation</th> |
138 | | /// </tr> |
139 | | /// </thead> |
140 | | /// <tr> |
141 | | /// <th>By Value</th> |
142 | | /// <td><a href="#method.alloc"><code>alloc</code></a></td> |
143 | | /// <td><a href="#method.try_alloc"><code>try_alloc</code></a></td> |
144 | | /// </tr> |
145 | | /// <tr> |
146 | | /// <th>Infallible Initializer Function</th> |
147 | | /// <td><a href="#method.alloc_with"><code>alloc_with</code></a></td> |
148 | | /// <td><a href="#method.try_alloc_with"><code>try_alloc_with</code></a></td> |
149 | | /// </tr> |
150 | | /// <tr> |
151 | | /// <th>Fallible Initializer Function</th> |
152 | | /// <td><a href="#method.alloc_try_with"><code>alloc_try_with</code></a></td> |
153 | | /// <td><a href="#method.try_alloc_try_with"><code>try_alloc_try_with</code></a></td> |
154 | | /// </tr> |
155 | | /// <tbody> |
156 | | /// </tbody> |
157 | | /// </table> |
158 | | /// |
159 | | /// ### Fallible Allocation: The `try_alloc_` Method Prefix |
160 | | /// |
161 | | /// These allocation methods let you recover from out-of-memory (OOM) |
162 | | /// scenarios, rather than raising a panic on OOM. |
163 | | /// |
164 | | /// ``` |
165 | | /// use bumpalo::Bump; |
166 | | /// |
167 | | /// let bump = Bump::new(); |
168 | | /// |
169 | | /// match bump.try_alloc(MyStruct { |
170 | | /// // ... |
171 | | /// }) { |
172 | | /// Ok(my_struct) => { |
173 | | /// // Allocation succeeded. |
174 | | /// } |
175 | | /// Err(e) => { |
176 | | /// // Out of memory. |
177 | | /// } |
178 | | /// } |
179 | | /// |
180 | | /// struct MyStruct { |
181 | | /// // ... |
182 | | /// } |
183 | | /// ``` |
184 | | /// |
185 | | /// ### Initializer Functions: The `_with` Method Suffix |
186 | | /// |
187 | | /// Calling one of the generic `…alloc(x)` methods is essentially equivalent to |
188 | | /// the matching [`…alloc_with(|| x)`](?search=alloc_with). However if you use |
189 | | /// `…alloc_with`, then the closure will not be invoked until after allocating |
190 | | /// space for storing `x` on the heap. |
191 | | /// |
192 | | /// This can be useful in certain edge-cases related to compiler optimizations. |
193 | | /// When evaluating for example `bump.alloc(x)`, semantically `x` is first put |
194 | | /// on the stack and then moved onto the heap. In some cases, the compiler is |
195 | | /// able to optimize this into constructing `x` directly on the heap; however, |
196 | | /// in many cases it does not. |
197 | | /// |
198 | | /// The `…alloc_with` functions try to help the compiler be smarter. In most |
199 | | /// cases, for example, doing `bump.try_alloc_with(|| x)` in release mode will be |
200 | | /// enough to help the compiler realize that this optimization is valid and |
201 | | /// to construct `x` directly on the heap. |
202 | | /// |
203 | | /// #### Warning |
204 | | /// |
205 | | /// These functions critically depend on compiler optimizations to achieve their |
206 | | /// desired effect. This means that they are not an effective tool when compiling |
207 | | /// without optimizations. |
208 | | /// |
209 | | /// Even when optimizations are on, these functions do not **guarantee** that |
210 | | /// the value is constructed on the heap. To the best of our knowledge no such |
211 | | /// guarantee can be made in stable Rust as of 1.54. |
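A short, hedged sketch of the difference in shape between the two flavors; the 4 KiB array is an arbitrary choice to make the potential intermediate stack copy visible.

```rust
use bumpalo::Bump;

fn main() {
    let bump = Bump::new();

    // `alloc` receives the value by argument, so semantically a `[u8; 4096]`
    // is built on the stack first and then moved into the arena.
    let by_value: &mut [u8; 4096] = bump.alloc([0u8; 4096]);

    // `alloc_with` receives a closure instead; in optimized builds this gives
    // the compiler a better chance to construct the array directly in the
    // arena, skipping the intermediate stack copy (not guaranteed).
    let with_closure: &mut [u8; 4096] = bump.alloc_with(|| [0u8; 4096]);

    assert_eq!(by_value[0], with_closure[0]);
}
```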
212 | | /// |
213 | | /// ### Fallible Initialization: The `_try_with` Method Suffix |
214 | | /// |
215 | | /// The generic [`…alloc_try_with(|| x)`](?search=_try_with) methods behave |
216 | | /// like the purely `_with` suffixed methods explained above. However, they |
217 | | /// allow for fallible initialization by accepting a closure that returns a |
218 | | /// [`Result`] and will attempt to undo the initial allocation if this closure |
219 | | /// returns [`Err`]. |
220 | | /// |
221 | | /// #### Warning |
222 | | /// |
223 | | /// If the inner closure returns [`Ok`], space for the entire [`Result`] remains |
224 | | /// allocated inside `self`. This can be a problem especially if the [`Err`] |
225 | | /// variant is larger than the [`Ok`] variant, but even otherwise there may be overhead for the |
226 | | /// [`Result`]'s discriminant. |
227 | | /// |
228 | | /// <p><details><summary>Undoing the allocation in the <code>Err</code> case |
229 | | /// always fails if <code>f</code> successfully made any additional allocations |
230 | | /// in <code>self</code>.</summary> |
231 | | /// |
232 | | /// For example, the following will also always leak the space for the [`Result`] |
233 | | /// into this `Bump`, even though the inner reference isn't kept and the [`Err`] |
234 | | /// payload is returned semantically by value: |
235 | | /// |
236 | | /// ```rust |
237 | | /// let bump = bumpalo::Bump::new(); |
238 | | /// |
239 | | /// let r: Result<&mut [u8; 1000], ()> = bump.alloc_try_with(|| { |
240 | | /// let _ = bump.alloc(0_u8); |
241 | | /// Err(()) |
242 | | /// }); |
243 | | /// |
244 | | /// assert!(r.is_err()); |
245 | | /// ``` |
246 | | /// |
247 | | ///</details></p> |
248 | | /// |
249 | | /// Since [`Err`] payloads are first placed on the heap and then moved to the |
250 | | /// stack, `bump.…alloc_try_with(|| x)?` is likely to execute more slowly than |
251 | | /// the matching `bump.…alloc(x?)` in case of initialization failure. If this |
252 | | /// happens frequently, using the plain un-suffixed method may perform better. |
253 | | /// |
254 | | /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html |
255 | | /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok |
256 | | /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err |
257 | | /// |
258 | | /// ### `Bump` Allocation Limits |
259 | | /// |
260 | | /// `bumpalo` supports setting a limit on the maximum bytes of memory that can |
261 | | /// be allocated for use in a particular `Bump` arena. This limit can be set and removed with |
262 | | /// [`set_allocation_limit`][Bump::set_allocation_limit]. |
263 | | /// The allocation limit is only enforced when allocating new backing chunks for |
264 | | /// a `Bump`. Updating the allocation limit will not affect existing allocations |
265 | | /// or any future allocations within the `Bump`'s current chunk. |
266 | | /// |
267 | | /// #### Example |
268 | | /// |
269 | | /// ``` |
270 | | /// let bump = bumpalo::Bump::new(); |
271 | | /// |
272 | | /// assert_eq!(bump.allocation_limit(), None); |
273 | | /// bump.set_allocation_limit(Some(0)); |
274 | | /// |
275 | | /// assert!(bump.try_alloc(5).is_err()); |
276 | | /// |
277 | | /// bump.set_allocation_limit(Some(6)); |
278 | | /// |
279 | | /// assert_eq!(bump.allocation_limit(), Some(6)); |
280 | | /// |
281 | | /// bump.set_allocation_limit(None); |
282 | | /// |
283 | | /// assert_eq!(bump.allocation_limit(), None); |
284 | | /// ``` |
285 | | /// |
286 | | /// #### Warning |
287 | | /// |
288 | | /// For backwards-compatibility reasons, allocations that fail |
289 | | /// due to the allocation limit are not reported differently from |
290 | | /// errors due to resource exhaustion. |
291 | | #[derive(Debug)] |
292 | | pub struct Bump<const MIN_ALIGN: usize = 1> { |
293 | | // The current chunk we are bump allocating within. |
294 | | current_chunk_footer: Cell<NonNull<ChunkFooter>>, |
295 | | allocation_limit: Cell<Option<usize>>, |
296 | | } |
297 | | |
298 | | #[repr(C)] |
299 | | #[derive(Debug)] |
300 | | struct ChunkFooter { |
301 | | // Pointer to the start of this chunk allocation. This footer is always at |
302 | | // the end of the chunk. |
303 | | data: NonNull<u8>, |
304 | | |
305 | | // The layout of this chunk's allocation. |
306 | | layout: Layout, |
307 | | |
308 | | // Link to the previous chunk. |
309 | | // |
310 | | // Note that the last node in the `prev` linked list is the canonical empty |
311 | | // chunk, whose `prev` link points to itself. |
312 | | prev: Cell<NonNull<ChunkFooter>>, |
313 | | |
314 | | // Bump allocation finger that is always in the range `self.data..=self`. |
315 | | ptr: Cell<NonNull<u8>>, |
316 | | |
317 | | // The bytes allocated in all chunks so far. The canonical empty chunk has |
318 | | // a size of 0; for all other chunks, `allocated_bytes` is |
319 | | // the size of the current chunk plus the allocated bytes |
320 | | // of the `prev` chunk. |
321 | | allocated_bytes: usize, |
322 | | } |
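The `prev` list above is what the public chunk-iteration API walks. A hedged sketch of observing it from outside the crate (the allocation count is arbitrary, chosen only to force more than one chunk).

```rust
use bumpalo::Bump;

fn main() {
    let mut bump = Bump::with_capacity(256);

    // Allocate more than one chunk's worth of data so the `prev` list grows.
    for i in 0..10_000u32 {
        bump.alloc(i);
    }

    // Each item is one chunk's used region, most recent chunk first.
    let chunk_sizes: Vec<usize> = bump
        .iter_allocated_chunks()
        .map(|chunk| chunk.len())
        .collect();
    assert!(chunk_sizes.len() > 1, "allocations spilled into extra chunks");
}
```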
323 | | |
324 | | /// A wrapper type for the canonical, statically allocated empty chunk. |
325 | | /// |
326 | | /// For the canonical empty chunk to be `static`, its type must be `Sync`, which |
327 | | /// is the purpose of this wrapper type. This is safe because the empty chunk is |
328 | | /// immutable and never actually modified. |
329 | | #[repr(transparent)] |
330 | | struct EmptyChunkFooter(ChunkFooter); |
331 | | |
332 | | unsafe impl Sync for EmptyChunkFooter {} |
333 | | |
334 | | static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter { |
335 | | // This chunk is empty (except the footer itself). |
336 | | layout: Layout::new::<ChunkFooter>(), |
337 | | |
338 | | // The start of the (empty) allocatable region for this chunk is itself. |
339 | | data: unsafe { NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8) }, |
340 | | |
341 | | // The end of the (empty) allocatable region for this chunk is also itself. |
342 | | ptr: Cell::new(unsafe { |
343 | | NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8) |
344 | | }), |
345 | | |
346 | | // Invariant: the last chunk footer in all `ChunkFooter::prev` linked lists |
347 | | // is the empty chunk footer, whose `prev` points to itself. |
348 | | prev: Cell::new(unsafe { |
349 | | NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut ChunkFooter) |
350 | | }), |
351 | | |
352 | | // Empty chunks count as 0 allocated bytes in an arena. |
353 | | allocated_bytes: 0, |
354 | | }); |
355 | | |
356 | | impl EmptyChunkFooter { |
357 | 3.62M | fn get(&'static self) -> NonNull<ChunkFooter> { |
358 | 3.62M | NonNull::from(&self.0) |
359 | 3.62M | } |
360 | | } |
361 | | |
362 | | impl ChunkFooter { |
363 | | // Returns the start and length of the currently allocated region of this |
364 | | // chunk. |
365 | 0 | fn as_raw_parts(&self) -> (*const u8, usize) { |
366 | 0 | let data = self.data.as_ptr() as *const u8; |
367 | 0 | let ptr = self.ptr.get().as_ptr() as *const u8; |
368 | 0 | debug_assert!(data <= ptr); |
369 | 0 | debug_assert!(ptr <= self as *const ChunkFooter as *const u8); |
370 | 0 | let len = unsafe { (self as *const ChunkFooter as *const u8).offset_from(ptr) as usize }; |
371 | 0 | (ptr, len) |
372 | 0 | } |
373 | | |
374 | | /// Is this chunk the last empty chunk? |
375 | 2.76M | fn is_empty(&self) -> bool { |
376 | 2.76M | ptr::eq(self, EMPTY_CHUNK.get().as_ptr()) |
377 | 2.76M | } |
378 | | } |
379 | | |
380 | | impl<const MIN_ALIGN: usize> Default for Bump<MIN_ALIGN> { |
381 | 430k | fn default() -> Self { |
382 | 430k | Self::with_min_align() |
383 | 430k | } |
  Instantiations: <bumpalo::Bump as core::default::Default>::default executed 96.6k and 333k times; two further instantiations (<bumpalo::Bump as core::default::Default>::default, <bumpalo::Bump<_> as core::default::Default>::default) never executed.
384 | | } |
385 | | |
386 | | impl<const MIN_ALIGN: usize> Drop for Bump<MIN_ALIGN> { |
387 | 860k | fn drop(&mut self) { |
388 | 860k | unsafe { |
389 | 860k | dealloc_chunk_list(self.current_chunk_footer.get()); |
390 | 860k | } |
391 | 860k | } |
  Instantiations: <bumpalo::Bump as core::ops::drop::Drop>::drop executed 96.6k, 430k, and 333k times; three further instantiations (two of <bumpalo::Bump as core::ops::drop::Drop>::drop, one of <bumpalo::Bump<_> as core::ops::drop::Drop>::drop) never executed.
392 | | } |
393 | | |
394 | | #[inline] |
395 | 860k | unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) { |
396 | 2.33M | while !footer.as_ref().is_empty() { |
397 | 1.47M | let f = footer; |
398 | 1.47M | footer = f.as_ref().prev.get(); |
399 | 1.47M | dealloc(f.as_ref().data.as_ptr(), f.as_ref().layout); |
400 | 1.47M | } |
401 | 860k | } |
402 | | |
403 | | // A `Bump` is safe to send between threads because nothing aliases its owned |
404 | | // chunks until you start allocating from it. But by the time you allocate from |
405 | | // it, the returned references to allocations borrow the `Bump` and therefore |
406 | | // prevent sending the `Bump` across threads until the borrows end. |
407 | | unsafe impl<const MIN_ALIGN: usize> Send for Bump<MIN_ALIGN> {} |
408 | | |
409 | | #[inline] |
410 | 0 | fn is_pointer_aligned_to<T>(pointer: *mut T, align: usize) -> bool { |
411 | 0 | debug_assert!(align.is_power_of_two()); |
412 | | |
413 | 0 | let pointer = pointer as usize; |
414 | 0 | let pointer_aligned = round_down_to(pointer, align); |
415 | 0 | pointer == pointer_aligned |
416 | 0 | } |
  Instantiations: bumpalo::is_pointer_aligned_to::<u8> and bumpalo::is_pointer_aligned_to::<_> never executed.
417 | | |
418 | | #[inline] |
419 | 46.1M | pub(crate) const fn round_up_to(n: usize, divisor: usize) -> Option<usize> { |
420 | 46.1M | debug_assert!(divisor > 0); |
421 | 46.1M | debug_assert!(divisor.is_power_of_two()); |
422 | 46.1M | match n.checked_add(divisor - 1) { |
423 | 46.1M | Some(x) => Some(x & !(divisor - 1)), |
424 | 0 | None => None, |
425 | | } |
426 | 46.1M | } |
427 | | |
428 | | /// Like `round_up_to` but turns overflow into undefined behavior rather than |
429 | | /// returning `None`. |
430 | | #[inline] |
431 | 42.6M | pub(crate) unsafe fn round_up_to_unchecked(n: usize, divisor: usize) -> usize { |
432 | 42.6M | match round_up_to(n, divisor) { |
433 | 42.6M | Some(x) => x, |
434 | | None => { |
435 | 0 | debug_assert!(false, "round_up_to_unchecked failed"); |
436 | 0 | core::hint::unreachable_unchecked() |
437 | | } |
438 | | } |
439 | 42.6M | } |
440 | | |
441 | | #[inline] |
442 | 1.80M | pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize { |
443 | 1.80M | debug_assert!(divisor > 0); |
444 | 1.80M | debug_assert!(divisor.is_power_of_two()); |
445 | 1.80M | n & !(divisor - 1) |
446 | 1.80M | } |
447 | | |
448 | | /// Same as `round_down_to` but preserves pointer provenance. |
449 | | #[inline] |
450 | 43.6M | pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 { |
451 | 43.6M | debug_assert!(divisor > 0); |
452 | 43.6M | debug_assert!(divisor.is_power_of_two()); |
453 | 43.6M | ptr.wrapping_sub(ptr as usize & (divisor - 1)) |
454 | 43.6M | } |
455 | | |
456 | | #[inline] |
457 | 458k | pub(crate) unsafe fn round_mut_ptr_up_to_unchecked(ptr: *mut u8, divisor: usize) -> *mut u8 { |
458 | 458k | debug_assert!(divisor > 0); |
459 | 458k | debug_assert!(divisor.is_power_of_two()); |
460 | 458k | let aligned = round_up_to_unchecked(ptr as usize, divisor); |
461 | 458k | let delta = aligned - (ptr as usize); |
462 | 458k | ptr.add(delta) |
463 | 458k | } |
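A small demonstration of the power-of-two rounding tricks used by the helpers above. They are crate-private, so this re-implements the same expressions purely for illustration.

```rust
fn main() {
    let divisor = 8usize; // must be a power of two

    // round_up_to: add (divisor - 1), then clear the low bits.
    let round_up = |n: usize| n.checked_add(divisor - 1).map(|x| x & !(divisor - 1));
    assert_eq!(round_up(13), Some(16));
    assert_eq!(round_up(16), Some(16));
    assert_eq!(round_up(usize::MAX), None); // overflow is reported, not wrapped

    // round_down_to: just clear the low bits.
    let round_down = |n: usize| n & !(divisor - 1);
    assert_eq!(round_down(13), 8);
    assert_eq!(round_down(16), 16);

    // round_mut_ptr_down_to does the same to a pointer, via wrapping_sub of
    // the pointer's misalignment, which keeps provenance intact.
    let buf = [0u8; 32];
    let p = buf.as_ptr().wrapping_add(13) as *mut u8;
    let aligned = p.wrapping_sub(p as usize & (divisor - 1));
    assert_eq!(aligned as usize % divisor, 0);
}
```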
464 | | |
465 | | // The typical page size these days. |
466 | | // |
467 | | // Note that we don't need to exactly match page size for correctness, and it is |
468 | | // okay if this is smaller than the real page size in practice. It isn't worth |
469 | | // the portability concerns and lack of const propagation that dynamically |
470 | | // looking up the actual page size implies. |
471 | | const TYPICAL_PAGE_SIZE: usize = 0x1000; |
472 | | |
473 | | // We only support alignments of up to 16 bytes for iter_allocated_chunks. |
474 | | const SUPPORTED_ITER_ALIGNMENT: usize = 16; |
475 | | const CHUNK_ALIGN: usize = SUPPORTED_ITER_ALIGNMENT; |
476 | | const FOOTER_SIZE: usize = mem::size_of::<ChunkFooter>(); |
477 | | |
478 | | // Assert that `ChunkFooter`'s alignment is at most the supported chunk alignment. |
479 | | // This will give a compile-time error if that is not the case. |
480 | | const _FOOTER_ALIGN_ASSERTION: () = { |
481 | | assert!(mem::align_of::<ChunkFooter>() <= CHUNK_ALIGN); |
482 | | }; |
483 | | |
484 | | // Maximum typical overhead per allocation imposed by allocators. |
485 | | const MALLOC_OVERHEAD: usize = 16; |
486 | | |
487 | | // This is the overhead from malloc, footer and alignment. For instance, if |
488 | | // we want to request a chunk of memory that has at least X bytes usable for |
489 | | // allocations (where X is aligned to CHUNK_ALIGN), then we expect that, |
490 | | // after adding a footer, malloc overhead, and alignment, the chunk of memory |
491 | | // the allocator actually sets aside for us is X+OVERHEAD rounded up to the |
492 | | // nearest suitable size boundary. |
493 | | const OVERHEAD: usize = match round_up_to(MALLOC_OVERHEAD + FOOTER_SIZE, CHUNK_ALIGN) { |
494 | | Some(x) => x, |
495 | | None => panic!(), |
496 | | }; |
497 | | |
498 | | // The target size of our first allocation, including our overhead. The |
499 | | // available bump capacity will be smaller. |
500 | | const FIRST_ALLOCATION_GOAL: usize = 1 << 9; |
501 | | |
502 | | // The actual size of the first allocation is going to be a bit smaller than the |
503 | | // goal. We need to make room for the footer, and we also need to take the |
504 | | // alignment into account. We're trying to avoid this kind of situation: |
505 | | // https://blog.mozilla.org/nnethercote/2011/08/05/clownshoes-available-in-sizes-2101-and-up/ |
506 | | const DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER: usize = FIRST_ALLOCATION_GOAL - OVERHEAD; |
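The arithmetic behind these constants is easiest to see with concrete numbers. A hedged sketch, assuming a 64-bit target where `ChunkFooter` occupies 48 bytes (that size is an assumption for illustration, not something the crate guarantees).

```rust
fn main() {
    let malloc_overhead: usize = 16; // MALLOC_OVERHEAD
    let footer_size: usize = 48;     // mem::size_of::<ChunkFooter>() (assumed)
    let chunk_align: usize = 16;     // CHUNK_ALIGN

    // OVERHEAD = round_up_to(MALLOC_OVERHEAD + FOOTER_SIZE, CHUNK_ALIGN)
    let overhead = (malloc_overhead + footer_size + chunk_align - 1) & !(chunk_align - 1);
    assert_eq!(overhead, 64);

    // DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER = FIRST_ALLOCATION_GOAL - OVERHEAD
    let first_allocation_goal: usize = 1 << 9; // 512
    let default_chunk_size_without_footer = first_allocation_goal - overhead;
    assert_eq!(default_chunk_size_without_footer, 448);

    // The actual `alloc` request for the first default chunk is then 448 bytes
    // of bump space + 48 bytes of footer = 496 bytes, which together with
    // ~16 bytes of allocator bookkeeping lands on the 512-byte goal.
    assert_eq!(default_chunk_size_without_footer + footer_size, 496);
}
```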
507 | | |
508 | | /// The memory size and alignment details for a potential new chunk |
509 | | /// allocation. |
510 | | #[derive(Debug, Clone, Copy)] |
511 | | struct NewChunkMemoryDetails { |
512 | | new_size_without_footer: usize, |
513 | | align: usize, |
514 | | size: usize, |
515 | | } |
516 | | |
517 | | /// Wrapper around `Layout::from_size_align` that converts errors into `AllocErr`. |
518 | | #[inline] |
519 | 2.04M | fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> { |
520 | 2.04M | Layout::from_size_align(size, align).map_err(|_| AllocErr) |
521 | 2.04M | } |
522 | | |
523 | | #[cold] |
524 | | #[inline(never)] |
525 | 0 | fn allocation_size_overflow<T>() -> T { |
526 | 0 | panic!("requested allocation size overflowed") |
527 | | } |
528 | | |
529 | | // NB: We don't have constructors as methods on `impl<N> Bump<N>` that return |
530 | | // `Self` because then `rustc` can't infer the `N` if it isn't explicitly |
531 | | // provided, even though it has a default value. There doesn't seem to be a good |
532 | | // workaround, other than putting constructors on the `Bump<DEFAULT>`; even |
533 | | // `std` does this same thing with `HashMap`, for example. |
534 | | impl Bump<1> { |
535 | | /// Construct a new arena to bump allocate into. |
536 | | /// |
537 | | /// ## Example |
538 | | /// |
539 | | /// ``` |
540 | | /// let bump = bumpalo::Bump::new(); |
541 | | /// # let _ = bump; |
542 | | /// ``` |
543 | 0 | pub fn new() -> Self { |
544 | 0 | Self::with_capacity(0) |
545 | 0 | } |
546 | | |
547 | | /// Attempt to construct a new arena to bump allocate into. |
548 | | /// |
549 | | /// ## Example |
550 | | /// |
551 | | /// ``` |
552 | | /// let bump = bumpalo::Bump::try_new(); |
553 | | /// # let _ = bump.unwrap(); |
554 | | /// ``` |
555 | 0 | pub fn try_new() -> Result<Self, AllocErr> { |
556 | 0 | Bump::try_with_capacity(0) |
557 | 0 | } |
558 | | |
559 | | /// Construct a new arena with the specified byte capacity to bump allocate |
560 | | /// into. |
561 | | /// |
562 | | /// ## Example |
563 | | /// |
564 | | /// ``` |
565 | | /// let bump = bumpalo::Bump::with_capacity(100); |
566 | | /// # let _ = bump; |
567 | | /// ``` |
568 | | /// |
569 | | /// ## Panics |
570 | | /// |
571 | | /// Panics if allocating the initial capacity fails. |
572 | 430k | pub fn with_capacity(capacity: usize) -> Self { |
573 | 430k | Self::try_with_capacity(capacity).unwrap_or_else(|_| oom()) |
574 | 430k | } |
575 | | |
576 | | /// Attempt to construct a new arena with the specified byte capacity to |
577 | | /// bump allocate into. |
578 | | /// |
579 | | /// Propagates errors when allocating the initial capacity. |
580 | | /// |
581 | | /// ## Example |
582 | | /// |
583 | | /// ``` |
584 | | /// # fn _foo() -> Result<(), bumpalo::AllocErr> { |
585 | | /// let bump = bumpalo::Bump::try_with_capacity(100)?; |
586 | | /// # let _ = bump; |
587 | | /// # Ok(()) |
588 | | /// # } |
589 | | /// ``` |
590 | 430k | pub fn try_with_capacity(capacity: usize) -> Result<Self, AllocErr> { |
591 | 430k | Self::try_with_min_align_and_capacity(capacity) |
592 | 430k | } |
593 | | } |
594 | | |
595 | | impl<const MIN_ALIGN: usize> Bump<MIN_ALIGN> { |
596 | | /// Create a new `Bump` that enforces a minimum alignment. |
597 | | /// |
598 | | /// The minimum alignment must be a power of two and no larger than `16`. |
599 | | /// |
600 | | /// Enforcing a minimum alignment can speed up allocation of objects with |
601 | | /// alignment less than or equal to the minimum alignment. This comes at the |
602 | | /// cost of introducing otherwise-unnecessary padding between allocations of |
603 | | /// objects with alignment less than the minimum. |
604 | | /// |
605 | | /// # Example |
606 | | /// |
607 | | /// ``` |
608 | | /// type BumpAlign8 = bumpalo::Bump<8>; |
609 | | /// let bump = BumpAlign8::with_min_align(); |
610 | | /// for x in 0..u8::MAX { |
611 | | /// let x = bump.alloc(x); |
612 | | /// assert_eq!((x as *mut _ as usize) % 8, 0, "x is aligned to 8"); |
613 | | /// } |
614 | | /// ``` |
615 | | /// |
616 | | /// # Panics |
617 | | /// |
618 | | /// Panics on invalid minimum alignments. |
619 | | // |
620 | | // Because of `rustc`'s poor type inference for default type/const |
621 | | // parameters (see the comment above the `impl Bump` block with no const |
622 | | // `MIN_ALIGN` parameter) and because we don't want to force everyone to |
623 | | // specify a minimum alignment with `Bump::new()` et al, we have a separate |
624 | | // constructor for specifying the minimum alignment. |
625 | 430k | pub fn with_min_align() -> Self { |
626 | 430k | assert!( |
627 | 430k | MIN_ALIGN.is_power_of_two(), |
628 | | "MIN_ALIGN must be a power of two; found {MIN_ALIGN}" |
629 | | ); |
630 | 430k | assert!( |
631 | 430k | MIN_ALIGN <= CHUNK_ALIGN, |
632 | | "MIN_ALIGN may not be larger than {CHUNK_ALIGN}; found {MIN_ALIGN}" |
633 | | ); |
634 | | |
635 | 430k | Bump { |
636 | 430k | current_chunk_footer: Cell::new(EMPTY_CHUNK.get()), |
637 | 430k | allocation_limit: Cell::new(None), |
638 | 430k | } |
639 | 430k | } |
  Instantiations: <bumpalo::Bump>::with_min_align executed 96.6k and 333k times; <bumpalo::Bump>::with_min_align and <bumpalo::Bump<_>>::with_min_align never executed.
640 | | |
641 | | /// Create a new `Bump` that enforces a minimum alignment and starts with |
642 | | /// room for at least `capacity` bytes. |
643 | | /// |
644 | | /// The minimum alignment must be a power of two and no larger than `16`. |
645 | | /// |
646 | | /// Enforcing a minimum alignment can speed up allocation of objects with |
647 | | /// alignment less than or equal to the minimum alignment. This comes at the |
648 | | /// cost of introducing otherwise-unnecessary padding between allocations of |
649 | | /// objects with alignment less than the minimum. |
650 | | /// |
651 | | /// # Example |
652 | | /// |
653 | | /// ``` |
654 | | /// type BumpAlign8 = bumpalo::Bump<8>; |
655 | | /// let mut bump = BumpAlign8::with_min_align_and_capacity(8 * 100); |
656 | | /// for x in 0..100_u64 { |
657 | | /// let x = bump.alloc(x); |
658 | | /// assert_eq!((x as *mut _ as usize) % 8, 0, "x is aligned to 8"); |
659 | | /// } |
660 | | /// assert_eq!( |
661 | | /// bump.iter_allocated_chunks().count(), 1, |
662 | | /// "initial chunk had capacity for all allocations", |
663 | | /// ); |
664 | | /// ``` |
665 | | /// |
666 | | /// # Panics |
667 | | /// |
668 | | /// Panics on invalid minimum alignments. |
669 | | /// |
670 | | /// Panics if allocating the initial capacity fails. |
671 | 0 | pub fn with_min_align_and_capacity(capacity: usize) -> Self { |
672 | 0 | Self::try_with_min_align_and_capacity(capacity).unwrap_or_else(|_| oom()) |
673 | 0 | } |
674 | | |
675 | | /// Create a new `Bump` that enforces a minimum alignment and starts with |
676 | | /// room for at least `capacity` bytes. |
677 | | /// |
678 | | /// The minimum alignment must be a power of two and no larger than `16`. |
679 | | /// |
680 | | /// Enforcing a minimum alignment can speed up allocation of objects with |
681 | | /// alignment less than or equal to the minimum alignment. This comes at the |
682 | | /// cost of introducing otherwise-unnecessary padding between allocations of |
683 | | /// objects with alignment less than the minimum. |
684 | | /// |
685 | | /// # Example |
686 | | /// |
687 | | /// ``` |
688 | | /// # fn _foo() -> Result<(), bumpalo::AllocErr> { |
689 | | /// type BumpAlign8 = bumpalo::Bump<8>; |
690 | | /// let mut bump = BumpAlign8::try_with_min_align_and_capacity(8 * 100)?; |
691 | | /// for x in 0..100_u64 { |
692 | | /// let x = bump.alloc(x); |
693 | | /// assert_eq!((x as *mut _ as usize) % 8, 0, "x is aligned to 8"); |
694 | | /// } |
695 | | /// assert_eq!( |
696 | | /// bump.iter_allocated_chunks().count(), 1, |
697 | | /// "initial chunk had capacity for all allocations", |
698 | | /// ); |
699 | | /// # Ok(()) |
700 | | /// # } |
701 | | /// ``` |
702 | | /// |
703 | | /// # Panics |
704 | | /// |
705 | | /// Panics on invalid minimum alignments. |
706 | | /// |
707 | | /// Panics if allocating the initial capacity fails. |
708 | 430k | pub fn try_with_min_align_and_capacity(capacity: usize) -> Result<Self, AllocErr> { |
709 | 430k | assert!( |
710 | 430k | MIN_ALIGN.is_power_of_two(), |
711 | | "MIN_ALIGN must be a power of two; found {MIN_ALIGN}" |
712 | | ); |
713 | 430k | assert!( |
714 | 430k | MIN_ALIGN <= CHUNK_ALIGN, |
715 | | "MIN_ALIGN may not be larger than {CHUNK_ALIGN}; found {MIN_ALIGN}" |
716 | | ); |
717 | | |
718 | 430k | if capacity == 0 { |
719 | 0 | return Ok(Bump { |
720 | 0 | current_chunk_footer: Cell::new(EMPTY_CHUNK.get()), |
721 | 0 | allocation_limit: Cell::new(None), |
722 | 0 | }); |
723 | 430k | } |
724 | | |
725 | 430k | let layout = layout_from_size_align(capacity, MIN_ALIGN)?; |
726 | | |
727 | 430k | let chunk_footer = unsafe { |
728 | 430k | Self::new_chunk( |
729 | 430k | Self::new_chunk_memory_details(None, layout).ok_or(AllocErr)?, |
730 | 430k | layout, |
731 | 430k | EMPTY_CHUNK.get(), |
732 | | ) |
733 | 430k | .ok_or(AllocErr)? |
734 | | }; |
735 | | |
736 | 430k | Ok(Bump { |
737 | 430k | current_chunk_footer: Cell::new(chunk_footer), |
738 | 430k | allocation_limit: Cell::new(None), |
739 | 430k | }) |
740 | 430k | } |
741 | | |
742 | | /// Get this bump arena's minimum alignment. |
743 | | /// |
744 | | /// All objects allocated in this arena get aligned to this value. |
745 | | /// |
746 | | /// ## Example |
747 | | /// |
748 | | /// ``` |
749 | | /// let bump2 = bumpalo::Bump::<2>::with_min_align(); |
750 | | /// assert_eq!(bump2.min_align(), 2); |
751 | | /// |
752 | | /// let bump4 = bumpalo::Bump::<4>::with_min_align(); |
753 | | /// assert_eq!(bump4.min_align(), 4); |
754 | | /// ``` |
755 | | #[inline] |
756 | 0 | pub fn min_align(&self) -> usize { |
757 | 0 | MIN_ALIGN |
758 | 0 | } |
759 | | |
760 | | /// The allocation limit for this arena in bytes. |
761 | | /// |
762 | | /// ## Example |
763 | | /// |
764 | | /// ``` |
765 | | /// let bump = bumpalo::Bump::with_capacity(0); |
766 | | /// |
767 | | /// assert_eq!(bump.allocation_limit(), None); |
768 | | /// |
769 | | /// bump.set_allocation_limit(Some(6)); |
770 | | /// |
771 | | /// assert_eq!(bump.allocation_limit(), Some(6)); |
772 | | /// |
773 | | /// bump.set_allocation_limit(None); |
774 | | /// |
775 | | /// assert_eq!(bump.allocation_limit(), None); |
776 | | /// ``` |
777 | 1.04M | pub fn allocation_limit(&self) -> Option<usize> { |
778 | 1.04M | self.allocation_limit.get() |
779 | 1.04M | } |
  Instantiations: <bumpalo::Bump>::allocation_limit executed 1.04M times; <bumpalo::Bump>::allocation_limit and <bumpalo::Bump<_>>::allocation_limit never executed.
780 | | |
781 | | /// Set the allocation limit in bytes for this arena. |
782 | | /// |
783 | | /// The allocation limit is only enforced when allocating new backing chunks for |
784 | | /// a `Bump`. Updating the allocation limit will not affect existing allocations |
785 | | /// or any future allocations within the `Bump`'s current chunk. |
786 | | /// |
787 | | /// ## Example |
788 | | /// |
789 | | /// ``` |
790 | | /// let bump = bumpalo::Bump::with_capacity(0); |
791 | | /// |
792 | | /// bump.set_allocation_limit(Some(0)); |
793 | | /// |
794 | | /// assert!(bump.try_alloc(5).is_err()); |
795 | | /// ``` |
796 | 0 | pub fn set_allocation_limit(&self, limit: Option<usize>) { |
797 | 0 | self.allocation_limit.set(limit); |
798 | 0 | } |
799 | | |
800 | | /// How much headroom an arena has before it hits its allocation |
801 | | /// limit. |
802 | 1.04M | fn allocation_limit_remaining(&self) -> Option<usize> { |
803 | 1.04M | self.allocation_limit.get().and_then(|allocation_limit| { |
804 | 0 | let allocated_bytes = self.allocated_bytes(); |
805 | 0 | if allocated_bytes > allocation_limit { |
806 | 0 | None |
807 | | } else { |
808 | 0 | Some(usize::abs_diff(allocation_limit, allocated_bytes)) |
809 | | } |
810 | 0 | }) |
  Closure instantiations of allocation_limit_remaining::{closure#0} (two for <bumpalo::Bump>, one for <bumpalo::Bump<_>>) never executed.
811 | 1.04M | } |
  Instantiations: <bumpalo::Bump>::allocation_limit_remaining executed 1.04M times; <bumpalo::Bump>::allocation_limit_remaining and <bumpalo::Bump<_>>::allocation_limit_remaining never executed.
812 | | |
813 | | /// Whether a request to allocate a new chunk with a given size for a given |
814 | | /// requested layout will fit under the allocation limit set on a `Bump`. |
815 | 1.04M | fn chunk_fits_under_limit( |
816 | 1.04M | allocation_limit_remaining: Option<usize>, |
817 | 1.04M | new_chunk_memory_details: NewChunkMemoryDetails, |
818 | 1.04M | ) -> bool { |
819 | 1.04M | allocation_limit_remaining |
820 | 1.04M | .map(|allocation_limit_left| { |
821 | 0 | allocation_limit_left >= new_chunk_memory_details.new_size_without_footer |
822 | 0 | }) |
  Closure instantiations of chunk_fits_under_limit::{closure#0} (two for <bumpalo::Bump>, one for <bumpalo::Bump<_>>) never executed.
823 | 1.04M | .unwrap_or(true) |
824 | 1.04M | } |
  Instantiations: <bumpalo::Bump>::chunk_fits_under_limit executed 1.04M times; <bumpalo::Bump>::chunk_fits_under_limit and <bumpalo::Bump<_>>::chunk_fits_under_limit never executed.
825 | | |
826 | | /// Determine the memory details including final size, alignment and final |
827 | | /// size without footer for a new chunk that would be allocated to fulfill |
828 | | /// an allocation request. |
829 | 1.47M | fn new_chunk_memory_details( |
830 | 1.47M | new_size_without_footer: Option<usize>, |
831 | 1.47M | requested_layout: Layout, |
832 | 1.47M | ) -> Option<NewChunkMemoryDetails> { |
833 | | // We must have `CHUNK_ALIGN` or better alignment... |
834 | 1.47M | let align = CHUNK_ALIGN |
835 | | // and we have to have at least our configured minimum alignment... |
836 | 1.47M | .max(MIN_ALIGN) |
837 | | // and make sure we satisfy the requested allocation's alignment. |
838 | 1.47M | .max(requested_layout.align()); |
839 | | |
840 | 1.47M | let mut new_size_without_footer = |
841 | 1.47M | new_size_without_footer.unwrap_or(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER); |
842 | | |
843 | 1.47M | let requested_size = |
844 | 1.47M | round_up_to(requested_layout.size(), align).unwrap_or_else(allocation_size_overflow); |
845 | 1.47M | new_size_without_footer = new_size_without_footer.max(requested_size); |
846 | | |
847 | | // We want our allocations to play nice with the memory allocator, and |
848 | | // waste as little memory as possible. For small allocations, this means |
849 | | // that the entire allocation including the chunk footer and mallocs |
850 | | // internal overhead is as close to a power of two as we can go without |
851 | | // going over. For larger allocations, we only need to get close to a |
852 | | // page boundary without going over. |
853 | 1.47M | if new_size_without_footer < TYPICAL_PAGE_SIZE { |
854 | 1.38M | new_size_without_footer = |
855 | 1.38M | (new_size_without_footer + OVERHEAD).next_power_of_two() - OVERHEAD; |
856 | 1.38M | } else { |
857 | | new_size_without_footer = |
858 | 89.7k | round_up_to(new_size_without_footer + OVERHEAD, TYPICAL_PAGE_SIZE)? - OVERHEAD; |
859 | | } |
860 | | |
861 | 1.47M | debug_assert_eq!(align % CHUNK_ALIGN, 0); |
862 | 1.47M | debug_assert_eq!(new_size_without_footer % CHUNK_ALIGN, 0); |
863 | 1.47M | let size = new_size_without_footer |
864 | 1.47M | .checked_add(FOOTER_SIZE) |
865 | 1.47M | .unwrap_or_else(allocation_size_overflow); |
866 | | |
867 | 1.47M | Some(NewChunkMemoryDetails { |
868 | 1.47M | new_size_without_footer, |
869 | 1.47M | size, |
870 | 1.47M | align, |
871 | 1.47M | }) |
872 | 1.47M | } |
  Instantiations: <bumpalo::Bump>::new_chunk_memory_details executed 1.04M and 430k times; one further instantiation of <bumpalo::Bump>::new_chunk_memory_details never executed.
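A rough sketch of what this sizing does for one concrete request, reproducing the arithmetic outside the crate. The constants and the 6000-byte request are assumptions chosen for illustration (OVERHEAD = 64 and FOOTER_SIZE = 48 correspond to a typical 64-bit target).

```rust
fn main() {
    let overhead: usize = 64;            // OVERHEAD (assumed)
    let footer_size: usize = 48;         // FOOTER_SIZE (assumed)
    let typical_page_size: usize = 4096; // TYPICAL_PAGE_SIZE

    // Suppose a growing allocation asks for 6000 bytes of bump space.
    let mut new_size_without_footer: usize = 6000;

    // Large requests are rounded so that request + OVERHEAD lands on a page
    // boundary, then OVERHEAD is subtracted back out.
    new_size_without_footer =
        ((new_size_without_footer + overhead + typical_page_size - 1)
            & !(typical_page_size - 1))
            - overhead;
    assert_eq!(new_size_without_footer, 8192 - 64); // 8128 usable bytes

    // The actual allocation also carries the footer at the end of the chunk.
    let size = new_size_without_footer + footer_size;
    assert_eq!(size, 8176);
}
```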
873 | | |
874 | | /// Allocate a new chunk and return its initialized footer. |
875 | | /// |
876 | | /// `new_chunk_memory_details` describes the size and alignment of the new |
877 | | /// chunk, `requested_layout` is the layout of the allocation request that |
878 | | /// triggered the fallback to a new chunk, and `prev` is the chunk footer that |
879 | 1.47M | unsafe fn new_chunk( |
880 | 1.47M | new_chunk_memory_details: NewChunkMemoryDetails, |
881 | 1.47M | requested_layout: Layout, |
882 | 1.47M | prev: NonNull<ChunkFooter>, |
883 | 1.47M | ) -> Option<NonNull<ChunkFooter>> { |
884 | | let NewChunkMemoryDetails { |
885 | 1.47M | new_size_without_footer, |
886 | 1.47M | align, |
887 | 1.47M | size, |
888 | 1.47M | } = new_chunk_memory_details; |
889 | | |
890 | 1.47M | let layout = layout_from_size_align(size, align).ok()?; |
891 | | |
892 | 1.47M | debug_assert!(size >= requested_layout.size()); |
893 | | |
894 | 1.47M | let data = alloc(layout); |
895 | 1.47M | let data = NonNull::new(data)?; |
896 | | |
897 | | // The `ChunkFooter` is at the end of the chunk. |
898 | 1.47M | let footer_ptr = data.as_ptr().add(new_size_without_footer); |
899 | 1.47M | debug_assert_eq!((data.as_ptr() as usize) % align, 0); |
900 | 1.47M | debug_assert_eq!(footer_ptr as usize % CHUNK_ALIGN, 0); |
901 | 1.47M | let footer_ptr = footer_ptr as *mut ChunkFooter; |
902 | | |
903 | | // The bump pointer is initialized to the end of the range we will bump |
904 | | // out of, rounded down to the minimum alignment. It is the |
905 | | // `NewChunkMemoryDetails` constructor's responsibility to ensure that |
906 | | // even after this rounding we have enough non-zero capacity in the |
907 | | // chunk. |
908 | 1.47M | let ptr = round_mut_ptr_down_to(footer_ptr.cast::<u8>(), MIN_ALIGN); |
909 | 1.47M | debug_assert_eq!(ptr as usize % MIN_ALIGN, 0); |
910 | 1.47M | debug_assert!( |
911 | 0 | data.as_ptr() < ptr, |
912 | | "bump pointer {ptr:#p} should still be greater than or equal to the \ |
913 | | start of the bump chunk {data:#p}" |
914 | | ); |
915 | 1.47M | debug_assert_eq!( |
916 | 0 | (ptr as usize) - (data.as_ptr() as usize), |
917 | | new_size_without_footer |
918 | | ); |
919 | | |
920 | 1.47M | let ptr = Cell::new(NonNull::new_unchecked(ptr)); |
921 | | |
922 | | // The `allocated_bytes` of a new chunk counts the total size |
923 | | // of the chunks, not how much of the chunks are used. |
924 | 1.47M | let allocated_bytes = prev.as_ref().allocated_bytes + new_size_without_footer; |
925 | | |
926 | 1.47M | ptr::write( |
927 | 1.47M | footer_ptr, |
928 | 1.47M | ChunkFooter { |
929 | 1.47M | data, |
930 | 1.47M | layout, |
931 | 1.47M | prev: Cell::new(prev), |
932 | 1.47M | ptr, |
933 | 1.47M | allocated_bytes, |
934 | 1.47M | }, |
935 | | ); |
936 | | |
937 | 1.47M | Some(NonNull::new_unchecked(footer_ptr)) |
938 | 1.47M | } |
  Instantiations: <bumpalo::Bump>::new_chunk executed 1.04M and 430k times; one further instantiation of <bumpalo::Bump>::new_chunk never executed.
939 | | |
940 | | /// Reset this bump allocator. |
941 | | /// |
942 | | /// Performs mass deallocation on everything allocated in this arena by |
943 | | /// resetting the pointer into the underlying chunk of memory to the start |
944 | | /// of the chunk. Does not run any `Drop` implementations on deallocated |
945 | | /// objects; see [the top-level documentation](struct.Bump.html) for details. |
946 | | /// |
947 | | /// If this arena has allocated multiple chunks to bump allocate into, then |
948 | | /// the excess chunks are returned to the global allocator. |
949 | | /// |
950 | | /// ## Example |
951 | | /// |
952 | | /// ``` |
953 | | /// let mut bump = bumpalo::Bump::new(); |
954 | | /// |
955 | | /// // Allocate a bunch of things. |
956 | | /// { |
957 | | /// for i in 0..100 { |
958 | | /// bump.alloc(i); |
959 | | /// } |
960 | | /// } |
961 | | /// |
962 | | /// // Reset the arena. |
963 | | /// bump.reset(); |
964 | | /// |
965 | | /// // Allocate some new things in the space previously occupied by the |
966 | | /// // original things. |
967 | | /// for j in 200..400 { |
968 | | /// bump.alloc(j); |
969 | | /// } |
970 | | ///``` |
971 | 430k | pub fn reset(&mut self) { |
972 | | // Takes `&mut self` so `self` must be unique and there can't be any |
973 | | // borrows active that would get invalidated by resetting. |
974 | | unsafe { |
975 | 430k | if self.current_chunk_footer.get().as_ref().is_empty() { |
976 | 430k | return; |
977 | 0 | } |
978 | | |
979 | 0 | let mut cur_chunk = self.current_chunk_footer.get(); |
980 | | |
981 | | // Deallocate all chunks except the current one |
982 | 0 | let prev_chunk = cur_chunk.as_ref().prev.replace(EMPTY_CHUNK.get()); |
983 | 0 | dealloc_chunk_list(prev_chunk); |
984 | | |
985 | | // Reset the bump finger to the end of the chunk. |
986 | 0 | debug_assert!( |
987 | 0 | is_pointer_aligned_to(cur_chunk.as_ptr(), MIN_ALIGN), |
988 | | "bump pointer {cur_chunk:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}" |
989 | | ); |
990 | 0 | cur_chunk.as_ref().ptr.set(cur_chunk.cast()); |
991 | | |
992 | | // Reset the allocated size of the chunk. |
993 | 0 | cur_chunk.as_mut().allocated_bytes = cur_chunk.as_ref().layout.size() - FOOTER_SIZE; |
994 | | |
995 | 0 | debug_assert!( |
996 | 0 | self.current_chunk_footer |
997 | 0 | .get() |
998 | 0 | .as_ref() |
999 | 0 | .prev |
1000 | 0 | .get() |
1001 | 0 | .as_ref() |
1002 | 0 | .is_empty(), |
1003 | | "We should only have a single chunk" |
1004 | | ); |
1005 | 0 | debug_assert_eq!( |
1006 | 0 | self.current_chunk_footer.get().as_ref().ptr.get(), |
1007 | 0 | self.current_chunk_footer.get().cast(), |
1008 | | "Our chunk's bump finger should be reset to the start of its allocation" |
1009 | | ); |
1010 | | } |
 1011 |   430k |     }
[per-line instantiation table elided; 96.6k executions]
Unexecuted instantiation: <bumpalo::Bump<_>>::reset
[per-line instantiation table elided; 333k executions]
1012 | | |
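The implementation comments above note that `reset` keeps only the most recently allocated chunk and returns the rest to the global allocator. The sketch below is not part of the upstream documentation; it just illustrates the resulting reuse pattern: once the retained chunk is large enough for one iteration's allocations, later iterations no longer touch the global allocator.

```
// Hedged sketch: reuse one arena across iterations. After the first few
// iterations the arena has grown a chunk large enough for 1000 `usize`s,
// and `reset` keeps exactly that chunk, so subsequent iterations bump
// allocate into the already-reserved memory.
let mut bump = bumpalo::Bump::new();
for _ in 0..10 {
    for i in 0..1000usize {
        bump.alloc(i);
    }
    bump.reset();
}
```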
1013 | | /// Allocate an object in this `Bump` and return an exclusive reference to |
1014 | | /// it. |
1015 | | /// |
1016 | | /// ## Panics |
1017 | | /// |
1018 | | /// Panics if reserving space for `T` fails. |
1019 | | /// |
1020 | | /// ## Example |
1021 | | /// |
1022 | | /// ``` |
1023 | | /// let bump = bumpalo::Bump::new(); |
1024 | | /// let x = bump.alloc("hello"); |
1025 | | /// assert_eq!(*x, "hello"); |
1026 | | /// ``` |
1027 | | #[inline(always)] |
1028 | 0 | pub fn alloc<T>(&self, val: T) -> &mut T { |
1029 | 0 | self.alloc_with(|| val) |
1030 | 0 | } |
1031 | | |
1032 | | /// Try to allocate an object in this `Bump` and return an exclusive |
1033 | | /// reference to it. |
1034 | | /// |
1035 | | /// ## Errors |
1036 | | /// |
1037 | | /// Errors if reserving space for `T` fails. |
1038 | | /// |
1039 | | /// ## Example |
1040 | | /// |
1041 | | /// ``` |
1042 | | /// let bump = bumpalo::Bump::new(); |
1043 | | /// let x = bump.try_alloc("hello"); |
1044 | | /// assert_eq!(x, Ok(&mut "hello")); |
1045 | | /// ``` |
1046 | | #[inline(always)] |
1047 | 0 | pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> { |
1048 | 0 | self.try_alloc_with(|| val) |
1049 | 0 | } |
1050 | | |
 1051 |       |     /// Pre-allocates space for an object in this `Bump`, initializes it using
1052 | | /// the closure, then returns an exclusive reference to it. |
1053 | | /// |
1054 | | /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a |
1055 | | /// discussion on the differences between the `_with` suffixed methods and |
1056 | | /// those methods without it, their performance characteristics, and when |
1057 | | /// you might or might not choose a `_with` suffixed method. |
1058 | | /// |
1059 | | /// ## Panics |
1060 | | /// |
1061 | | /// Panics if reserving space for `T` fails. |
1062 | | /// |
1063 | | /// ## Example |
1064 | | /// |
1065 | | /// ``` |
1066 | | /// let bump = bumpalo::Bump::new(); |
1067 | | /// let x = bump.alloc_with(|| "hello"); |
1068 | | /// assert_eq!(*x, "hello"); |
1069 | | /// ``` |
1070 | | #[inline(always)] |
1071 | 0 | pub fn alloc_with<F, T>(&self, f: F) -> &mut T |
1072 | 0 | where |
1073 | 0 | F: FnOnce() -> T, |
1074 | | { |
1075 | | #[inline(always)] |
1076 | 0 | unsafe fn inner_writer<T, F>(ptr: *mut T, f: F) |
1077 | 0 | where |
1078 | 0 | F: FnOnce() -> T, |
1079 | | { |
1080 | | // This function is translated as: |
1081 | | // - allocate space for a T on the stack |
1082 | | // - call f() with the return value being put onto this stack space |
1083 | | // - memcpy from the stack to the heap |
1084 | | // |
1085 | | // Ideally we want LLVM to always realize that doing a stack |
1086 | | // allocation is unnecessary and optimize the code so it writes |
1087 | | // directly into the heap instead. It seems we get it to realize |
 1088 |       |             // this most consistently if we put this critical line into its
1089 | | // own function instead of inlining it into the surrounding code. |
1090 | 0 | ptr::write(ptr, f()); |
1091 | 0 | } |
1092 | | |
1093 | 0 | let layout = Layout::new::<T>(); |
1094 | | |
1095 | 0 | unsafe { |
1096 | 0 | let p = self.alloc_layout(layout); |
1097 | 0 | let p = p.as_ptr() as *mut T; |
1098 | 0 | inner_writer(p, f); |
1099 | 0 | &mut *p |
1100 | 0 | } |
1101 | 0 | } |
1102 | | |
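The `inner_writer` comment above explains that the closure form exists so LLVM can construct the value directly in the arena instead of building it on the stack and copying it over. The following sketch is only an illustration of the intended usage pattern for large values; whether the intermediate copy is actually elided depends on the optimizer.

```
// Hedged sketch: allocate a large value via `alloc_with` so the compiler
// has the chance to write it straight into the bump chunk.
let bump = bumpalo::Bump::new();
let big: &mut [u8; 4096] = bump.alloc_with(|| [0u8; 4096]);
big[123] = 7;
assert_eq!(big[123], 7);
```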
1103 | | /// Tries to pre-allocate space for an object in this `Bump`, initializes |
1104 | | /// it using the closure, then returns an exclusive reference to it. |
1105 | | /// |
1106 | | /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a |
1107 | | /// discussion on the differences between the `_with` suffixed methods and |
1108 | | /// those methods without it, their performance characteristics, and when |
1109 | | /// you might or might not choose a `_with` suffixed method. |
1110 | | /// |
1111 | | /// ## Errors |
1112 | | /// |
1113 | | /// Errors if reserving space for `T` fails. |
1114 | | /// |
1115 | | /// ## Example |
1116 | | /// |
1117 | | /// ``` |
1118 | | /// let bump = bumpalo::Bump::new(); |
1119 | | /// let x = bump.try_alloc_with(|| "hello"); |
1120 | | /// assert_eq!(x, Ok(&mut "hello")); |
1121 | | /// ``` |
1122 | | #[inline(always)] |
1123 | 0 | pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr> |
1124 | 0 | where |
1125 | 0 | F: FnOnce() -> T, |
1126 | | { |
1127 | | #[inline(always)] |
1128 | 0 | unsafe fn inner_writer<T, F>(ptr: *mut T, f: F) |
1129 | 0 | where |
1130 | 0 | F: FnOnce() -> T, |
1131 | | { |
1132 | | // This function is translated as: |
1133 | | // - allocate space for a T on the stack |
1134 | | // - call f() with the return value being put onto this stack space |
1135 | | // - memcpy from the stack to the heap |
1136 | | // |
1137 | | // Ideally we want LLVM to always realize that doing a stack |
1138 | | // allocation is unnecessary and optimize the code so it writes |
1139 | | // directly into the heap instead. It seems we get it to realize |
 1140 |       |             // this most consistently if we put this critical line into its
1141 | | // own function instead of inlining it into the surrounding code. |
1142 | 0 | ptr::write(ptr, f()); |
1143 | 0 | } |
1144 | | |
1145 | | //SAFETY: Self-contained: |
1146 | | // `p` is allocated for `T` and then a `T` is written. |
1147 | 0 | let layout = Layout::new::<T>(); |
1148 | 0 | let p = self.try_alloc_layout(layout)?; |
1149 | 0 | let p = p.as_ptr() as *mut T; |
1150 | | |
1151 | | unsafe { |
1152 | 0 | inner_writer(p, f); |
1153 | 0 | Ok(&mut *p) |
1154 | | } |
1155 | 0 | } |
1156 | | |
1157 | | /// Pre-allocates space for a [`Result`] in this `Bump`, initializes it using |
1158 | | /// the closure, then returns an exclusive reference to its `T` if [`Ok`]. |
1159 | | /// |
1160 | | /// Iff the allocation fails, the closure is not run. |
1161 | | /// |
1162 | | /// Iff [`Err`], an allocator rewind is *attempted* and the `E` instance is |
1163 | | /// moved out of the allocator to be consumed or dropped as normal. |
1164 | | /// |
1165 | | /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a |
1166 | | /// discussion on the differences between the `_with` suffixed methods and |
1167 | | /// those methods without it, their performance characteristics, and when |
1168 | | /// you might or might not choose a `_with` suffixed method. |
1169 | | /// |
1170 | | /// For caveats specific to fallible initialization, see |
1171 | | /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix). |
1172 | | /// |
1173 | | /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html |
1174 | | /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok |
1175 | | /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err |
1176 | | /// |
1177 | | /// ## Errors |
1178 | | /// |
1179 | | /// Iff the allocation succeeds but `f` fails, that error is forwarded by value. |
1180 | | /// |
1181 | | /// ## Panics |
1182 | | /// |
1183 | | /// Panics if reserving space for `Result<T, E>` fails. |
1184 | | /// |
1185 | | /// ## Example |
1186 | | /// |
1187 | | /// ``` |
1188 | | /// let bump = bumpalo::Bump::new(); |
1189 | | /// let x = bump.alloc_try_with(|| Ok("hello"))?; |
1190 | | /// assert_eq!(*x, "hello"); |
1191 | | /// # Result::<_, ()>::Ok(()) |
1192 | | /// ``` |
1193 | | #[inline(always)] |
1194 | 0 | pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E> |
1195 | 0 | where |
1196 | 0 | F: FnOnce() -> Result<T, E>, |
1197 | | { |
1198 | 0 | let rewind_footer = self.current_chunk_footer.get(); |
1199 | 0 | let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get(); |
1200 | 0 | let mut inner_result_ptr = NonNull::from(self.alloc_with(f)); |
1201 | 0 | match unsafe { inner_result_ptr.as_mut() } { |
1202 | 0 | Ok(t) => Ok(unsafe { |
1203 | 0 | //SAFETY: |
1204 | 0 | // The `&mut Result<T, E>` returned by `alloc_with` may be |
1205 | 0 | // lifetime-limited by `E`, but the derived `&mut T` still has |
1206 | 0 | // the same validity as in `alloc_with` since the error variant |
1207 | 0 | // is already ruled out here. |
1208 | 0 |
|
1209 | 0 | // We could conditionally truncate the allocation here, but |
1210 | 0 | // since it grows backwards, it seems unlikely that we'd get |
1211 | 0 | // any more than the `Result`'s discriminant this way, if |
1212 | 0 | // anything at all. |
1213 | 0 | &mut *(t as *mut _) |
1214 | 0 | }), |
1215 | 0 | Err(e) => unsafe { |
1216 | | // If this result was the last allocation in this arena, we can |
1217 | | // reclaim its space. In fact, sometimes we can do even better |
1218 | | // than simply calling `dealloc` on the result pointer: we can |
1219 | | // reclaim any alignment padding we might have added (which |
1220 | | // `dealloc` cannot do) if we didn't allocate a new chunk for |
1221 | | // this result. |
1222 | 0 | if self.is_last_allocation(inner_result_ptr.cast()) { |
1223 | 0 | let current_footer_p = self.current_chunk_footer.get(); |
1224 | 0 | let current_ptr = ¤t_footer_p.as_ref().ptr; |
1225 | 0 | if current_footer_p == rewind_footer { |
1226 | 0 | // It's still the same chunk, so reset the bump pointer |
1227 | 0 | // to its original value upon entry to this method |
1228 | 0 | // (reclaiming any alignment padding we may have |
1229 | 0 | // added). |
1230 | 0 | current_ptr.set(rewind_ptr); |
1231 | 0 | } else { |
1232 | 0 | // We allocated a new chunk for this result. |
1233 | 0 | // |
1234 | 0 | // We know the result is the only allocation in this |
1235 | 0 | // chunk: Any additional allocations since the start of |
1236 | 0 | // this method could only have happened when running |
1237 | 0 | // the initializer function, which is called *after* |
1238 | 0 | // reserving space for this result. Therefore, since we |
1239 | 0 | // already determined via the check above that this |
1240 | 0 | // result was the last allocation, there must not have |
1241 | 0 | // been any other allocations, and this result is the |
1242 | 0 | // only allocation in this chunk. |
1243 | 0 | // |
1244 | 0 | // Because this is the only allocation in this chunk, |
1245 | 0 | // we can reset the chunk's bump finger to the start of |
1246 | 0 | // the chunk. |
1247 | 0 | current_ptr.set(current_footer_p.as_ref().data); |
1248 | 0 | } |
1249 | 0 | } |
1250 | | //SAFETY: |
1251 | | // As we received `E` semantically by value from `f`, we can |
1252 | | // just copy that value here as long as we avoid a double-drop |
1253 | | // (which can't happen as any specific references to the `E`'s |
1254 | | // data in `self` are destroyed when this function returns). |
1255 | | // |
1256 | | // The order between this and the deallocation doesn't matter |
1257 | | // because `Self: !Sync`. |
1258 | 0 | Err(ptr::read(e as *const _)) |
1259 | | }, |
1260 | | } |
1261 | 0 | } |
1262 | | |
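The doc example above only exercises the `Ok` case. The sketch below (not from the upstream docs) shows the `Err` path that the rewind logic above is about: the error value is moved out of the arena, and the space reserved for the `Result` is reclaimed when it was the most recent allocation.

```
// Hedged sketch of the error path of `alloc_try_with`.
let bump = bumpalo::Bump::new();
let failed: Result<&mut i32, &str> = bump.alloc_try_with(|| Err("init failed"));
assert_eq!(failed, Err("init failed"));

// On success, the reference points into the arena as usual.
let ok: Result<&mut i32, &str> = bump.alloc_try_with(|| Ok(42));
assert_eq!(ok, Ok(&mut 42));
```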
1263 | | /// Tries to pre-allocates space for a [`Result`] in this `Bump`, |
1264 | | /// initializes it using the closure, then returns an exclusive reference |
1265 | | /// to its `T` if all [`Ok`]. |
1266 | | /// |
1267 | | /// Iff the allocation fails, the closure is not run. |
1268 | | /// |
1269 | | /// Iff the closure returns [`Err`], an allocator rewind is *attempted* and |
1270 | | /// the `E` instance is moved out of the allocator to be consumed or dropped |
1271 | | /// as normal. |
1272 | | /// |
1273 | | /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a |
1274 | | /// discussion on the differences between the `_with` suffixed methods and |
1275 | | /// those methods without it, their performance characteristics, and when |
1276 | | /// you might or might not choose a `_with` suffixed method. |
1277 | | /// |
1278 | | /// For caveats specific to fallible initialization, see |
1279 | | /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix). |
1280 | | /// |
1281 | | /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html |
1282 | | /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok |
1283 | | /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err |
1284 | | /// |
1285 | | /// ## Errors |
1286 | | /// |
1287 | | /// Errors with the [`Alloc`](`AllocOrInitError::Alloc`) variant iff |
1288 | | /// reserving space for `Result<T, E>` fails. |
1289 | | /// |
1290 | | /// Iff the allocation succeeds but `f` fails, that error is forwarded by |
1291 | | /// value inside the [`Init`](`AllocOrInitError::Init`) variant. |
1292 | | /// |
1293 | | /// ## Example |
1294 | | /// |
1295 | | /// ``` |
1296 | | /// let bump = bumpalo::Bump::new(); |
1297 | | /// let x = bump.try_alloc_try_with(|| Ok("hello"))?; |
1298 | | /// assert_eq!(*x, "hello"); |
1299 | | /// # Result::<_, bumpalo::AllocOrInitError<()>>::Ok(()) |
1300 | | /// ``` |
1301 | | #[inline(always)] |
1302 | 0 | pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>> |
1303 | 0 | where |
1304 | 0 | F: FnOnce() -> Result<T, E>, |
1305 | | { |
1306 | 0 | let rewind_footer = self.current_chunk_footer.get(); |
1307 | 0 | let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get(); |
1308 | 0 | let mut inner_result_ptr = NonNull::from(self.try_alloc_with(f)?); |
1309 | 0 | match unsafe { inner_result_ptr.as_mut() } { |
1310 | 0 | Ok(t) => Ok(unsafe { |
1311 | 0 | //SAFETY: |
1312 | 0 | // The `&mut Result<T, E>` returned by `alloc_with` may be |
1313 | 0 | // lifetime-limited by `E`, but the derived `&mut T` still has |
1314 | 0 | // the same validity as in `alloc_with` since the error variant |
1315 | 0 | // is already ruled out here. |
1316 | 0 |
|
1317 | 0 | // We could conditionally truncate the allocation here, but |
1318 | 0 | // since it grows backwards, it seems unlikely that we'd get |
1319 | 0 | // any more than the `Result`'s discriminant this way, if |
1320 | 0 | // anything at all. |
1321 | 0 | &mut *(t as *mut _) |
1322 | 0 | }), |
1323 | 0 | Err(e) => unsafe { |
1324 | | // If this result was the last allocation in this arena, we can |
1325 | | // reclaim its space. In fact, sometimes we can do even better |
1326 | | // than simply calling `dealloc` on the result pointer: we can |
1327 | | // reclaim any alignment padding we might have added (which |
1328 | | // `dealloc` cannot do) if we didn't allocate a new chunk for |
1329 | | // this result. |
1330 | 0 | if self.is_last_allocation(inner_result_ptr.cast()) { |
1331 | 0 | let current_footer_p = self.current_chunk_footer.get(); |
1332 | 0 | let current_ptr = ¤t_footer_p.as_ref().ptr; |
1333 | 0 | if current_footer_p == rewind_footer { |
1334 | 0 | // It's still the same chunk, so reset the bump pointer |
1335 | 0 | // to its original value upon entry to this method |
1336 | 0 | // (reclaiming any alignment padding we may have |
1337 | 0 | // added). |
1338 | 0 | current_ptr.set(rewind_ptr); |
1339 | 0 | } else { |
1340 | 0 | // We allocated a new chunk for this result. |
1341 | 0 | // |
1342 | 0 | // We know the result is the only allocation in this |
1343 | 0 | // chunk: Any additional allocations since the start of |
1344 | 0 | // this method could only have happened when running |
1345 | 0 | // the initializer function, which is called *after* |
1346 | 0 | // reserving space for this result. Therefore, since we |
1347 | 0 | // already determined via the check above that this |
1348 | 0 | // result was the last allocation, there must not have |
1349 | 0 | // been any other allocations, and this result is the |
1350 | 0 | // only allocation in this chunk. |
1351 | 0 | // |
1352 | 0 | // Because this is the only allocation in this chunk, |
1353 | 0 | // we can reset the chunk's bump finger to the start of |
1354 | 0 | // the chunk. |
1355 | 0 | current_ptr.set(current_footer_p.as_ref().data); |
1356 | 0 | } |
1357 | 0 | } |
1358 | | //SAFETY: |
1359 | | // As we received `E` semantically by value from `f`, we can |
1360 | | // just copy that value here as long as we avoid a double-drop |
1361 | | // (which can't happen as any specific references to the `E`'s |
1362 | | // data in `self` are destroyed when this function returns). |
1363 | | // |
1364 | | // The order between this and the deallocation doesn't matter |
1365 | | // because `Self: !Sync`. |
1366 | 0 | Err(AllocOrInitError::Init(ptr::read(e as *const _))) |
1367 | | }, |
1368 | | } |
1369 | 0 | } |
1370 | | |
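A sketch of how the two failure modes of `try_alloc_try_with` surface through `AllocOrInitError`: `Init` carries the closure's error, while `Alloc` reports that the arena itself could not reserve space for the `Result`. The allocation limit in the second half is chosen only to force the `Alloc` branch.

```
// Hedged sketch distinguishing the two error variants.
use bumpalo::AllocOrInitError;

let bump = bumpalo::Bump::new();
let init_err: Result<&mut u32, AllocOrInitError<&str>> =
    bump.try_alloc_try_with(|| Err("bad input"));
assert!(matches!(init_err, Err(AllocOrInitError::Init("bad input"))));

let limited = bumpalo::Bump::new();
limited.set_allocation_limit(Some(0));
let alloc_err: Result<&mut [u8; 64], AllocOrInitError<()>> =
    limited.try_alloc_try_with(|| Ok([0u8; 64]));
assert!(matches!(alloc_err, Err(AllocOrInitError::Alloc(_))));
```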
1371 | | /// `Copy` a slice into this `Bump` and return an exclusive reference to |
1372 | | /// the copy. |
1373 | | /// |
1374 | | /// ## Panics |
1375 | | /// |
1376 | | /// Panics if reserving space for the slice fails. |
1377 | | /// |
1378 | | /// ## Example |
1379 | | /// |
1380 | | /// ``` |
1381 | | /// let bump = bumpalo::Bump::new(); |
1382 | | /// let x = bump.alloc_slice_copy(&[1, 2, 3]); |
1383 | | /// assert_eq!(x, &[1, 2, 3]); |
1384 | | /// ``` |
1385 | | #[inline(always)] |
1386 | 2.03M | pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T] |
1387 | 2.03M | where |
1388 | 2.03M | T: Copy, |
1389 | | { |
1390 | 2.03M | let layout = Layout::for_value(src); |
1391 | 2.03M | let dst = self.alloc_layout(layout).cast::<T>(); |
1392 | | |
1393 | 2.03M | unsafe { |
1394 | 2.03M | ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len()); |
1395 | 2.03M | slice::from_raw_parts_mut(dst.as_ptr(), src.len()) |
1396 | 2.03M | } |
 1397 |  2.03M |     }
<bumpalo::Bump>::alloc_slice_copy::<cranelift_codegen::ir::entities::Value>: [per-line instantiation table elided; 221k executions]
Unexecuted instantiation: <bumpalo::Bump>::alloc_slice_copy::<u8>
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_slice_copy::<_>
<bumpalo::Bump>::alloc_slice_copy::<cranelift_codegen::ir::entities::Value>: [per-line instantiation table elided; 1.81M executions]
1398 | | |
1399 | | /// Like `alloc_slice_copy`, but does not panic in case of allocation failure. |
1400 | | /// |
1401 | | /// ## Example |
1402 | | /// |
1403 | | /// ``` |
1404 | | /// let bump = bumpalo::Bump::new(); |
1405 | | /// let x = bump.try_alloc_slice_copy(&[1, 2, 3]); |
1406 | | /// assert_eq!(x, Ok(&mut[1, 2, 3] as &mut [_])); |
1407 | | /// |
1408 | | /// |
1409 | | /// let bump = bumpalo::Bump::new(); |
1410 | | /// bump.set_allocation_limit(Some(4)); |
1411 | | /// let x = bump.try_alloc_slice_copy(&[1, 2, 3, 4, 5, 6]); |
1412 | | /// assert_eq!(x, Err(bumpalo::AllocErr)); // too big |
1413 | | /// ``` |
1414 | | #[inline(always)] |
1415 | 0 | pub fn try_alloc_slice_copy<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr> |
1416 | 0 | where |
1417 | 0 | T: Copy, |
1418 | | { |
1419 | 0 | let layout = Layout::for_value(src); |
1420 | 0 | let dst = self.try_alloc_layout(layout)?.cast::<T>(); |
1421 | 0 | let result = unsafe { |
1422 | 0 | core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len()); |
1423 | 0 | slice::from_raw_parts_mut(dst.as_ptr(), src.len()) |
1424 | | }; |
1425 | 0 | Ok(result) |
1426 | 0 | } |
1427 | | |
1428 | | /// `Clone` a slice into this `Bump` and return an exclusive reference to |
1429 | | /// the clone. Prefer [`alloc_slice_copy`](#method.alloc_slice_copy) if `T` is `Copy`. |
1430 | | /// |
1431 | | /// ## Panics |
1432 | | /// |
1433 | | /// Panics if reserving space for the slice fails. |
1434 | | /// |
1435 | | /// ## Example |
1436 | | /// |
1437 | | /// ``` |
1438 | | /// #[derive(Clone, Debug, Eq, PartialEq)] |
1439 | | /// struct Sheep { |
1440 | | /// name: String, |
1441 | | /// } |
1442 | | /// |
1443 | | /// let originals = [ |
1444 | | /// Sheep { name: "Alice".into() }, |
1445 | | /// Sheep { name: "Bob".into() }, |
1446 | | /// Sheep { name: "Cathy".into() }, |
1447 | | /// ]; |
1448 | | /// |
1449 | | /// let bump = bumpalo::Bump::new(); |
1450 | | /// let clones = bump.alloc_slice_clone(&originals); |
1451 | | /// assert_eq!(originals, clones); |
1452 | | /// ``` |
1453 | | #[inline(always)] |
1454 | 0 | pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T] |
1455 | 0 | where |
1456 | 0 | T: Clone, |
1457 | | { |
1458 | 0 | let layout = Layout::for_value(src); |
1459 | 0 | let dst = self.alloc_layout(layout).cast::<T>(); |
1460 | | |
1461 | | unsafe { |
1462 | 0 | for (i, val) in src.iter().cloned().enumerate() { |
1463 | 0 | ptr::write(dst.as_ptr().add(i), val); |
1464 | 0 | } |
1465 | | |
1466 | 0 | slice::from_raw_parts_mut(dst.as_ptr(), src.len()) |
1467 | | } |
1468 | 0 | } |
1469 | | |
1470 | | /// Like `alloc_slice_clone` but does not panic on failure. |
1471 | | #[inline(always)] |
1472 | 0 | pub fn try_alloc_slice_clone<T>(&self, src: &[T]) -> Result<&mut [T], AllocErr> |
1473 | 0 | where |
1474 | 0 | T: Clone, |
1475 | | { |
1476 | 0 | let layout = Layout::for_value(src); |
1477 | 0 | let dst = self.try_alloc_layout(layout)?.cast::<T>(); |
1478 | | |
1479 | | unsafe { |
1480 | 0 | for (i, val) in src.iter().cloned().enumerate() { |
1481 | 0 | ptr::write(dst.as_ptr().add(i), val); |
1482 | 0 | } |
1483 | | |
1484 | 0 | Ok(slice::from_raw_parts_mut(dst.as_ptr(), src.len())) |
1485 | | } |
1486 | 0 | } |
1487 | | |
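`try_alloc_slice_clone` has no example of its own, so here is a sketch; the allocation limit is only there to force the failure branch. On failure the source elements are never cloned, because the reservation happens before the element loop.

```
// Hedged sketch: same cloning behaviour as `alloc_slice_clone`, but a full
// arena reports `AllocErr` instead of panicking.
let bump = bumpalo::Bump::new();
let names = [String::from("Alice"), String::from("Bob")];
let cloned = bump.try_alloc_slice_clone(&names).unwrap();
assert_eq!(&cloned[..], &names[..]);

let limited = bumpalo::Bump::new();
limited.set_allocation_limit(Some(8));
assert_eq!(limited.try_alloc_slice_clone(&names), Err(bumpalo::AllocErr));
```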
1488 | | /// `Copy` a string slice into this `Bump` and return an exclusive reference to it. |
1489 | | /// |
1490 | | /// ## Panics |
1491 | | /// |
1492 | | /// Panics if reserving space for the string fails. |
1493 | | /// |
1494 | | /// ## Example |
1495 | | /// |
1496 | | /// ``` |
1497 | | /// let bump = bumpalo::Bump::new(); |
1498 | | /// let hello = bump.alloc_str("hello world"); |
1499 | | /// assert_eq!("hello world", hello); |
1500 | | /// ``` |
1501 | | #[inline(always)] |
1502 | 0 | pub fn alloc_str(&self, src: &str) -> &mut str { |
1503 | 0 | let buffer = self.alloc_slice_copy(src.as_bytes()); |
1504 | 0 | unsafe { |
1505 | 0 | // This is OK, because it already came in as str, so it is guaranteed to be utf8 |
1506 | 0 | str::from_utf8_unchecked_mut(buffer) |
1507 | 0 | } |
1508 | 0 | } |
1509 | | |
1510 | | /// Same as `alloc_str` but does not panic on failure. |
1511 | | /// |
1512 | | /// ## Example |
1513 | | /// |
1514 | | /// ``` |
1515 | | /// let bump = bumpalo::Bump::new(); |
1516 | | /// let hello = bump.try_alloc_str("hello world").unwrap(); |
1517 | | /// assert_eq!("hello world", hello); |
1518 | | /// |
1519 | | /// |
1520 | | /// let bump = bumpalo::Bump::new(); |
1521 | | /// bump.set_allocation_limit(Some(5)); |
1522 | | /// let hello = bump.try_alloc_str("hello world"); |
1523 | | /// assert_eq!(Err(bumpalo::AllocErr), hello); |
1524 | | /// ``` |
1525 | | #[inline(always)] |
1526 | 0 | pub fn try_alloc_str(&self, src: &str) -> Result<&mut str, AllocErr> { |
1527 | 0 | let buffer = self.try_alloc_slice_copy(src.as_bytes())?; |
1528 | | unsafe { |
1529 | | // This is OK, because it already came in as str, so it is guaranteed to be utf8 |
1530 | 0 | Ok(str::from_utf8_unchecked_mut(buffer)) |
1531 | | } |
1532 | 0 | } |
1533 | | |
1534 | | /// Allocates a new slice of size `len` into this `Bump` and returns an |
 1535 |       |     /// exclusive reference to it.
1536 | | /// |
1537 | | /// The elements of the slice are initialized using the supplied closure. |
1538 | | /// The closure argument is the position in the slice. |
1539 | | /// |
1540 | | /// ## Panics |
1541 | | /// |
1542 | | /// Panics if reserving space for the slice fails. |
1543 | | /// |
1544 | | /// ## Example |
1545 | | /// |
1546 | | /// ``` |
1547 | | /// let bump = bumpalo::Bump::new(); |
1548 | | /// let x = bump.alloc_slice_fill_with(5, |i| 5 * (i + 1)); |
1549 | | /// assert_eq!(x, &[5, 10, 15, 20, 25]); |
1550 | | /// ``` |
1551 | | #[inline(always)] |
1552 | 679k | pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T] |
1553 | 679k | where |
1554 | 679k | F: FnMut(usize) -> T, |
1555 | | { |
 1556 |   679k |         let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
1557 | 679k | let dst = self.alloc_layout(layout).cast::<T>(); |
1558 | | |
1559 | | unsafe { |
1560 | 2.75M | for i in 0..len { |
1561 | 2.75M | ptr::write(dst.as_ptr().add(i), f(i)); |
1562 | 2.75M | } |
1563 | | |
1564 | 679k | let result = slice::from_raw_parts_mut(dst.as_ptr(), len); |
1565 | 679k | debug_assert_eq!(Layout::for_value(result), layout); |
1566 | 679k | result |
1567 | | } |
 1568 |   679k |     }
[per-line instantiation table elided; 64.7k executions]
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_slice_fill_with::<_, _>
[per-line instantiation table elided; 614k executions]
1569 | | |
1570 | | /// Allocates a new slice of size `len` into this `Bump` and returns an |
 1571 |       |     /// exclusive reference to it, failing if the closure returns an Err.
1572 | | /// |
1573 | | /// The elements of the slice are initialized using the supplied closure. |
1574 | | /// The closure argument is the position in the slice. |
1575 | | /// |
1576 | | /// ## Panics |
1577 | | /// |
1578 | | /// Panics if reserving space for the slice fails. |
1579 | | /// |
1580 | | /// ## Example |
1581 | | /// |
1582 | | /// ``` |
1583 | | /// let bump = bumpalo::Bump::new(); |
1584 | | /// let x: Result<&mut [usize], ()> = bump.alloc_slice_try_fill_with(5, |i| Ok(5 * i)); |
1585 | | /// assert_eq!(x, Ok(bump.alloc_slice_copy(&[0, 5, 10, 15, 20]))); |
1586 | | /// ``` |
1587 | | /// |
1588 | | /// ``` |
1589 | | /// let bump = bumpalo::Bump::new(); |
1590 | | /// let x: Result<&mut [usize], ()> = bump.alloc_slice_try_fill_with( |
1591 | | /// 5, |
1592 | | /// |n| if n == 2 { Err(()) } else { Ok(n) } |
1593 | | /// ); |
1594 | | /// assert_eq!(x, Err(())); |
1595 | | /// ``` |
1596 | | #[inline(always)] |
1597 | 0 | pub fn alloc_slice_try_fill_with<T, F, E>(&self, len: usize, mut f: F) -> Result<&mut [T], E> |
1598 | 0 | where |
1599 | 0 | F: FnMut(usize) -> Result<T, E>, |
1600 | | { |
1601 | 0 | let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom()); |
1602 | 0 | let base_ptr = self.alloc_layout(layout); |
1603 | 0 | let dst = base_ptr.cast::<T>(); |
1604 | | |
1605 | | unsafe { |
1606 | 0 | for i in 0..len { |
1607 | 0 | match f(i) { |
1608 | 0 | Ok(el) => ptr::write(dst.as_ptr().add(i), el), |
1609 | 0 | Err(e) => { |
1610 | 0 | self.dealloc(base_ptr, layout); |
1611 | 0 | return Err(e); |
1612 | | } |
1613 | | } |
1614 | | } |
1615 | | |
1616 | 0 | let result = slice::from_raw_parts_mut(dst.as_ptr(), len); |
1617 | 0 | debug_assert_eq!(Layout::for_value(result), layout); |
1618 | 0 | Ok(result) |
1619 | | } |
1620 | 0 | } |
1621 | | |
1622 | | /// Allocates a new slice of size `len` into this `Bump` and returns an |
 1623 |       |     /// exclusive reference to it.
1624 | | /// |
1625 | | /// The elements of the slice are initialized using the supplied closure. |
1626 | | /// The closure argument is the position in the slice. |
1627 | | /// |
1628 | | /// ## Example |
1629 | | /// |
1630 | | /// ``` |
1631 | | /// let bump = bumpalo::Bump::new(); |
1632 | | /// let x = bump.try_alloc_slice_fill_with(5, |i| 5 * (i + 1)); |
1633 | | /// assert_eq!(x, Ok(&mut[5usize, 10, 15, 20, 25] as &mut [_])); |
1634 | | /// |
1635 | | /// |
1636 | | /// let bump = bumpalo::Bump::new(); |
1637 | | /// bump.set_allocation_limit(Some(4)); |
1638 | | /// let x = bump.try_alloc_slice_fill_with(10, |i| 5 * (i + 1)); |
1639 | | /// assert_eq!(x, Err(bumpalo::AllocErr)); |
1640 | | /// ``` |
1641 | | #[inline(always)] |
1642 | 0 | pub fn try_alloc_slice_fill_with<T, F>( |
1643 | 0 | &self, |
1644 | 0 | len: usize, |
1645 | 0 | mut f: F, |
1646 | 0 | ) -> Result<&mut [T], AllocErr> |
1647 | 0 | where |
1648 | 0 | F: FnMut(usize) -> T, |
1649 | | { |
1650 | 0 | let layout = Layout::array::<T>(len).map_err(|_| AllocErr)?; |
1651 | 0 | let dst = self.try_alloc_layout(layout)?.cast::<T>(); |
1652 | | |
1653 | | unsafe { |
1654 | 0 | for i in 0..len { |
1655 | 0 | ptr::write(dst.as_ptr().add(i), f(i)); |
1656 | 0 | } |
1657 | | |
1658 | 0 | let result = slice::from_raw_parts_mut(dst.as_ptr(), len); |
1659 | 0 | debug_assert_eq!(Layout::for_value(result), layout); |
1660 | 0 | Ok(result) |
1661 | | } |
1662 | 0 | } |
1663 | | |
1664 | | /// Allocates a new slice of size `len` into this `Bump` and returns an |
 1665 |       |     /// exclusive reference to it.
1666 | | /// |
1667 | | /// All elements of the slice are initialized to `value`. |
1668 | | /// |
1669 | | /// ## Panics |
1670 | | /// |
1671 | | /// Panics if reserving space for the slice fails. |
1672 | | /// |
1673 | | /// ## Example |
1674 | | /// |
1675 | | /// ``` |
1676 | | /// let bump = bumpalo::Bump::new(); |
1677 | | /// let x = bump.alloc_slice_fill_copy(5, 42); |
1678 | | /// assert_eq!(x, &[42, 42, 42, 42, 42]); |
1679 | | /// ``` |
1680 | | #[inline(always)] |
1681 | 0 | pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] { |
1682 | 0 | self.alloc_slice_fill_with(len, |_| value) |
1683 | 0 | } |
1684 | | |
1685 | | /// Same as `alloc_slice_fill_copy` but does not panic on failure. |
1686 | | #[inline(always)] |
1687 | 0 | pub fn try_alloc_slice_fill_copy<T: Copy>( |
1688 | 0 | &self, |
1689 | 0 | len: usize, |
1690 | 0 | value: T, |
1691 | 0 | ) -> Result<&mut [T], AllocErr> { |
1692 | 0 | self.try_alloc_slice_fill_with(len, |_| value) |
1693 | 0 | } |
1694 | | |
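A sketch for the fallible fill-by-copy variant above; the small allocation limit is only to demonstrate the error case.

```
// Hedged sketch: the fallible counterpart of `alloc_slice_fill_copy`.
let bump = bumpalo::Bump::new();
let filled = bump.try_alloc_slice_fill_copy(3, 7u8);
assert_eq!(filled, Ok(&mut [7u8, 7, 7] as &mut [_]));

let limited = bumpalo::Bump::new();
limited.set_allocation_limit(Some(2));
assert_eq!(
    limited.try_alloc_slice_fill_copy(16, 0u8),
    Err(bumpalo::AllocErr)
);
```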
 1695 |       |     /// Allocates a new slice of size `len` into this `Bump` and returns an
 1696 |       |     /// exclusive reference to it.
1697 | | /// |
1698 | | /// All elements of the slice are initialized to `value.clone()`. |
1699 | | /// |
1700 | | /// ## Panics |
1701 | | /// |
1702 | | /// Panics if reserving space for the slice fails. |
1703 | | /// |
1704 | | /// ## Example |
1705 | | /// |
1706 | | /// ``` |
1707 | | /// let bump = bumpalo::Bump::new(); |
1708 | | /// let s: String = "Hello Bump!".to_string(); |
1709 | | /// let x: &[String] = bump.alloc_slice_fill_clone(2, &s); |
1710 | | /// assert_eq!(x.len(), 2); |
1711 | | /// assert_eq!(&x[0], &s); |
1712 | | /// assert_eq!(&x[1], &s); |
1713 | | /// ``` |
1714 | | #[inline(always)] |
1715 | 0 | pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] { |
1716 | 0 | self.alloc_slice_fill_with(len, |_| value.clone()) |
1717 | 0 | } |
1718 | | |
1719 | | /// Like `alloc_slice_fill_clone` but does not panic on failure. |
1720 | | #[inline(always)] |
1721 | 0 | pub fn try_alloc_slice_fill_clone<T: Clone>( |
1722 | 0 | &self, |
1723 | 0 | len: usize, |
1724 | 0 | value: &T, |
1725 | 0 | ) -> Result<&mut [T], AllocErr> { |
1726 | 0 | self.try_alloc_slice_fill_with(len, |_| value.clone()) |
1727 | 0 | } |
1728 | | |
 1729 |       |     /// Allocates a new slice of size `len` into this `Bump` and returns an
 1730 |       |     /// exclusive reference to it.
1731 | | /// |
1732 | | /// The elements are initialized using the supplied iterator. |
1733 | | /// |
1734 | | /// ## Panics |
1735 | | /// |
1736 | | /// Panics if reserving space for the slice fails, or if the supplied |
1737 | | /// iterator returns fewer elements than it promised. |
1738 | | /// |
1739 | | /// ## Example |
1740 | | /// |
1741 | | /// ``` |
1742 | | /// let bump = bumpalo::Bump::new(); |
1743 | | /// let x: &[i32] = bump.alloc_slice_fill_iter([2, 3, 5].iter().cloned().map(|i| i * i)); |
1744 | | /// assert_eq!(x, [4, 9, 25]); |
1745 | | /// ``` |
1746 | | #[inline(always)] |
1747 | 679k | pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T] |
1748 | 679k | where |
1749 | 679k | I: IntoIterator<Item = T>, |
1750 | 679k | I::IntoIter: ExactSizeIterator, |
1751 | | { |
1752 | 679k | let mut iter = iter.into_iter(); |
1753 | 2.75M | self.alloc_slice_fill_with(iter.len(), |_| { |
1754 | 2.75M | iter.next().expect("Iterator supplied too few elements") |
 1755 |  2.75M |         })
[per-line closure instantiation table elided; 201k executions]
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_slice_fill_iter::<_, _>::{closure#0}
[per-line closure instantiation table elided; 2.55M executions]
 1756 |   679k |     }
[per-line instantiation table elided; 64.7k executions]
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_slice_fill_iter::<_, _>
[per-line instantiation table elided; 614k executions]
1757 | | |
 1758 |       |     /// Allocates a new slice of size `len` into this `Bump` and returns an
 1759 |       |     /// exclusive reference to it, failing if the iterator returns an Err.
1760 | | /// |
1761 | | /// The elements are initialized using the supplied iterator. |
1762 | | /// |
1763 | | /// ## Panics |
1764 | | /// |
1765 | | /// Panics if reserving space for the slice fails, or if the supplied |
1766 | | /// iterator returns fewer elements than it promised. |
1767 | | /// |
1768 | | /// ## Examples |
1769 | | /// |
1770 | | /// ``` |
1771 | | /// let bump = bumpalo::Bump::new(); |
1772 | | /// let x: Result<&mut [i32], ()> = bump.alloc_slice_try_fill_iter( |
1773 | | /// [2, 3, 5].iter().cloned().map(|i| Ok(i * i)) |
1774 | | /// ); |
1775 | | /// assert_eq!(x, Ok(bump.alloc_slice_copy(&[4, 9, 25]))); |
1776 | | /// ``` |
1777 | | /// |
1778 | | /// ``` |
1779 | | /// let bump = bumpalo::Bump::new(); |
1780 | | /// let x: Result<&mut [i32], ()> = bump.alloc_slice_try_fill_iter( |
1781 | | /// [Ok(2), Err(()), Ok(5)].iter().cloned() |
1782 | | /// ); |
1783 | | /// assert_eq!(x, Err(())); |
1784 | | /// ``` |
1785 | | #[inline(always)] |
1786 | 0 | pub fn alloc_slice_try_fill_iter<T, I, E>(&self, iter: I) -> Result<&mut [T], E> |
1787 | 0 | where |
1788 | 0 | I: IntoIterator<Item = Result<T, E>>, |
1789 | 0 | I::IntoIter: ExactSizeIterator, |
1790 | | { |
1791 | 0 | let mut iter = iter.into_iter(); |
1792 | 0 | self.alloc_slice_try_fill_with(iter.len(), |_| { |
1793 | 0 | iter.next().expect("Iterator supplied too few elements") |
1794 | 0 | }) |
1795 | 0 | } |
1796 | | |
 1797 |       |     /// Allocates a new slice of size `iter.len()` into this `Bump` and returns an
 1798 |       |     /// exclusive reference to it. Does not panic on failure.
1799 | | /// |
1800 | | /// The elements are initialized using the supplied iterator. |
1801 | | /// |
1802 | | /// ## Example |
1803 | | /// |
1804 | | /// ``` |
1805 | | /// let bump = bumpalo::Bump::new(); |
1806 | | /// let x: &[i32] = bump.try_alloc_slice_fill_iter([2, 3, 5] |
1807 | | /// .iter().cloned().map(|i| i * i)).unwrap(); |
1808 | | /// assert_eq!(x, [4, 9, 25]); |
1809 | | /// ``` |
1810 | | #[inline(always)] |
1811 | 0 | pub fn try_alloc_slice_fill_iter<T, I>(&self, iter: I) -> Result<&mut [T], AllocErr> |
1812 | 0 | where |
1813 | 0 | I: IntoIterator<Item = T>, |
1814 | 0 | I::IntoIter: ExactSizeIterator, |
1815 | | { |
1816 | 0 | let mut iter = iter.into_iter(); |
1817 | 0 | self.try_alloc_slice_fill_with(iter.len(), |_| { |
1818 | 0 | iter.next().expect("Iterator supplied too few elements") |
1819 | 0 | }) |
1820 | 0 | } |
1821 | | |
 1822 |       |     /// Allocates a new slice of size `len` into this `Bump` and returns an
 1823 |       |     /// exclusive reference to it.
1824 | | /// |
1825 | | /// All elements of the slice are initialized to [`T::default()`]. |
1826 | | /// |
1827 | | /// [`T::default()`]: https://doc.rust-lang.org/std/default/trait.Default.html#tymethod.default |
1828 | | /// |
1829 | | /// ## Panics |
1830 | | /// |
1831 | | /// Panics if reserving space for the slice fails. |
1832 | | /// |
1833 | | /// ## Example |
1834 | | /// |
1835 | | /// ``` |
1836 | | /// let bump = bumpalo::Bump::new(); |
1837 | | /// let x = bump.alloc_slice_fill_default::<u32>(5); |
1838 | | /// assert_eq!(x, &[0, 0, 0, 0, 0]); |
1839 | | /// ``` |
1840 | | #[inline(always)] |
1841 | 0 | pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] { |
1842 | 0 | self.alloc_slice_fill_with(len, |_| T::default()) |
1843 | 0 | } |
1844 | | |
1845 | | /// Like `alloc_slice_fill_default` but does not panic on failure. |
1846 | | #[inline(always)] |
1847 | 0 | pub fn try_alloc_slice_fill_default<T: Default>( |
1848 | 0 | &self, |
1849 | 0 | len: usize, |
1850 | 0 | ) -> Result<&mut [T], AllocErr> { |
1851 | 0 | self.try_alloc_slice_fill_with(len, |_| T::default()) |
1852 | 0 | } |
1853 | | |
1854 | | /// Allocate space for an object with the given `Layout`. |
1855 | | /// |
1856 | | /// The returned pointer points at uninitialized memory, and should be |
1857 | | /// initialized with |
1858 | | /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html). |
1859 | | /// |
1860 | | /// # Panics |
1861 | | /// |
1862 | | /// Panics if reserving space matching `layout` fails. |
1863 | | #[inline(always)] |
1864 | 2.71M | pub fn alloc_layout(&self, layout: Layout) -> NonNull<u8> { |
 1865 |  2.71M |         self.try_alloc_layout(layout).unwrap_or_else(|_| oom())
 1866 |  2.71M |     }
<bumpalo::Bump>::alloc_layout: [per-line instantiation table elided; 286k executions]
Unexecuted instantiation: <bumpalo::Bump>::alloc_layout
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_layout
<bumpalo::Bump>::alloc_layout: [per-line instantiation table elided; 2.42M executions]
1867 | | |
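Because `alloc_layout` hands back raw, uninitialized memory, a short sketch of the write-then-use pattern described in its documentation may help; the `u64` layout here is just an arbitrary illustration.

```
// Hedged sketch: reserve raw space for a `u64`, initialize it with a raw
// pointer write, then read it back through the same pointer.
use core::alloc::Layout;

let bump = bumpalo::Bump::new();
let layout = Layout::new::<u64>();
let p = bump.alloc_layout(layout).cast::<u64>();
unsafe {
    p.as_ptr().write(0xdead_beef_u64);
    assert_eq!(*p.as_ptr(), 0xdead_beef_u64);
}
```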
1868 | | /// Attempts to allocate space for an object with the given `Layout` or else returns |
1869 | | /// an `Err`. |
1870 | | /// |
1871 | | /// The returned pointer points at uninitialized memory, and should be |
1872 | | /// initialized with |
1873 | | /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html). |
1874 | | /// |
1875 | | /// # Errors |
1876 | | /// |
1877 | | /// Errors if reserving space matching `layout` fails. |
1878 | | #[inline(always)] |
1879 | 41.0M | pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { |
1880 | 41.0M | if let Some(p) = self.try_alloc_layout_fast(layout) { |
1881 | 40.0M | Ok(p) |
1882 | | } else { |
1883 | 1.04M | self.alloc_layout_slow(layout).ok_or(AllocErr) |
1884 | | } |
1885 | 41.0M | } <bumpalo::Bump>::try_alloc_layout Line | Count | Source | 1879 | 286k | pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { | 1880 | 286k | if let Some(p) = self.try_alloc_layout_fast(layout) { | 1881 | 286k | Ok(p) | 1882 | | } else { | 1883 | 22 | self.alloc_layout_slow(layout).ok_or(AllocErr) | 1884 | | } | 1885 | 286k | } |
<bumpalo::Bump>::try_alloc_layout Line | Count | Source | 1879 | 38.3M | pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { | 1880 | 38.3M | if let Some(p) = self.try_alloc_layout_fast(layout) { | 1881 | 37.2M | Ok(p) | 1882 | | } else { | 1883 | 1.04M | self.alloc_layout_slow(layout).ok_or(AllocErr) | 1884 | | } | 1885 | 38.3M | } |
Unexecuted instantiation: <bumpalo::Bump>::try_alloc_layout Unexecuted instantiation: <bumpalo::Bump<_>>::try_alloc_layout <bumpalo::Bump>::try_alloc_layout Line | Count | Source | 1879 | 2.42M | pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { | 1880 | 2.42M | if let Some(p) = self.try_alloc_layout_fast(layout) { | 1881 | 2.42M | Ok(p) | 1882 | | } else { | 1883 | 1.11k | self.alloc_layout_slow(layout).ok_or(AllocErr) | 1884 | | } | 1885 | 2.42M | } |
|
1886 | | |
1887 | | #[inline(always)] |
1888 | 42.2M | fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> { |
1889 | | // We don't need to check for ZSTs here since they will automatically |
1890 | | // be handled properly: the pointer will be bumped by zero bytes, |
1891 | | // modulo alignment. This keeps the fast path optimized for non-ZSTs, |
1892 | | // which are much more common. |
1893 | | unsafe { |
1894 | 42.2M | let footer_ptr = self.current_chunk_footer.get(); |
1895 | 42.2M | let footer = footer_ptr.as_ref(); |
1896 | | |
1897 | 42.2M | let ptr = footer.ptr.get().as_ptr(); |
1898 | 42.2M | let start = footer.data.as_ptr(); |
1899 | 42.2M | debug_assert!( |
1900 | 0 | start <= ptr, |
1901 | | "start pointer {start:#p} should be less than or equal to bump pointer {ptr:#p}" |
1902 | | ); |
1903 | 42.2M | debug_assert!( |
1904 | 0 | ptr <= footer_ptr.cast::<u8>().as_ptr(), |
1905 | | "bump pointer {ptr:#p} should be less than or equal to footer pointer {footer_ptr:#p}" |
1906 | | ); |
1907 | 42.2M | debug_assert!( |
1908 | 0 | is_pointer_aligned_to(ptr, MIN_ALIGN), |
1909 | | "bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}" |
1910 | | ); |
1911 | | |
1912 | | // This `match` should be boiled away by LLVM: `MIN_ALIGN` is a |
1913 | | // constant and the layout's alignment is also constant in practice |
1914 | | // after inlining. |
1915 | 42.2M | let aligned_ptr = match layout.align().cmp(&MIN_ALIGN) { |
1916 | | Ordering::Less => { |
1917 | | // We need to round the size up to a multiple of `MIN_ALIGN` |
1918 | | // to preserve the minimum alignment. This might overflow |
1919 | | // since we cannot rely on `Layout`'s guarantees. |
1920 | 0 | let aligned_size = round_up_to(layout.size(), MIN_ALIGN)?; |
1921 | | |
1922 | 0 | let capacity = (ptr as usize) - (start as usize); |
1923 | 0 | if aligned_size > capacity { |
1924 | 0 | return None; |
1925 | 0 | } |
1926 | | |
1927 | 0 | ptr.wrapping_sub(aligned_size) |
1928 | | } |
1929 | | Ordering::Equal => { |
1930 | | // `Layout` guarantees that rounding the size up to its |
1931 | | // align cannot overflow (but does not guarantee that the |
1932 | | // size is initially a multiple of the alignment, which is |
1933 | | // why we need to do this rounding). |
1934 | 0 | let aligned_size = round_up_to_unchecked(layout.size(), layout.align()); |
1935 | | |
1936 | 0 | let capacity = (ptr as usize) - (start as usize); |
1937 | 0 | if aligned_size > capacity { |
1938 | 0 | return None; |
1939 | 0 | } |
1940 | | |
1941 | 0 | ptr.wrapping_sub(aligned_size) |
1942 | | } |
1943 | | Ordering::Greater => { |
1944 | | // `Layout` guarantees that rounding the size up to its |
1945 | | // align cannot overflow (but does not guarantee that the |
1946 | | // size is initially a multiple of the alignment, which is |
1947 | | // why we need to do this rounding). |
1948 | 42.2M | let aligned_size = round_up_to_unchecked(layout.size(), layout.align()); |
1949 | | |
1950 | 42.2M | let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align()); |
1951 | 42.2M | let capacity = (aligned_ptr as usize).wrapping_sub(start as usize); |
1952 | 42.2M | if aligned_ptr < start || aligned_size > capacity { |
1953 | 1.04M | return None; |
1954 | 41.1M | } |
1955 | | |
1956 | 41.1M | aligned_ptr.wrapping_sub(aligned_size) |
1957 | | } |
1958 | | }; |
1959 | | |
1960 | 41.1M | debug_assert!( |
1961 | 0 | is_pointer_aligned_to(aligned_ptr, layout.align()), |
1962 | | "pointer {aligned_ptr:#p} should be aligned to layout alignment of {:#}", |
1963 | 0 | layout.align() |
1964 | | ); |
1965 | 41.1M | debug_assert!( |
1966 | 0 | is_pointer_aligned_to(aligned_ptr, MIN_ALIGN), |
1967 | | "pointer {aligned_ptr:#p} should be aligned to minimum alignment of {:#}", |
1968 | | MIN_ALIGN |
1969 | | ); |
1970 | 41.1M | debug_assert!( |
1971 | 0 | start <= aligned_ptr && aligned_ptr <= ptr, |
1972 | | "pointer {aligned_ptr:#p} should be in range {start:#p}..{ptr:#p}" |
1973 | | ); |
1974 | | |
1975 | 41.1M | debug_assert!(!aligned_ptr.is_null()); |
1976 | 41.1M | let aligned_ptr = NonNull::new_unchecked(aligned_ptr); |
1977 | | |
1978 | 41.1M | footer.ptr.set(aligned_ptr); |
1979 | 41.1M | Some(aligned_ptr) |
1980 | | } |
 1981 |  42.2M |     }
<bumpalo::Bump>::try_alloc_layout_fast: [per-line instantiation table elided; 286k executions]
<bumpalo::Bump>::try_alloc_layout_fast Line | Count | Source | 1888 | 39.5M | fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> { | 1889 | | // We don't need to check for ZSTs here since they will automatically | 1890 | | // be handled properly: the pointer will be bumped by zero bytes, | 1891 | | // modulo alignment. This keeps the fast path optimized for non-ZSTs, | 1892 | | // which are much more common. | 1893 | | unsafe { | 1894 | 39.5M | let footer_ptr = self.current_chunk_footer.get(); | 1895 | 39.5M | let footer = footer_ptr.as_ref(); | 1896 | | | 1897 | 39.5M | let ptr = footer.ptr.get().as_ptr(); | 1898 | 39.5M | let start = footer.data.as_ptr(); | 1899 | 39.5M | debug_assert!( | 1900 | 0 | start <= ptr, | 1901 | | "start pointer {start:#p} should be less than or equal to bump pointer {ptr:#p}" | 1902 | | ); | 1903 | 39.5M | debug_assert!( | 1904 | 0 | ptr <= footer_ptr.cast::<u8>().as_ptr(), | 1905 | | "bump pointer {ptr:#p} should be less than or equal to footer pointer {footer_ptr:#p}" | 1906 | | ); | 1907 | 39.5M | debug_assert!( | 1908 | 0 | is_pointer_aligned_to(ptr, MIN_ALIGN), | 1909 | | "bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}" | 1910 | | ); | 1911 | | | 1912 | | // This `match` should be boiled away by LLVM: `MIN_ALIGN` is a | 1913 | | // constant and the layout's alignment is also constant in practice | 1914 | | // after inlining. | 1915 | 39.5M | let aligned_ptr = match layout.align().cmp(&MIN_ALIGN) { | 1916 | | Ordering::Less => { | 1917 | | // We need to round the size up to a multiple of `MIN_ALIGN` | 1918 | | // to preserve the minimum alignment. This might overflow | 1919 | | // since we cannot rely on `Layout`'s guarantees. | 1920 | 0 | let aligned_size = round_up_to(layout.size(), MIN_ALIGN)?; | 1921 | | | 1922 | 0 | let capacity = (ptr as usize) - (start as usize); | 1923 | 0 | if aligned_size > capacity { | 1924 | 0 | return None; | 1925 | 0 | } | 1926 | | | 1927 | 0 | ptr.wrapping_sub(aligned_size) | 1928 | | } | 1929 | | Ordering::Equal => { | 1930 | | // `Layout` guarantees that rounding the size up to its | 1931 | | // align cannot overflow (but does not guarantee that the | 1932 | | // size is initially a multiple of the alignment, which is | 1933 | | // why we need to do this rounding). | 1934 | 0 | let aligned_size = round_up_to_unchecked(layout.size(), layout.align()); | 1935 | | | 1936 | 0 | let capacity = (ptr as usize) - (start as usize); | 1937 | 0 | if aligned_size > capacity { | 1938 | 0 | return None; | 1939 | 0 | } | 1940 | | | 1941 | 0 | ptr.wrapping_sub(aligned_size) | 1942 | | } | 1943 | | Ordering::Greater => { | 1944 | | // `Layout` guarantees that rounding the size up to its | 1945 | | // align cannot overflow (but does not guarantee that the | 1946 | | // size is initially a multiple of the alignment, which is | 1947 | | // why we need to do this rounding). 
| 1948 | 39.5M | let aligned_size = round_up_to_unchecked(layout.size(), layout.align()); | 1949 | | | 1950 | 39.5M | let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align()); | 1951 | 39.5M | let capacity = (aligned_ptr as usize).wrapping_sub(start as usize); | 1952 | 39.5M | if aligned_ptr < start || aligned_size > capacity { | 1953 | 1.04M | return None; | 1954 | 38.4M | } | 1955 | | | 1956 | 38.4M | aligned_ptr.wrapping_sub(aligned_size) | 1957 | | } | 1958 | | }; | 1959 | | | 1960 | 38.4M | debug_assert!( | 1961 | 0 | is_pointer_aligned_to(aligned_ptr, layout.align()), | 1962 | | "pointer {aligned_ptr:#p} should be aligned to layout alignment of {:#}", | 1963 | 0 | layout.align() | 1964 | | ); | 1965 | 38.4M | debug_assert!( | 1966 | 0 | is_pointer_aligned_to(aligned_ptr, MIN_ALIGN), | 1967 | | "pointer {aligned_ptr:#p} should be aligned to minimum alignment of {:#}", | 1968 | | MIN_ALIGN | 1969 | | ); | 1970 | 38.4M | debug_assert!( | 1971 | 0 | start <= aligned_ptr && aligned_ptr <= ptr, | 1972 | | "pointer {aligned_ptr:#p} should be in range {start:#p}..{ptr:#p}" | 1973 | | ); | 1974 | | | 1975 | 38.4M | debug_assert!(!aligned_ptr.is_null()); | 1976 | 38.4M | let aligned_ptr = NonNull::new_unchecked(aligned_ptr); | 1977 | | | 1978 | 38.4M | footer.ptr.set(aligned_ptr); | 1979 | 38.4M | Some(aligned_ptr) | 1980 | | } | 1981 | 39.5M | } |
Unexecuted instantiation: <bumpalo::Bump>::try_alloc_layout_fast Unexecuted instantiation: <bumpalo::Bump<_>>::try_alloc_layout_fast <bumpalo::Bump>::try_alloc_layout_fast Line | Count | Source | 1888 | 2.42M | fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> { | 1889 | | // We don't need to check for ZSTs here since they will automatically | 1890 | | // be handled properly: the pointer will be bumped by zero bytes, | 1891 | | // modulo alignment. This keeps the fast path optimized for non-ZSTs, | 1892 | | // which are much more common. | 1893 | | unsafe { | 1894 | 2.42M | let footer_ptr = self.current_chunk_footer.get(); | 1895 | 2.42M | let footer = footer_ptr.as_ref(); | 1896 | | | 1897 | 2.42M | let ptr = footer.ptr.get().as_ptr(); | 1898 | 2.42M | let start = footer.data.as_ptr(); | 1899 | 2.42M | debug_assert!( | 1900 | 0 | start <= ptr, | 1901 | | "start pointer {start:#p} should be less than or equal to bump pointer {ptr:#p}" | 1902 | | ); | 1903 | 2.42M | debug_assert!( | 1904 | 0 | ptr <= footer_ptr.cast::<u8>().as_ptr(), | 1905 | | "bump pointer {ptr:#p} should be less than or equal to footer pointer {footer_ptr:#p}" | 1906 | | ); | 1907 | 2.42M | debug_assert!( | 1908 | 0 | is_pointer_aligned_to(ptr, MIN_ALIGN), | 1909 | | "bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}" | 1910 | | ); | 1911 | | | 1912 | | // This `match` should be boiled away by LLVM: `MIN_ALIGN` is a | 1913 | | // constant and the layout's alignment is also constant in practice | 1914 | | // after inlining. | 1915 | 2.42M | let aligned_ptr = match layout.align().cmp(&MIN_ALIGN) { | 1916 | | Ordering::Less => { | 1917 | | // We need to round the size up to a multiple of `MIN_ALIGN` | 1918 | | // to preserve the minimum alignment. This might overflow | 1919 | | // since we cannot rely on `Layout`'s guarantees. | 1920 | 0 | let aligned_size = round_up_to(layout.size(), MIN_ALIGN)?; | 1921 | | | 1922 | 0 | let capacity = (ptr as usize) - (start as usize); | 1923 | 0 | if aligned_size > capacity { | 1924 | 0 | return None; | 1925 | 0 | } | 1926 | | | 1927 | 0 | ptr.wrapping_sub(aligned_size) | 1928 | | } | 1929 | | Ordering::Equal => { | 1930 | | // `Layout` guarantees that rounding the size up to its | 1931 | | // align cannot overflow (but does not guarantee that the | 1932 | | // size is initially a multiple of the alignment, which is | 1933 | | // why we need to do this rounding). | 1934 | 0 | let aligned_size = round_up_to_unchecked(layout.size(), layout.align()); | 1935 | | | 1936 | 0 | let capacity = (ptr as usize) - (start as usize); | 1937 | 0 | if aligned_size > capacity { | 1938 | 0 | return None; | 1939 | 0 | } | 1940 | | | 1941 | 0 | ptr.wrapping_sub(aligned_size) | 1942 | | } | 1943 | | Ordering::Greater => { | 1944 | | // `Layout` guarantees that rounding the size up to its | 1945 | | // align cannot overflow (but does not guarantee that the | 1946 | | // size is initially a multiple of the alignment, which is | 1947 | | // why we need to do this rounding). 
| 1948 | 2.42M | let aligned_size = round_up_to_unchecked(layout.size(), layout.align()); | 1949 | | | 1950 | 2.42M | let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align()); | 1951 | 2.42M | let capacity = (aligned_ptr as usize).wrapping_sub(start as usize); | 1952 | 2.42M | if aligned_ptr < start || aligned_size > capacity { | 1953 | 1.11k | return None; | 1954 | 2.42M | } | 1955 | | | 1956 | 2.42M | aligned_ptr.wrapping_sub(aligned_size) | 1957 | | } | 1958 | | }; | 1959 | | | 1960 | 2.42M | debug_assert!( | 1961 | 0 | is_pointer_aligned_to(aligned_ptr, layout.align()), | 1962 | | "pointer {aligned_ptr:#p} should be aligned to layout alignment of {:#}", | 1963 | 0 | layout.align() | 1964 | | ); | 1965 | 2.42M | debug_assert!( | 1966 | 0 | is_pointer_aligned_to(aligned_ptr, MIN_ALIGN), | 1967 | | "pointer {aligned_ptr:#p} should be aligned to minimum alignment of {:#}", | 1968 | | MIN_ALIGN | 1969 | | ); | 1970 | 2.42M | debug_assert!( | 1971 | 0 | start <= aligned_ptr && aligned_ptr <= ptr, | 1972 | | "pointer {aligned_ptr:#p} should be in range {start:#p}..{ptr:#p}" | 1973 | | ); | 1974 | | | 1975 | 2.42M | debug_assert!(!aligned_ptr.is_null()); | 1976 | 2.42M | let aligned_ptr = NonNull::new_unchecked(aligned_ptr); | 1977 | | | 1978 | 2.42M | footer.ptr.set(aligned_ptr); | 1979 | 2.42M | Some(aligned_ptr) | 1980 | | } | 1981 | 2.42M | } |
|
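The fast path above bumps downward: round the current bump pointer down to the layout's alignment, check the remaining room in the chunk, then subtract the rounded-up size. Below is a minimal standalone sketch of that arithmetic with plain integers; `bump_down` is an illustrative name, it collapses the three `match` arms into the general case and is not one of bumpalo's private helpers.

// Illustrative sketch (not bumpalo code): the downward-bumping arithmetic
// performed by `try_alloc_layout_fast`, modeled with plain usize "addresses".
fn bump_down(start: usize, ptr: usize, size: usize, align: usize) -> Option<usize> {
    debug_assert!(align.is_power_of_two());
    // Round the current bump pointer down to the requested alignment.
    let aligned_ptr = ptr & !(align - 1);
    // Round the size up to a multiple of the alignment (overflow handling
    // omitted here; the real code uses checked rounding where it matters).
    let aligned_size = (size + align - 1) & !(align - 1);
    // Is there enough room between the chunk start and the aligned pointer?
    if aligned_ptr < start || aligned_size > aligned_ptr - start {
        return None; // No: the caller falls back to `alloc_layout_slow`.
    }
    Some(aligned_ptr - aligned_size) // Yes: new bump pointer and allocation address.
}

fn main() {
    // Chunk data occupies 0x1000..0x1100; the bump pointer starts at the end.
    assert_eq!(bump_down(0x1000, 0x1100, 24, 8), Some(0x10E8));
    // A request that would cross below the chunk start is rejected.
    assert_eq!(bump_down(0x1000, 0x1008, 24, 8), None);
}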
1982 | | |
1983 | | /// Gets the remaining capacity in the current chunk (in bytes). |
1984 | | /// |
1985 | | /// ## Example |
1986 | | /// |
1987 | | /// ``` |
1988 | | /// use bumpalo::Bump; |
1989 | | /// |
1990 | | /// let bump = Bump::with_capacity(100); |
1991 | | /// |
1992 | | /// let capacity = bump.chunk_capacity(); |
1993 | | /// assert!(capacity >= 100); |
1994 | | /// ``` |
1995 | 0 | pub fn chunk_capacity(&self) -> usize { |
1996 | 0 | let current_footer = self.current_chunk_footer.get(); |
1997 | 0 | let current_footer = unsafe { current_footer.as_ref() }; |
1998 | | |
1999 | 0 | current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize |
2000 | 0 | } |
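A small usage sketch (not from the crate's own docs) complementing the example above: allocating from the arena consumes space in the current chunk, so `chunk_capacity` can only stay the same or shrink until the slow path installs a fresh chunk.

use bumpalo::Bump;

fn main() {
    let bump = Bump::with_capacity(256);
    let before = bump.chunk_capacity();

    // 64 bytes come out of the current chunk, so the remaining capacity
    // cannot grow.
    bump.alloc([0u8; 64]);
    let after = bump.chunk_capacity();
    assert!(after <= before);
}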
2001 | | |
2002 | | /// Slow path allocation for when we need to allocate a new chunk from the |
2003 | | /// parent bump set because there isn't enough room in our current chunk. |
2004 | | #[inline(never)] |
2005 | | #[cold] |
2006 | 1.04M | fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> { |
2007 | | unsafe { |
2008 | 1.04M | let allocation_limit_remaining = self.allocation_limit_remaining(); |
2009 | | |
2010 | | // Get a new chunk from the global allocator. |
2011 | 1.04M | let current_footer = self.current_chunk_footer.get(); |
2012 | 1.04M | let current_layout = current_footer.as_ref().layout; |
2013 | | |
2014 | | // By default, we want our new chunk to be about twice as big |
2015 | | // as the previous chunk. If the global allocator refuses it, |
2016 | | // we try to divide it by half until it works or the size falls
2017 | | // below the default chunk size (or the layout's size, if larger).
2018 | 1.04M | let min_new_chunk_size = layout.size().max(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER); |
2019 | 1.04M | let mut base_size = (current_layout.size() - FOOTER_SIZE) |
2020 | 1.04M | .checked_mul(2)? |
2021 | 1.04M | .max(min_new_chunk_size); |
2022 | 1.04M | let chunk_memory_details = iter::from_fn(|| { |
2023 | 1.04M | let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit |
2024 | 0 | && base_size >= layout.size() |
2025 | 0 | && limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER |
2026 | 0 | && self.allocated_bytes() == 0); |
2027 | | |
2028 | 1.04M | if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits { |
2029 | 1.04M | let size = base_size; |
2030 | 1.04M | base_size /= 2; |
2031 | 1.04M | Self::new_chunk_memory_details(Some(size), layout) |
2032 | | } else { |
2033 | 0 | None |
2034 | | } |
2035 | 1.04M | });
<bumpalo::Bump>::alloc_layout_slow::{closure#0}: separate instantiation, entry count 1.04M (per-line counts parallel the listing above)
Unexecuted instantiation: <bumpalo::Bump>::alloc_layout_slow::{closure#0}
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_layout_slow::{closure#0}
2036 | | |
2037 | 1.04M | let new_footer = chunk_memory_details |
2038 | 1.04M | .filter_map(|chunk_memory_details| { |
2039 | 1.04M | if Self::chunk_fits_under_limit( |
2040 | 1.04M | allocation_limit_remaining, |
2041 | 1.04M | chunk_memory_details, |
2042 | | ) { |
2043 | 1.04M | Self::new_chunk(chunk_memory_details, layout, current_footer) |
2044 | | } else { |
2045 | 0 | None |
2046 | | } |
2047 | 1.04M | })
<bumpalo::Bump>::alloc_layout_slow::{closure#1}: separate instantiation, entry count 1.04M (per-line counts parallel the listing above)
Unexecuted instantiation: <bumpalo::Bump>::alloc_layout_slow::{closure#1}
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_layout_slow::{closure#1}
2048 | 1.04M | .next()?; |
2049 | | |
2050 | 1.04M | debug_assert_eq!( |
2051 | 0 | new_footer.as_ref().data.as_ptr() as usize % layout.align(), |
2052 | | 0 |
2053 | | ); |
2054 | | |
2055 | | // Set the new chunk as our new current chunk. |
2056 | 1.04M | self.current_chunk_footer.set(new_footer); |
2057 | | |
2058 | | // And then we can rely on `try_alloc_layout_fast` to allocate
2059 | | // space within this chunk. |
2060 | 1.04M | let ptr = self.try_alloc_layout_fast(layout); |
2061 | 1.04M | debug_assert!(ptr.is_some()); |
2062 | 1.04M | ptr |
2063 | | } |
2064 | 1.04M | }
<bumpalo::Bump>::alloc_layout_slow: separate instantiation, entry count 1.04M (per-line counts parallel the listing above)
Unexecuted instantiation: <bumpalo::Bump>::alloc_layout_slow
Unexecuted instantiation: <bumpalo::Bump<_>>::alloc_layout_slow
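The comments above describe the chunk-sizing policy: aim for roughly twice the previous chunk, and if the global allocator refuses, halve the request until it either succeeds or falls below the minimum the allocation needs. The sketch below isolates that retry loop; `pick_chunk_size` and `try_reserve` are hypothetical names for illustration, and the allocation-limit bypass handled by the real code is ignored.

// Illustrative sketch (not bumpalo internals) of the "double, then halve on
// failure" policy from `alloc_layout_slow`. `try_reserve` stands in for
// asking the global allocator for a chunk of the given size.
fn pick_chunk_size(
    prev_chunk_size: usize,
    requested: usize,
    default_chunk_size: usize,
    mut try_reserve: impl FnMut(usize) -> bool,
) -> Option<usize> {
    // Aim for about twice the previous chunk, but never less than what this
    // allocation needs or the default chunk size.
    let min_new_chunk_size = requested.max(default_chunk_size);
    let mut base_size = prev_chunk_size.checked_mul(2)?.max(min_new_chunk_size);

    while base_size >= min_new_chunk_size {
        if try_reserve(base_size) {
            return Some(base_size);
        }
        // The allocator refused this size; retry with half as much.
        base_size /= 2;
    }
    None
}

fn main() {
    // Pretend the allocator can only hand out 4 KiB at a time.
    let accept = |size: usize| size <= 4096;
    assert_eq!(pick_chunk_size(4096, 100, 512, accept), Some(4096));
    // A request bigger than the allocator can ever satisfy fails outright.
    assert_eq!(pick_chunk_size(4096, 8192, 512, accept), None);
}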
2065 | | |
2066 | | /// Returns an iterator over each chunk of allocated memory that |
2067 | | /// this arena has bump allocated into. |
2068 | | /// |
2069 | | /// The chunks are returned ordered by allocation time, with the most |
2070 | | /// recently allocated chunk being returned first, and the least recently |
2071 | | /// allocated chunk being returned last. |
2072 | | /// |
2073 | | /// The values inside each chunk are also ordered by allocation time, with |
2074 | | /// the most recent allocation being earlier in the slice, and the least |
2075 | | /// recent allocation being towards the end of the slice. |
2076 | | /// |
2077 | | /// ## Safety |
2078 | | /// |
2079 | | /// Because this method takes `&mut self`, we know that the bump arena |
2080 | | /// reference is unique and therefore there aren't any active references to |
2081 | | /// any of the objects we've allocated in it either. This potential aliasing |
2082 | | /// of exclusive references is one common footgun for unsafe code that we |
2083 | | /// don't need to worry about here. |
2084 | | /// |
2085 | | /// However, there could be regions of uninitialized memory used as padding |
2086 | | /// between allocations, which is why this iterator has items of type |
2087 | | /// `[MaybeUninit<u8>]`, instead of simply `[u8]`. |
2088 | | /// |
2089 | | /// The only way to guarantee that there is no padding between allocations |
2090 | | /// or within allocated objects is if all of these properties hold: |
2091 | | /// |
2092 | | /// 1. Every object allocated in this arena has the same alignment, |
2093 | | /// and that alignment is at most 16. |
2094 | | /// 2. Every object's size is a multiple of its alignment. |
2095 | | /// 3. None of the objects allocated in this arena contain any internal |
2096 | | /// padding. |
2097 | | /// |
2098 | | /// If you want to use this `iter_allocated_chunks` method, it is *your* |
2099 | | /// responsibility to ensure that these properties hold before calling |
2100 | | /// `MaybeUninit::assume_init` or otherwise reading the returned values. |
2101 | | /// |
2102 | | /// Finally, you must also ensure that any values allocated into the bump |
2103 | | /// arena have not had their `Drop` implementations called on them, |
2104 | | /// e.g. after dropping a [`bumpalo::boxed::Box<T>`][crate::boxed::Box]. |
2105 | | /// |
2106 | | /// ## Example |
2107 | | /// |
2108 | | /// ``` |
2109 | | /// let mut bump = bumpalo::Bump::new(); |
2110 | | /// |
2111 | | /// // Allocate a bunch of `i32`s in this bump arena, potentially causing |
2112 | | /// // additional memory chunks to be reserved. |
2113 | | /// for i in 0..10000 { |
2114 | | /// bump.alloc(i); |
2115 | | /// } |
2116 | | /// |
2117 | | /// // Iterate over each chunk we've bump allocated into. This is safe |
2118 | | /// // because we have only allocated `i32`s in this arena, which fulfills |
2119 | | /// // the above requirements. |
2120 | | /// for ch in bump.iter_allocated_chunks() { |
2121 | | /// println!("Used a chunk that is {} bytes long", ch.len()); |
2122 | | /// println!("The first byte is {:?}", unsafe { |
2123 | | /// ch[0].assume_init() |
2124 | | /// }); |
2125 | | /// } |
2126 | | /// |
2127 | | /// // Within a chunk, allocations are ordered from most recent to least |
2128 | | /// // recent. If we allocated 'a', then 'b', then 'c', when we iterate |
2129 | | /// // through the chunk's data, we get them in the order 'c', then 'b', |
2130 | | /// // then 'a'. |
2131 | | /// |
2132 | | /// bump.reset(); |
2133 | | /// bump.alloc(b'a'); |
2134 | | /// bump.alloc(b'b'); |
2135 | | /// bump.alloc(b'c'); |
2136 | | /// |
2137 | | /// assert_eq!(bump.iter_allocated_chunks().count(), 1); |
2138 | | /// let chunk = bump.iter_allocated_chunks().nth(0).unwrap(); |
2139 | | /// assert_eq!(chunk.len(), 3); |
2140 | | /// |
2141 | | /// // Safe because we've only allocated `u8`s in this arena, which |
2142 | | /// // fulfills the above requirements. |
2143 | | /// unsafe { |
2144 | | /// assert_eq!(chunk[0].assume_init(), b'c'); |
2145 | | /// assert_eq!(chunk[1].assume_init(), b'b'); |
2146 | | /// assert_eq!(chunk[2].assume_init(), b'a'); |
2147 | | /// } |
2148 | | /// ``` |
2149 | 0 | pub fn iter_allocated_chunks(&mut self) -> ChunkIter<'_, MIN_ALIGN> { |
2150 | | // Safety: Ensured by mutable borrow of `self`. |
2151 | 0 | let raw = unsafe { self.iter_allocated_chunks_raw() }; |
2152 | 0 | ChunkIter { |
2153 | 0 | raw, |
2154 | 0 | bump: PhantomData, |
2155 | 0 | } |
2156 | 0 | } |
2157 | | |
2158 | | /// Returns an iterator over raw pointers to chunks of allocated memory that |
2159 | | /// this arena has bump allocated into. |
2160 | | /// |
2161 | | /// This is an unsafe version of [`iter_allocated_chunks()`](Bump::iter_allocated_chunks), |
2162 | | /// with the caller responsible for safe usage of the returned pointers as |
2163 | | /// well as ensuring that the iterator is not invalidated by new |
2164 | | /// allocations. |
2165 | | /// |
2166 | | /// ## Safety |
2167 | | /// |
2168 | | /// Allocations from this arena must not be performed while the returned |
2169 | | /// iterator is alive. If reading the chunk data (or casting to a reference),
2170 | | /// the caller must ensure that there exist no mutable references to |
2171 | | /// previously allocated data. |
2172 | | /// |
2173 | | /// In addition, all of the caveats when reading the chunk data from |
2174 | | /// [`iter_allocated_chunks()`](Bump::iter_allocated_chunks) still apply. |
2175 | 0 | pub unsafe fn iter_allocated_chunks_raw(&self) -> ChunkRawIter<'_, MIN_ALIGN> { |
2176 | 0 | ChunkRawIter { |
2177 | 0 | footer: self.current_chunk_footer.get(), |
2178 | 0 | bump: PhantomData, |
2179 | 0 | } |
2180 | 0 | } |
2181 | | |
2182 | | /// Calculates the number of bytes currently allocated across all chunks in |
2183 | | /// this bump arena. |
2184 | | /// |
2185 | | /// If you allocate types of different alignments or types with |
2186 | | /// larger-than-typical alignment in the same arena, some padding |
2187 | | /// bytes might get allocated in the bump arena. Note that those padding |
2188 | | /// bytes will add to this method's resulting sum, so you cannot rely |
2189 | | /// on it only counting the sum of the sizes of the things |
2190 | | /// you've allocated in the arena. |
2191 | | /// |
2192 | | /// The allocated bytes do not include the size of bumpalo's metadata, |
2193 | | /// so the amount of memory requested from the Rust allocator is higher |
2194 | | /// than the returned value. |
2195 | | /// |
2196 | | /// ## Example |
2197 | | /// |
2198 | | /// ``` |
2199 | | /// let bump = bumpalo::Bump::new(); |
2200 | | /// let _x = bump.alloc_slice_fill_default::<u32>(5); |
2201 | | /// let bytes = bump.allocated_bytes(); |
2202 | | /// assert!(bytes >= core::mem::size_of::<u32>() * 5); |
2203 | | /// ``` |
2204 | 0 | pub fn allocated_bytes(&self) -> usize { |
2205 | 0 | let footer = self.current_chunk_footer.get(); |
2206 | | |
2207 | 0 | unsafe { footer.as_ref().allocated_bytes } |
2208 | 0 | }
Unexecuted instantiation: <bumpalo::Bump>::allocated_bytes
Unexecuted instantiation: <bumpalo::Bump>::allocated_bytes
Unexecuted instantiation: <bumpalo::Bump<_>>::allocated_bytes
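As the doc comment above warns, the result includes alignment padding (and, as the `allocated_bytes` test near the end of this file shows, whole chunks), so the sum of payload sizes is only a lower bound. A small sketch in the same style as the doc example:

use bumpalo::Bump;

fn main() {
    let bump = Bump::new();

    // A 1-byte value followed by an 8-byte-aligned value forces padding
    // between the two allocations.
    bump.alloc(1u8);
    bump.alloc(2u64);

    // Padding and whole-chunk accounting mean this is only a lower bound.
    let bytes = bump.allocated_bytes();
    assert!(bytes >= core::mem::size_of::<u8>() + core::mem::size_of::<u64>());
}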
2209 | | |
2210 | | /// Calculates the number of bytes requested from the Rust allocator for this `Bump`. |
2211 | | /// |
2212 | | /// This number is equal to the [`allocated_bytes()`](Self::allocated_bytes) plus |
2213 | | /// the size of the bump metadata. |
2214 | 0 | pub fn allocated_bytes_including_metadata(&self) -> usize { |
2215 | 0 | let metadata_size = |
2216 | 0 | unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() }; |
2217 | 0 | self.allocated_bytes() + metadata_size |
2218 | 0 | } |
2219 | | |
2220 | | #[inline] |
2221 | 40.2M | unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool { |
2222 | 40.2M | let footer = self.current_chunk_footer.get(); |
2223 | 40.2M | let footer = footer.as_ref(); |
2224 | 40.2M | footer.ptr.get() == ptr |
2225 | 40.2M | }
<bumpalo::Bump>::is_last_allocation: separate instantiation, entry count 40.2M (per-line counts parallel the listing above)
Unexecuted instantiation: <bumpalo::Bump<_>>::is_last_allocation |
2226 | | |
2227 | | #[inline] |
2228 | 36.5M | unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) { |
2229 | | // If the pointer is the last allocation we made, we can reuse the bytes, |
2230 | | // otherwise they are simply leaked -- at least until somebody calls reset(). |
2231 | 36.5M | if self.is_last_allocation(ptr) { |
2232 | 458k | let ptr = self.current_chunk_footer.get().as_ref().ptr.get(); |
2233 | 458k | let ptr = ptr.as_ptr().add(layout.size()); |
2234 | | |
2235 | 458k | let ptr = round_mut_ptr_up_to_unchecked(ptr, MIN_ALIGN); |
2236 | 458k | debug_assert!( |
2237 | 0 | is_pointer_aligned_to(ptr, MIN_ALIGN), |
2238 | | "bump pointer {ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}" |
2239 | | ); |
2240 | 458k | let ptr = NonNull::new_unchecked(ptr); |
2241 | 458k | self.current_chunk_footer.get().as_ref().ptr.set(ptr); |
2242 | 36.0M | } |
2243 | 36.5M | }
Separate instantiation of `dealloc`, entry count 36.5M (per-line counts parallel the listing above)
Unexecuted instantiation: <bumpalo::Bump<_>>::dealloc |
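`dealloc` only reclaims space when the freed pointer is the most recent allocation; anything older is simply leaked until `reset()`. The sketch below drives it through the `Allocator` impl later in this file, so it assumes the crate is built with the `allocator-api2` feature (or nightly `allocator_api`) and that your crate depends on allocator-api2.

use allocator_api2::alloc::Allocator;
use bumpalo::Bump;
use core::alloc::Layout;

fn main() {
    let bump = Bump::new();
    let a = &bump;

    let layout = Layout::from_size_align(64, 8).unwrap();
    let p = a.allocate(layout).unwrap();
    let before = bump.chunk_capacity();

    // `p` is the most recent allocation, so deallocating it moves the bump
    // pointer back and the bytes become reusable. Deallocating anything
    // older would be a no-op instead.
    unsafe { a.deallocate(p.cast(), layout) };
    assert!(bump.chunk_capacity() >= before);
}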
2244 | | |
2245 | | #[inline] |
2246 | 1.80M | unsafe fn shrink( |
2247 | 1.80M | &self, |
2248 | 1.80M | ptr: NonNull<u8>, |
2249 | 1.80M | old_layout: Layout, |
2250 | 1.80M | new_layout: Layout, |
2251 | 1.80M | ) -> Result<NonNull<u8>, AllocErr> { |
2252 | | // If the new layout demands greater alignment than the old layout has, |
2253 | | // then either |
2254 | | // |
2255 | | // 1. the pointer happens to satisfy the new layout's alignment, so we |
2256 | | // got lucky and can return the pointer as-is, or |
2257 | | // |
2258 | | // 2. the pointer is not aligned to the new layout's demanded alignment, |
2259 | | // and we are unlucky. |
2260 | | // |
2261 | | // In the case of (2), to successfully "shrink" the allocation, we have |
2262 | | // to allocate a whole new region for the new layout. |
2263 | 1.80M | if old_layout.align() < new_layout.align() { |
2264 | 0 | return if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) { |
2265 | 0 | Ok(ptr) |
2266 | | } else { |
2267 | 0 | let new_ptr = self.try_alloc_layout(new_layout)?; |
2268 | | |
2269 | | // We know that these regions are nonoverlapping because |
2270 | | // `new_ptr` is a fresh allocation. |
2271 | 0 | ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_layout.size()); |
2272 | | |
2273 | 0 | Ok(new_ptr) |
2274 | | }; |
2275 | 1.80M | } |
2276 | | |
2277 | 1.80M | debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align())); |
2278 | | |
2279 | 1.80M | let old_size = old_layout.size(); |
2280 | 1.80M | let new_size = new_layout.size(); |
2281 | | |
2282 | | // This is how much space we would *actually* reclaim while satisfying |
2283 | | // the requested alignment. |
2284 | 1.80M | let delta = round_down_to(old_size - new_size, new_layout.align().max(MIN_ALIGN)); |
2285 | | |
2286 | 1.80M | if self.is_last_allocation(ptr) |
2287 | | // Only reclaim the excess space (which requires a copy) if it |
2288 | | // is worth it: we are actually going to recover "enough" space |
2289 | | // and we can do a non-overlapping copy. |
2290 | | // |
2291 | | // We do `(old_size + 1) / 2` so division rounds up rather than |
2292 | | // down. Consider when: |
2293 | | // |
2294 | | // old_size = 5 |
2295 | | // new_size = 3 |
2296 | | // |
2297 | | // If we do not take care to round up, this will result in: |
2298 | | // |
2299 | | // delta = 2 |
2300 | | // (old_size / 2) = (5 / 2) = 2 |
2301 | | // |
2302 | | // And the check will succeed even though we have
2303 | | // overlapping ranges: |
2304 | | // |
2305 | | // |--------old-allocation-------| |
2306 | | // |------from-------| |
2307 | | // |-------to--------| |
2308 | | // +-----+-----+-----+-----+-----+ |
2309 | | // | a | b | c | . | . | |
2310 | | // +-----+-----+-----+-----+-----+ |
2311 | | // |
2312 | | // But we MUST NOT have overlapping ranges because we use |
2313 | | // `copy_nonoverlapping` below! Therefore, we round the division |
2314 | | // up to avoid this issue. |
2315 | 0 | && delta >= (old_size + 1) / 2 |
2316 | | { |
2317 | 0 | let footer = self.current_chunk_footer.get(); |
2318 | 0 | let footer = footer.as_ref(); |
2319 | | |
2320 | | // NB: new_ptr is aligned, because ptr *has to* be aligned, and we |
2321 | | // made sure delta is aligned. |
2322 | 0 | let new_ptr = NonNull::new_unchecked(footer.ptr.get().as_ptr().add(delta)); |
2323 | 0 | debug_assert!( |
2324 | 0 | is_pointer_aligned_to(new_ptr.as_ptr(), MIN_ALIGN), |
2325 | | "bump pointer {new_ptr:#p} should be aligned to the minimum alignment of {MIN_ALIGN:#x}" |
2326 | | ); |
2327 | 0 | footer.ptr.set(new_ptr); |
2328 | | |
2329 | | // NB: we know it is non-overlapping because of the size check |
2330 | | // in the `if` condition. |
2331 | 0 | ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size); |
2332 | | |
2333 | 0 | return Ok(new_ptr); |
2334 | 1.80M | } |
2335 | | |
2336 | | // If this wasn't the last allocation, or shrinking wasn't worth it, |
2337 | | // simply return the old pointer as-is. |
2338 | 1.80M | Ok(ptr) |
2339 | 1.80M | }
Separate instantiation of `shrink`, entry count 1.80M (per-line counts parallel the listing above)
Unexecuted instantiation: <bumpalo::Bump<_>>::shrink |
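The long comment above argues that rounding `old_size / 2` up is exactly what keeps the in-place shrink copy non-overlapping. The standalone sketch below restates that guard and checks the implication exhaustively for small sizes; `guard_allows` is an illustrative name, not bumpalo code.

// Given delta <= old_size - new_size, requiring delta >= (old_size + 1) / 2
// (at least half, rounded *up*) forces delta >= new_size, which is what makes
// the `copy_nonoverlapping` in `shrink` sound.
fn guard_allows(old_size: usize, new_size: usize, delta: usize) -> bool {
    assert!(new_size <= old_size && delta <= old_size - new_size);
    delta >= (old_size + 1) / 2
}

fn main() {
    // The comment's example: old_size = 5, new_size = 3, delta = 2.
    // Rounding down (5 / 2 = 2) would accept an overlapping copy;
    // rounding up ((5 + 1) / 2 = 3) correctly rejects it.
    assert!(!guard_allows(5, 3, 2));

    // Exhaustively confirm the implication for small sizes.
    for old in 0..64usize {
        for new in 0..=old {
            for delta in 0..=(old - new) {
                if guard_allows(old, new, delta) {
                    assert!(delta >= new, "copy would overlap");
                }
            }
        }
    }
}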
2340 | | |
2341 | | #[inline] |
2342 | 1.92M | unsafe fn grow( |
2343 | 1.92M | &self, |
2344 | 1.92M | ptr: NonNull<u8>, |
2345 | 1.92M | old_layout: Layout, |
2346 | 1.92M | new_layout: Layout, |
2347 | 1.92M | ) -> Result<NonNull<u8>, AllocErr> { |
2348 | 1.92M | let old_size = old_layout.size(); |
2349 | | |
2350 | 1.92M | let new_size = new_layout.size(); |
2351 | 1.92M | let new_size = round_up_to(new_size, MIN_ALIGN).ok_or(AllocErr)?; |
2352 | | |
2353 | 1.92M | let align_is_compatible = old_layout.align() >= new_layout.align(); |
2354 | | |
2355 | 1.92M | if align_is_compatible && self.is_last_allocation(ptr) { |
2356 | | // Try to allocate the delta size within this same block so we can |
2357 | | // reuse the currently allocated space. |
2358 | 138k | let delta = new_size - old_size; |
2359 | 132k | if let Some(p) = |
2360 | 138k | self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?) |
2361 | | { |
2362 | 132k | ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size); |
2363 | 132k | return Ok(p); |
2364 | 6.33k | } |
2365 | 1.78M | } |
2366 | | |
2367 | | // Fallback: do a fresh allocation and copy the existing data into it. |
2368 | 1.79M | let new_ptr = self.try_alloc_layout(new_layout)?; |
2369 | 1.79M | ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size); |
2370 | 1.79M | Ok(new_ptr) |
2371 | 1.92M | }
Separate instantiation of `grow`, entry count 1.92M (per-line counts parallel the listing above)
Unexecuted instantiation: <bumpalo::Bump<_>>::grow |
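When the grown pointer is the last allocation and the alignments are compatible, `grow` only bump-allocates the size delta, so the block just slides down by `new_size - old_size` bytes, mirroring the `test_realloc` case near the end of this file. The sketch below assumes the `allocator-api2` feature, a dependency on allocator-api2, and the default `MIN_ALIGN` of 1.

use allocator_api2::alloc::Allocator;
use bumpalo::Bump;
use core::alloc::Layout;

fn main() {
    let bump = Bump::new();
    let a = &bump;

    let old = Layout::from_size_align(16, 1).unwrap();
    let new = Layout::from_size_align(24, 1).unwrap();

    let p = a.allocate(old).unwrap();
    // Fully qualified call to the trait method, to keep the example explicit.
    let q = unsafe { Allocator::grow(&a, p.cast(), old, new).unwrap() };

    // Because `p` was the last allocation, `grow` only allocated the 8-byte
    // delta, and bumpalo bumps downward, so the grown block starts exactly
    // `delta` bytes below the old one instead of being a fresh allocation.
    let delta = new.size() - old.size();
    assert_eq!(
        q.cast::<u8>().as_ptr() as usize,
        p.cast::<u8>().as_ptr() as usize - delta
    );
}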
2372 | | } |
2373 | | |
2374 | | /// An iterator over each chunk of allocated memory that |
2375 | | /// an arena has bump allocated into. |
2376 | | /// |
2377 | | /// The chunks are returned ordered by allocation time, with the most recently |
2378 | | /// allocated chunk being returned first. |
2379 | | /// |
2380 | | /// The values inside each chunk are also ordered by allocation time, with the most |
2381 | | /// recent allocation being earlier in the slice. |
2382 | | /// |
2383 | | /// This struct is created by the [`iter_allocated_chunks`] method on |
2384 | | /// [`Bump`]. See that function for a safety description regarding reading from the returned items. |
2385 | | /// |
2386 | | /// [`Bump`]: struct.Bump.html |
2387 | | /// [`iter_allocated_chunks`]: struct.Bump.html#method.iter_allocated_chunks |
2388 | | #[derive(Debug)] |
2389 | | pub struct ChunkIter<'a, const MIN_ALIGN: usize = 1> { |
2390 | | raw: ChunkRawIter<'a, MIN_ALIGN>, |
2391 | | bump: PhantomData<&'a mut Bump>, |
2392 | | } |
2393 | | |
2394 | | impl<'a, const MIN_ALIGN: usize> Iterator for ChunkIter<'a, MIN_ALIGN> { |
2395 | | type Item = &'a [mem::MaybeUninit<u8>]; |
2396 | | |
2397 | 0 | fn next(&mut self) -> Option<Self::Item> { |
2398 | | unsafe { |
2399 | 0 | let (ptr, len) = self.raw.next()?; |
2400 | 0 | let slice = slice::from_raw_parts(ptr as *const mem::MaybeUninit<u8>, len); |
2401 | 0 | Some(slice) |
2402 | | } |
2403 | 0 | } |
2404 | | } |
2405 | | |
2406 | | impl<'a, const MIN_ALIGN: usize> iter::FusedIterator for ChunkIter<'a, MIN_ALIGN> {} |
2407 | | |
2408 | | /// An iterator over raw pointers to chunks of allocated memory that this |
2409 | | /// arena has bump allocated into. |
2410 | | /// |
2411 | | /// See [`ChunkIter`] for details regarding the returned chunks. |
2412 | | /// |
2413 | | /// This struct is created by the [`iter_allocated_chunks_raw`] method on |
2414 | | /// [`Bump`]. See that function for a safety description regarding reading from |
2415 | | /// the returned items. |
2416 | | /// |
2417 | | /// [`Bump`]: struct.Bump.html |
2418 | | /// [`iter_allocated_chunks_raw`]: struct.Bump.html#method.iter_allocated_chunks_raw |
2419 | | #[derive(Debug)] |
2420 | | pub struct ChunkRawIter<'a, const MIN_ALIGN: usize = 1> { |
2421 | | footer: NonNull<ChunkFooter>, |
2422 | | bump: PhantomData<&'a Bump<MIN_ALIGN>>, |
2423 | | } |
2424 | | |
2425 | | impl<const MIN_ALIGN: usize> Iterator for ChunkRawIter<'_, MIN_ALIGN> { |
2426 | | type Item = (*mut u8, usize); |
2427 | 0 | fn next(&mut self) -> Option<(*mut u8, usize)> { |
2428 | | unsafe { |
2429 | 0 | let foot = self.footer.as_ref(); |
2430 | 0 | if foot.is_empty() { |
2431 | 0 | return None; |
2432 | 0 | } |
2433 | 0 | let (ptr, len) = foot.as_raw_parts(); |
2434 | 0 | self.footer = foot.prev.get(); |
2435 | 0 | Some((ptr as *mut u8, len)) |
2436 | | } |
2437 | 0 | } |
2438 | | } |
2439 | | |
2440 | | impl<const MIN_ALIGN: usize> iter::FusedIterator for ChunkRawIter<'_, MIN_ALIGN> {} |
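A sketch of walking `iter_allocated_chunks_raw` without reading the chunk contents: each item is a `(pointer, length)` pair for one chunk's allocated data, the same region `ChunkIter` wraps as a slice, so summing the lengths bounds the bytes handed out so far. No allocation happens while the iterator is alive, which is what the safety contract above requires.

use bumpalo::Bump;

fn main() {
    let bump = Bump::new();
    for i in 0..1000u32 {
        bump.alloc(i);
    }

    // Only the lengths are used; the chunk data itself is never dereferenced.
    let used: usize = unsafe {
        bump.iter_allocated_chunks_raw().map(|(_ptr, len)| len).sum()
    };
    assert!(used >= 1000 * core::mem::size_of::<u32>());
}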
2441 | | |
2442 | | #[inline(never)] |
2443 | | #[cold] |
2444 | 0 | fn oom() -> ! { |
2445 | 0 | panic!("out of memory") |
2446 | | } |
2447 | | |
2448 | | unsafe impl<'a, const MIN_ALIGN: usize> alloc::Alloc for &'a Bump<MIN_ALIGN> { |
2449 | | #[inline(always)] |
2450 | 0 | unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { |
2451 | 0 | self.try_alloc_layout(layout) |
2452 | 0 | } |
2453 | | |
2454 | | #[inline] |
2455 | 0 | unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) { |
2456 | 0 | Bump::<MIN_ALIGN>::dealloc(self, ptr, layout); |
2457 | 0 | } |
2458 | | |
2459 | | #[inline] |
2460 | 0 | unsafe fn realloc( |
2461 | 0 | &mut self, |
2462 | 0 | ptr: NonNull<u8>, |
2463 | 0 | layout: Layout, |
2464 | 0 | new_size: usize, |
2465 | 0 | ) -> Result<NonNull<u8>, AllocErr> { |
2466 | 0 | let old_size = layout.size(); |
2467 | | |
2468 | 0 | if old_size == 0 { |
2469 | 0 | return self.try_alloc_layout(layout); |
2470 | 0 | } |
2471 | | |
2472 | 0 | let new_layout = layout_from_size_align(new_size, layout.align())?; |
2473 | 0 | if new_size <= old_size { |
2474 | 0 | Bump::shrink(self, ptr, layout, new_layout) |
2475 | | } else { |
2476 | 0 | Bump::grow(self, ptr, layout, new_layout) |
2477 | | } |
2478 | 0 | } |
2479 | | } |
2480 | | |
2481 | | #[cfg(any(feature = "allocator_api", feature = "allocator-api2"))] |
2482 | | unsafe impl<'a, const MIN_ALIGN: usize> Allocator for &'a Bump<MIN_ALIGN> { |
2483 | | #[inline] |
2484 | 36.5M | fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { |
2485 | 36.5M | self.try_alloc_layout(layout) |
2486 | 36.5M | .map(|p| unsafe { |
2487 | 36.5M | NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size())) |
2488 | 36.5M | })
<&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::allocate::{closure#0}: separate instantiation, entry count 36.5M (per-line counts parallel the listing above)
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::allocate::{closure#0} |
2489 | 36.5M | .map_err(|_| AllocError) |
2490 | 36.5M | }
<&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::allocate: separate instantiation, entry count 36.5M (per-line counts parallel the listing above)
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::allocate |
2491 | | |
2492 | | #[inline] |
2493 | 36.5M | unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) { |
2494 | 36.5M | Bump::<MIN_ALIGN>::dealloc(self, ptr, layout) |
2495 | 36.5M | }
<&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::deallocate: separate instantiation, entry count 36.5M (per-line counts parallel the listing above)
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::deallocate |
2496 | | |
2497 | | #[inline] |
2498 | 1.80M | unsafe fn shrink( |
2499 | 1.80M | &self, |
2500 | 1.80M | ptr: NonNull<u8>, |
2501 | 1.80M | old_layout: Layout, |
2502 | 1.80M | new_layout: Layout, |
2503 | 1.80M | ) -> Result<NonNull<[u8]>, AllocError> { |
2504 | 1.80M | Bump::<MIN_ALIGN>::shrink(self, ptr, old_layout, new_layout) |
2505 | 1.80M | .map(|p| unsafe { |
2506 | 1.80M | NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size())) |
2507 | 1.80M | })
<&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::shrink::{closure#0}: separate instantiation, entry count 1.80M (per-line counts parallel the listing above)
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::shrink::{closure#0} |
2508 | 1.80M | .map_err(|_| AllocError) |
2509 | 1.80M | }
<&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::shrink: separate instantiation, entry count 1.80M (per-line counts parallel the listing above)
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::shrink |
2510 | | |
2511 | | #[inline] |
2512 | 1.92M | unsafe fn grow( |
2513 | 1.92M | &self, |
2514 | 1.92M | ptr: NonNull<u8>, |
2515 | 1.92M | old_layout: Layout, |
2516 | 1.92M | new_layout: Layout, |
2517 | 1.92M | ) -> Result<NonNull<[u8]>, AllocError> { |
2518 | 1.92M | Bump::<MIN_ALIGN>::grow(self, ptr, old_layout, new_layout) |
2519 | 1.92M | .map(|p| unsafe { |
2520 | 1.92M | NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size())) |
2521 | 1.92M | })
<&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::grow::{closure#0}: separate instantiation, entry count 1.92M (per-line counts parallel the listing above)
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::grow::{closure#0} |
2522 | 1.92M | .map_err(|_| AllocError) |
2523 | 1.92M | }
<&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::grow: separate instantiation, entry count 1.92M (per-line counts parallel the listing above)
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::grow |
2524 | | |
2525 | | #[inline] |
2526 | 0 | unsafe fn grow_zeroed( |
2527 | 0 | &self, |
2528 | 0 | ptr: NonNull<u8>, |
2529 | 0 | old_layout: Layout, |
2530 | 0 | new_layout: Layout, |
2531 | 0 | ) -> Result<NonNull<[u8]>, AllocError> { |
2532 | 0 | let mut ptr = self.grow(ptr, old_layout, new_layout)?; |
2533 | 0 | ptr.as_mut()[old_layout.size()..].fill(0); |
2534 | 0 | Ok(ptr) |
2535 | 0 | }
Unexecuted instantiation: <&bumpalo::Bump as allocator_api2::stable::alloc::Allocator>::grow_zeroed
Unexecuted instantiation: <&bumpalo::Bump<_> as allocator_api2::stable::alloc::Allocator>::grow_zeroed
2536 | | } |
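A sketch of `grow_zeroed` as implemented above: it delegates to `grow` and then zero-fills everything past the old size. As with the other sketches, this assumes the `allocator-api2` feature and a dependency on allocator-api2.

use allocator_api2::alloc::Allocator;
use bumpalo::Bump;
use core::alloc::Layout;

fn main() {
    let bump = Bump::new();
    let a = &bump;

    let old = Layout::from_size_align(4, 1).unwrap();
    let new = Layout::from_size_align(16, 1).unwrap();

    let p = a.allocate(old).unwrap();
    unsafe {
        // Fill the original 4 bytes with a recognizable pattern.
        p.cast::<u8>().as_ptr().write_bytes(0xAB, old.size());

        let q = Allocator::grow_zeroed(&a, p.cast(), old, new).unwrap();
        let bytes = q.as_ref();

        // The old prefix is copied over; the freshly grown tail is zeroed.
        assert!(bytes[..old.size()].iter().all(|&b| b == 0xAB));
        assert!(bytes[old.size()..].iter().all(|&b| b == 0));
    }
}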
2537 | | |
2538 | | // NB: Only tests which require private types, fields, or methods should be in |
2539 | | // here. Anything that can just be tested via public API surface should be in |
2540 | | // `bumpalo/tests/all/*`. |
2541 | | #[cfg(test)] |
2542 | | mod tests { |
2543 | | use super::*; |
2544 | | |
2545 | | // Uses private type `ChunkFooter`. |
2546 | | #[test] |
2547 | | fn chunk_footer_is_five_words() { |
2548 | | assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 6); |
2549 | | } |
2550 | | |
2551 | | // Uses private `DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER` and `FOOTER_SIZE`. |
2552 | | #[test] |
2553 | | fn allocated_bytes() { |
2554 | | let mut b = Bump::with_capacity(1); |
2555 | | |
2556 | | assert_eq!(b.allocated_bytes(), DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER); |
2557 | | assert_eq!( |
2558 | | b.allocated_bytes_including_metadata(), |
2559 | | DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER + FOOTER_SIZE |
2560 | | ); |
2561 | | |
2562 | | b.reset(); |
2563 | | |
2564 | | assert_eq!(b.allocated_bytes(), DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER); |
2565 | | assert_eq!( |
2566 | | b.allocated_bytes_including_metadata(), |
2567 | | DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER + FOOTER_SIZE |
2568 | | ); |
2569 | | } |
2570 | | |
2571 | | // Uses private `alloc` module. |
2572 | | #[test] |
2573 | | fn test_realloc() { |
2574 | | use crate::alloc::Alloc; |
2575 | | |
2576 | | unsafe { |
2577 | | const CAPACITY: usize = DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER; |
2578 | | let mut b = Bump::<1>::with_min_align_and_capacity(CAPACITY); |
2579 | | |
2580 | | // `realloc` doesn't shrink allocations that aren't "worth it". |
2581 | | let layout = Layout::from_size_align(100, 1).unwrap(); |
2582 | | let p = b.alloc_layout(layout); |
2583 | | let q = (&b).realloc(p, layout, 51).unwrap(); |
2584 | | assert_eq!(p, q); |
2585 | | b.reset(); |
2586 | | |
2587 | | // `realloc` will shrink allocations that are "worth it". |
2588 | | let layout = Layout::from_size_align(100, 1).unwrap(); |
2589 | | let p = b.alloc_layout(layout); |
2590 | | let q = (&b).realloc(p, layout, 50).unwrap(); |
2591 | | assert!(p != q); |
2592 | | b.reset(); |
2593 | | |
2594 | | // `realloc` will reuse the last allocation when growing. |
2595 | | let layout = Layout::from_size_align(10, 1).unwrap(); |
2596 | | let p = b.alloc_layout(layout); |
2597 | | let q = (&b).realloc(p, layout, 11).unwrap(); |
2598 | | assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1); |
2599 | | b.reset(); |
2600 | | |
2601 | | // `realloc` will allocate a new chunk when growing the last |
2602 | | // allocation, if need be. |
2603 | | let layout = Layout::from_size_align(1, 1).unwrap(); |
2604 | | let p = b.alloc_layout(layout); |
2605 | | let q = (&b).realloc(p, layout, CAPACITY + 1).unwrap(); |
2606 | | assert_ne!(q.as_ptr() as usize, p.as_ptr() as usize - CAPACITY); |
2607 | | b.reset(); |
2608 | | |
2609 | | // `realloc` will allocate and copy when reallocating anything that |
2610 | | // wasn't the last allocation. |
2611 | | let layout = Layout::from_size_align(1, 1).unwrap(); |
2612 | | let p = b.alloc_layout(layout); |
2613 | | let _ = b.alloc_layout(layout); |
2614 | | let q = (&b).realloc(p, layout, 2).unwrap(); |
2615 | | assert!(q.as_ptr() as usize != p.as_ptr() as usize - 1); |
2616 | | b.reset(); |
2617 | | } |
2618 | | } |
2619 | | |
2620 | | // Uses our private `alloc` module. |
2621 | | #[test] |
2622 | | fn invalid_read() { |
2623 | | use alloc::Alloc; |
2624 | | |
2625 | | let mut b = &Bump::new(); |
2626 | | |
2627 | | unsafe { |
2628 | | let l1 = Layout::from_size_align(12000, 4).unwrap(); |
2629 | | let p1 = Alloc::alloc(&mut b, l1).unwrap(); |
2630 | | |
2631 | | let l2 = Layout::from_size_align(1000, 4).unwrap(); |
2632 | | Alloc::alloc(&mut b, l2).unwrap(); |
2633 | | |
2634 | | let p1 = b.realloc(p1, l1, 24000).unwrap(); |
2635 | | let l3 = Layout::from_size_align(24000, 4).unwrap(); |
2636 | | b.realloc(p1, l3, 48000).unwrap(); |
2637 | | } |
2638 | | } |
2639 | | } |