bumpalo/lib.rs
1#![doc = include_str!("../README.md")]
2#![deny(missing_debug_implementations)]
3#![deny(missing_docs)]
4#![cfg_attr(not(feature = "std"), no_std)]
5#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
6
7#[doc(hidden)]
8pub extern crate alloc as core_alloc;
9
10#[cfg(feature = "boxed")]
11pub mod boxed;
12#[cfg(feature = "collections")]
13pub mod collections;
14
15mod alloc;
16
17use core::cell::Cell;
18use core::fmt::Display;
19use core::iter;
20use core::marker::PhantomData;
21use core::mem;
22use core::ptr::{self, NonNull};
23use core::slice;
24use core::str;
25use core_alloc::alloc::{alloc, dealloc, Layout};
26
27#[cfg(feature = "allocator_api")]
28use core_alloc::alloc::{AllocError, Allocator};
29
30#[cfg(all(feature = "allocator-api2", not(feature = "allocator_api")))]
31use allocator_api2::alloc::{AllocError, Allocator};
32
33pub use alloc::AllocErr;
34
35/// An error returned from [`Bump::try_alloc_try_with`].
36#[derive(Clone, PartialEq, Eq, Debug)]
37pub enum AllocOrInitError<E> {
38 /// Indicates that the initial allocation failed.
39 Alloc(AllocErr),
40 /// Indicates that the initializer failed with the contained error after
41 /// allocation.
42 ///
43 /// It is possible but not guaranteed that the allocated memory has been
44 /// released back to the allocator at this point.
45 Init(E),
46}
47impl<E> From<AllocErr> for AllocOrInitError<E> {
48 fn from(e: AllocErr) -> Self {
49 Self::Alloc(e)
50 }
51}
52impl<E: Display> Display for AllocOrInitError<E> {
53 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
54 match self {
55 AllocOrInitError::Alloc(err) => err.fmt(f),
56 AllocOrInitError::Init(err) => write!(f, "initialization failed: {}", err),
57 }
58 }
59}
60
61/// An arena to bump allocate into.
62///
63/// ## No `Drop`s
64///
65/// Objects that are bump-allocated will never have their [`Drop`] implementation
66/// called — unless you do it manually yourself. This makes it relatively
67/// easy to leak memory or other resources.
68///
69/// If you have a type which internally manages
70///
71/// * an allocation from the global heap (e.g. [`Vec<T>`]),
72/// * open file descriptors (e.g. [`std::fs::File`]), or
73/// * any other resource that must be cleaned up (e.g. an `mmap`)
74///
75/// and relies on its `Drop` implementation to clean up the internal resource,
76/// then if you allocate that type with a `Bump`, you need to find a new way to
77/// clean up after it yourself.
78///
79/// Potential solutions are:
80///
81/// * Using [`bumpalo::boxed::Box::new_in`] instead of [`Bump::alloc`], which
82/// will drop wrapped values similarly to [`std::boxed::Box`]. Note that this
83/// requires enabling the `"boxed"` Cargo feature for this crate. **This is
84/// often the easiest solution.**
85///
86/// * Calling [`drop_in_place`][drop_in_place] or using
87/// [`std::mem::ManuallyDrop`][manuallydrop] to manually drop these types, as sketched below.
88///
89/// * Using [`bumpalo::collections::Vec`] instead of [`std::vec::Vec`].
90///
91/// * Avoiding allocating these problematic types within a `Bump`.
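///
/// For example, one way to release a `Vec`'s heap buffer is to run its
/// destructor in place by hand (a sketch; `bumpalo::boxed::Box` does the
/// equivalent automatically when the `"boxed"` feature is enabled):
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let v = bump.alloc(vec![1, 2, 3]);
///
/// unsafe {
///     // Runs `Vec`'s destructor, freeing its heap buffer. The arena space
///     // for the `Vec` header itself is only reclaimed when the `Bump` is
///     // dropped or reset, and `v` must not be used afterwards.
///     core::ptr::drop_in_place(v);
/// }
/// ```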
92///
93/// Note that not calling `Drop` is memory safe! Destructors are never
94/// guaranteed to run in Rust, you can't rely on them for enforcing memory
95/// safety.
96///
97/// [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
98/// [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
99/// [`std::fs::File`]: https://doc.rust-lang.org/std/fs/struct.File.html
100/// [drop_in_place]: https://doc.rust-lang.org/std/ptr/fn.drop_in_place.html
101/// [manuallydrop]: https://doc.rust-lang.org/std/mem/struct.ManuallyDrop.html
102/// [`bumpalo::collections::Vec`]: collections/vec/struct.Vec.html
103/// [`std::vec::Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
104/// [`bumpalo::boxed::Box::new_in`]: boxed/struct.Box.html#method.new_in
105/// [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html
106///
107/// ## Example
108///
109/// ```
110/// use bumpalo::Bump;
111///
112/// // Create a new bump arena.
113/// let bump = Bump::new();
114///
115/// // Allocate values into the arena.
116/// let forty_two = bump.alloc(42);
117/// assert_eq!(*forty_two, 42);
118///
119/// // Mutable references are returned from allocation.
120/// let mut s = bump.alloc("bumpalo");
121/// *s = "the bump allocator; and also is a buffalo";
122/// ```
123///
124/// ## Allocation Methods Come in Many Flavors
125///
126/// There are various allocation methods on `Bump`, the simplest being
127/// [`alloc`][Bump::alloc]. The others exist to satisfy some combination of
128/// fallible allocation and initialization. The allocation methods are
129/// summarized in the following table:
130///
131/// <table>
132/// <thead>
133/// <tr>
134/// <th></th>
135/// <th>Infallible Allocation</th>
136/// <th>Fallible Allocation</th>
137/// </tr>
138/// </thead>
139/// <tr>
140/// <th>By Value</th>
141/// <td><a href="#method.alloc"><code>alloc</code></a></td>
142/// <td><a href="#method.try_alloc"><code>try_alloc</code></a></td>
143/// </tr>
144/// <tr>
145/// <th>Infallible Initializer Function</th>
146/// <td><a href="#method.alloc_with"><code>alloc_with</code></a></td>
147/// <td><a href="#method.try_alloc_with"><code>try_alloc_with</code></a></td>
148/// </tr>
149/// <tr>
150/// <th>Fallible Initializer Function</th>
151/// <td><a href="#method.alloc_try_with"><code>alloc_try_with</code></a></td>
152/// <td><a href="#method.try_alloc_try_with"><code>try_alloc_try_with</code></a></td>
153/// </tr>
154/// <tbody>
155/// </tbody>
156/// </table>
157///
158/// ### Fallible Allocation: The `try_alloc_` Method Prefix
159///
160/// These allocation methods let you recover from out-of-memory (OOM)
161/// scenarios, rather than raising a panic on OOM.
162///
163/// ```
164/// use bumpalo::Bump;
165///
166/// let bump = Bump::new();
167///
168/// match bump.try_alloc(MyStruct {
169/// // ...
170/// }) {
171/// Ok(my_struct) => {
172/// // Allocation succeeded.
173/// }
174/// Err(e) => {
175/// // Out of memory.
176/// }
177/// }
178///
179/// struct MyStruct {
180/// // ...
181/// }
182/// ```
183///
184/// ### Initializer Functions: The `_with` Method Suffix
185///
186/// Calling one of the generic `…alloc(x)` methods is essentially equivalent to
187/// the matching [`…alloc_with(|| x)`](?search=alloc_with). However, if you use
188/// `…alloc_with`, then the closure will not be invoked until after allocating
189/// space for storing `x` on the heap.
190///
191/// This can be useful in certain edge-cases related to compiler optimizations.
192/// When evaluating for example `bump.alloc(x)`, semantically `x` is first put
193/// on the stack and then moved onto the heap. In some cases, the compiler is
194/// able to optimize this into constructing `x` directly on the heap, however
195/// in many cases it does not.
196///
197/// The `…alloc_with` functions try to help the compiler be smarter. In most
198/// cases, calling for example `bump.try_alloc_with(|| x)` in release mode will be
199/// enough to help the compiler realize that this optimization is valid and
200/// to construct `x` directly on the heap.
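///
/// For example (an illustrative sketch; as the warning below notes, the
/// in-place construction is not guaranteed), allocating a large value through
/// a closure gives the optimizer the chance to build it directly in the arena:
///
/// ```
/// let bump = bumpalo::Bump::new();
/// let big = bump.alloc_with(|| [0u8; 1024]);
/// assert_eq!(big.len(), 1024);
/// ```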
201///
202/// #### Warning
203///
204/// These functions critically depend on compiler optimizations to achieve their
205/// desired effect. This means that they are not an effective tool when compiling
206/// without optimizations.
207///
208/// Even when optimizations are on, these functions do not **guarantee** that
209/// the value is constructed on the heap. To the best of our knowledge no such
210/// guarantee can be made in stable Rust as of 1.54.
211///
212/// ### Fallible Initialization: The `_try_with` Method Suffix
213///
214/// The generic [`…alloc_try_with(|| x)`](?search=_try_with) methods behave
215/// like the purely `_with` suffixed methods explained above. However, they
216/// allow for fallible initialization by accepting a closure that returns a
217/// [`Result`] and will attempt to undo the initial allocation if this closure
218/// returns [`Err`].
219///
220/// #### Warning
221///
222/// If the inner closure returns [`Ok`], space for the entire [`Result`] remains
223/// allocated inside `self`. This can be a problem especially if the [`Err`]
224/// variant is larger than the [`Ok`] variant, but even otherwise there may be overhead for the
225/// [`Result`]'s discriminant.
226///
227/// <p><details><summary>Undoing the allocation in the <code>Err</code> case
228/// always fails if <code>f</code> successfully made any additional allocations
229/// in <code>self</code>.</summary>
230///
231/// For example, the following will always also leak the space for the [`Result`]
232/// into this `Bump`, even though the inner reference isn't kept and the [`Err`]
233/// payload is returned semantically by value:
234///
235/// ```rust
236/// let bump = bumpalo::Bump::new();
237///
238/// let r: Result<&mut [u8; 1000], ()> = bump.alloc_try_with(|| {
239/// let _ = bump.alloc(0_u8);
240/// Err(())
241/// });
242///
243/// assert!(r.is_err());
244/// ```
245///
246///</details></p>
247///
248/// Since [`Err`] payloads are first placed on the heap and then moved to the
249/// stack, `bump.…alloc_try_with(|| x)?` is likely to execute more slowly than
250/// the matching `bump.…alloc(x?)` in case of initialization failure. If this
251/// happens frequently, using the plain un-suffixed method may perform better.
252///
253/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
254/// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
255/// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
256///
257/// ### `Bump` Allocation Limits
258///
259/// `bumpalo` supports setting a limit on the maximum bytes of memory that can
260/// be allocated for use in a particular `Bump` arena. This limit can be set and removed with
261/// [`set_allocation_limit`][Bump::set_allocation_limit].
262/// The allocation limit is only enforced when allocating new backing chunks for
263/// a `Bump`. Updating the allocation limit will not affect existing allocations
264/// or any future allocations within the `Bump`'s current chunk.
265///
266/// #### Example
267///
268/// ```
269/// let bump = bumpalo::Bump::new();
270///
271/// assert_eq!(bump.allocation_limit(), None);
272/// bump.set_allocation_limit(Some(0));
273///
274/// assert!(bump.try_alloc(5).is_err());
275///
276/// bump.set_allocation_limit(Some(6));
277///
278/// assert_eq!(bump.allocation_limit(), Some(6));
279///
280/// bump.set_allocation_limit(None);
281///
282/// assert_eq!(bump.allocation_limit(), None);
283/// ```
284///
285/// #### Warning
286///
287/// For backwards-compatibility reasons, allocations that fail
288/// due to the allocation limit are reported the same way as
289/// allocations that fail due to resource exhaustion.
290
291#[derive(Debug)]
292pub struct Bump {
293 // The current chunk we are bump allocating within.
294 current_chunk_footer: Cell<NonNull<ChunkFooter>>,
295 allocation_limit: Cell<Option<usize>>,
296}
297
298#[repr(C)]
299#[derive(Debug)]
300struct ChunkFooter {
301 // Pointer to the start of this chunk allocation. This footer is always at
302 // the end of the chunk.
303 data: NonNull<u8>,
304
305 // The layout of this chunk's allocation.
306 layout: Layout,
307
308 // Link to the previous chunk.
309 //
310 // Note that the last node in the `prev` linked list is the canonical empty
311 // chunk, whose `prev` link points to itself.
312 prev: Cell<NonNull<ChunkFooter>>,
313
314 // Bump allocation finger that is always in the range `self.data..=self`.
315 ptr: Cell<NonNull<u8>>,
316
317 // The bytes allocated in all chunks so far. The canonical empty chunk has
318 // a size of 0, and for all other chunks `allocated_bytes` is the usable
319 // size of this chunk plus the allocated bytes
320 // of the `prev` chunk.
321 allocated_bytes: usize,
322}
323
324/// A wrapper type for the canonical, statically allocated empty chunk.
325///
326/// For the canonical empty chunk to be `static`, its type must be `Sync`, which
327/// is the purpose of this wrapper type. This is safe because the empty chunk is
328/// immutable and never actually modified.
329#[repr(transparent)]
330struct EmptyChunkFooter(ChunkFooter);
331
332unsafe impl Sync for EmptyChunkFooter {}
333
334static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter {
335 // This chunk is empty (except the footer itself).
336 layout: Layout::new::<ChunkFooter>(),
337
338 // The start of the (empty) allocatable region for this chunk is itself.
339 data: unsafe { NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8) },
340
341 // The end of the (empty) allocatable region for this chunk is also itself.
342 ptr: Cell::new(unsafe {
343 NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8)
344 }),
345
346 // Invariant: the last chunk footer in all `ChunkFooter::prev` linked lists
347 // is the empty chunk footer, whose `prev` points to itself.
348 prev: Cell::new(unsafe {
349 NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut ChunkFooter)
350 }),
351
352 // Empty chunks count as 0 allocated bytes in an arena.
353 allocated_bytes: 0,
354});
355
356impl EmptyChunkFooter {
357 fn get(&'static self) -> NonNull<ChunkFooter> {
358 NonNull::from(&self.0)
359 }
360}
361
362impl ChunkFooter {
363 // Returns the start and length of the currently allocated region of this
364 // chunk.
365 fn as_raw_parts(&self) -> (*const u8, usize) {
366 let data = self.data.as_ptr() as *const u8;
367 let ptr = self.ptr.get().as_ptr() as *const u8;
368 debug_assert!(data <= ptr);
369 debug_assert!(ptr <= self as *const ChunkFooter as *const u8);
370 let len = unsafe { (self as *const ChunkFooter as *const u8).offset_from(ptr) as usize };
371 (ptr, len)
372 }
373
374 /// Is this chunk the canonical empty chunk?
375 fn is_empty(&self) -> bool {
376 ptr::eq(self, EMPTY_CHUNK.get().as_ptr())
377 }
378}
379
380impl Default for Bump {
381 fn default() -> Bump {
382 Bump::new()
383 }
384}
385
386impl Drop for Bump {
387 fn drop(&mut self) {
388 unsafe {
389 dealloc_chunk_list(self.current_chunk_footer.get());
390 }
391 }
392}
393
394#[inline]
395unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) {
396 while !footer.as_ref().is_empty() {
397 let f = footer;
398 footer = f.as_ref().prev.get();
399 dealloc(f.as_ref().data.as_ptr(), f.as_ref().layout);
400 }
401}
402
403// `Bump`s are safe to send between threads because nothing aliases its owned
404// chunks until you start allocating from it. But by the time you allocate from
405// it, the returned references to allocations borrow the `Bump` and therefore
406// prevent sending the `Bump` across threads until the borrows end.
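//
// For example (a sketch; spawning a thread requires `std`):
//
//     let bump = bumpalo::Bump::new();
//     std::thread::spawn(move || {
//         let x = bump.alloc("hello");
//         assert_eq!(*x, "hello");
//     })
//     .join()
//     .unwrap();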
407unsafe impl Send for Bump {}
408
409#[inline]
410fn is_pointer_aligned_to<T>(pointer: *mut T, align: usize) -> bool {
411 debug_assert!(align.is_power_of_two());
412
413 let pointer = pointer as usize;
414 let pointer_aligned = round_down_to(pointer, align);
415 pointer == pointer_aligned
416}
417
418#[inline]
419pub(crate) fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
420 debug_assert!(divisor > 0);
421 debug_assert!(divisor.is_power_of_two());
422 Some(n.checked_add(divisor - 1)? & !(divisor - 1))
423}
424
425#[inline]
426pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize {
427 debug_assert!(divisor > 0);
428 debug_assert!(divisor.is_power_of_two());
429 n & !(divisor - 1)
430}
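
// Worked example (for illustration): with a power-of-two divisor of 8,
// `round_up_to(13, 8)` yields `Some(16)` because `(13 + 7) & !7 == 16`, while
// `round_down_to(13, 8)` yields `8` because `13 & !7 == 8`.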
431
432/// Same as `round_down_to` but preserves pointer provenance.
433#[inline]
434pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 {
435 debug_assert!(divisor > 0);
436 debug_assert!(divisor.is_power_of_two());
437 ptr.wrapping_sub(ptr as usize & (divisor - 1))
438}
439
440// After this point, we try to hit page boundaries instead of powers of 2
441const PAGE_STRATEGY_CUTOFF: usize = 0x1000;
442
443// We only support alignments of up to 16 bytes for iter_allocated_chunks.
444const SUPPORTED_ITER_ALIGNMENT: usize = 16;
445const CHUNK_ALIGN: usize = SUPPORTED_ITER_ALIGNMENT;
446const FOOTER_SIZE: usize = mem::size_of::<ChunkFooter>();
447
448// Assert that the alignment of `ChunkFooter` is at most the supported chunk alignment. This will give a compile-time error if it is not the case.
449const _FOOTER_ALIGN_ASSERTION: bool = mem::align_of::<ChunkFooter>() <= CHUNK_ALIGN;
450const _: [(); _FOOTER_ALIGN_ASSERTION as usize] = [()];
451
452// Maximum typical overhead per allocation imposed by allocators.
453const MALLOC_OVERHEAD: usize = 16;
454
455// This is the overhead from malloc, footer and alignment. For instance, if
456// we want to request a chunk of memory that has at least X bytes usable for
457// allocations (where X is aligned to CHUNK_ALIGN), then we expect that,
458// after adding a footer, malloc overhead and alignment, the chunk of memory
459// the allocator actually sets aside for us is X+OVERHEAD rounded up to the
460// nearest suitable size boundary.
461const OVERHEAD: usize = (MALLOC_OVERHEAD + FOOTER_SIZE + (CHUNK_ALIGN - 1)) & !(CHUNK_ALIGN - 1);
462
463// Choose a relatively small default initial chunk size, since we double chunk
464// sizes as we grow bump arenas to amortize costs of hitting the global
465// allocator.
466const FIRST_ALLOCATION_GOAL: usize = 1 << 9;
467
468// The actual size of the first allocation is going to be a bit smaller
469// than the goal. We need to make room for the footer, and we also need to
470// take the alignment into account.
471const DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER: usize = FIRST_ALLOCATION_GOAL - OVERHEAD;
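
// Worked example (illustrative; the exact numbers are target-dependent): on a
// typical 64-bit platform where `ChunkFooter` is 48 bytes, OVERHEAD is
// (16 + 48 + 15) & !15 = 64, so DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER is
// 512 - 64 = 448 usable bytes. Adding the 48-byte footer back gives a 496-byte
// request, which together with the assumed 16 bytes of malloc overhead lands
// on the 512-byte goal.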
472
473/// The memory size and alignment details for a potential new chunk
474/// allocation.
475#[derive(Debug, Clone, Copy)]
476struct NewChunkMemoryDetails {
477 new_size_without_footer: usize,
478 align: usize,
479 size: usize,
480}
481
482/// Wrapper around `Layout::from_size_align` that maps layout errors to `AllocErr`.
483#[inline]
484fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> {
485 Layout::from_size_align(size, align).map_err(|_| AllocErr)
486}
487
488#[inline(never)]
489fn allocation_size_overflow<T>() -> T {
490 panic!("requested allocation size overflowed")
491}
492
493impl Bump {
494 /// Construct a new arena to bump allocate into.
495 ///
496 /// ## Example
497 ///
498 /// ```
499 /// let bump = bumpalo::Bump::new();
500 /// # let _ = bump;
501 /// ```
502 pub fn new() -> Bump {
503 Self::with_capacity(0)
504 }
505
506 /// Attempt to construct a new arena to bump allocate into.
507 ///
508 /// ## Example
509 ///
510 /// ```
511 /// let bump = bumpalo::Bump::try_new();
512 /// # let _ = bump.unwrap();
513 /// ```
514 pub fn try_new() -> Result<Bump, AllocErr> {
515 Bump::try_with_capacity(0)
516 }
517
518 /// Construct a new arena with the specified byte capacity to bump allocate into.
519 ///
520 /// ## Example
521 ///
522 /// ```
523 /// let bump = bumpalo::Bump::with_capacity(100);
524 /// # let _ = bump;
525 /// ```
526 pub fn with_capacity(capacity: usize) -> Bump {
527 Bump::try_with_capacity(capacity).unwrap_or_else(|_| oom())
528 }
529
530 /// Attempt to construct a new arena with the specified byte capacity to bump allocate into.
531 ///
532 /// ## Example
533 ///
534 /// ```
535 /// let bump = bumpalo::Bump::try_with_capacity(100);
536 /// # let _ = bump.unwrap();
537 /// ```
538 pub fn try_with_capacity(capacity: usize) -> Result<Self, AllocErr> {
539 if capacity == 0 {
540 return Ok(Bump {
541 current_chunk_footer: Cell::new(EMPTY_CHUNK.get()),
542 allocation_limit: Cell::new(None),
543 });
544 }
545
546 let layout = layout_from_size_align(capacity, 1)?;
547
548 let chunk_footer = unsafe {
549 Self::new_chunk(
550 Bump::new_chunk_memory_details(None, layout).ok_or(AllocErr)?,
551 layout,
552 EMPTY_CHUNK.get(),
553 )
554 .ok_or(AllocErr)?
555 };
556
557 Ok(Bump {
558 current_chunk_footer: Cell::new(chunk_footer),
559 allocation_limit: Cell::new(None),
560 })
561 }
562
563 /// The allocation limit for this arena in bytes.
564 ///
565 /// ## Example
566 ///
567 /// ```
568 /// let bump = bumpalo::Bump::with_capacity(0);
569 ///
570 /// assert_eq!(bump.allocation_limit(), None);
571 ///
572 /// bump.set_allocation_limit(Some(6));
573 ///
574 /// assert_eq!(bump.allocation_limit(), Some(6));
575 ///
576 /// bump.set_allocation_limit(None);
577 ///
578 /// assert_eq!(bump.allocation_limit(), None);
579 /// ```
580 pub fn allocation_limit(&self) -> Option<usize> {
581 self.allocation_limit.get()
582 }
583
584 /// Set the allocation limit in bytes for this arena.
585 ///
586 /// The allocation limit is only enforced when allocating new backing chunks for
587 /// a `Bump`. Updating the allocation limit will not affect existing allocations
588 /// or any future allocations within the `Bump`'s current chunk.
589 ///
590 /// ## Example
591 ///
592 /// ```
593 /// let bump = bumpalo::Bump::with_capacity(0);
594 ///
595 /// bump.set_allocation_limit(Some(0));
596 ///
597 /// assert!(bump.try_alloc(5).is_err());
598 /// ```
599 pub fn set_allocation_limit(&self, limit: Option<usize>) {
600 self.allocation_limit.set(limit);
601 }
602
603 /// How much headroom an arena has before it hits its allocation
604 /// limit.
605 fn allocation_limit_remaining(&self) -> Option<usize> {
606 self.allocation_limit.get().and_then(|allocation_limit| {
607 let allocated_bytes = self.allocated_bytes();
608 if allocated_bytes > allocation_limit {
609 None
610 } else {
611 Some(usize::abs_diff(allocation_limit, allocated_bytes))
612 }
613 })
614 }
615
616 /// Whether a request to allocate a new chunk with a given size for a given
617 /// requested layout will fit under the allocation limit set on a `Bump`.
618 fn chunk_fits_under_limit(
619 allocation_limit_remaining: Option<usize>,
620 new_chunk_memory_details: NewChunkMemoryDetails,
621 ) -> bool {
622 allocation_limit_remaining
623 .map(|allocation_limit_left| {
624 allocation_limit_left >= new_chunk_memory_details.new_size_without_footer
625 })
626 .unwrap_or(true)
627 }
628
629 /// Determine the memory details, including the final size, alignment, and
630 /// size without the footer, for a new chunk that would be allocated
631 /// to fulfill an allocation request.
632 fn new_chunk_memory_details(
633 new_size_without_footer: Option<usize>,
634 requested_layout: Layout,
635 ) -> Option<NewChunkMemoryDetails> {
636 let mut new_size_without_footer =
637 new_size_without_footer.unwrap_or(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
638
639 // We want to have CHUNK_ALIGN or better alignment
640 let mut align = CHUNK_ALIGN;
641
642 // If we already know we need to fulfill some request,
643 // make sure we allocate at least enough to satisfy it
644 align = align.max(requested_layout.align());
645 let requested_size =
646 round_up_to(requested_layout.size(), align).unwrap_or_else(allocation_size_overflow);
647 new_size_without_footer = new_size_without_footer.max(requested_size);
648
649 // We want our allocations to play nice with the memory allocator,
650 // and waste as little memory as possible.
651 // For small allocations, this means that the entire allocation
652 // including the chunk footer and malloc's internal overhead is
653 // as close to a power of two as we can go without going over.
654 // For larger allocations, we only need to get close to a page
655 // boundary without going over.
656 if new_size_without_footer < PAGE_STRATEGY_CUTOFF {
657 new_size_without_footer =
658 (new_size_without_footer + OVERHEAD).next_power_of_two() - OVERHEAD;
659 } else {
660 new_size_without_footer =
661 round_up_to(new_size_without_footer + OVERHEAD, 0x1000)? - OVERHEAD;
662 }
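// Worked example (illustrative, reusing the 64-byte OVERHEAD figure from
// above): a request that needs 500 usable bytes takes the small-allocation
// branch and becomes (500 + 64).next_power_of_two() - 64 = 1024 - 64 = 960
// usable bytes, keeping the whole chunk plus malloc overhead just under a
// power of two.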
663
664 debug_assert_eq!(align % CHUNK_ALIGN, 0);
665 debug_assert_eq!(new_size_without_footer % CHUNK_ALIGN, 0);
666 let size = new_size_without_footer
667 .checked_add(FOOTER_SIZE)
668 .unwrap_or_else(allocation_size_overflow);
669
670 Some(NewChunkMemoryDetails {
671 new_size_without_footer,
672 size,
673 align,
674 })
675 }
676
677 /// Allocate a new chunk and return its initialized footer.
678 ///
679 /// `new_chunk_memory_details` describes the size and alignment of the new chunk,
680 /// `requested_layout` is the allocation request that triggered us to fall back to
681 /// allocating a new chunk of memory, and `prev` is the previous chunk to link to.
682 unsafe fn new_chunk(
683 new_chunk_memory_details: NewChunkMemoryDetails,
684 requested_layout: Layout,
685 prev: NonNull<ChunkFooter>,
686 ) -> Option<NonNull<ChunkFooter>> {
687 let NewChunkMemoryDetails {
688 new_size_without_footer,
689 align,
690 size,
691 } = new_chunk_memory_details;
692
693 let layout = layout_from_size_align(size, align).ok()?;
694
695 debug_assert!(size >= requested_layout.size());
696
697 let data = alloc(layout);
698 let data = NonNull::new(data)?;
699
700 // The `ChunkFooter` is at the end of the chunk.
701 let footer_ptr = data.as_ptr().add(new_size_without_footer);
702 debug_assert_eq!((data.as_ptr() as usize) % align, 0);
703 debug_assert_eq!(footer_ptr as usize % CHUNK_ALIGN, 0);
704 let footer_ptr = footer_ptr as *mut ChunkFooter;
705
706 // The bump pointer is initialized to the end of the range we will
707 // bump out of.
708 let ptr = Cell::new(NonNull::new_unchecked(footer_ptr as *mut u8));
709
710 // The `allocated_bytes` of a new chunk counts the total size
711 // of the chunks, not how much of the chunks are used.
712 let allocated_bytes = prev.as_ref().allocated_bytes + new_size_without_footer;
713
714 ptr::write(
715 footer_ptr,
716 ChunkFooter {
717 data,
718 layout,
719 prev: Cell::new(prev),
720 ptr,
721 allocated_bytes,
722 },
723 );
724
725 Some(NonNull::new_unchecked(footer_ptr))
726 }
727
728 /// Reset this bump allocator.
729 ///
730 /// Performs mass deallocation on everything allocated in this arena by
731 /// resetting the pointer into the underlying chunk of memory to the start
732 /// of the chunk. Does not run any `Drop` implementations on deallocated
733 /// objects; see [the top-level documentation](struct.Bump.html) for details.
734 ///
735 /// If this arena has allocated multiple chunks to bump allocate into, then
736 /// the excess chunks are returned to the global allocator.
737 ///
738 /// ## Example
739 ///
740 /// ```
741 /// let mut bump = bumpalo::Bump::new();
742 ///
743 /// // Allocate a bunch of things.
744 /// {
745 /// for i in 0..100 {
746 /// bump.alloc(i);
747 /// }
748 /// }
749 ///
750 /// // Reset the arena.
751 /// bump.reset();
752 ///
753 /// // Allocate some new things in the space previously occupied by the
754 /// // original things.
755 /// for j in 200..400 {
756 /// bump.alloc(j);
757 /// }
758 ///```
759 pub fn reset(&mut self) {
760 // Takes `&mut self` so `self` must be unique and there can't be any
761 // borrows active that would get invalidated by resetting.
762 unsafe {
763 if self.current_chunk_footer.get().as_ref().is_empty() {
764 return;
765 }
766
767 let mut cur_chunk = self.current_chunk_footer.get();
768
769 // Deallocate all chunks except the current one
770 let prev_chunk = cur_chunk.as_ref().prev.replace(EMPTY_CHUNK.get());
771 dealloc_chunk_list(prev_chunk);
772
773 // Reset the bump finger to the end of the chunk.
774 cur_chunk.as_ref().ptr.set(cur_chunk.cast());
775
776 // Reset the allocated size of the chunk.
777 cur_chunk.as_mut().allocated_bytes = cur_chunk.as_ref().layout.size();
778
779 debug_assert!(
780 self.current_chunk_footer
781 .get()
782 .as_ref()
783 .prev
784 .get()
785 .as_ref()
786 .is_empty(),
787 "We should only have a single chunk"
788 );
789 debug_assert_eq!(
790 self.current_chunk_footer.get().as_ref().ptr.get(),
791 self.current_chunk_footer.get().cast(),
792 "Our chunk's bump finger should be reset to the start of its allocation"
793 );
794 }
795 }
796
797 /// Allocate an object in this `Bump` and return an exclusive reference to
798 /// it.
799 ///
800 /// ## Panics
801 ///
802 /// Panics if reserving space for `T` fails.
803 ///
804 /// ## Example
805 ///
806 /// ```
807 /// let bump = bumpalo::Bump::new();
808 /// let x = bump.alloc("hello");
809 /// assert_eq!(*x, "hello");
810 /// ```
811 #[inline(always)]
812 pub fn alloc<T>(&self, val: T) -> &mut T {
813 self.alloc_with(|| val)
814 }
815
816 /// Try to allocate an object in this `Bump` and return an exclusive
817 /// reference to it.
818 ///
819 /// ## Errors
820 ///
821 /// Errors if reserving space for `T` fails.
822 ///
823 /// ## Example
824 ///
825 /// ```
826 /// let bump = bumpalo::Bump::new();
827 /// let x = bump.try_alloc("hello");
828 /// assert_eq!(x, Ok(&mut "hello"));
829 /// ```
830 #[inline(always)]
831 pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> {
832 self.try_alloc_with(|| val)
833 }
834
835 /// Pre-allocates space for an object in this `Bump`, initializes it using
836 /// the closure, then returns an exclusive reference to it.
837 ///
838 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
839 /// discussion on the differences between the `_with` suffixed methods and
840 /// those methods without it, their performance characteristics, and when
841 /// you might or might not choose a `_with` suffixed method.
842 ///
843 /// ## Panics
844 ///
845 /// Panics if reserving space for `T` fails.
846 ///
847 /// ## Example
848 ///
849 /// ```
850 /// let bump = bumpalo::Bump::new();
851 /// let x = bump.alloc_with(|| "hello");
852 /// assert_eq!(*x, "hello");
853 /// ```
854 #[inline(always)]
855 pub fn alloc_with<F, T>(&self, f: F) -> &mut T
856 where
857 F: FnOnce() -> T,
858 {
859 #[inline(always)]
860 unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
861 where
862 F: FnOnce() -> T,
863 {
864 // This function is translated as:
865 // - allocate space for a T on the stack
866 // - call f() with the return value being put onto this stack space
867 // - memcpy from the stack to the heap
868 //
869 // Ideally we want LLVM to always realize that doing a stack
870 // allocation is unnecessary and optimize the code so it writes
871 // directly into the heap instead. It seems we get it to realize
872 // this most consistently if we put this critical line into its
873 // own function instead of inlining it into the surrounding code.
874 ptr::write(ptr, f());
875 }
876
877 let layout = Layout::new::<T>();
878
879 unsafe {
880 let p = self.alloc_layout(layout);
881 let p = p.as_ptr() as *mut T;
882 inner_writer(p, f);
883 &mut *p
884 }
885 }
886
887 /// Tries to pre-allocate space for an object in this `Bump`, initializes
888 /// it using the closure, then returns an exclusive reference to it.
889 ///
890 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
891 /// discussion on the differences between the `_with` suffixed methods and
892 /// those methods without it, their performance characteristics, and when
893 /// you might or might not choose a `_with` suffixed method.
894 ///
895 /// ## Errors
896 ///
897 /// Errors if reserving space for `T` fails.
898 ///
899 /// ## Example
900 ///
901 /// ```
902 /// let bump = bumpalo::Bump::new();
903 /// let x = bump.try_alloc_with(|| "hello");
904 /// assert_eq!(x, Ok(&mut "hello"));
905 /// ```
906 #[inline(always)]
907 pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr>
908 where
909 F: FnOnce() -> T,
910 {
911 #[inline(always)]
912 unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
913 where
914 F: FnOnce() -> T,
915 {
916 // This function is translated as:
917 // - allocate space for a T on the stack
918 // - call f() with the return value being put onto this stack space
919 // - memcpy from the stack to the heap
920 //
921 // Ideally we want LLVM to always realize that doing a stack
922 // allocation is unnecessary and optimize the code so it writes
923 // directly into the heap instead. It seems we get it to realize
924 // this most consistently if we put this critical line into its
925 // own function instead of inlining it into the surrounding code.
926 ptr::write(ptr, f());
927 }
928
929 //SAFETY: Self-contained:
930 // `p` is allocated for `T` and then a `T` is written.
931 let layout = Layout::new::<T>();
932 let p = self.try_alloc_layout(layout)?;
933 let p = p.as_ptr() as *mut T;
934
935 unsafe {
936 inner_writer(p, f);
937 Ok(&mut *p)
938 }
939 }
940
941 /// Pre-allocates space for a [`Result`] in this `Bump`, initializes it using
942 /// the closure, then returns an exclusive reference to its `T` if [`Ok`].
943 ///
944 /// Iff the allocation fails, the closure is not run.
945 ///
946 /// Iff [`Err`], an allocator rewind is *attempted* and the `E` instance is
947 /// moved out of the allocator to be consumed or dropped as normal.
948 ///
949 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
950 /// discussion on the differences between the `_with` suffixed methods and
951 /// those methods without it, their performance characteristics, and when
952 /// you might or might not choose a `_with` suffixed method.
953 ///
954 /// For caveats specific to fallible initialization, see
955 /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
956 ///
957 /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
958 /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
959 /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
960 ///
961 /// ## Errors
962 ///
963 /// Iff the allocation succeeds but `f` fails, that error is forwarded by value.
964 ///
965 /// ## Panics
966 ///
967 /// Panics if reserving space for `Result<T, E>` fails.
968 ///
969 /// ## Example
970 ///
971 /// ```
972 /// let bump = bumpalo::Bump::new();
973 /// let x = bump.alloc_try_with(|| Ok("hello"))?;
974 /// assert_eq!(*x, "hello");
975 /// # Result::<_, ()>::Ok(())
976 /// ```
977 #[inline(always)]
978 pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E>
979 where
980 F: FnOnce() -> Result<T, E>,
981 {
982 let rewind_footer = self.current_chunk_footer.get();
983 let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
984 let mut inner_result_ptr = NonNull::from(self.alloc_with(f));
985 match unsafe { inner_result_ptr.as_mut() } {
986 Ok(t) => Ok(unsafe {
987 //SAFETY:
988 // The `&mut Result<T, E>` returned by `alloc_with` may be
989 // lifetime-limited by `E`, but the derived `&mut T` still has
990 // the same validity as in `alloc_with` since the error variant
991 // is already ruled out here.
992
993 // We could conditionally truncate the allocation here, but
994 // since it grows backwards, it seems unlikely that we'd get
995 // any more than the `Result`'s discriminant this way, if
996 // anything at all.
997 &mut *(t as *mut _)
998 }),
999 Err(e) => unsafe {
1000 // If this result was the last allocation in this arena, we can
1001 // reclaim its space. In fact, sometimes we can do even better
1002 // than simply calling `dealloc` on the result pointer: we can
1003 // reclaim any alignment padding we might have added (which
1004 // `dealloc` cannot do) if we didn't allocate a new chunk for
1005 // this result.
1006 if self.is_last_allocation(inner_result_ptr.cast()) {
1007 let current_footer_p = self.current_chunk_footer.get();
1008 let current_ptr = &current_footer_p.as_ref().ptr;
1009 if current_footer_p == rewind_footer {
1010 // It's still the same chunk, so reset the bump pointer
1011 // to its original value upon entry to this method
1012 // (reclaiming any alignment padding we may have
1013 // added).
1014 current_ptr.set(rewind_ptr);
1015 } else {
1016 // We allocated a new chunk for this result.
1017 //
1018 // We know the result is the only allocation in this
1019 // chunk: Any additional allocations since the start of
1020 // this method could only have happened when running
1021 // the initializer function, which is called *after*
1022 // reserving space for this result. Therefore, since we
1023 // already determined via the check above that this
1024 // result was the last allocation, there must not have
1025 // been any other allocations, and this result is the
1026 // only allocation in this chunk.
1027 //
1028 // Because this is the only allocation in this chunk,
1029 // we can reset the chunk's bump finger to the start of
1030 // the chunk.
1031 current_ptr.set(current_footer_p.as_ref().data);
1032 }
1033 }
1034 //SAFETY:
1035 // As we received `E` semantically by value from `f`, we can
1036 // just copy that value here as long as we avoid a double-drop
1037 // (which can't happen as any specific references to the `E`'s
1038 // data in `self` are destroyed when this function returns).
1039 //
1040 // The order between this and the deallocation doesn't matter
1041 // because `Self: !Sync`.
1042 Err(ptr::read(e as *const _))
1043 },
1044 }
1045 }
1046
1047 /// Tries to pre-allocate space for a [`Result`] in this `Bump`,
1048 /// initializes it using the closure, then returns an exclusive reference
1049 /// to its `T` if all [`Ok`].
1050 ///
1051 /// Iff the allocation fails, the closure is not run.
1052 ///
1053 /// Iff the closure returns [`Err`], an allocator rewind is *attempted* and
1054 /// the `E` instance is moved out of the allocator to be consumed or dropped
1055 /// as normal.
1056 ///
1057 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
1058 /// discussion on the differences between the `_with` suffixed methods and
1059 /// those methods without it, their performance characteristics, and when
1060 /// you might or might not choose a `_with` suffixed method.
1061 ///
1062 /// For caveats specific to fallible initialization, see
1063 /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
1064 ///
1065 /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
1066 /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
1067 /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
1068 ///
1069 /// ## Errors
1070 ///
1071 /// Errors with the [`Alloc`](`AllocOrInitError::Alloc`) variant iff
1072 /// reserving space for `Result<T, E>` fails.
1073 ///
1074 /// Iff the allocation succeeds but `f` fails, that error is forwarded by
1075 /// value inside the [`Init`](`AllocOrInitError::Init`) variant.
1076 ///
1077 /// ## Example
1078 ///
1079 /// ```
1080 /// let bump = bumpalo::Bump::new();
1081 /// let x = bump.try_alloc_try_with(|| Ok("hello"))?;
1082 /// assert_eq!(*x, "hello");
1083 /// # Result::<_, bumpalo::AllocOrInitError<()>>::Ok(())
1084 /// ```
1085 #[inline(always)]
1086 pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>>
1087 where
1088 F: FnOnce() -> Result<T, E>,
1089 {
1090 let rewind_footer = self.current_chunk_footer.get();
1091 let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
1092 let mut inner_result_ptr = NonNull::from(self.try_alloc_with(f)?);
1093 match unsafe { inner_result_ptr.as_mut() } {
1094 Ok(t) => Ok(unsafe {
1095 //SAFETY:
1096 // The `&mut Result<T, E>` returned by `alloc_with` may be
1097 // lifetime-limited by `E`, but the derived `&mut T` still has
1098 // the same validity as in `alloc_with` since the error variant
1099 // is already ruled out here.
1100
1101 // We could conditionally truncate the allocation here, but
1102 // since it grows backwards, it seems unlikely that we'd get
1103 // any more than the `Result`'s discriminant this way, if
1104 // anything at all.
1105 &mut *(t as *mut _)
1106 }),
1107 Err(e) => unsafe {
1108 // If this result was the last allocation in this arena, we can
1109 // reclaim its space. In fact, sometimes we can do even better
1110 // than simply calling `dealloc` on the result pointer: we can
1111 // reclaim any alignment padding we might have added (which
1112 // `dealloc` cannot do) if we didn't allocate a new chunk for
1113 // this result.
1114 if self.is_last_allocation(inner_result_ptr.cast()) {
1115 let current_footer_p = self.current_chunk_footer.get();
1116 let current_ptr = &current_footer_p.as_ref().ptr;
1117 if current_footer_p == rewind_footer {
1118 // It's still the same chunk, so reset the bump pointer
1119 // to its original value upon entry to this method
1120 // (reclaiming any alignment padding we may have
1121 // added).
1122 current_ptr.set(rewind_ptr);
1123 } else {
1124 // We allocated a new chunk for this result.
1125 //
1126 // We know the result is the only allocation in this
1127 // chunk: Any additional allocations since the start of
1128 // this method could only have happened when running
1129 // the initializer function, which is called *after*
1130 // reserving space for this result. Therefore, since we
1131 // already determined via the check above that this
1132 // result was the last allocation, there must not have
1133 // been any other allocations, and this result is the
1134 // only allocation in this chunk.
1135 //
1136 // Because this is the only allocation in this chunk,
1137 // we can reset the chunk's bump finger to the start of
1138 // the chunk.
1139 current_ptr.set(current_footer_p.as_ref().data);
1140 }
1141 }
1142 //SAFETY:
1143 // As we received `E` semantically by value from `f`, we can
1144 // just copy that value here as long as we avoid a double-drop
1145 // (which can't happen as any specific references to the `E`'s
1146 // data in `self` are destroyed when this function returns).
1147 //
1148 // The order between this and the deallocation doesn't matter
1149 // because `Self: !Sync`.
1150 Err(AllocOrInitError::Init(ptr::read(e as *const _)))
1151 },
1152 }
1153 }
1154
1155 /// `Copy` a slice into this `Bump` and return an exclusive reference to
1156 /// the copy.
1157 ///
1158 /// ## Panics
1159 ///
1160 /// Panics if reserving space for the slice fails.
1161 ///
1162 /// ## Example
1163 ///
1164 /// ```
1165 /// let bump = bumpalo::Bump::new();
1166 /// let x = bump.alloc_slice_copy(&[1, 2, 3]);
1167 /// assert_eq!(x, &[1, 2, 3]);
1168 /// ```
1169 #[inline(always)]
1170 pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
1171 where
1172 T: Copy,
1173 {
1174 let layout = Layout::for_value(src);
1175 let dst = self.alloc_layout(layout).cast::<T>();
1176
1177 unsafe {
1178 ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
1179 slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1180 }
1181 }
1182
1183 /// `Clone` a slice into this `Bump` and return an exclusive reference to
1184 /// the clone. Prefer [`alloc_slice_copy`](#method.alloc_slice_copy) if `T` is `Copy`.
1185 ///
1186 /// ## Panics
1187 ///
1188 /// Panics if reserving space for the slice fails.
1189 ///
1190 /// ## Example
1191 ///
1192 /// ```
1193 /// #[derive(Clone, Debug, Eq, PartialEq)]
1194 /// struct Sheep {
1195 /// name: String,
1196 /// }
1197 ///
1198 /// let originals = [
1199 /// Sheep { name: "Alice".into() },
1200 /// Sheep { name: "Bob".into() },
1201 /// Sheep { name: "Cathy".into() },
1202 /// ];
1203 ///
1204 /// let bump = bumpalo::Bump::new();
1205 /// let clones = bump.alloc_slice_clone(&originals);
1206 /// assert_eq!(originals, clones);
1207 /// ```
1208 #[inline(always)]
1209 pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
1210 where
1211 T: Clone,
1212 {
1213 let layout = Layout::for_value(src);
1214 let dst = self.alloc_layout(layout).cast::<T>();
1215
1216 unsafe {
1217 for (i, val) in src.iter().cloned().enumerate() {
1218 ptr::write(dst.as_ptr().add(i), val);
1219 }
1220
1221 slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1222 }
1223 }
1224
1225 /// `Copy` a string slice into this `Bump` and return an exclusive reference to it.
1226 ///
1227 /// ## Panics
1228 ///
1229 /// Panics if reserving space for the string fails.
1230 ///
1231 /// ## Example
1232 ///
1233 /// ```
1234 /// let bump = bumpalo::Bump::new();
1235 /// let hello = bump.alloc_str("hello world");
1236 /// assert_eq!("hello world", hello);
1237 /// ```
1238 #[inline(always)]
1239 pub fn alloc_str(&self, src: &str) -> &mut str {
1240 let buffer = self.alloc_slice_copy(src.as_bytes());
1241 unsafe {
1242 // This is OK, because it already came in as str, so it is guaranteed to be utf8
1243 str::from_utf8_unchecked_mut(buffer)
1244 }
1245 }
1246
1247 /// Allocates a new slice of size `len` into this `Bump` and returns an
1248 /// exclusive reference to it.
1249 ///
1250 /// The elements of the slice are initialized using the supplied closure.
1251 /// The closure argument is the position in the slice.
1252 ///
1253 /// ## Panics
1254 ///
1255 /// Panics if reserving space for the slice fails.
1256 ///
1257 /// ## Example
1258 ///
1259 /// ```
1260 /// let bump = bumpalo::Bump::new();
1261 /// let x = bump.alloc_slice_fill_with(5, |i| 5 * (i + 1));
1262 /// assert_eq!(x, &[5, 10, 15, 20, 25]);
1263 /// ```
1264 #[inline(always)]
1265 pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
1266 where
1267 F: FnMut(usize) -> T,
1268 {
1269 let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
1270 let dst = self.alloc_layout(layout).cast::<T>();
1271
1272 unsafe {
1273 for i in 0..len {
1274 ptr::write(dst.as_ptr().add(i), f(i));
1275 }
1276
1277 let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
1278 debug_assert_eq!(Layout::for_value(result), layout);
1279 result
1280 }
1281 }
1282
1283 /// Allocates a new slice of size `len` into this `Bump` and returns an
1284 /// exclusive reference to it.
1285 ///
1286 /// All elements of the slice are initialized to `value`.
1287 ///
1288 /// ## Panics
1289 ///
1290 /// Panics if reserving space for the slice fails.
1291 ///
1292 /// ## Example
1293 ///
1294 /// ```
1295 /// let bump = bumpalo::Bump::new();
1296 /// let x = bump.alloc_slice_fill_copy(5, 42);
1297 /// assert_eq!(x, &[42, 42, 42, 42, 42]);
1298 /// ```
1299 #[inline(always)]
1300 pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
1301 self.alloc_slice_fill_with(len, |_| value)
1302 }
1303
1304 /// Allocates a new slice of size `len` into this `Bump` and returns an
1305 /// exclusive reference to it.
1306 ///
1307 /// All elements of the slice are initialized to `value.clone()`.
1308 ///
1309 /// ## Panics
1310 ///
1311 /// Panics if reserving space for the slice fails.
1312 ///
1313 /// ## Example
1314 ///
1315 /// ```
1316 /// let bump = bumpalo::Bump::new();
1317 /// let s: String = "Hello Bump!".to_string();
1318 /// let x: &[String] = bump.alloc_slice_fill_clone(2, &s);
1319 /// assert_eq!(x.len(), 2);
1320 /// assert_eq!(&x[0], &s);
1321 /// assert_eq!(&x[1], &s);
1322 /// ```
1323 #[inline(always)]
1324 pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
1325 self.alloc_slice_fill_with(len, |_| value.clone())
1326 }
1327
1328 /// Allocates a new slice into this `Bump`, sized to the supplied iterator's
1329 /// length, and returns an exclusive reference to it.
1330 ///
1331 /// The elements are initialized using the supplied iterator.
1332 ///
1333 /// ## Panics
1334 ///
1335 /// Panics if reserving space for the slice fails, or if the supplied
1336 /// iterator returns fewer elements than it promised.
1337 ///
1338 /// ## Example
1339 ///
1340 /// ```
1341 /// let bump = bumpalo::Bump::new();
1342 /// let x: &[i32] = bump.alloc_slice_fill_iter([2, 3, 5].iter().cloned().map(|i| i * i));
1343 /// assert_eq!(x, [4, 9, 25]);
1344 /// ```
1345 #[inline(always)]
1346 pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
1347 where
1348 I: IntoIterator<Item = T>,
1349 I::IntoIter: ExactSizeIterator,
1350 {
1351 let mut iter = iter.into_iter();
1352 self.alloc_slice_fill_with(iter.len(), |_| {
1353 iter.next().expect("Iterator supplied too few elements")
1354 })
1355 }
1356
1357 /// Allocates a new slice of size `len` into this `Bump` and returns an
1358 /// exclusive reference to it.
1359 ///
1360 /// All elements of the slice are initialized to [`T::default()`].
1361 ///
1362 /// [`T::default()`]: https://doc.rust-lang.org/std/default/trait.Default.html#tymethod.default
1363 ///
1364 /// ## Panics
1365 ///
1366 /// Panics if reserving space for the slice fails.
1367 ///
1368 /// ## Example
1369 ///
1370 /// ```
1371 /// let bump = bumpalo::Bump::new();
1372 /// let x = bump.alloc_slice_fill_default::<u32>(5);
1373 /// assert_eq!(x, &[0, 0, 0, 0, 0]);
1374 /// ```
1375 #[inline(always)]
1376 pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
1377 self.alloc_slice_fill_with(len, |_| T::default())
1378 }
1379
1380 /// Allocate space for an object with the given `Layout`.
1381 ///
1382 /// The returned pointer points at uninitialized memory, and should be
1383 /// initialized with
1384 /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1385 ///
1386 /// # Panics
1387 ///
1388 /// Panics if reserving space matching `layout` fails.
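///
/// # Example
///
/// A minimal sketch of manual initialization (the returned memory is
/// uninitialized, so it is written before being read):
///
/// ```
/// use core::alloc::Layout;
///
/// let bump = bumpalo::Bump::new();
/// let p = bump.alloc_layout(Layout::new::<u64>()).cast::<u64>();
/// unsafe {
///     p.as_ptr().write(42);
///     assert_eq!(*p.as_ptr(), 42);
/// }
/// ```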
1389 #[inline(always)]
1390 pub fn alloc_layout(&self, layout: Layout) -> NonNull<u8> {
1391 self.try_alloc_layout(layout).unwrap_or_else(|_| oom())
1392 }
1393
1394 /// Attempts to allocate space for an object with the given `Layout` or else returns
1395 /// an `Err`.
1396 ///
1397 /// The returned pointer points at uninitialized memory, and should be
1398 /// initialized with
1399 /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1400 ///
1401 /// # Errors
1402 ///
1403 /// Errors if reserving space matching `layout` fails.
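///
/// # Example
///
/// A sketch of recovering from a failed layout allocation:
///
/// ```
/// use core::alloc::Layout;
///
/// let bump = bumpalo::Bump::new();
/// match bump.try_alloc_layout(Layout::new::<[u64; 4]>()) {
///     Ok(p) => unsafe {
///         // The memory is uninitialized, so write before reading.
///         p.cast::<[u64; 4]>().as_ptr().write([1, 2, 3, 4]);
///     },
///     Err(_) => {
///         // Out of memory (or over this bump's allocation limit).
///     }
/// }
/// ```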
1404 #[inline(always)]
1405 pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
1406 if let Some(p) = self.try_alloc_layout_fast(layout) {
1407 Ok(p)
1408 } else {
1409 self.alloc_layout_slow(layout).ok_or(AllocErr)
1410 }
1411 }
1412
1413 #[inline(always)]
1414 fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> {
1415 // We don't need to check for ZSTs here since they will automatically
1416 // be handled properly: the pointer will be bumped by zero bytes,
1417 // modulo alignment. This keeps the fast path optimized for non-ZSTs,
1418 // which are much more common.
1419 unsafe {
1420 let footer = self.current_chunk_footer.get();
1421 let footer = footer.as_ref();
1422 let ptr = footer.ptr.get().as_ptr();
1423 let start = footer.data.as_ptr();
1424 debug_assert!(start <= ptr);
1425 debug_assert!(ptr as *const u8 <= footer as *const _ as *const u8);
1426
1427 if (ptr as usize) < layout.size() {
1428 return None;
1429 }
1430
1431 let ptr = ptr.wrapping_sub(layout.size());
1432 let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align());
1433
1434 if aligned_ptr >= start {
1435 let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
1436 footer.ptr.set(aligned_ptr);
1437 Some(aligned_ptr)
1438 } else {
1439 None
1440 }
1441 }
1442 }
1443
1444 /// Gets the remaining capacity in the current chunk (in bytes).
1445 ///
1446 /// ## Example
1447 ///
1448 /// ```
1449 /// use bumpalo::Bump;
1450 ///
1451 /// let bump = Bump::with_capacity(100);
1452 ///
1453 /// let capacity = bump.chunk_capacity();
1454 /// assert!(capacity >= 100);
1455 /// ```
1456 pub fn chunk_capacity(&self) -> usize {
1457 let current_footer = self.current_chunk_footer.get();
1458 let current_footer = unsafe { current_footer.as_ref() };
1459
1460 current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize
1461 }
1462
1463 /// Slow path allocation for when we need to allocate a new chunk from the
1464 /// global allocator because there isn't enough room in our current chunk.
1465 #[inline(never)]
1466 #[cold]
1467 fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
1468 unsafe {
1469 let size = layout.size();
1470 let allocation_limit_remaining = self.allocation_limit_remaining();
1471
1472 // Get a new chunk from the global allocator.
1473 let current_footer = self.current_chunk_footer.get();
1474 let current_layout = current_footer.as_ref().layout;
1475
1476 // By default, we want our new chunk to be about twice as big
1477 // as the previous chunk. If the global allocator refuses it,
1478 // we try to divide it by half until it works or the candidate
1479 // size drops below the minimum new chunk size.
1480 let min_new_chunk_size = layout.size().max(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
1481 let mut base_size = (current_layout.size() - FOOTER_SIZE)
1482 .checked_mul(2)?
1483 .max(min_new_chunk_size);
1484 let chunk_memory_details = iter::from_fn(|| {
1485 let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit
1486 && base_size >= layout.size()
1487 && limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
1488 && self.allocated_bytes() == 0);
1489
1490 if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits {
1491 let size = base_size;
1492 base_size /= 2;
1493 Bump::new_chunk_memory_details(Some(size), layout)
1494 } else {
1495 None
1496 }
1497 });
1498
1499 let new_footer = chunk_memory_details
1500 .filter_map(|chunk_memory_details| {
1501 if Bump::chunk_fits_under_limit(
1502 allocation_limit_remaining,
1503 chunk_memory_details,
1504 ) {
1505 Bump::new_chunk(chunk_memory_details, layout, current_footer)
1506 } else {
1507 None
1508 }
1509 })
1510 .next()?;
1511
1512 debug_assert_eq!(
1513 new_footer.as_ref().data.as_ptr() as usize % layout.align(),
1514 0
1515 );
1516
1517 // Set the new chunk as our new current chunk.
1518 self.current_chunk_footer.set(new_footer);
1519
1520 let new_footer = new_footer.as_ref();
1521
1522 // Move the bump ptr finger down to allocate room for the requested layout. We know
1523 // this can't overflow because we successfully allocated a chunk of
1524 // at least the requested size.
1525 let mut ptr = new_footer.ptr.get().as_ptr().sub(size);
1526 // Round the pointer down to the requested alignment.
1527 ptr = round_mut_ptr_down_to(ptr, layout.align());
1528 debug_assert!(
1529 ptr as *const _ <= new_footer,
1530 "{:p} <= {:p}",
1531 ptr,
1532 new_footer
1533 );
1534 let ptr = NonNull::new_unchecked(ptr);
1535 new_footer.ptr.set(ptr);
1536
1537 // Return a pointer to the freshly allocated region in this chunk.
1538 Some(ptr)
1539 }
1540 }
1541
1542 /// Returns an iterator over each chunk of allocated memory that
1543 /// this arena has bump allocated into.
1544 ///
1545 /// The chunks are returned ordered by allocation time, with the most
1546 /// recently allocated chunk being returned first, and the least recently
1547 /// allocated chunk being returned last.
1548 ///
1549 /// The values inside each chunk are also ordered by allocation time, with
1550 /// the most recent allocation being earlier in the slice, and the least
1551 /// recent allocation being towards the end of the slice.
1552 ///
1553 /// ## Safety
1554 ///
1555 /// Because this method takes `&mut self`, we know that the bump arena
1556 /// reference is unique and therefore there aren't any active references to
1557 /// any of the objects we've allocated in it either. This potential aliasing
1558 /// of exclusive references is one common footgun for unsafe code that we
1559 /// don't need to worry about here.
1560 ///
1561 /// However, there could be regions of uninitialized memory used as padding
1562 /// between allocations, which is why this iterator has items of type
1563 /// `[MaybeUninit<u8>]`, instead of simply `[u8]`.
1564 ///
1565 /// The only way to guarantee that there is no padding between allocations
1566 /// or within allocated objects is if all of these properties hold:
1567 ///
1568 /// 1. Every object allocated in this arena has the same alignment,
1569 /// and that alignment is at most 16.
1570 /// 2. Every object's size is a multiple of its alignment.
1571 /// 3. None of the objects allocated in this arena contain any internal
1572 /// padding.
1573 ///
1574 /// If you want to use this `iter_allocated_chunks` method, it is *your*
1575 /// responsibility to ensure that these properties hold before calling
1576 /// `MaybeUninit::assume_init` or otherwise reading the returned values.
1577 ///
1578 /// Finally, you must also ensure that any values allocated into the bump
1579 /// arena have not had their `Drop` implementations called on them,
1580 /// e.g. after dropping a [`bumpalo::boxed::Box<T>`][crate::boxed::Box].
1581 ///
1582 /// ## Example
1583 ///
1584 /// ```
1585 /// let mut bump = bumpalo::Bump::new();
1586 ///
1587 /// // Allocate a bunch of `i32`s in this bump arena, potentially causing
1588 /// // additional memory chunks to be reserved.
1589 /// for i in 0..10000 {
1590 /// bump.alloc(i);
1591 /// }
1592 ///
1593 /// // Iterate over each chunk we've bump allocated into. This is safe
1594 /// // because we have only allocated `i32`s in this arena, which fulfills
1595 /// // the above requirements.
1596 /// for ch in bump.iter_allocated_chunks() {
1597 /// println!("Used a chunk that is {} bytes long", ch.len());
1598 /// println!("The first byte is {:?}", unsafe {
1599 /// ch[0].assume_init()
1600 /// });
1601 /// }
1602 ///
1603 /// // Within a chunk, allocations are ordered from most recent to least
1604 /// // recent. If we allocated 'a', then 'b', then 'c', when we iterate
1605 /// // through the chunk's data, we get them in the order 'c', then 'b',
1606 /// // then 'a'.
1607 ///
1608 /// bump.reset();
1609 /// bump.alloc(b'a');
1610 /// bump.alloc(b'b');
1611 /// bump.alloc(b'c');
1612 ///
1613 /// assert_eq!(bump.iter_allocated_chunks().count(), 1);
1614    /// let chunk = bump.iter_allocated_chunks().next().unwrap();
1615 /// assert_eq!(chunk.len(), 3);
1616 ///
1617 /// // Safe because we've only allocated `u8`s in this arena, which
1618 /// // fulfills the above requirements.
1619 /// unsafe {
1620 /// assert_eq!(chunk[0].assume_init(), b'c');
1621 /// assert_eq!(chunk[1].assume_init(), b'b');
1622 /// assert_eq!(chunk[2].assume_init(), b'a');
1623 /// }
1624 /// ```
1625 pub fn iter_allocated_chunks(&mut self) -> ChunkIter<'_> {
1626 // SAFE: Ensured by mutable borrow of `self`.
1627 let raw = unsafe { self.iter_allocated_chunks_raw() };
1628 ChunkIter {
1629 raw,
1630 bump: PhantomData,
1631 }
1632 }
1633
1634 /// Returns an iterator over raw pointers to chunks of allocated memory that
1635 /// this arena has bump allocated into.
1636 ///
1637 /// This is an unsafe version of [`iter_allocated_chunks()`](Bump::iter_allocated_chunks),
1638 /// with the caller responsible for safe usage of the returned pointers as
1639 /// well as ensuring that the iterator is not invalidated by new
1640 /// allocations.
1641 ///
1642 /// ## Safety
1643 ///
1644 /// Allocations from this arena must not be performed while the returned
1645    /// iterator is alive. When reading the chunk data (or casting it to a
1646    /// reference), the caller must ensure that there are no outstanding
1647    /// mutable references to previously allocated data.
1648 ///
1649 /// In addition, all of the caveats when reading the chunk data from
1650 /// [`iter_allocated_chunks()`](Bump::iter_allocated_chunks) still apply.
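    ///
    /// ## Example
    ///
    /// A minimal sketch of summing the sizes of all allocated chunks through a
    /// shared borrow:
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// bump.alloc(42u64);
    ///
    /// // SAFETY: we do not allocate into `bump` while the iterator is alive,
    /// // and we only inspect chunk lengths, never the chunk contents.
    /// let total: usize = unsafe {
    ///     bump.iter_allocated_chunks_raw().map(|(_ptr, len)| len).sum()
    /// };
    /// assert!(total >= core::mem::size_of::<u64>());
    /// ```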
1651 pub unsafe fn iter_allocated_chunks_raw(&self) -> ChunkRawIter<'_> {
1652 ChunkRawIter {
1653 footer: self.current_chunk_footer.get(),
1654 bump: PhantomData,
1655 }
1656 }
1657
1658 /// Calculates the number of bytes currently allocated across all chunks in
1659 /// this bump arena.
1660 ///
1661 /// If you allocate types of different alignments or types with
1662 /// larger-than-typical alignment in the same arena, some padding
1663    /// bytes might get allocated in the bump arena. Those padding bytes are
1664    /// included in this method's result, so you cannot rely on the result
1665    /// being exactly the sum of the sizes of the values you have allocated
1666    /// in the arena.
1667 ///
1668 /// The allocated bytes do not include the size of bumpalo's metadata,
1669 /// so the amount of memory requested from the Rust allocator is higher
1670 /// than the returned value.
1671 ///
1672 /// ## Example
1673 ///
1674 /// ```
1675 /// let bump = bumpalo::Bump::new();
1676 /// let _x = bump.alloc_slice_fill_default::<u32>(5);
1677 /// let bytes = bump.allocated_bytes();
1678 /// assert!(bytes >= core::mem::size_of::<u32>() * 5);
1679 /// ```
1680 pub fn allocated_bytes(&self) -> usize {
1681 let footer = self.current_chunk_footer.get();
1682
1683 unsafe { footer.as_ref().allocated_bytes }
1684 }
1685
1686 /// Calculates the number of bytes requested from the Rust allocator for this `Bump`.
1687 ///
1688    /// This number is equal to [`allocated_bytes()`](Self::allocated_bytes) plus
1689    /// the size of bumpalo's per-chunk metadata (the chunk footers).
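    ///
    /// ## Example
    ///
    /// A minimal sketch: once anything has been allocated, at least one chunk
    /// footer is counted on top of [`allocated_bytes()`](Self::allocated_bytes).
    ///
    /// ```
    /// let bump = bumpalo::Bump::new();
    /// bump.alloc([0u8; 32]);
    ///
    /// assert!(bump.allocated_bytes_including_metadata() > bump.allocated_bytes());
    /// ```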
1690 pub fn allocated_bytes_including_metadata(&self) -> usize {
1691 let metadata_size =
1692 unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() };
1693 self.allocated_bytes() + metadata_size
1694 }
1695
1696 #[inline]
1697 unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
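        // Allocation bumps downward within a chunk, so the chunk's current
        // bump pointer is exactly the start of the most recent allocation.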
1698 let footer = self.current_chunk_footer.get();
1699 let footer = footer.as_ref();
1700 footer.ptr.get() == ptr
1701 }
1702
1703 #[inline]
1704 unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
1705 // If the pointer is the last allocation we made, we can reuse the bytes,
1706 // otherwise they are simply leaked -- at least until somebody calls reset().
1707 if self.is_last_allocation(ptr) {
1708 let ptr = NonNull::new_unchecked(ptr.as_ptr().add(layout.size()));
1709 self.current_chunk_footer.get().as_ref().ptr.set(ptr);
1710 }
1711 }
1712
1713 #[inline]
1714 unsafe fn shrink(
1715 &self,
1716 ptr: NonNull<u8>,
1717 old_layout: Layout,
1718 new_layout: Layout,
1719 ) -> Result<NonNull<u8>, AllocErr> {
1720 // If the new layout demands greater alignment than the old layout has,
1721 // then either
1722 //
1723 // 1. the pointer happens to satisfy the new layout's alignment, so we
1724 // got lucky and can return the pointer as-is, or
1725 //
1726 // 2. the pointer is not aligned to the new layout's demanded alignment,
1727 // and we are unlucky.
1728 //
1729 // In the case of (2), to successfully "shrink" the allocation, we would
1730 // have to allocate a whole new region for the new layout, without being
1731 // able to free the old region. That is unacceptable, so simply return
1732 // an allocation failure error instead.
1733 if old_layout.align() < new_layout.align() {
1734 if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) {
1735 return Ok(ptr);
1736 } else {
1737 return Err(AllocErr);
1738 }
1739 }
1740
1741 debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()));
1742
1743 let old_size = old_layout.size();
1744 let new_size = new_layout.size();
1745
1746 // This is how much space we would *actually* reclaim while satisfying
1747 // the requested alignment.
1748 let delta = round_down_to(old_size - new_size, new_layout.align());
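        // Illustrative example: shrinking a 16-byte, 8-aligned allocation to a
        // 3-byte, 8-aligned layout gives `delta = round_down_to(13, 8) = 8`,
        // so the moved allocation's start stays 8-aligned.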
1749
1750 if self.is_last_allocation(ptr)
1751 // Only reclaim the excess space (which requires a copy) if it
1752 // is worth it: we are actually going to recover "enough" space
1753 // and we can do a non-overlapping copy.
1754 //
1755 // We do `(old_size + 1) / 2` so division rounds up rather than
1756 // down. Consider when:
1757 //
1758 // old_size = 5
1759 // new_size = 3
1760 //
1761 // If we do not take care to round up, this will result in:
1762 //
1763 // delta = 2
1764 // (old_size / 2) = (5 / 2) = 2
1765 //
1766        //             And then the check will succeed even though we have
1767 // overlapping ranges:
1768 //
1769 // |--------old-allocation-------|
1770 // |------from-------|
1771 // |-------to--------|
1772 // +-----+-----+-----+-----+-----+
1773 // | a | b | c | . | . |
1774 // +-----+-----+-----+-----+-----+
1775 //
1776 // But we MUST NOT have overlapping ranges because we use
1777 // `copy_nonoverlapping` below! Therefore, we round the division
1778 // up to avoid this issue.
1779 && delta >= (old_size + 1) / 2
1780 {
1781 let footer = self.current_chunk_footer.get();
1782 let footer = footer.as_ref();
1783
1784 // NB: new_ptr is aligned, because ptr *has to* be aligned, and we
1785 // made sure delta is aligned.
1786 let new_ptr = NonNull::new_unchecked(footer.ptr.get().as_ptr().add(delta));
1787 footer.ptr.set(new_ptr);
1788
1789 // NB: we know it is non-overlapping because of the size check
1790 // in the `if` condition.
1791 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
1792
1793 return Ok(new_ptr);
1794 }
1795
1796 // If this wasn't the last allocation, or shrinking wasn't worth it,
1797 // simply return the old pointer as-is.
1798 Ok(ptr)
1799 }
1800
1801 #[inline]
1802 unsafe fn grow(
1803 &self,
1804 ptr: NonNull<u8>,
1805 old_layout: Layout,
1806 new_layout: Layout,
1807 ) -> Result<NonNull<u8>, AllocErr> {
1808 let old_size = old_layout.size();
1809 let new_size = new_layout.size();
1810 let align_is_compatible = old_layout.align() >= new_layout.align();
1811
1812 if align_is_compatible && self.is_last_allocation(ptr) {
1813 // Try to allocate the delta size within this same block so we can
1814 // reuse the currently allocated space.
1815 let delta = new_size - old_size;
1816 if let Some(p) =
1817 self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?)
1818 {
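                // The delta region was bumped out directly below the old
                // allocation, so the source and destination ranges may overlap;
                // `ptr::copy` has `memmove` semantics and handles that, unlike
                // `copy_nonoverlapping`.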
1819 ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
1820 return Ok(p);
1821 }
1822 }
1823
1824 // Fallback: do a fresh allocation and copy the existing data into it.
1825 let new_ptr = self.try_alloc_layout(new_layout)?;
1826 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
1827 Ok(new_ptr)
1828 }
1829}
1830
1831/// An iterator over each chunk of allocated memory that
1832/// an arena has bump allocated into.
1833///
1834/// The chunks are returned ordered by allocation time, with the most recently
1835/// allocated chunk being returned first.
1836///
1837/// The values inside each chunk are also ordered by allocation time, with the most
1838/// recent allocation being earlier in the slice.
1839///
1840/// This struct is created by the [`iter_allocated_chunks`] method on
1841/// [`Bump`]. See that function for a safety description regarding reading from the returned items.
1842///
1843/// [`Bump`]: struct.Bump.html
1844/// [`iter_allocated_chunks`]: struct.Bump.html#method.iter_allocated_chunks
1845#[derive(Debug)]
1846pub struct ChunkIter<'a> {
1847 raw: ChunkRawIter<'a>,
1848 bump: PhantomData<&'a mut Bump>,
1849}
1850
1851impl<'a> Iterator for ChunkIter<'a> {
1852 type Item = &'a [mem::MaybeUninit<u8>];
1853 fn next(&mut self) -> Option<&'a [mem::MaybeUninit<u8>]> {
1854 unsafe {
1855 let (ptr, len) = self.raw.next()?;
1856 let slice = slice::from_raw_parts(ptr as *const mem::MaybeUninit<u8>, len);
1857 Some(slice)
1858 }
1859 }
1860}
1861
1862impl<'a> iter::FusedIterator for ChunkIter<'a> {}
1863
1864/// An iterator over raw pointers to chunks of allocated memory that this
1865/// arena has bump allocated into.
1866///
1867/// See [`ChunkIter`] for details regarding the returned chunks.
1868///
1869/// This struct is created by the [`iter_allocated_chunks_raw`] method on
1870/// [`Bump`]. See that function for a safety description regarding reading from
1871/// the returned items.
1872///
1873/// [`Bump`]: struct.Bump.html
1874/// [`iter_allocated_chunks_raw`]: struct.Bump.html#method.iter_allocated_chunks_raw
1875#[derive(Debug)]
1876pub struct ChunkRawIter<'a> {
1877 footer: NonNull<ChunkFooter>,
1878 bump: PhantomData<&'a Bump>,
1879}
1880
1881impl Iterator for ChunkRawIter<'_> {
1882 type Item = (*mut u8, usize);
1883 fn next(&mut self) -> Option<(*mut u8, usize)> {
1884 unsafe {
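            // Walk the footer list from the most recently allocated chunk
            // towards the oldest; the shared sentinel (empty) footer marks the
            // end of the list.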
1885 let foot = self.footer.as_ref();
1886 if foot.is_empty() {
1887 return None;
1888 }
1889 let (ptr, len) = foot.as_raw_parts();
1890 self.footer = foot.prev.get();
1891 Some((ptr as *mut u8, len))
1892 }
1893 }
1894}
1895
1896impl iter::FusedIterator for ChunkRawIter<'_> {}
1897
1898#[inline(never)]
1899#[cold]
1900fn oom() -> ! {
1901 panic!("out of memory")
1902}
1903
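// This private `Alloc` implementation is the hook that `bumpalo::collections`
// uses internally to allocate, grow, and shrink the backing storage of its
// containers.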
1904unsafe impl<'a> alloc::Alloc for &'a Bump {
1905 #[inline(always)]
1906 unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
1907 self.try_alloc_layout(layout)
1908 }
1909
1910 #[inline]
1911 unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
1912 Bump::dealloc(self, ptr, layout);
1913 }
1914
1915 #[inline]
1916 unsafe fn realloc(
1917 &mut self,
1918 ptr: NonNull<u8>,
1919 layout: Layout,
1920 new_size: usize,
1921 ) -> Result<NonNull<u8>, AllocErr> {
1922 let old_size = layout.size();
1923
1924        if old_size == 0 {
1925            return self.try_alloc_layout(layout_from_size_align(new_size, layout.align())?);
1926        }
1927
1928 let new_layout = layout_from_size_align(new_size, layout.align())?;
1929 if new_size <= old_size {
1930 self.shrink(ptr, layout, new_layout)
1931 } else {
1932 self.grow(ptr, layout, new_layout)
1933 }
1934 }
1935}
1936
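// When the nightly `allocator_api` feature (or the `allocator-api2` feature)
// is enabled, `&Bump` implements `Allocator` and can back standard containers.
// A rough sketch, assuming a nightly toolchain with the `allocator_api` cargo
// feature enabled:
//
//     #![feature(allocator_api)]
//
//     let bump = bumpalo::Bump::new();
//     let mut v = std::vec::Vec::new_in(&bump);
//     v.push(1u32);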
1937#[cfg(any(feature = "allocator_api", feature = "allocator-api2"))]
1938unsafe impl<'a> Allocator for &'a Bump {
1939 #[inline]
1940 fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
1941 self.try_alloc_layout(layout)
1942 .map(|p| unsafe {
1943 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size()))
1944 })
1945 .map_err(|_| AllocError)
1946 }
1947
1948 #[inline]
1949 unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
1950 Bump::dealloc(self, ptr, layout)
1951 }
1952
1953 #[inline]
1954 unsafe fn shrink(
1955 &self,
1956 ptr: NonNull<u8>,
1957 old_layout: Layout,
1958 new_layout: Layout,
1959 ) -> Result<NonNull<[u8]>, AllocError> {
1960 Bump::shrink(self, ptr, old_layout, new_layout)
1961 .map(|p| unsafe {
1962 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
1963 })
1964 .map_err(|_| AllocError)
1965 }
1966
1967 #[inline]
1968 unsafe fn grow(
1969 &self,
1970 ptr: NonNull<u8>,
1971 old_layout: Layout,
1972 new_layout: Layout,
1973 ) -> Result<NonNull<[u8]>, AllocError> {
1974 Bump::grow(self, ptr, old_layout, new_layout)
1975 .map(|p| unsafe {
1976 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
1977 })
1978 .map_err(|_| AllocError)
1979 }
1980
1981 #[inline]
1982 unsafe fn grow_zeroed(
1983 &self,
1984 ptr: NonNull<u8>,
1985 old_layout: Layout,
1986 new_layout: Layout,
1987 ) -> Result<NonNull<[u8]>, AllocError> {
1988 let mut ptr = self.grow(ptr, old_layout, new_layout)?;
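        // `grow` copied the first `old_layout.size()` bytes of the old block;
        // zero only the newly added tail.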
1989 ptr.as_mut()[old_layout.size()..].fill(0);
1990 Ok(ptr)
1991 }
1992}
1993
1994// NB: Only tests which require private types, fields, or methods should be in
1995// here. Anything that can just be tested via public API surface should be in
1996// `bumpalo/tests/all/*`.
1997#[cfg(test)]
1998mod tests {
1999 use super::*;
2000
2001 // Uses private type `ChunkFooter`.
2002 #[test]
2003    fn chunk_footer_is_six_words() {
2004 assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 6);
2005 }
2006
2007 // Uses private `alloc` module.
2008 #[test]
2009 fn test_realloc() {
2010 use crate::alloc::Alloc;
2011
2012 unsafe {
2013 const CAPACITY: usize = 1024 - OVERHEAD;
2014 let mut b = Bump::with_capacity(CAPACITY);
2015
2016 // `realloc` doesn't shrink allocations that aren't "worth it".
2017 let layout = Layout::from_size_align(100, 1).unwrap();
2018 let p = b.alloc_layout(layout);
2019 let q = (&b).realloc(p, layout, 51).unwrap();
2020 assert_eq!(p, q);
2021 b.reset();
2022
2023 // `realloc` will shrink allocations that are "worth it".
2024 let layout = Layout::from_size_align(100, 1).unwrap();
2025 let p = b.alloc_layout(layout);
2026 let q = (&b).realloc(p, layout, 50).unwrap();
2027 assert!(p != q);
2028 b.reset();
2029
2030 // `realloc` will reuse the last allocation when growing.
2031 let layout = Layout::from_size_align(10, 1).unwrap();
2032 let p = b.alloc_layout(layout);
2033 let q = (&b).realloc(p, layout, 11).unwrap();
2034 assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1);
2035 b.reset();
2036
2037 // `realloc` will allocate a new chunk when growing the last
2038 // allocation, if need be.
2039 let layout = Layout::from_size_align(1, 1).unwrap();
2040 let p = b.alloc_layout(layout);
2041 let q = (&b).realloc(p, layout, CAPACITY + 1).unwrap();
2042 assert!(q.as_ptr() as usize != p.as_ptr() as usize - CAPACITY);
2043 b = Bump::with_capacity(CAPACITY);
2044
2045 // `realloc` will allocate and copy when reallocating anything that
2046 // wasn't the last allocation.
2047 let layout = Layout::from_size_align(1, 1).unwrap();
2048 let p = b.alloc_layout(layout);
2049 let _ = b.alloc_layout(layout);
2050 let q = (&b).realloc(p, layout, 2).unwrap();
2051 assert!(q.as_ptr() as usize != p.as_ptr() as usize - 1);
2052 b.reset();
2053 }
2054 }
2055
2056 // Uses our private `alloc` module.
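    //
    // Grows allocations (including one that is no longer the most recent) so
    // that copying more bytes than the old allocation actually holds would
    // show up as an invalid read under tools like Miri.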
2057 #[test]
2058 fn invalid_read() {
2059 use alloc::Alloc;
2060
2061 let mut b = &Bump::new();
2062
2063 unsafe {
2064 let l1 = Layout::from_size_align(12000, 4).unwrap();
2065 let p1 = Alloc::alloc(&mut b, l1).unwrap();
2066
2067 let l2 = Layout::from_size_align(1000, 4).unwrap();
2068 Alloc::alloc(&mut b, l2).unwrap();
2069
2070 let p1 = b.realloc(p1, l1, 24000).unwrap();
2071 let l3 = Layout::from_size_align(24000, 4).unwrap();
2072 b.realloc(p1, l3, 48000).unwrap();
2073 }
2074 }
2075}