wasmtime/runtime/vm/gc/enabled/free_list.rs

use crate::prelude::*;
use alloc::collections::BTreeMap;
use core::{alloc::Layout, num::NonZeroU32, ops::Bound};

/// A very simple first-fit free list for use by our garbage collectors.
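///
/// A minimal usage sketch (the sizes here are hypothetical; `alloc` returns
/// `Ok(None)` when no free block is large enough):
///
/// ```ignore
/// let mut list = FreeList::new(1024);
/// let layout = FreeList::layout(24);
/// if let Ok(Some(block)) = list.alloc(layout) {
///     // `block.get()` is the byte offset of the allocation within the
///     // managed region.
///     list.dealloc(block, layout);
/// }
/// ```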
pub(crate) struct FreeList {
    /// The total capacity of the contiguous range of memory we are managing.
    ///
    /// NB: we keep `self.capacity` unrounded because otherwise we would get
    /// rounding errors where we lose track of the actual capacity we have when
    /// repeatedly adding capacity `n` where `n < ALIGN`:
    ///
    /// ```ignore
    /// let mut free_list = FreeList::new(0);
    /// loop {
    ///     free_list.add_capacity(1);
    /// }
    /// ```
    ///
    /// If we eagerly rounded capacity down to our alignment on every call to
    /// `add_capacity`, the free list would always think it has zero capacity,
    /// even though it would have enough capacity for many allocations after
    /// enough iterations of the loop.
    capacity: usize,
    /// Our free blocks, as a map from index to length of the free block at that
    /// index.
    free_block_index_to_len: BTreeMap<u32, u32>,
}

/// Our minimum and maximum supported alignment. Every allocation is aligned to
/// this. Additionally, this is the minimum allocation size, and every
/// allocation is rounded up to this size.
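///
/// For example, a 1-byte allocation request still occupies a full 16-byte
/// block, and a 20-byte request is rounded up to 32 bytes.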
const ALIGN_U32: u32 = 16;
const ALIGN_USIZE: usize = ALIGN_U32 as usize;

impl FreeList {
    /// Create a new `Layout` from the given `size` with an alignment that is
    /// compatible with this free list.
    pub fn layout(size: usize) -> Layout {
        Layout::from_size_align(size, ALIGN_USIZE).unwrap()
    }

    /// Create a new `FreeList` for a contiguous region of memory of the given
    /// size.
    pub fn new(capacity: usize) -> Self {
        log::debug!("FreeList::new({capacity})");

        let mut free_list = FreeList {
            capacity,
            free_block_index_to_len: BTreeMap::new(),
        };

        let end = u32::try_from(free_list.capacity).unwrap_or_else(|_| {
            assert!(free_list.capacity > usize::try_from(u32::MAX).unwrap());
            u32::MAX
        });

        // Don't start at `0`. Reserve that for "null pointers", and that way
        // we can use `NonZeroU32` as our pointer type, giving us some more
        // bitpacking opportunities.
        let start = ALIGN_U32;

        let len = round_u32_down_to_pow2(end.saturating_sub(start), ALIGN_U32);

        let entire_range = if len >= ALIGN_U32 {
            Some((start, len))
        } else {
            None
        };

        free_list.free_block_index_to_len.extend(entire_range);

        free_list
    }

    /// Add additional capacity to this free list.
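    ///
    /// Capacity may be added in increments smaller than the alignment; it
    /// accumulates until a new aligned block can be carved out (see the
    /// `add_capacity` tests below).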
    pub fn add_capacity(&mut self, additional: usize) {
        let old_cap = self.capacity;
        self.capacity = self.capacity.saturating_add(additional);
        log::debug!(
            "FreeList::add_capacity({additional:#x}): capacity growing from {old_cap:#x} to {:#x}",
            self.capacity
        );

        // See the comment on `self.capacity` about why we need to do the
        // alignment-rounding here, rather than keeping `self.capacity` aligned
        // at rest.
        let old_cap_rounded = round_usize_down_to_pow2(old_cap, ALIGN_USIZE);

        // If we are adding capacity beyond what a `u32` can address, then we
        // can't actually use that capacity, so don't bother adding a new block
        // to the free list.
        let Ok(old_cap_rounded) = u32::try_from(old_cap_rounded) else {
            return;
        };

        // Our new block's index is the end of the old capacity.
        let index = NonZeroU32::new(old_cap_rounded).unwrap_or(
            // But additionally all indices must be non-zero, so start the new
            // block at the first aligned index if necessary.
            NonZeroU32::new(ALIGN_U32).unwrap(),
        );

        // If, after rounding everything to our alignment, we aren't actually
        // gaining any new capacity, then don't add a new block to the free
        // list.
        let new_cap = u32::try_from(self.capacity).unwrap_or(u32::MAX);
        let new_cap = round_u32_down_to_pow2(new_cap, ALIGN_U32);

        // If we haven't added enough capacity for our first allocation yet,
        // then just return and wait for more capacity.
        if index.get() > new_cap {
            return;
        }

        let size = new_cap - index.get();
        debug_assert_eq!(size % ALIGN_U32, 0);
        if size == 0 {
            return;
        }

        // If we can't represent this block in a `Layout`, then don't add it to
        // our free list either.
        let Ok(layout) = Layout::from_size_align(usize::try_from(size).unwrap(), ALIGN_USIZE)
        else {
            return;
        };

        // Okay! Add a block to our free list for the new capacity, potentially
        // merging it with existing blocks at the end of the free list.
        log::trace!(
            "FreeList::add_capacity(..): adding block {index:#x}..{:#x}",
            index.get() + size
        );
        self.dealloc(index, layout);
    }

    #[cfg(test)]
    fn max_size(&self) -> usize {
        let cap = core::cmp::min(self.capacity, usize::try_from(u32::MAX).unwrap());
        round_usize_down_to_pow2(cap.saturating_sub(ALIGN_USIZE), ALIGN_USIZE)
    }

    /// Check the given layout for compatibility with this free list and return
    /// the actual block size we will use for this layout.
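    ///
    /// For example, a request of 20 bytes is rounded up to 32 bytes, the next
    /// multiple of `ALIGN_U32`.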
    fn check_layout(&self, layout: Layout) -> Result<u32> {
        ensure!(
            layout.align() <= ALIGN_USIZE,
            "requested allocation's alignment of {} is greater than max supported \
             alignment of {ALIGN_USIZE}",
            layout.align(),
        );

        let alloc_size = u32::try_from(layout.size()).map_err(|e| {
            let trap = crate::Trap::AllocationTooLarge;
            let err = anyhow::Error::from(trap);
            err.context(e)
                .context("requested allocation's size does not fit in a u32")
        })?;
        alloc_size
            .checked_next_multiple_of(ALIGN_U32)
            .ok_or_else(|| {
                let trap = crate::Trap::AllocationTooLarge;
                let err = anyhow::Error::from(trap);
                let err = err.context(format!(
                    "failed to round allocation size of {alloc_size} up to next \
                     multiple of {ALIGN_USIZE}"
                ));
                err
            })
    }

    /// Find the first free block that can hold an allocation of the given size
    /// and remove it from the free list.
    fn first_fit(&mut self, alloc_size: u32) -> Option<(u32, u32)> {
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);

        let (&block_index, &block_len) = self
            .free_block_index_to_len
            .iter()
            .find(|(_idx, len)| **len >= alloc_size)?;

        debug_assert_eq!(block_index % ALIGN_U32, 0);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        let entry = self.free_block_index_to_len.remove(&block_index);
        debug_assert!(entry.is_some());

        Some((block_index, block_len))
    }

    /// If the given allocated block is large enough such that we can split it
    /// and still have enough space left for future allocations, then split it.
    ///
    /// Returns the new length of the allocated block.
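    ///
    /// For example, splitting a 64-byte block for a 32-byte allocation leaves
    /// a 32-byte tail that goes back into the free list, while a 32-byte block
    /// used for a 32-byte allocation is returned whole.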
    fn maybe_split(&mut self, alloc_size: u32, block_index: u32, block_len: u32) -> u32 {
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);
        debug_assert_eq!(block_index % ALIGN_U32, 0);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        if block_len - alloc_size < ALIGN_U32 {
            // The block is not large enough to split.
            return block_len;
        }

        // The block is large enough to split. Split the block at exactly the
        // requested allocation size and put the tail back in the free list.
        let new_block_len = alloc_size;
        let split_start = block_index + alloc_size;
        let split_len = block_len - alloc_size;

        debug_assert_eq!(new_block_len % ALIGN_U32, 0);
        debug_assert_eq!(split_start % ALIGN_U32, 0);
        debug_assert_eq!(split_len % ALIGN_U32, 0);

        self.free_block_index_to_len.insert(split_start, split_len);

        new_block_len
    }

    /// Allocate space for an object of the given layout.
    ///
    /// Returns:
    ///
    /// * `Ok(Some(_))`: Allocation succeeded.
    ///
    /// * `Ok(None)`: Can't currently fulfill the allocation request, but might
    ///   be able to if some stuff was reallocated.
    ///
    /// * `Err(_)`: The allocation request can never be fulfilled because its
    ///   size or alignment is incompatible with this free list.
    pub fn alloc(&mut self, layout: Layout) -> Result<Option<NonZeroU32>> {
        log::trace!("FreeList::alloc({layout:?})");
        let alloc_size = self.check_layout(layout)?;
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);

        let (block_index, block_len) = match self.first_fit(alloc_size) {
            None => return Ok(None),
            Some(tup) => tup,
        };
        debug_assert_ne!(block_index, 0);
        debug_assert_eq!(block_index % ALIGN_U32, 0);
        debug_assert!(block_len >= alloc_size);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        let block_len = self.maybe_split(alloc_size, block_index, block_len);
        debug_assert!(block_len >= alloc_size);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        // After we've mutated the free list, double check its integrity.
        #[cfg(debug_assertions)]
        self.check_integrity();

        log::trace!("FreeList::alloc({layout:?}) -> {block_index:#x}");
        Ok(Some(unsafe { NonZeroU32::new_unchecked(block_index) }))
    }

    /// Deallocate an object with the given layout.
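    ///
    /// Freed blocks are eagerly coalesced: when the previous and/or next free
    /// blocks in the list are contiguous with this one, they are merged into a
    /// single larger block.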
    pub fn dealloc(&mut self, index: NonZeroU32, layout: Layout) {
        log::trace!("FreeList::dealloc({index:#x}, {layout:?})");

        let index = index.get();
        debug_assert_eq!(index % ALIGN_U32, 0);

        let alloc_size = self.check_layout(layout).unwrap();
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);

        let prev_block = self
            .free_block_index_to_len
            .range((Bound::Unbounded, Bound::Excluded(index)))
            .next_back()
            .map(|(idx, len)| (*idx, *len));

        let next_block = self
            .free_block_index_to_len
            .range((Bound::Excluded(index), Bound::Unbounded))
            .next()
            .map(|(idx, len)| (*idx, *len));

        // Try and merge this block with its previous and next blocks in the
        // free list, if any and if they are contiguous.
        match (prev_block, next_block) {
            // The prev, this, and next blocks are all contiguous: merge this
            // and next into prev.
            (Some((prev_index, prev_len)), Some((next_index, next_len)))
                if blocks_are_contiguous(prev_index, prev_len, index)
                    && blocks_are_contiguous(index, alloc_size, next_index) =>
            {
                log::trace!(
                    "merging blocks {prev_index:#x}..{prev_len:#x}, {index:#x}..{index_end:#x}, {next_index:#x}..{next_end:#x}",
                    prev_len = prev_index + prev_len,
                    index_end = index + u32::try_from(layout.size()).unwrap(),
                    next_end = next_index + next_len,
                );
                self.free_block_index_to_len.remove(&next_index);
                let merged_block_len = next_index + next_len - prev_index;
                debug_assert_eq!(merged_block_len % ALIGN_U32, 0);
                *self.free_block_index_to_len.get_mut(&prev_index).unwrap() = merged_block_len;
            }

            // The prev and this blocks are contiguous: merge this into prev.
            (Some((prev_index, prev_len)), _)
                if blocks_are_contiguous(prev_index, prev_len, index) =>
            {
                log::trace!(
                    "merging blocks {prev_index:#x}..{prev_len:#x}, {index:#x}..{index_end:#x}",
                    prev_len = prev_index + prev_len,
                    index_end = index + u32::try_from(layout.size()).unwrap(),
                );
                let merged_block_len = index + alloc_size - prev_index;
                debug_assert_eq!(merged_block_len % ALIGN_U32, 0);
                *self.free_block_index_to_len.get_mut(&prev_index).unwrap() = merged_block_len;
            }

            // The this and next blocks are contiguous: merge next into this.
            (_, Some((next_index, next_len)))
                if blocks_are_contiguous(index, alloc_size, next_index) =>
            {
                log::trace!(
                    "merging blocks {index:#x}..{index_end:#x}, {next_index:#x}..{next_end:#x}",
                    index_end = index + u32::try_from(layout.size()).unwrap(),
                    next_end = next_index + next_len,
                );
                self.free_block_index_to_len.remove(&next_index);
                let merged_block_len = next_index + next_len - index;
                debug_assert_eq!(merged_block_len % ALIGN_U32, 0);
                self.free_block_index_to_len.insert(index, merged_block_len);
            }

            // None of the blocks are contiguous: insert this block into the
            // free list.
            (_, _) => {
                log::trace!("cannot merge blocks");
                self.free_block_index_to_len.insert(index, alloc_size);
            }
        }

        // After we've added to/mutated the free list, double check its
        // integrity.
        #[cfg(debug_assertions)]
        self.check_integrity();
    }

    /// Assert that the free list is valid:
    ///
    /// 1. All blocks are within `ALIGN..self.capacity`
    ///
    /// 2. No blocks are overlapping.
    ///
    /// 3. All blocks are aligned to `ALIGN`
    ///
    /// 4. All block sizes are a multiple of `ALIGN`
    #[cfg(debug_assertions)]
    fn check_integrity(&self) {
        let mut prev_end = None;
        for (&index, &len) in self.free_block_index_to_len.iter() {
            // (1)
            let end = index + len;
            assert!(usize::try_from(end).unwrap() <= self.capacity);

            // (2)
            if let Some(prev_end) = prev_end {
                // We could assert `prev_end <= index`, and that would be
                // correct, but it would also mean that we missed an opportunity
                // to merge the previous block and this current block
                // together. We don't want to allow that kind of fragmentation,
                // so do the stricter `prev_end < index` assert here.
                assert!(prev_end < index);
            }

            // (3)
            assert_eq!(index % ALIGN_U32, 0);

            // (4)
            assert_eq!(len % ALIGN_U32, 0);

            prev_end = Some(end);
        }
    }
}

#[inline]
fn blocks_are_contiguous(prev_index: u32, prev_len: u32, next_index: u32) -> bool {
    // NB: We might have decided *not* to split the prev block if it was larger
    // than the requested allocation size but not large enough such that if we
    // split it, the remainder could fulfill future allocations. In such cases,
    // the size of the `Layout` given to us upon deallocation (aka `prev_len`)
    // is smaller than the actual size of the block we allocated.
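    //
    // For example (a sketch): a block at `0x20` freed with a 16-byte layout
    // gives `end_of_prev = 0x30`, so a free block starting at `0x30` is
    // contiguous (`delta_to_next = 0`), while one starting at `0x40` is not.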
    let end_of_prev = prev_index + prev_len;
    debug_assert!(
        next_index >= end_of_prev,
        "overlapping blocks: \n\
         \t prev_index = {prev_index:#x}\n\
         \t   prev_len = {prev_len:#x}\n\
         \tend_of_prev = {end_of_prev:#x}\n\
         \t next_index = {next_index:#x}\n\
         `next_index` should be >= `end_of_prev`"
    );
    let delta_to_next = next_index - end_of_prev;
    delta_to_next < ALIGN_U32
}

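/// Round `value` down to the nearest multiple of `divisor`, which must be a
/// nonzero power of two. For example, `round_u32_down_to_pow2(37, 16) == 32`.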
#[inline]
fn round_u32_down_to_pow2(value: u32, divisor: u32) -> u32 {
    debug_assert!(divisor > 0);
    debug_assert!(divisor.is_power_of_two());
    value & !(divisor - 1)
}

#[inline]
fn round_usize_down_to_pow2(value: usize, divisor: usize) -> usize {
    debug_assert!(divisor > 0);
    debug_assert!(divisor.is_power_of_two());
    value & !(divisor - 1)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::hash_map::HashMap;
    use proptest::prelude::*;
    use std::num::NonZeroUsize;

    fn free_list_block_len_and_size(free_list: &FreeList) -> (usize, Option<usize>) {
        let len = free_list.free_block_index_to_len.len();
        let size = free_list
            .free_block_index_to_len
            .values()
            .next()
            .map(|s| usize::try_from(*s).unwrap());
        (len, size)
    }

    proptest! {
        /// This property test ensures that `FreeList` doesn't suffer from
        /// permanent fragmentation. That is, it can always merge neighboring
        /// free blocks together into a single, larger free block that can be
        /// used to satisfy larger allocations than either of those smaller
        /// blocks could have. In the limit, once we've freed all blocks, that
        /// means we should end up with a single block that represents the whole
        /// range of memory that the `FreeList` is portioning out (just like
        /// what we started with when we initially created the `FreeList`).
        #[test]
        #[cfg_attr(miri, ignore)]
        fn check_no_fragmentation((capacity, ops) in ops()) {
            let _ = env_logger::try_init();

            // Map from allocation id to ptr.
            let mut live = HashMap::new();

            // Set of deferred deallocations, where the strategy told us to
            // deallocate an id before it was allocated. These simply get
            // deallocated en masse at the end.
            let mut deferred = vec![];

            // The free list we are testing.
            let mut free_list = FreeList::new(capacity.get());

            let (initial_len, initial_size) = free_list_block_len_and_size(&free_list);
            assert!(initial_len == 0 || initial_len == 1);
            assert!(initial_size.unwrap_or(0) <= capacity.get());
            assert_eq!(initial_size.unwrap_or(0), free_list.max_size());

            // Run through the generated ops and perform each operation.
            for (id, op) in ops {
                match op {
                    Op::Alloc(layout) => {
                        if let Ok(Some(ptr)) = free_list.alloc(layout) {
                            live.insert(id, ptr);
                        }
                    }
                    Op::Dealloc(layout) => {
                        if let Some(ptr) = live.remove(&id) {
                            free_list.dealloc(ptr, layout);
                        } else {
                            deferred.push((id, layout));
                        }
                    }
                }
            }

            // Now that we've completed all allocations, perform the deferred
            // deallocations.
            for (id, layout) in deferred {
                // NB: not all IDs necessarily got successful allocations, so
                // there might not be a live pointer for this ID, even after
                // we've already performed all the allocation operations.
                if let Some(ptr) = live.remove(&id) {
                    free_list.dealloc(ptr, layout);
                }
            }

            // Now we can assert various properties that should hold after we
            // have deallocated everything that was allocated.
            //
            // First, assert we did in fact deallocate everything.
            assert!(live.is_empty());

            let (final_len, final_size) = free_list_block_len_and_size(&free_list);

            // The free list should have a single chunk again (or no chunks if
            // the capacity was too small).
            assert_eq!(final_len, initial_len);

            // And the size of that chunk should be the same as the initial size.
            assert_eq!(final_size, initial_size);
        }
    }

    #[derive(Clone, Debug)]
    enum Op {
        Alloc(Layout),
        Dealloc(Layout),
    }

    /// Map an arbitrary `x` to a power of 2 that is less than or equal to
    /// `max`, but with as little bias as possible (e.g. rounding `min(x, max)`
    /// to the nearest power of 2 is unacceptable because it would majorly bias
    /// the distribution towards `max` when `max` is much smaller than
    /// `usize::MAX`).
    fn clamp_to_pow2_in_range(x: usize, max: usize) -> usize {
        let log_max = max.ilog2() as usize;
        if log_max == 0 {
            return 1;
        }
        let divisor = usize::MAX / log_max;
        let y = 1_usize << (x / divisor);
        assert!(y.is_power_of_two(), "{y} is not a power of two");
        assert!(y <= max, "{y} is larger than {max}");
        y
    }
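
    /// A quick sanity check of the helper above (a sketch, not exhaustive):
    /// the result is always a power of two that is `<= max`.
    #[test]
    fn clamp_to_pow2_in_range_sanity() {
        for x in [0, 1, 1234, usize::MAX / 2, usize::MAX] {
            let y = clamp_to_pow2_in_range(x, ALIGN_USIZE);
            assert!(y.is_power_of_two());
            assert!(y <= ALIGN_USIZE);
        }
    }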

    /// Helper to turn a pair of arbitrary `usize`s into a valid `Layout` of
    /// reasonable size for use with quickchecks.
    fn arbitrary_layout(max_size: NonZeroUsize, size: usize, align: usize) -> Layout {
        // The maximum size cannot be larger than `isize::MAX` because `Layout`
        // imposes that constraint on its size.
        let max_size = std::cmp::min(max_size.get(), usize::try_from(isize::MAX).unwrap());

        // Ensure that the alignment is a power of 2 that is less than or equal
        // to the maximum alignment that `FreeList` supports.
        let align = clamp_to_pow2_in_range(align, super::ALIGN_USIZE);

        // Ensure that `size` is less than or equal to `max_size`.
        let size = size % (max_size + 1);

        // Ensure that `size` is a multiple of `align`.
        //
        // NB: We round `size` *down* to the previous multiple of `align` to
        // preserve `size <= max_size`.
        let size = round_usize_down_to_pow2(size, align);
        assert!(size <= max_size);

        // Double check that we satisfied all of `Layout::from_size_align`'s
        // success requirements.
        assert_ne!(align, 0);
        assert!(align.is_power_of_two());
        assert_eq!(size % align, 0);
        assert!(size <= usize::try_from(isize::MAX).unwrap());

        Layout::from_size_align(size, align).unwrap()
    }

    /// Proptest strategy to generate a free list capacity and a series of
    /// allocation operations to perform in a free list of that capacity.
    fn ops() -> impl Strategy<Value = (NonZeroUsize, Vec<(usize, Op)>)> {
        any::<usize>().prop_flat_map(|capacity| {
            let capacity =
                NonZeroUsize::new(capacity).unwrap_or_else(|| NonZeroUsize::new(1 << 31).unwrap());

            (
                Just(capacity),
                (any::<usize>(), any::<usize>(), any::<usize>())
                    .prop_flat_map(move |(id, size, align)| {
                        let layout = arbitrary_layout(capacity, size, align);
                        vec![
                            Just((id, Op::Alloc(layout))),
                            Just((id, Op::Dealloc(layout))),
                        ]
                    })
                    .prop_shuffle(),
            )
        })
    }

    #[test]
    fn allocate_no_split() {
        // Create a free list with the capacity to allocate two blocks of size
        // `ALIGN_U32`.
        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 2);

        assert_eq!(free_list.free_block_index_to_len.len(), 1);
        assert_eq!(free_list.max_size(), ALIGN_USIZE * 2);

        // Allocate a block such that the remainder is not worth splitting.
        free_list
            .alloc(Layout::from_size_align(ALIGN_USIZE + ALIGN_USIZE, ALIGN_USIZE).unwrap())
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");

        // Should not have split the block.
        assert_eq!(free_list.free_block_index_to_len.len(), 0);
    }

    #[test]
    fn allocate_and_split() {
        // Create a free list with the capacity to allocate three blocks of size
        // `ALIGN_U32`.
        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 3);

        assert_eq!(free_list.free_block_index_to_len.len(), 1);
        assert_eq!(free_list.max_size(), ALIGN_USIZE * 3);

        // Allocate a block such that the remainder is worth splitting.
        free_list
            .alloc(Layout::from_size_align(ALIGN_USIZE + ALIGN_USIZE, ALIGN_USIZE).unwrap())
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");

        // Should have split the block.
        assert_eq!(free_list.free_block_index_to_len.len(), 1);
    }
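
    /// A sketch demonstrating the first-fit policy: after freeing an early
    /// block, the next same-sized allocation reuses that block rather than
    /// any later free space.
    #[test]
    fn alloc_reuses_first_fitting_block() {
        let layout = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();
        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 3);

        let a = free_list.alloc(layout).unwrap().unwrap();
        let _b = free_list.alloc(layout).unwrap().unwrap();
        let _c = free_list.alloc(layout).unwrap().unwrap();

        free_list.dealloc(a, layout);
        let d = free_list.alloc(layout).unwrap().unwrap();
        assert_eq!(d, a, "first-fit should hand back the lowest free block");
    }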

    #[test]
    fn dealloc_merge_prev_and_next() {
        let layout = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`"
        );

        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `b`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(b, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have merged `a` and `b` blocks with the rest to form a \
             single, contiguous free block after deallocating `b`"
        );
    }

    #[test]
    fn dealloc_merge_with_prev_and_not_next() {
        let layout = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let c = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`, `b`, and `c`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(b, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have merged `a` and `b` blocks, but not merged with the \
             rest of the free space"
        );

        let _ = c;
    }

    #[test]
    fn dealloc_merge_with_next_and_not_prev() {
        let layout = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let c = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`, `b`, and `c`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(c, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have merged `c` block with rest of the free space, but not \
             with `a` block"
        );

        let _ = b;
    }

    #[test]
    fn dealloc_no_merge() {
        let layout = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let c = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let d = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`, `b`, `c`, and `d`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(c, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            3,
            "should not have merged the `c` block with the `a` block or the \
             rest of the free space"
        );

        let _ = (b, d);
    }

    #[test]
    fn alloc_size_too_large() {
        // Free list with room for 10 min-sized blocks.
        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 10);
        assert_eq!(free_list.max_size(), ALIGN_USIZE * 10);

        // Attempt to allocate something that is 20 times the size of our
        // min-sized block.
        assert!(
            free_list
                .alloc(Layout::from_size_align(ALIGN_USIZE * 20, ALIGN_USIZE).unwrap())
                .unwrap()
                .is_none()
        );
    }

    #[test]
    fn alloc_align_too_large() {
        // Free list with room for 10 min-sized blocks.
        let mut free_list = FreeList::new(ALIGN_USIZE + ALIGN_USIZE * 10);
        assert_eq!(free_list.max_size(), ALIGN_USIZE * 10);

        // Attempt to allocate something that requires larger alignment than
        // `FreeList` supports.
        assert!(
            free_list
                .alloc(Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE * 2).unwrap())
                .is_err()
        );
    }

    #[test]
    fn all_pairwise_alloc_dealloc_orderings() {
        let tests: &[fn(&mut FreeList, Layout)] = &[
            |f, l| {
                let a = f.alloc(l).unwrap().unwrap();
                let b = f.alloc(l).unwrap().unwrap();
                f.dealloc(a, l);
                f.dealloc(b, l);
            },
            |f, l| {
                let a = f.alloc(l).unwrap().unwrap();
                let b = f.alloc(l).unwrap().unwrap();
                f.dealloc(b, l);
                f.dealloc(a, l);
            },
            |f, l| {
                let a = f.alloc(l).unwrap().unwrap();
                f.dealloc(a, l);
                let b = f.alloc(l).unwrap().unwrap();
                f.dealloc(b, l);
            },
        ];

        let l = Layout::from_size_align(16, 8).unwrap();
        for test in tests {
            let mut f = FreeList::new(0x100);
            test(&mut f, l);
        }
    }

    #[test]
    fn add_capacity() {
        let layout = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(0);
        assert!(free_list.alloc(layout).unwrap().is_none(), "no capacity");

        free_list.add_capacity(ALIGN_USIZE);
        assert!(
            free_list.alloc(layout).unwrap().is_none(),
            "still not enough capacity because we won't allocate the zero index"
        );

        free_list.add_capacity(1);
        assert!(
            free_list.alloc(layout).unwrap().is_none(),
            "still not enough capacity because allocations are multiples of the alignment"
        );

        free_list.add_capacity(ALIGN_USIZE - 1);
        let a = free_list
            .alloc(layout)
            .unwrap()
            .expect("now we have enough capacity for one");
        assert!(
            free_list.alloc(layout).unwrap().is_none(),
            "but not enough capacity for two"
        );

        free_list.add_capacity(ALIGN_USIZE);
        let b = free_list
            .alloc(layout)
            .unwrap()
            .expect("now we have enough capacity for two");

        free_list.dealloc(a, layout);
        free_list.dealloc(b, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "`dealloc` should merge blocks from different `add_capacity` calls together"
        );

        free_list.add_capacity(ALIGN_USIZE);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "`add_capacity` should eagerly merge new capacity into the last block \
             in the free list, when possible"
        );
    }

    #[test]
    fn add_capacity_not_enough_for_first_alloc() {
        let layout = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(0);
        assert!(free_list.alloc(layout).unwrap().is_none(), "no capacity");

        for _ in 1..2 * ALIGN_USIZE {
            free_list.add_capacity(1);
            assert!(
                free_list.alloc(layout).unwrap().is_none(),
                "not enough capacity"
            );
        }

        free_list.add_capacity(1);
        free_list
            .alloc(layout)
            .unwrap()
            .expect("now we have enough capacity for one");
        assert!(
            free_list.alloc(layout).unwrap().is_none(),
            "but not enough capacity for two"
        );
    }
}
953}