//! A pool of reusable GC heaps.
//!
//! Path: `wasmtime/runtime/vm/instance/allocator/pooling/gc_heap_pool.rs`
use super::index_allocator::{SimpleIndexAllocator, SlotId};
use super::GcHeapAllocationIndex;
use crate::prelude::*;
use crate::runtime::vm::{GcHeap, GcRuntime, PoolingInstanceAllocatorConfig, Result};
use std::sync::Mutex;

/// A pool of reusable GC heaps.
pub struct GcHeapPool {
    /// The maximum number of GC heaps that may be live at once (the pool's
    /// capacity; also the length of `heaps`).
    max_gc_heaps: usize,
    /// Hands out free slot indices; a slot is "in use" from `allocate` until
    /// the matching `deallocate`.
    index_allocator: SimpleIndexAllocator,
    /// The heap storage itself, one slot per index. `None` means the slot's
    /// heap either has not been lazily created yet or is currently checked
    /// out by `allocate`.
    heaps: Mutex<Vec<Option<Box<dyn GcHeap>>>>,
}

impl std::fmt::Debug for GcHeapPool {
    /// Render the pool for diagnostics. The `heaps` vector is elided (shown
    /// as `".."`) since trait objects are not `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("GcHeapPool");
        builder.field("max_gc_heaps", &self.max_gc_heaps);
        builder.field("index_allocator", &self.index_allocator);
        builder.field("heaps", &"..");
        builder.finish()
    }
}

impl GcHeapPool {
    /// Create a new `GcHeapPool` with the given configuration.
    ///
    /// The pool's capacity is `config.limits.total_gc_heaps`; individual
    /// heaps are created lazily on first use of each slot.
    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
        let index_allocator = SimpleIndexAllocator::new(config.limits.total_gc_heaps);
        let max_gc_heaps = usize::try_from(config.limits.total_gc_heaps).unwrap();

        // Each individual GC heap in the pool is lazily allocated. See the
        // `allocate` method.
        let heaps = Mutex::new((0..max_gc_heaps).map(|_| None).collect());

        Ok(Self {
            max_gc_heaps,
            index_allocator,
            heaps,
        })
    }

    /// Are there zero slots in use right now?
    #[allow(unused)] // some cfgs don't use this
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }

    /// Allocate a single GC heap for the given instance allocation request.
    ///
    /// Returns the slot's index along with the heap itself; both must be
    /// handed back via `deallocate` when the instance is done with it.
    ///
    /// # Errors
    ///
    /// Fails when the pool is already at its concurrency limit, or when
    /// lazily creating a fresh heap for an unused slot fails.
    pub fn allocate(
        &self,
        gc_runtime: &dyn GcRuntime,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
        let allocation_index = self
            .index_allocator
            .alloc()
            .map(|slot| GcHeapAllocationIndex(slot.0))
            .ok_or_else(|| {
                anyhow!(
                    "maximum concurrent GC heap limit of {} reached",
                    self.max_gc_heaps
                )
            })?;
        debug_assert_ne!(allocation_index, GcHeapAllocationIndex::default());

        // Take the slot's heap (if one was ever created) out of the pool.
        // Scoped so the lock is not held while potentially creating a heap.
        let maybe_heap = {
            let mut heaps = self.heaps.lock().unwrap();
            heaps[allocation_index.index()].take()
        };

        let heap = match maybe_heap {
            // If we already have a heap at this slot, reuse it.
            Some(heap) => heap,
            // Otherwise, we haven't forced this slot's lazily allocated heap
            // yet. So do that now. If creation fails, return the index to the
            // allocator before propagating the error; otherwise the slot
            // would be leaked forever (the original `?` here skipped this).
            // Nothing was placed in the slot, so there is no heap to restore.
            None => match gc_runtime.new_gc_heap() {
                Ok(heap) => heap,
                Err(e) => {
                    self.index_allocator.free(SlotId(allocation_index.0));
                    return Err(e);
                }
            },
        };

        Ok((allocation_index, heap))
    }

    /// Deallocate a previously-allocated GC heap, returning its slot to the
    /// pool for reuse. The heap is reset before being stored back.
    pub fn deallocate(&self, allocation_index: GcHeapAllocationIndex, mut heap: Box<dyn GcHeap>) {
        debug_assert_ne!(allocation_index, GcHeapAllocationIndex::default());
        heap.reset();

        // NB: Replace the heap before freeing the index. If we did it in the
        // opposite order, a concurrent allocation request could reallocate the
        // index before we have replaced the heap.

        {
            let mut heaps = self.heaps.lock().unwrap();
            let old_entry = std::mem::replace(&mut heaps[allocation_index.index()], Some(heap));
            // The slot must have been emptied by the matching `allocate`.
            debug_assert!(old_entry.is_none());
        }

        self.index_allocator.free(SlotId(allocation_index.0));
    }
}