wasmtime/runtime/store/gc.rs

//! GC-related methods for stores.

use super::*;
use crate::runtime::vm::VMGcRef;

impl StoreOpaque {
    /// Attempt to grow the GC heap by `bytes_needed` or, if that fails, perform
    /// a garbage collection.
    ///
    /// Note that even when this function returns, it is not guaranteed
    /// that a GC allocation of size `bytes_needed` will succeed. Growing the GC
    /// heap could fail, and then performing a collection could succeed but
    /// might not free up enough space. Therefore, callers should not assume
    /// that a retried allocation will always succeed.
    ///
    /// The `root` argument, if any, is treated as a GC root for the duration
    /// of this operation, and its updated value is returned.
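    ///
    /// An illustrative call pattern (hypothetical caller code; `try_alloc` is
    /// a placeholder, not an API in this crate):
    ///
    /// ```ignore
    /// // On GC-heap OOM, collect with the requested size as a hint and retry
    /// // once. Per the note above, even the retried allocation may fail.
    /// if try_alloc(&mut store, bytes).is_err() {
    ///     store.gc(limiter, None, Some(bytes)).await;
    ///     try_alloc(&mut store, bytes)?;
    /// }
    /// ```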
    pub(crate) async fn gc(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        root: Option<VMGcRef>,
        bytes_needed: Option<u64>,
    ) -> Option<VMGcRef> {
        let mut scope = crate::OpaqueRootScope::new(self);
        scope.trim_gc_liveness_flags(true);
        let store_id = scope.id();
        let root = root.map(|r| scope.gc_roots_mut().push_lifo_root(store_id, r));

        scope.grow_or_collect_gc_heap(limiter, bytes_needed).await;

        root.map(|r| {
            let r = r
                .get_gc_ref(&scope)
                .expect("still in scope")
                .unchecked_copy();
            scope.clone_gc_ref(&r)
        })
    }

    // This lives on the Store because it must simultaneously borrow
    // `gc_store` and `gc_roots`, and is invoked from other modules to
    // which we do not want to expose the raw fields for piecewise
    // borrows.
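    //
    // For illustration only (hypothetical accessor names, not this module's
    // API): going through `&mut self` accessors instead would not
    // borrow-check, because each call borrows the whole store:
    //
    //     let gc_store = store.gc_store_mut(); // borrows all of `store`
    //     let roots = store.gc_roots_mut();    // ERROR: second `&mut` borrow
    //     roots.trim_liveness_flags(gc_store, eager);
    //
    // Field-level borrow splitting, as below, only works where the fields
    // are visible, i.e. in this module.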
    pub(crate) fn trim_gc_liveness_flags(&mut self, eager: bool) {
        if let Some(gc_store) = self.gc_store.as_mut() {
            self.gc_roots.trim_liveness_flags(gc_store, eager);
        }
    }

    async fn grow_or_collect_gc_heap(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        bytes_needed: Option<u64>,
    ) {
        if let Some(n) = bytes_needed {
            if self.grow_gc_heap(limiter, n).await.is_ok() {
                return;
            }
        }
        self.do_gc().await;
    }

    /// Attempt to grow the GC heap by `bytes_needed` bytes.
    ///
    /// Returns an error if growing the GC heap fails.
    async fn grow_gc_heap(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        bytes_needed: u64,
    ) -> Result<()> {
        log::trace!("Attempting to grow the GC heap by {bytes_needed} bytes");
        assert!(bytes_needed > 0);

        let page_size = self.engine().tunables().gc_heap_memory_type().page_size();

        // Take the GC heap's underlying memory out of the GC heap, attempt to
        // grow it, then replace it. The `TakenGcHeap` guard below restores the
        // memory in its `Drop` impl, so the heap is made whole again even if
        // growth fails partway through.
        let mut heap = TakenGcHeap::new(self);

        let current_size_in_bytes = u64::try_from(heap.memory.byte_size()).unwrap();
        let current_size_in_pages = current_size_in_bytes / page_size;

        // Aim to double the heap size, amortizing the cost of growth.
        let doubled_size_in_pages = current_size_in_pages.saturating_mul(2);
        assert!(doubled_size_in_pages >= current_size_in_pages);
        let delta_pages_for_doubling = doubled_size_in_pages - current_size_in_pages;

        // When doubling our size, saturate at the maximum memory size in pages.
        //
        // TODO: we should consult the instance allocator for its configured
        // maximum memory size, if any, rather than assuming the index
        // type's maximum size.
        let max_size_in_bytes = 1 << 32;
        let max_size_in_pages = max_size_in_bytes / page_size;
        let delta_to_max_size_in_pages = max_size_in_pages - current_size_in_pages;
        let delta_pages_for_alloc = delta_pages_for_doubling.min(delta_to_max_size_in_pages);

        // But always make sure we are attempting to grow at least as many pages
        // as needed by the requested allocation. This must happen *after* the
        // max-size saturation, so that if we are at the max already, we do not
        // succeed in growing by zero delta pages, and then return successfully
        // to our caller, who would be assuming that there is now capacity for
        // their allocation.
        let pages_needed = bytes_needed.div_ceil(page_size);
        assert!(pages_needed > 0);
        let delta_pages_for_alloc = delta_pages_for_alloc.max(pages_needed);
        assert!(delta_pages_for_alloc > 0);
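
        // Illustrative arithmetic (assumed numbers, for exposition only):
        // with 64 KiB pages and a current heap of 3 pages (192 KiB), doubling
        // proposes a delta of 3 pages. If the caller needs 300 KiB, then
        // `pages_needed = ceil(300 KiB / 64 KiB) = 5`, so we grow by
        // `max(3, 5) = 5` pages. Conversely, once the heap is at the 4 GiB
        // cap, the doubling delta saturates to 0 and only `pages_needed`
        // keeps the delta non-zero, so the grow attempt below fails rather
        // than spuriously reporting success.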

        // Safety: we pair growing the GC heap with updating its associated
        // `VMMemoryDefinition` in the `VMStoreContext` immediately
        // afterwards.
        unsafe {
            heap.memory
                .grow(delta_pages_for_alloc, limiter)
                .await?
                .ok_or_else(|| anyhow!("failed to grow GC heap"))?;
        }
        heap.store.vm_store_context.gc_heap = heap.memory.vmmemory();

        let new_size_in_bytes = u64::try_from(heap.memory.byte_size()).unwrap();
        assert!(new_size_in_bytes > current_size_in_bytes);
        heap.delta_bytes_grown = new_size_in_bytes - current_size_in_bytes;
        let delta_bytes_for_alloc = delta_pages_for_alloc.checked_mul(page_size).unwrap();
        assert!(
            heap.delta_bytes_grown >= delta_bytes_for_alloc,
            "{} should be greater than or equal to {delta_bytes_for_alloc}",
            heap.delta_bytes_grown,
        );
        return Ok(());

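        /// An RAII guard holding the GC heap's underlying memory, taken out
        /// of the heap so that it can be grown. On drop, the memory is put
        /// back into the GC heap along with the number of bytes actually
        /// grown, even on error paths where growth never happened.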
        struct TakenGcHeap<'a> {
            store: &'a mut StoreOpaque,
            memory: ManuallyDrop<vm::Memory>,
            delta_bytes_grown: u64,
        }

        impl<'a> TakenGcHeap<'a> {
            fn new(store: &'a mut StoreOpaque) -> TakenGcHeap<'a> {
                TakenGcHeap {
                    memory: ManuallyDrop::new(store.unwrap_gc_store_mut().gc_heap.take_memory()),
                    store,
                    delta_bytes_grown: 0,
                }
            }
        }

        impl Drop for TakenGcHeap<'_> {
            fn drop(&mut self) {
                // SAFETY: `drop` has exclusive ownership of this guard's
                // fields, and `self.memory` is never touched again, so it is
                // safe to take it out of the `ManuallyDrop`. As for
                // `replace_memory`, the memory was taken from the GC heap
                // when this guard was created, so it is safe to place it
                // back inside.
                unsafe {
                    self.store.unwrap_gc_store_mut().gc_heap.replace_memory(
                        ManuallyDrop::take(&mut self.memory),
                        self.delta_bytes_grown,
                    );
                }
            }
        }
    }

    /// Attempt an allocation; if it fails due to GC OOM, perform a garbage
    /// collection and then retry the allocation once.
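    ///
    /// A hypothetical usage sketch (`alloc_my_struct` and the surrounding
    /// names are placeholders, not APIs defined in this crate):
    ///
    /// ```ignore
    /// let handle = store
    ///     .retry_after_gc_async(limiter, init_value, |store, value| {
    ///         // Returns `Err` containing `GcHeapOutOfMemory<T>` on GC OOM.
    ///         alloc_my_struct(store, value)
    ///     })
    ///     .await?;
    /// ```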
    pub(crate) async fn retry_after_gc_async<T, U>(
        &mut self,
        mut limiter: Option<&mut StoreResourceLimiter<'_>>,
        value: T,
        alloc_func: impl Fn(&mut Self, T) -> Result<U>,
    ) -> Result<U>
    where
        T: Send + Sync + 'static,
    {
        self.ensure_gc_store(limiter.as_deref_mut()).await?;
        match alloc_func(self, value) {
            Ok(x) => Ok(x),
            Err(e) => match e.downcast::<crate::GcHeapOutOfMemory<T>>() {
                Ok(oom) => {
                    let (value, oom) = oom.take_inner();
                    self.gc(limiter, None, Some(oom.bytes_needed())).await;
                    alloc_func(self, value)
                }
                Err(e) => Err(e),
            },
        }
    }
}