// wasmtime/runtime/vm/instance/allocator/pooling/generic_stack_pool.rs
#![cfg_attr(
    all(unix, not(miri), not(asan)),
    expect(dead_code, reason = "not used, but typechecked")
)]
5
use crate::PoolConcurrencyLimitError;
use crate::prelude::*;
use crate::runtime::vm::PoolingInstanceAllocatorConfig;
use std::sync::atomic::{AtomicU64, Ordering};
10
11#[derive(Debug)]
23pub struct StackPool {
24 stack_size: usize,
25 stack_zeroing: bool,
26 live_stacks: AtomicU64,
27 stack_limit: u64,
28}
29
30impl StackPool {
31 pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
32 Ok(StackPool {
33 stack_size: config.stack_size,
34 stack_zeroing: config.async_stack_zeroing,
35 live_stacks: AtomicU64::new(0),
36 stack_limit: config.limits.total_stacks.into(),
37 })
38 }
39
40 pub fn is_empty(&self) -> bool {
41 self.live_stacks.load(Ordering::Acquire) == 0
42 }
43
44 pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
45 if self.stack_size == 0 {
46 bail!("fiber stack allocation not supported")
47 }
48
49 let old_count = self.live_stacks.fetch_add(1, Ordering::AcqRel);
50 if old_count >= self.stack_limit {
51 self.live_stacks.fetch_sub(1, Ordering::AcqRel);
52 return Err(PoolConcurrencyLimitError::new(
53 usize::try_from(self.stack_limit).unwrap(),
54 "fibers",
55 )
56 .into());
57 }
58
59 match wasmtime_fiber::FiberStack::new(self.stack_size, self.stack_zeroing) {
60 Ok(stack) => Ok(stack),
61 Err(e) => {
62 self.live_stacks.fetch_sub(1, Ordering::AcqRel);
63 Err(anyhow::Error::from(e))
64 }
65 }
66 }
67
68 pub unsafe fn zero_stack(
69 &self,
70 _stack: &mut wasmtime_fiber::FiberStack,
71 _decommit: impl FnMut(*mut u8, usize),
72 ) {
73 }
76
77 pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack) {
79 self.live_stacks.fetch_sub(1, Ordering::AcqRel);
80 let _ = stack;
82 }
83}