wasmtime/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs

#![cfg_attr(asan, allow(dead_code))]

use super::index_allocator::{SimpleIndexAllocator, SlotId};
use crate::prelude::*;
use crate::runtime::vm::sys::vm::commit_pages;
use crate::runtime::vm::{
    mmap::AlignedLength, HostAlignedByteCount, Mmap, PoolingInstanceAllocatorConfig,
};

/// Represents a pool of execution stacks (used for the async fiber implementation).
///
/// Each index into the pool represents a single execution stack. The maximum number of
/// stacks is the same as the maximum number of instances.
///
/// As stacks grow downwards, each stack starts (lowest address) with a guard page
/// that can be used to detect stack overflow.
///
/// The top of the stack (starting stack pointer) is returned when a stack is allocated
/// from the pool.
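///
/// A sketch of a single slot's layout (each slot occupies `stack_size`
/// bytes; proportions are illustrative only):
///
/// ```text
/// +------------------------+  <- lowest address of the slot
/// | guard page (one page)  |     made inaccessible via mprotect
/// +------------------------+  <- bottom of the usable stack
/// | usable stack           |     grows downward toward the
/// | (stack_size - 1 page)  |     guard page below it
/// +------------------------+  <- top of the stack (starting stack pointer)
/// ```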
#[derive(Debug)]
pub struct StackPool {
    mapping: Mmap<AlignedLength>,
    stack_size: HostAlignedByteCount,
    max_stacks: usize,
    page_size: HostAlignedByteCount,
    index_allocator: SimpleIndexAllocator,
    async_stack_zeroing: bool,
    async_stack_keep_resident: HostAlignedByteCount,
}

impl StackPool {
    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
        use rustix::mm::{mprotect, MprotectFlags};

        let page_size = HostAlignedByteCount::host_page_size();

        // Add a page to the stack size for the guard page when using fiber stacks
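        // (e.g. with 4 KiB host pages, a configured `stack_size` of 1 rounds
        // up to one page and then becomes 8 KiB total once the guard page is
        // added; see the test at the bottom of this file).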
        let stack_size = if config.stack_size == 0 {
            HostAlignedByteCount::ZERO
        } else {
            HostAlignedByteCount::new_rounded_up(config.stack_size)
                .and_then(|size| size.checked_add(HostAlignedByteCount::host_page_size()))
                .context("stack size exceeds addressable memory")?
        };

        let max_stacks = usize::try_from(config.limits.total_stacks).unwrap();

        let allocation_size = stack_size
            .checked_mul(max_stacks)
            .context("total size of execution stacks exceeds addressable memory")?;

        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
            .context("failed to create stack pool mapping")?;

        // Set up the stack guard pages.
        if !allocation_size.is_zero() {
            unsafe {
                for i in 0..max_stacks {
                    // Safety: i < max_stacks and we've already checked that
                    // stack_size * max_stacks is valid.
                    let offset = stack_size.unchecked_mul(i);
                    // Make the stack guard page inaccessible.
                    let bottom_of_stack = mapping.as_ptr().add(offset.byte_count()).cast_mut();
                    mprotect(
                        bottom_of_stack.cast(),
                        page_size.byte_count(),
                        MprotectFlags::empty(),
                    )
                    .context("failed to protect stack guard page")?;
                }
            }
        }

        Ok(Self {
            mapping,
            stack_size,
            max_stacks,
            page_size,
            async_stack_zeroing: config.async_stack_zeroing,
            async_stack_keep_resident: HostAlignedByteCount::new_rounded_up(
                config.async_stack_keep_resident,
            )?,
            index_allocator: SimpleIndexAllocator::new(config.limits.total_stacks),
        })
    }

    /// Are there zero slots in use right now?
    #[allow(unused)] // some cfgs don't use this
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }

    /// Allocate a new fiber.
    pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
        if self.stack_size.is_zero() {
            bail!("pooling allocator not configured to enable fiber stack allocation");
        }

        let index = self
            .index_allocator
            .alloc()
            .ok_or_else(|| super::PoolConcurrencyLimitError::new(self.max_stacks, "fibers"))?
            .index();

        assert!(index < self.max_stacks);

        unsafe {
            // Remove the guard page from the size
            let size_without_guard = self.stack_size.checked_sub(self.page_size).expect(
                "self.stack_size is host-page-aligned and is > 0,\
                 so it must be >= self.page_size",
            );

            let bottom_of_stack = self
                .mapping
                .as_ptr()
                .add(self.stack_size.unchecked_mul(index).byte_count())
                .cast_mut();

            commit_pages(bottom_of_stack, size_without_guard.byte_count())?;

            let stack = wasmtime_fiber::FiberStack::from_raw_parts(
                bottom_of_stack,
                self.page_size.byte_count(),
                size_without_guard.byte_count(),
            )?;
            Ok(stack)
        }
    }

    /// Zero the given stack, if we are configured to do so.
    ///
    /// This will call the given `decommit` function for each region of memory
    /// that should be decommitted. It is the caller's responsibility to ensure
    /// that those decommits happen before this stack is reused.
    ///
    /// # Panics
    ///
    /// `zero_stack` panics if the passed in `stack` was not created by
    /// [`Self::allocate`].
    ///
    /// # Safety
    ///
    /// The stack must no longer be in use, and ready for returning to the pool
    /// after it is zeroed and decommitted.
    pub unsafe fn zero_stack(
        &self,
        stack: &mut wasmtime_fiber::FiberStack,
        mut decommit: impl FnMut(*mut u8, usize),
    ) {
        assert!(stack.is_from_raw_parts());
        assert!(
            !self.stack_size.is_zero(),
            "pooling allocator not configured to enable fiber stack allocation \
             (Self::allocate should have returned an error)"
        );

        if !self.async_stack_zeroing {
            return;
        }

        let top = stack
            .top()
            .expect("fiber stack not allocated from the pool") as usize;

        let base = self.mapping.as_ptr() as usize;
        let len = self.mapping.len();
        assert!(
            top > base && top <= (base + len),
            "fiber stack top pointer not in range"
        );

        // Remove the guard page from the size.
        let stack_size = self.stack_size.checked_sub(self.page_size).expect(
            "self.stack_size is host-page-aligned and is > 0,\
             so it must be >= self.page_size",
        );
        let bottom_of_stack = top - stack_size.byte_count();
        let start_of_stack = bottom_of_stack - self.page_size.byte_count();
        assert!(start_of_stack >= base && start_of_stack < (base + len));
        assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);

        // Manually zero the top of the stack to keep the pages resident in
        // memory and avoid future page faults. Use the system to deallocate
        // pages past this. This hopefully strikes a reasonable balance between:
        //
        // * memset for the whole range is probably expensive
        // * madvise for the whole range incurs expensive future page faults
        // * most threads probably don't use most of the stack anyway
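        //
        // The resulting split over the usable region (guard page excluded):
        //
        //   [bottom_of_stack .. bottom_of_stack + rest)  -> decommit()
        //   [bottom_of_stack + rest .. top)              -> memset to zero
        //
        // where `rest = stack_size - min(stack_size, keep_resident)`.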
        let size_to_memset = stack_size.min(self.async_stack_keep_resident);
        let rest = stack_size
            .checked_sub(size_to_memset)
            .expect("stack_size >= size_to_memset");
        std::ptr::write_bytes(
            (bottom_of_stack + rest.byte_count()) as *mut u8,
            0,
            size_to_memset.byte_count(),
        );

        // Use the system to reset remaining stack pages to zero.
        decommit(bottom_of_stack as _, rest.byte_count());
    }

    /// Deallocate a previously-allocated fiber.
    ///
    /// # Safety
    ///
    /// The fiber must have been allocated by this pool, must be in an allocated
    /// state, and must never be used again.
    ///
    /// The caller must have already called `zero_stack` on the fiber stack and
    /// flushed any enqueued decommits for this stack's memory.
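    ///
    /// A sketch of the expected call sequence (the decommit-queue helpers
    /// shown are illustrative, not part of this module):
    ///
    /// ```ignore
    /// let mut stack = pool.allocate()?;
    /// // ... run a fiber on `stack` ...
    /// unsafe { pool.zero_stack(&mut stack, |ptr, len| queue_decommit(ptr, len)) };
    /// flush_decommit_queue(); // hypothetical: decommits must complete before reuse
    /// unsafe { pool.deallocate(stack) };
    /// ```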
    pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack) {
        assert!(stack.is_from_raw_parts());

        let top = stack
            .top()
            .expect("fiber stack not allocated from the pool") as usize;

        let base = self.mapping.as_ptr() as usize;
        let len = self.mapping.len();
        assert!(
            top > base && top <= (base + len),
            "fiber stack top pointer not in range"
        );

        // Remove the guard page from the size
        let stack_size = self.stack_size.byte_count() - self.page_size.byte_count();
        let bottom_of_stack = top - stack_size;
        let start_of_stack = bottom_of_stack - self.page_size.byte_count();
        assert!(start_of_stack >= base && start_of_stack < (base + len));
        assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);

        let index = (start_of_stack - base) / self.stack_size.byte_count();
        assert!(index < self.max_stacks);
        let index = u32::try_from(index).unwrap();

        self.index_allocator.free(SlotId(index));
    }
}

#[cfg(all(test, unix, feature = "async", not(miri)))]
mod tests {
    use super::*;
    use crate::runtime::vm::InstanceLimits;

    #[test]
    fn test_stack_pool() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_stacks: 10,
                ..Default::default()
            },
            stack_size: 1,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let pool = StackPool::new(&config)?;

        let native_page_size = crate::runtime::vm::host_page_size();
        assert_eq!(pool.stack_size, 2 * native_page_size);
        assert_eq!(pool.max_stacks, 10);
        assert_eq!(pool.page_size, native_page_size);

        assert_eq!(pool.index_allocator.testing_freelist(), []);

        let base = pool.mapping.as_ptr() as usize;

        let mut stacks = Vec::new();
        for i in 0..10 {
            let stack = pool.allocate().expect("allocation should succeed");
            assert_eq!(
                ((stack.top().unwrap() as usize - base) / pool.stack_size.byte_count()) - 1,
                i
            );
            stacks.push(stack);
        }

        assert_eq!(pool.index_allocator.testing_freelist(), []);

        assert!(pool.allocate().is_err(), "allocation should fail");

        for stack in stacks {
            unsafe {
                pool.deallocate(stack);
            }
        }

        assert_eq!(
            pool.index_allocator.testing_freelist(),
            [
                SlotId(0),
                SlotId(1),
                SlotId(2),
                SlotId(3),
                SlotId(4),
                SlotId(5),
                SlotId(6),
                SlotId(7),
                SlotId(8),
                SlotId(9)
            ],
        );

        Ok(())
    }
}