wasmtime/runtime/vm/
mmap.rs

1//! Low-level abstraction for allocating and managing zero-filled pages
2//! of memory.
3
4use super::HostAlignedByteCount;
5use crate::prelude::*;
6use crate::runtime::vm::sys::{mmap, vm::MemoryImageSource};
7use alloc::sync::Arc;
8use core::ops::Range;
9use core::ptr::NonNull;
10#[cfg(feature = "std")]
11use std::fs::File;
12
/// A marker type for an [`Mmap`] where both the start address and length are a
/// multiple of the host page size.
///
/// Carries no data; it exists purely as a type-level tag (see [`Mmap`]).
///
/// For more information, see the documentation on [`Mmap`].
#[derive(Clone, Debug)]
pub struct AlignedLength {}
19
/// A type of [`Mmap`] where the start address is host page-aligned, but the
/// length is possibly not a multiple of the host page size.
///
/// For more information, see the documentation on [`Mmap`].
#[derive(Clone, Debug)]
pub struct UnalignedLength {
    /// The file backing this mapping, if any. Holding the `Arc<File>` keeps
    /// the file handle alive for at least as long as the mapping.
    #[cfg(feature = "std")]
    file: Option<Arc<File>>,
}
29
/// A platform-independent abstraction over memory-mapped data.
///
/// The type parameter can be one of:
///
/// * [`AlignedLength`]: Both the start address and length are page-aligned
/// (i.e. a multiple of the host page size). This is always the result of an
/// mmap backed by anonymous memory.
///
/// * [`UnalignedLength`]: The start address is host page-aligned, but the
/// length is not necessarily page-aligned. This is usually backed by a file,
/// but can also be backed by anonymous memory.
///
/// ## Notes
///
/// If the length of a file is not a multiple of the host page size, [POSIX does
/// not specify any semantics][posix-mmap] for the rest of the last page. Linux
/// [does say][linux-mmap] that the rest of the page is reserved and zeroed out,
/// but for portability it's best to not assume anything about the rest of
/// memory. `UnalignedLength` achieves a type-level distinction between an mmap
/// that is backed purely by memory, and one that is possibly backed by a file.
///
/// Currently, the OS-specific `mmap` implementations in this crate do not make
/// this distinction -- alignment is managed at this platform-independent
/// layer. It might make sense to add this distinction to the OS-specific
/// implementations in the future.
///
/// [posix-mmap]: https://pubs.opengroup.org/onlinepubs/9799919799/functions/mmap.html
/// [linux-mmap]: https://man7.org/linux/man-pages/man2/mmap.2.html#NOTES
#[derive(Debug)]
pub struct Mmap<T> {
    /// The underlying OS-specific memory mapping.
    sys: mmap::Mmap,
    /// Marker state: either `AlignedLength` (no data) or `UnalignedLength`
    /// (possibly carrying the backing file).
    data: T,
}
63
impl Mmap<AlignedLength> {
    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned
    /// accessible memory.
    ///
    /// `size` is rounded up to the nearest host-page-size multiple, so the
    /// resulting mapping may be larger than requested.
    pub fn with_at_least(size: usize) -> Result<Self> {
        let rounded_size = HostAlignedByteCount::new_rounded_up(size)?;
        Self::accessible_reserved(rounded_size, rounded_size)
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned
    /// accessible memory, within a reserved mapping of `mapping_size` bytes.
    /// `accessible_size` and `mapping_size` must be native page-size multiples.
    ///
    /// # Panics
    ///
    /// This function will panic if `accessible_size` is greater than
    /// `mapping_size`.
    pub fn accessible_reserved(
        accessible_size: HostAlignedByteCount,
        mapping_size: HostAlignedByteCount,
    ) -> Result<Self> {
        assert!(accessible_size <= mapping_size);

        if mapping_size.is_zero() {
            // Nothing to ask the OS for: an empty mapping suffices.
            Ok(Mmap {
                sys: mmap::Mmap::new_empty(),
                data: AlignedLength {},
            })
        } else if accessible_size == mapping_size {
            // The entire region is accessible, so map it all up front.
            Ok(Mmap {
                sys: mmap::Mmap::new(mapping_size)
                    .context(format!("mmap failed to allocate {mapping_size:#x} bytes"))?,
                data: AlignedLength {},
            })
        } else {
            // Reserve the full address range first, then make only the
            // leading `accessible_size` bytes accessible.
            let result = Mmap {
                sys: mmap::Mmap::reserve(mapping_size)
                    .context(format!("mmap failed to reserve {mapping_size:#x} bytes"))?,
                data: AlignedLength {},
            };
            if !accessible_size.is_zero() {
                // SAFETY: result was just created and is not in use.
                unsafe {
                    result
                        .make_accessible(HostAlignedByteCount::ZERO, accessible_size)
                        .context(format!(
                            "mmap failed to allocate {accessible_size:#x} bytes"
                        ))?;
                }
            }
            Ok(result)
        }
    }

    /// Converts this `Mmap` into a `Mmap<UnalignedLength>`.
    ///
    /// `UnalignedLength` really means "_possibly_ unaligned length", so it can
    /// be freely converted over at the cost of losing the alignment guarantee.
    pub fn into_unaligned(self) -> Mmap<UnalignedLength> {
        Mmap {
            sys: self.sys,
            data: UnalignedLength {
                // Anonymous memory: there is no backing file to carry over.
                #[cfg(feature = "std")]
                file: None,
            },
        }
    }

    /// Returns the length of the memory mapping as an aligned byte count.
    pub fn len_aligned(&self) -> HostAlignedByteCount {
        // SAFETY: The type parameter indicates that self.sys.len() is aligned.
        unsafe { HostAlignedByteCount::new_unchecked(self.sys.len()) }
    }

    /// Return a struct representing a page-aligned offset into the mmap.
    ///
    /// Returns an error if `offset > self.len_aligned()`. Note that an offset
    /// equal to `self.len_aligned()` (one past the end) is accepted.
    pub fn offset(self: &Arc<Self>, offset: HostAlignedByteCount) -> Result<MmapOffset> {
        if offset > self.len_aligned() {
            bail!(
                "offset {} is not in bounds for mmap: {}",
                offset,
                self.len_aligned()
            );
        }

        Ok(MmapOffset::new(self.clone(), offset))
    }

    /// Return an `MmapOffset` corresponding to zero bytes into the mmap.
    pub fn zero_offset(self: &Arc<Self>) -> MmapOffset {
        MmapOffset::new(self.clone(), HostAlignedByteCount::ZERO)
    }

    /// Make the memory starting at `start` and extending for `len` bytes
    /// accessible. `start` and `len` must be native page-size multiples and
    /// describe a range within `self`'s reserved memory.
    ///
    /// # Safety
    ///
    /// There must not be any other references to the region of memory being
    /// made accessible.
    ///
    /// # Panics
    ///
    /// Panics if `start + len` overflows, or if
    /// `start + len > self.len_aligned()`.
    pub unsafe fn make_accessible(
        &self,
        start: HostAlignedByteCount,
        len: HostAlignedByteCount,
    ) -> Result<()> {
        if len.is_zero() {
            // A zero-sized mprotect (or equivalent) is allowed on some
            // platforms but not others (notably Windows). Treat it as a no-op
            // everywhere.
            return Ok(());
        }

        let end = start
            .checked_add(len)
            .expect("start + len must not overflow");
        assert!(
            end <= self.len_aligned(),
            "start + len ({end}) must be <= mmap region {}",
            self.len_aligned()
        );

        unsafe { self.sys.make_accessible(start, len) }
    }
}
193
#[cfg(feature = "std")]
impl Mmap<UnalignedLength> {
    /// Creates a new `Mmap` by mapping the given open `file` into memory.
    ///
    /// The memory is mapped in read-only mode for the entire file. If portions
    /// of the file need to be modified then the `region` crate can be used to
    /// alter permissions of each page.
    ///
    /// The file handle is retained alongside the mapping; see
    /// [`Self::original_file`].
    pub fn from_file(file: Arc<File>) -> Result<Self> {
        Ok(Mmap {
            sys: mmap::Mmap::from_file(&file)?,
            data: UnalignedLength { file: Some(file) },
        })
    }

    /// Returns the underlying file that this mmap is mapping, if present.
    pub fn original_file(&self) -> Option<&Arc<File>> {
        self.data.file.as_ref()
    }
}
218
impl<T> Mmap<T> {
    /// Return the allocated memory as a slice of u8.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the range of bytes is accessible to the
    /// program and additionally has previously been initialized.
    ///
    /// # Panics
    ///
    /// Panics if the `range` provided is outside of the limits of this mmap.
    #[inline]
    pub unsafe fn slice(&self, range: Range<usize>) -> &[u8] {
        assert!(range.start <= range.end);
        assert!(range.end <= self.len());
        unsafe {
            core::slice::from_raw_parts(self.as_ptr().add(range.start), range.end - range.start)
        }
    }

    /// Return the allocated memory as a mutable slice of u8.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the range of bytes is accessible to the
    /// program and additionally has previously been initialized.
    ///
    /// # Panics
    ///
    /// Panics if the `range` provided is outside of the limits of this mmap.
    pub unsafe fn slice_mut(&mut self, range: Range<usize>) -> &mut [u8] {
        assert!(range.start <= range.end);
        assert!(range.end <= self.len());
        unsafe {
            core::slice::from_raw_parts_mut(
                self.as_mut_ptr().add(range.start),
                range.end - range.start,
            )
        }
    }

    /// Return the allocated memory as a pointer to u8.
    #[inline]
    pub fn as_ptr(&self) -> *const u8 {
        self.sys.as_send_sync_ptr().as_ptr() as *const u8
    }

    /// Return the allocated memory as a mutable pointer to u8.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.sys.as_send_sync_ptr().as_ptr()
    }

    /// Return the allocated memory as a non-null pointer to u8.
    #[inline]
    pub fn as_non_null(&self) -> NonNull<u8> {
        self.sys.as_send_sync_ptr().as_non_null()
    }

    /// Return the length of the allocated memory.
    ///
    /// This is the byte length of this entire mapping which includes both
    /// addressable and non-addressable memory.
    ///
    /// If the length is statically known to be page-aligned via the
    /// [`AlignedLength`] type parameter, use [`Self::len_aligned`].
    #[inline]
    pub fn len(&self) -> usize {
        self.sys.len()
    }

    /// Makes the specified `range` within this `Mmap` to be read/execute.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe as it's generally not valid to simply make memory
    /// executable, so it's up to the caller to ensure that everything is in
    /// order and this doesn't overlap with other memory that should only be
    /// read or only read/write.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out-of-bounds or if `range.start` is not
    /// page-aligned. (Note: `range.end`'s alignment is not checked here.)
    pub unsafe fn make_executable(
        &self,
        range: Range<usize>,
        enable_branch_protection: bool,
    ) -> Result<()> {
        assert!(range.start <= self.len());
        assert!(range.end <= self.len());
        assert!(range.start <= range.end);
        assert!(
            range.start % crate::runtime::vm::host_page_size() == 0,
            "changing of protections isn't page-aligned",
        );

        if range.start == range.end {
            // A zero-sized mprotect (or equivalent) is allowed on some
            // platforms but not others (notably Windows). Treat it as a no-op
            // everywhere.
            return Ok(());
        }

        unsafe {
            self.sys
                .make_executable(range, enable_branch_protection)
                .context("failed to make memory executable")
        }
    }

    /// Makes the specified `range` within this `Mmap` to be readonly.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out-of-bounds or if `range.start` is not
    /// page-aligned. (Note: `range.end`'s alignment is not checked here.)
    pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
        assert!(range.start <= self.len());
        assert!(range.end <= self.len());
        assert!(range.start <= range.end);
        assert!(
            range.start % crate::runtime::vm::host_page_size() == 0,
            "changing of protections isn't page-aligned",
        );

        if range.start == range.end {
            // A zero-sized mprotect (or equivalent) is allowed on some
            // platforms but not others (notably Windows). Treat it as a no-op
            // everywhere.
            return Ok(());
        }

        unsafe {
            self.sys
                .make_readonly(range)
                .context("failed to make memory readonly")
        }
    }
}
353
354fn _assert() {
355    fn _assert_send_sync<T: Send + Sync>() {}
356    _assert_send_sync::<Mmap<AlignedLength>>();
357    _assert_send_sync::<Mmap<UnalignedLength>>();
358}
359
360impl From<Mmap<AlignedLength>> for Mmap<UnalignedLength> {
361    fn from(mmap: Mmap<AlignedLength>) -> Mmap<UnalignedLength> {
362        mmap.into_unaligned()
363    }
364}
365
/// A reference to an [`Mmap`], along with a host-page-aligned index within it.
///
/// The main invariant this type asserts is that the index is in bounds within
/// the `Mmap` (i.e. `self.mmap[self.offset]` is valid). In the future, this
/// type may also assert other invariants.
#[derive(Clone, Debug)]
pub struct MmapOffset {
    /// The mapping this offset points into; shared so offsets keep it alive.
    mmap: Arc<Mmap<AlignedLength>>,
    /// Host-page-aligned byte offset, `<= mmap.len_aligned()` by construction.
    offset: HostAlignedByteCount,
}
376
377impl MmapOffset {
378    #[inline]
379    fn new(mmap: Arc<Mmap<AlignedLength>>, offset: HostAlignedByteCount) -> Self {
380        assert!(
381            offset <= mmap.len_aligned(),
382            "offset {} is in bounds (< {})",
383            offset,
384            mmap.len_aligned(),
385        );
386        Self { mmap, offset }
387    }
388
389    /// Returns the mmap this offset is within.
390    #[inline]
391    pub fn mmap(&self) -> &Arc<Mmap<AlignedLength>> {
392        &self.mmap
393    }
394
395    /// Returns the host-page-aligned offset within the mmap.
396    #[inline]
397    pub fn offset(&self) -> HostAlignedByteCount {
398        self.offset
399    }
400
401    /// Returns the raw pointer in memory represented by this offset.
402    #[inline]
403    pub fn as_mut_ptr(&self) -> *mut u8 {
404        self.as_non_null().as_ptr()
405    }
406
407    /// Returns the raw pointer in memory represented by this offset.
408    #[inline]
409    pub fn as_non_null(&self) -> NonNull<u8> {
410        // SAFETY: constructor checks that offset is within this allocation.
411        unsafe { self.mmap().as_non_null().byte_add(self.offset.byte_count()) }
412    }
413
414    /// Maps an image into the mmap with read/write permissions.
415    ///
416    /// The image is mapped at `self.mmap.as_ptr() + self.offset +
417    /// memory_offset`.
418    ///
419    /// ## Safety
420    ///
421    /// The caller must ensure that no one else has a reference to this memory.
422    pub unsafe fn map_image_at(
423        &self,
424        image_source: &MemoryImageSource,
425        source_offset: u64,
426        memory_offset: HostAlignedByteCount,
427        memory_len: HostAlignedByteCount,
428    ) -> Result<()> {
429        let total_offset = self
430            .offset
431            .checked_add(memory_offset)
432            .expect("self.offset + memory_offset is in bounds");
433        unsafe {
434            self.mmap
435                .sys
436                .map_image_at(image_source, source_offset, total_offset, memory_len)
437        }
438    }
439}
440
#[cfg(test)]
mod tests {
    use super::*;

    /// Test zero-length calls to mprotect (or the OS equivalent).
    ///
    /// These should be treated as no-ops on all platforms. This test ensures
    /// that such calls at least don't error out.
    #[test]
    fn mprotect_zero_length() {
        let page = HostAlignedByteCount::host_page_size();
        let two_pages = page.checked_mul(2).unwrap();
        let three_pages = page.checked_mul(3).unwrap();
        let four_pages = page.checked_mul(4).unwrap();

        // Two accessible pages inside a four-page reservation.
        let mem = Mmap::accessible_reserved(two_pages, four_pages).expect("allocated memory");

        unsafe {
            mem.make_accessible(three_pages, HostAlignedByteCount::ZERO)
                .expect("make_accessible succeeded");

            mem.make_executable(three_pages.byte_count()..three_pages.byte_count(), false)
                .expect("make_executable succeeded");

            mem.make_readonly(three_pages.byte_count()..three_pages.byte_count())
                .expect("make_readonly succeeded");
        }
    }
}
469}