rustix/backend/linux_raw/mm/
syscalls.rs

//! linux_raw syscalls supporting `rustix::mm`.
//!
//! # Safety
//!
//! See the `rustix::backend` module documentation for details.
6#![allow(unsafe_code)]
7#![allow(clippy::undocumented_unsafe_blocks)]
8
9use super::types::{
10    Advice, MapFlags, MlockAllFlags, MlockFlags, MprotectFlags, MremapFlags, MsyncFlags, ProtFlags,
11    UserfaultfdFlags,
12};
13use crate::backend::c;
14#[cfg(target_pointer_width = "64")]
15use crate::backend::conv::loff_t_from_u64;
16use crate::backend::conv::{c_uint, no_fd, pass_usize, ret, ret_owned_fd, ret_void_star};
17use crate::fd::{BorrowedFd, OwnedFd};
18use crate::ffi::c_void;
19use crate::io;
20use linux_raw_sys::general::{MAP_ANONYMOUS, MREMAP_FIXED};
21
/// Make a `madvise` syscall, advising the kernel about the expected usage
/// pattern of the `len` bytes of memory starting at `addr`.
///
/// This is not marked `unsafe`; the `advice` value is converted to the raw
/// `c_uint` the kernel expects and passed through unchanged.
#[inline]
pub(crate) fn madvise(addr: *mut c_void, len: usize, advice: Advice) -> io::Result<()> {
    unsafe {
        ret(syscall!(
            __NR_madvise,
            addr,
            pass_usize(len),
            c_uint(advice as c::c_uint)
        ))
    }
}
33
/// Make an `msync` syscall, flushing changes in the mapped range
/// `[addr, addr + len)` according to `flags`.
///
/// # Safety
///
/// `msync` is primarily unsafe due to the `addr` parameter, as anything
/// working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn msync(addr: *mut c_void, len: usize, flags: MsyncFlags) -> io::Result<()> {
    ret(syscall!(__NR_msync, addr, pass_usize(len), flags))
}
38
39/// # Safety
40///
41/// `mmap` is primarily unsafe due to the `addr` parameter, as anything working
42/// with memory pointed to by raw pointers is unsafe.
43#[inline]
44pub(crate) unsafe fn mmap(
45    addr: *mut c_void,
46    length: usize,
47    prot: ProtFlags,
48    flags: MapFlags,
49    fd: BorrowedFd<'_>,
50    offset: u64,
51) -> io::Result<*mut c_void> {
52    #[cfg(target_pointer_width = "32")]
53    {
54        ret_void_star(syscall!(
55            __NR_mmap2,
56            addr,
57            pass_usize(length),
58            prot,
59            flags,
60            fd,
61            (offset / 4096)
62                .try_into()
63                .map(pass_usize)
64                .map_err(|_| io::Errno::INVAL)?
65        ))
66    }
67    #[cfg(target_pointer_width = "64")]
68    {
69        ret_void_star(syscall!(
70            __NR_mmap,
71            addr,
72            pass_usize(length),
73            prot,
74            flags,
75            fd,
76            loff_t_from_u64(offset)
77        ))
78    }
79}
80
/// Make an anonymous `mmap` syscall, creating a new mapping of `length`
/// bytes not backed by any file: `MAP_ANONYMOUS` is OR'd into `flags`, a
/// sentinel "no fd" value is passed, and the offset is zero.
///
/// # Safety
///
/// `mmap_anonymous` is primarily unsafe due to the `addr` parameter, as
/// anything working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn mmap_anonymous(
    addr: *mut c_void,
    length: usize,
    prot: ProtFlags,
    flags: MapFlags,
) -> io::Result<*mut c_void> {
    #[cfg(target_pointer_width = "32")]
    {
        ret_void_star(syscall!(
            __NR_mmap2,
            addr,
            pass_usize(length),
            prot,
            c_uint(flags.bits() | MAP_ANONYMOUS),
            no_fd(),
            pass_usize(0)
        ))
    }
    #[cfg(target_pointer_width = "64")]
    {
        ret_void_star(syscall!(
            __NR_mmap,
            addr,
            pass_usize(length),
            prot,
            c_uint(flags.bits() | MAP_ANONYMOUS),
            no_fd(),
            loff_t_from_u64(0)
        ))
    }
}
117
/// Make an `mprotect` syscall, changing the protection of the `len` bytes of
/// memory starting at `ptr` to `flags`.
///
/// # Safety
///
/// `mprotect` is primarily unsafe due to the `ptr` parameter, as anything
/// working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn mprotect(
    ptr: *mut c_void,
    len: usize,
    flags: MprotectFlags,
) -> io::Result<()> {
    ret(syscall!(__NR_mprotect, ptr, pass_usize(len), flags))
}
126
/// Make a `munmap` syscall, removing the mapping covering the `length` bytes
/// starting at `addr`.
///
/// # Safety
///
/// `munmap` is primarily unsafe due to the `addr` parameter, as anything
/// working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn munmap(addr: *mut c_void, length: usize) -> io::Result<()> {
    ret(syscall!(__NR_munmap, addr, pass_usize(length)))
}
135
/// Make a `mremap` syscall, resizing (and possibly moving, depending on
/// `flags`) the mapping at `old_address` from `old_size` to `new_size` bytes.
///
/// Without `MREMAP_FIXED` the syscall takes no fifth (new-address) argument;
/// see `mremap_fixed` for that form.
///
/// # Safety
///
/// `mremap` is primarily unsafe due to the `old_address` parameter, as
/// anything working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn mremap(
    old_address: *mut c_void,
    old_size: usize,
    new_size: usize,
    flags: MremapFlags,
) -> io::Result<*mut c_void> {
    ret_void_star(syscall!(
        __NR_mremap,
        old_address,
        pass_usize(old_size),
        pass_usize(new_size),
        flags
    ))
}
155
/// Make a `mremap` syscall with `MREMAP_FIXED`, resizing the mapping at
/// `old_address` and placing the result at `new_address`.
///
/// `MREMAP_FIXED` is OR'd into the caller's `flags`, which enables the
/// kernel's fifth (`new_address`) argument.
///
/// # Safety
///
/// `mremap_fixed` is primarily unsafe due to the `old_address` and
/// `new_address` parameters, as anything working with memory pointed to by raw
/// pointers is unsafe.
#[inline]
pub(crate) unsafe fn mremap_fixed(
    old_address: *mut c_void,
    old_size: usize,
    new_size: usize,
    flags: MremapFlags,
    new_address: *mut c_void,
) -> io::Result<*mut c_void> {
    ret_void_star(syscall!(
        __NR_mremap,
        old_address,
        pass_usize(old_size),
        pass_usize(new_size),
        c_uint(flags.bits() | MREMAP_FIXED),
        new_address
    ))
}
178
/// Make a `mlock` syscall, locking the `length` bytes starting at `addr`
/// into RAM.
///
/// # Safety
///
/// `mlock` operates on raw pointers and may round out to the nearest page
/// boundaries.
#[inline]
pub(crate) unsafe fn mlock(addr: *mut c_void, length: usize) -> io::Result<()> {
    ret(syscall!(__NR_mlock, addr, pass_usize(length)))
}
187
/// Make a `mlock2` syscall, locking the `length` bytes starting at `addr`
/// into RAM, with extra `flags` (the flag-taking variant of `mlock`).
///
/// # Safety
///
/// `mlock_with` operates on raw pointers and may round out to the nearest page
/// boundaries.
#[inline]
pub(crate) unsafe fn mlock_with(
    addr: *mut c_void,
    length: usize,
    flags: MlockFlags,
) -> io::Result<()> {
    ret(syscall!(__NR_mlock2, addr, pass_usize(length), flags))
}
200
/// Make a `munlock` syscall, unlocking the `length` bytes starting at `addr`
/// so they may be paged out again.
///
/// # Safety
///
/// `munlock` operates on raw pointers and may round out to the nearest page
/// boundaries.
#[inline]
pub(crate) unsafe fn munlock(addr: *mut c_void, length: usize) -> io::Result<()> {
    ret(syscall!(__NR_munlock, addr, pass_usize(length)))
}
209
/// Make a `userfaultfd` syscall, creating a new userfaultfd object for
/// handling page faults in user space, and return it as an owned fd.
///
/// # Safety
///
/// NOTE(review): marked `unsafe` presumably because the returned fd can be
/// used to observe and manipulate the process's memory, which can undermine
/// memory safety — confirm against the public `rustix::mm::userfaultfd` docs.
#[inline]
pub(crate) unsafe fn userfaultfd(flags: UserfaultfdFlags) -> io::Result<OwnedFd> {
    ret_owned_fd(syscall_readonly!(__NR_userfaultfd, flags))
}
214
/// Locks all pages mapped into the address space of the calling process.
///
/// This includes the pages of the code, data, and stack segment, as well as
/// shared libraries, user space kernel data, shared memory, and memory-mapped
/// files. All mapped pages are guaranteed to be resident in RAM when the call
/// returns successfully; the pages are guaranteed to stay in RAM until later
/// unlocked.
#[inline]
pub(crate) fn mlockall(flags: MlockAllFlags) -> io::Result<()> {
    // When `mlockall` is used with `MCL_ONFAULT | MCL_FUTURE`, the ordering
    // of `mlockall` with respect to arbitrary loads may be significant,
    // because if a load happens and evokes a fault before the `mlockall`,
    // the memory doesn't get locked, but if the load and therefore
    // the fault happens after, then the memory does get locked.
    //
    // So to be conservative in this regard, we use `syscall` instead of
    // `syscall_readonly`, which acts as a compiler barrier for memory
    // accesses.
    unsafe { ret(syscall!(__NR_mlockall, flags)) }
}
234
/// Unlocks all pages mapped into the address space of the calling process.
///
/// Takes no arguments; `syscall_readonly` is fine here since unlocking
/// doesn't have the load-ordering concern described on `mlockall`.
#[inline]
pub(crate) fn munlockall() -> io::Result<()> {
    unsafe { ret(syscall_readonly!(__NR_munlockall)) }
}