use super::sys::DecommitBehavior;
use crate::Engine;
use crate::prelude::*;
use crate::runtime::vm::sys::vm::{self, MemoryImageSource, PageMap, reset_with_pagemap};
use crate::runtime::vm::{
    HostAlignedByteCount, MmapOffset, ModuleMemoryImageSource, host_page_size,
};
use alloc::sync::Arc;
use core::fmt;
use core::ops::Range;
use wasmtime_environ::{DefinedMemoryIndex, MemoryInitialization, Module, PrimaryMap, Tunables};

/// Backing images for the defined memories of a module, used to initialize
/// linear memories via copy-on-write mapping.
pub struct ModuleMemoryImages {
    memories: PrimaryMap<DefinedMemoryIndex, Option<Arc<MemoryImage>>>,
}

impl ModuleMemoryImages {
    /// Returns the backing image for the given defined memory, if one exists.
    pub fn get_memory_image(&self, defined_index: DefinedMemoryIndex) -> Option<&Arc<MemoryImage>> {
        self.memories[defined_index].as_ref()
    }
}

/// One backing image for one linear memory.
pub struct MemoryImage {
    /// The platform-specific source of this image's bytes, e.g. a file
    /// descriptor that can be mapped copy-on-write.
    source: MemoryImageSource,

    /// The length of this image, in bytes. Always a multiple of the host
    /// page size.
    len: HostAlignedByteCount,

    /// The offset within `source` at which this image's bytes start. Zero
    /// for anonymous sources; page-aligned for file-backed sources.
    source_offset: u64,

    /// The page-aligned offset within the linear memory at which this image
    /// is mapped.
    linear_memory_offset: HostAlignedByteCount,

    /// The module data this image was created from, used to restore the
    /// image's contents with plain writes instead of a fresh mapping.
    module_source: Arc<dyn ModuleMemoryImageSource>,

    /// The offset of this image's bytes within `module_source.wasm_data()`.
    module_source_offset: usize,
}

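// At the OS level, the copy-on-write technique `MemoryImageSource` wraps
// works roughly like the sketch below on Linux (illustrative only; `fd`,
// `slot_addr`, and `image_len` are placeholder names, and the authoritative
// logic lives in the platform-specific `sys` modules):
//
//     // Once per module: stash the image bytes in an in-memory file.
//     let fd = unsafe { libc::memfd_create(c"image".as_ptr(), 0) };
//     // ... write the image bytes to `fd` ...
//
//     // Once per instantiation: map the file privately into the slot.
//     // Reads are served from the shared page cache; the first write to a
//     // page forks a private copy, leaving the image itself untouched.
//     unsafe {
//         libc::mmap(
//             slot_addr,
//             image_len,
//             libc::PROT_READ | libc::PROT_WRITE,
//             libc::MAP_PRIVATE | libc::MAP_FIXED,
//             fd,
//             0,
//         );
//     }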
impl MemoryImage {
    fn new(
        engine: &Engine,
        page_size: u32,
        linear_memory_offset: HostAlignedByteCount,
        module_source: &Arc<impl ModuleMemoryImageSource>,
        data_range: Range<usize>,
    ) -> Result<Option<MemoryImage>> {
        let assert_page_aligned = |val: usize| {
            assert_eq!(val % (page_size as usize), 0);
        };
        let len =
            HostAlignedByteCount::new(data_range.len()).expect("memory image data is page-aligned");

        // If the module's data is already backed by an mmap'd file and the
        // image is page-aligned within it, the image can be mapped straight
        // from that file instead of being copied into anonymous memory.
        let data = &module_source.wasm_data()[data_range.clone()];
        if !engine.config().force_memory_init_memfd {
            if let Some(mmap) = module_source.mmap() {
                let start = mmap.as_ptr() as usize;
                let end = start + mmap.len();
                let data_start = data.as_ptr() as usize;
                let data_end = data_start + data.len();
                assert!(start <= data_start && data_end <= end);
                assert_page_aligned(start);
                assert_page_aligned(data_start);
                assert_page_aligned(data_end);

                #[cfg(feature = "std")]
                if let Some(file) = mmap.original_file() {
                    if let Some(source) = MemoryImageSource::from_file(file) {
                        return Ok(Some(MemoryImage {
                            source,
                            source_offset: u64::try_from(data_start - start).unwrap(),
                            linear_memory_offset,
                            len,
                            module_source: module_source.clone(),
                            module_source_offset: data_range.start,
                        }));
                    }
                }
            }
        }

        // Otherwise copy the image into platform-specific anonymous storage,
        // if the platform supports that.
        if let Some(source) = MemoryImageSource::from_data(data)? {
            return Ok(Some(MemoryImage {
                source,
                source_offset: 0,
                linear_memory_offset,
                len,
                module_source: module_source.clone(),
                module_source_offset: data_range.start,
            }));
        }

        Ok(None)
    }

    /// Maps this image into the given slot at its configured offset.
    unsafe fn map_at(&self, mmap_base: &MmapOffset) -> Result<()> {
        unsafe {
            mmap_base.map_image_at(
                &self.source,
                self.source_offset,
                self.linear_memory_offset,
                self.len,
            )
        }
    }

    /// Replaces this image's mapping with zeroed memory over the same range.
    unsafe fn remap_as_zeros_at(&self, base: *mut u8) -> Result<()> {
        unsafe {
            self.source.remap_as_zeros_at(
                base.add(self.linear_memory_offset.byte_count()),
                self.len.byte_count(),
            )?;
        }
        Ok(())
    }
}

impl ModuleMemoryImages {
    /// Attempts to build images for all defined memories of `module`.
    ///
    /// Returns `Ok(None)` when the module is not eligible for copy-on-write
    /// images, e.g. when memory initialization is not static, a memory is
    /// imported, or an initializer would not fit in a memory's minimum size.
    pub fn new(
        engine: &Engine,
        module: &Module,
        source: &Arc<impl ModuleMemoryImageSource>,
    ) -> Result<Option<ModuleMemoryImages>> {
        let map = match &module.memory_initialization {
            MemoryInitialization::Static { map } => map,
            _ => return Ok(None),
        };
        let mut memories = PrimaryMap::with_capacity(map.len());
        let page_size = crate::runtime::vm::host_page_size();
        let page_size = u32::try_from(page_size).unwrap();
        for (memory_index, init) in map {
            // Images are only built for defined memories, not imported ones.
            let defined_memory = match module.defined_memory_index(memory_index) {
                Some(idx) => idx,
                None => return Ok(None),
            };

            // A memory with no static initializer simply has no image.
            let init = match init {
                Some(init) => init,
                None => {
                    memories.push(None);
                    continue;
                }
            };

            // If the initializer would extend past the memory's minimum
            // size, instantiation would trap anyway, so build no image.
            let data_range = init.data.start as usize..init.data.end as usize;
            if module.memories[memory_index]
                .minimum_byte_size()
                .map_or(false, |mem_initial_len| {
                    init.offset + u64::try_from(data_range.len()).unwrap() > mem_initial_len
                })
            {
                return Ok(None);
            }

            let offset_usize = match usize::try_from(init.offset) {
                Ok(offset) => offset,
                Err(_) => return Ok(None),
            };
            let offset = HostAlignedByteCount::new(offset_usize)
                .expect("memory init offset is a multiple of the host page size");

            let image = match MemoryImage::new(engine, page_size, offset, source, data_range)? {
                Some(image) => image,
                None => return Ok(None),
            };

            let idx = memories.push(Some(Arc::new(image)));
            assert_eq!(idx, defined_memory);
        }

        Ok(Some(ModuleMemoryImages { memories }))
    }
}

/// A reusable slot of linear memory, tracking which image (if any) is
/// currently mapped into it and how much of it is accessible.
pub struct MemoryImageSlot {
    /// The base address of this slot within its backing mmap.
    base: MmapOffset,

    /// The maximum size, in bytes, that this slot can ever grow to.
    static_size: usize,

    /// The image currently mapped into this slot, if any.
    image: Option<Arc<MemoryImage>>,

    /// The number of bytes starting at `base` that are currently readable
    /// and writable. Always page-aligned.
    accessible: HostAlignedByteCount,

    /// Whether this slot may have been modified since its last reset and
    /// must therefore be cleared before it can be reused.
    dirty: bool,
}
337
338impl fmt::Debug for MemoryImageSlot {
339 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
340 f.debug_struct("MemoryImageSlot")
341 .field("base", &self.base)
342 .field("static_size", &self.static_size)
343 .field("accessible", &self.accessible)
344 .field("dirty", &self.dirty)
345 .finish_non_exhaustive()
346 }
347}

impl MemoryImageSlot {
    /// Creates a slot at `base` with `accessible` bytes already mapped
    /// read/write, out of a maximum of `static_size` bytes.
    pub(crate) fn create(
        base: MmapOffset,
        accessible: HostAlignedByteCount,
        static_size: usize,
    ) -> Self {
        MemoryImageSlot {
            base,
            static_size,
            accessible,
            image: None,
            dirty: false,
        }
    }

    /// Grows the accessible portion of this slot to at least `size_bytes`.
    pub(crate) fn set_heap_limit(&mut self, size_bytes: usize) -> Result<()> {
        let size_bytes_aligned = HostAlignedByteCount::new_rounded_up(size_bytes)?;
        assert!(size_bytes <= self.static_size);
        assert!(size_bytes_aligned.byte_count() <= self.static_size);

        // Nothing to do if the slot is already large enough.
        if size_bytes_aligned <= self.accessible {
            return Ok(());
        }

        self.set_protection(self.accessible..size_bytes_aligned, true)?;
        self.accessible = size_bytes_aligned;

        Ok(())
    }

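    /// Prepares this slot for a new instantiation: maps `maybe_image` if it
    /// differs from the currently-mapped image and adjusts the accessible
    /// range to cover `initial_size_bytes`.
    ///
    /// An illustrative lifecycle, distilled from this module's own tests
    /// (the `decommit_pages` callback is one possible choice for the
    /// decommit hook, not the only one):
    ///
    /// ```ignore
    /// let mut slot =
    ///     MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
    /// slot.instantiate(64 << 10, Some(&image), &ty, &tunables)?;
    /// // ... guest execution dirties some pages ...
    /// slot.clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
    ///     decommit_pages(ptr, len).unwrap()
    /// })?;
    /// // Re-instantiating with the same image reuses the existing mapping.
    /// slot.instantiate(64 << 10, Some(&image), &ty, &tunables)?;
    /// ```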
    pub(crate) fn instantiate(
        &mut self,
        initial_size_bytes: usize,
        maybe_image: Option<&Arc<MemoryImage>>,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
    ) -> Result<()> {
        assert!(!self.dirty);
        assert!(
            initial_size_bytes <= self.static_size,
            "initial_size_bytes <= self.static_size failed: \
             initial_size_bytes={initial_size_bytes}, self.static_size={}",
            self.static_size
        );
        let initial_size_bytes_page_aligned =
            HostAlignedByteCount::new_rounded_up(initial_size_bytes)?;

        // If a different image is currently mapped, unmap it first; if the
        // requested image is the one already in place (or neither side has
        // an image), the existing mapping is reused as-is.
        let images_equal = match (self.image.as_ref(), maybe_image) {
            (Some(a), Some(b)) if Arc::ptr_eq(a, b) => true,
            (None, None) => true,
            _ => false,
        };
        if !images_equal {
            self.remove_image()?;
        }

        // Grow the accessible region if it is currently too small.
        if self.accessible < initial_size_bytes_page_aligned {
            self.set_protection(self.accessible..initial_size_bytes_page_aligned, true)?;
            self.accessible = initial_size_bytes_page_aligned;
        }

        // Shrink the accessible region when it is larger than needed, but
        // only when out-of-bounds accesses rely on page faults to be caught
        // (a guard region is configured or bounds checks are elided);
        // otherwise leaving extra pages accessible is benign.
        let host_page_size_log2 = u8::try_from(host_page_size().ilog2()).unwrap();
        if initial_size_bytes_page_aligned < self.accessible
            && (tunables.memory_guard_size > 0
                || ty.can_elide_bounds_check(tunables, host_page_size_log2))
        {
            self.set_protection(initial_size_bytes_page_aligned..self.accessible, false)?;
            self.accessible = initial_size_bytes_page_aligned;
        }

        // The image, if any, must fit entirely within the initial size.
        assert!(initial_size_bytes <= self.accessible.byte_count());
        assert!(initial_size_bytes_page_aligned <= self.accessible);
        if !images_equal {
            if let Some(image) = maybe_image.as_ref() {
                assert!(
                    image
                        .linear_memory_offset
                        .checked_add(image.len)
                        .unwrap()
                        .byte_count()
                        <= initial_size_bytes
                );
                if !image.len.is_zero() {
                    unsafe {
                        image.map_at(&self.base)?;
                    }
                }
            }
            self.image = maybe_image.cloned();
        }

        // Mark the slot dirty: it must be cleared before the next reuse.
        self.dirty = true;

        Ok(())
    }

    /// Unmaps the current image, if any, replacing it with zeroed memory.
    pub(crate) fn remove_image(&mut self) -> Result<()> {
        if let Some(image) = &self.image {
            unsafe {
                image.remap_as_zeros_at(self.base.as_mut_ptr())?;
            }
            self.image = None;
        }
        Ok(())
    }

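    /// Resets this slot's contents to their initial state and marks the slot
    /// clean again, leaving it mapped and ready for the next instantiation.
    ///
    /// Up to `keep_resident` bytes are restored with plain writes, keeping
    /// those pages resident; the remainder is handed to the `decommit`
    /// callback. An illustrative call, mirroring this module's tests:
    ///
    /// ```ignore
    /// let pagemap = PageMap::new();
    /// slot.clear_and_remain_ready(pagemap.as_ref(), keep_resident, |ptr, len| unsafe {
    ///     decommit_pages(ptr, len).unwrap()
    /// })?;
    /// ```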
    #[allow(dead_code, reason = "only used in some cfgs")]
    pub(crate) fn clear_and_remain_ready(
        &mut self,
        pagemap: Option<&PageMap>,
        keep_resident: HostAlignedByteCount,
        decommit: impl FnMut(*mut u8, usize),
    ) -> Result<()> {
        assert!(self.dirty);

        unsafe {
            self.reset_all_memory_contents(pagemap, keep_resident, decommit)?;
        }

        self.dirty = false;
        Ok(())
    }

    #[allow(dead_code, reason = "only used in some cfgs")]
    unsafe fn reset_all_memory_contents(
        &mut self,
        pagemap: Option<&PageMap>,
        keep_resident: HostAlignedByteCount,
        decommit: impl FnMut(*mut u8, usize),
    ) -> Result<()> {
        match vm::decommit_behavior() {
            DecommitBehavior::Zero => {
                // If decommitted pages come back as zeroes, there is no
                // restore-the-image fast path: replace the whole slot with
                // fresh anonymous memory and let the image be remapped on
                // the next instantiation.
                self.reset_with_anon_memory()
            }
            DecommitBehavior::RestoreOriginalMapping => {
                unsafe {
                    self.reset_with_original_mapping(pagemap, keep_resident, decommit);
                }
                Ok(())
            }
        }
    }

    #[allow(dead_code, reason = "only used in some cfgs")]
    unsafe fn reset_with_original_mapping(
        &mut self,
        pagemap: Option<&PageMap>,
        keep_resident: HostAlignedByteCount,
        decommit: impl FnMut(*mut u8, usize),
    ) {
        assert_eq!(
            vm::decommit_behavior(),
            DecommitBehavior::RestoreOriginalMapping
        );

        unsafe {
            match &self.image {
                // With an image mapped, resident regions are restored by
                // zeroing around the image and copying the image's bytes
                // back over it.
                Some(image) => {
                    reset_with_pagemap(
                        pagemap,
                        self.base.as_mut_ptr(),
                        self.accessible,
                        keep_resident,
                        |region| {
                            manually_reset_region(self.base.as_mut_ptr().addr(), image, region)
                        },
                        decommit,
                    );
                }

                // With no image, resident regions are simply zeroed.
                None => reset_with_pagemap(
                    pagemap,
                    self.base.as_mut_ptr(),
                    self.accessible,
                    keep_resident,
                    |region| region.fill(0),
                    decommit,
                ),
            }
        }

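        // Restores one `region` (a subslice of this slot handed out by
        // `reset_with_pagemap`) to its initial contents using plain writes:
        // bytes before the image are zeroed, bytes inside the image are
        // copied back from the module's data, and bytes after the image are
        // zeroed.
        //
        // Worked example (hypothetical numbers): with 4 KiB pages, an image
        // at linear offset 4096 of length 8192, and a region covering bytes
        // 0..16384 of the memory, this zeroes 0..4096, copies the image
        // bytes over 4096..12288, and zeroes 12288..16384.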
        fn manually_reset_region(base_addr: usize, image: &MemoryImage, mut region: &mut [u8]) {
            let image_start = image.linear_memory_offset.byte_count();
            let image_end = image_start + image.len.byte_count();
            let mut region_start = region.as_ptr().addr() - base_addr;
            let region_end = region_start + region.len();
            let image_bytes = image.module_source.wasm_data();
            let image_bytes = &image_bytes[image.module_source_offset..][..image.len.byte_count()];

            // Zero the part of `region`, if any, that lies before the image.
            if let Some(len_before_image) = image_start.checked_sub(region_start) {
                let len = len_before_image.min(region.len());
                let (a, b) = region.split_at_mut(len);
                a.fill(0);
                region = b;
                region_start += len;

                if region.is_empty() {
                    return;
                }
            }

            debug_assert_eq!(region_end - region_start, region.len());
            debug_assert!(region_start >= image_start);

            // Copy the original image bytes over the part of `region`, if
            // any, that overlaps the image.
            if let Some(len_in_image) = image_end.checked_sub(region_start) {
                let len = len_in_image.min(region.len());
                let (a, b) = region.split_at_mut(len);
                a.copy_from_slice(&image_bytes[region_start - image_start..][..len]);
                region = b;
                region_start += len;

                if region.is_empty() {
                    return;
                }
            }

            debug_assert_eq!(region_end - region_start, region.len());
            debug_assert!(region_start >= image_end);

            // Anything left lies entirely after the image: zero it.
            region.fill(0);
        }
    }

    fn set_protection(&self, range: Range<HostAlignedByteCount>, readwrite: bool) -> Result<()> {
        let len = range
            .end
            .checked_sub(range.start)
            .expect("range.start <= range.end");
        assert!(range.end.byte_count() <= self.static_size);
        if len.is_zero() {
            return Ok(());
        }

        // SAFETY: the range was validated above to lie entirely within this
        // slot's reservation.
        unsafe {
            let start = self.base.as_mut_ptr().add(range.start.byte_count());
            if readwrite {
                vm::expose_existing_mapping(start, len.byte_count())?;
            } else {
                vm::hide_existing_mapping(start, len.byte_count())?;
            }
        }

        Ok(())
    }
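
    // On POSIX-like hosts, `expose_existing_mapping` and
    // `hide_existing_mapping` above typically boil down to `mprotect` calls
    // along these lines (an assumption about the common case; the
    // authoritative definitions live in the platform's `sys::vm` module):
    //
    //     libc::mprotect(start.cast(), len, libc::PROT_READ | libc::PROT_WRITE);
    //     libc::mprotect(start.cast(), len, libc::PROT_NONE);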

    pub(crate) fn has_image(&self) -> bool {
        self.image.is_some()
    }

    #[allow(dead_code, reason = "only used in some cfgs")]
    pub(crate) fn is_dirty(&self) -> bool {
        self.dirty
    }

    /// Resets this slot by replacing its entire reservation with fresh
    /// anonymous memory, dropping any mapped image.
    pub(crate) fn reset_with_anon_memory(&mut self) -> Result<()> {
        if self.static_size == 0 {
            assert!(self.image.is_none());
            assert_eq!(self.accessible, 0);
            return Ok(());
        }

        unsafe {
            vm::erase_existing_mapping(self.base.as_mut_ptr(), self.static_size)?;
        }

        self.image = None;
        self.accessible = HostAlignedByteCount::ZERO;

        Ok(())
    }
}

#[cfg(all(test, target_os = "linux", not(miri)))]
mod test {
    use super::*;
    use crate::runtime::vm::mmap::{AlignedLength, Mmap};
    use crate::runtime::vm::sys::vm::decommit_pages;
    use crate::runtime::vm::{HostAlignedByteCount, MmapVec, host_page_size};
    use std::sync::Arc;
    use wasmtime_environ::{IndexType, Limits, Memory};

    fn create_memfd_with_data(offset: usize, data: &[u8]) -> Result<MemoryImage> {
        // The image must sit at a page-aligned offset, and its length is
        // rounded up to the host page size.
        let linear_memory_offset =
            HostAlignedByteCount::new(offset).expect("offset is page-aligned");
        let image_len = HostAlignedByteCount::new_rounded_up(data.len()).unwrap();

        // Pad the test source's data out to the aligned image length.
        let mut source = TestDataSource {
            data: vec![0; image_len.byte_count()],
        };
        source.data[..data.len()].copy_from_slice(data);

        return Ok(MemoryImage {
            source: MemoryImageSource::from_data(data)?.unwrap(),
            len: image_len,
            source_offset: 0,
            linear_memory_offset,
            module_source: Arc::new(source),
            module_source_offset: 0,
        });

        struct TestDataSource {
            data: Vec<u8>,
        }

        impl ModuleMemoryImageSource for TestDataSource {
            fn wasm_data(&self) -> &[u8] {
                &self.data
            }
            fn mmap(&self) -> Option<&MmapVec> {
                None
            }
        }
    }

    fn dummy_memory() -> Memory {
        Memory {
            idx_type: IndexType::I32,
            limits: Limits { min: 0, max: None },
            shared: false,
            page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2,
        }
    }

    fn mmap_4mib_inaccessible() -> Arc<Mmap<AlignedLength>> {
        let four_mib = HostAlignedByteCount::new(4 << 20).expect("4 MiB is page aligned");
        Arc::new(Mmap::accessible_reserved(HostAlignedByteCount::ZERO, four_mib).unwrap())
    }

    /// Runs `f` over the given byte range of `mmap` as a mutable slice. The
    /// `'static` bound on `f` prevents it from capturing other borrows of
    /// the same memory, which would alias the mutable slice.
    unsafe fn with_slice_mut(
        mmap: &Arc<Mmap<AlignedLength>>,
        range: Range<usize>,
        f: impl FnOnce(&mut [u8]) + 'static,
    ) {
        let ptr = mmap.as_ptr().cast_mut();
        let slice = unsafe {
            core::slice::from_raw_parts_mut(ptr.add(range.start), range.end - range.start)
        };
        f(slice);
    }

    #[test]
    fn instantiate_no_image() {
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 4 << 30,
            ..Tunables::default_miri()
        };
        // 4 MiB of reserved but inaccessible memory, with a slot on top.
        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
        assert!(!memfd.is_dirty());
        // Instantiate with a 64 KiB initial size.
        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        assert!(memfd.is_dirty());

        // The first 64 KiB should be accessible and zeroed.
        unsafe {
            with_slice_mut(&mmap, 0..65536, |slice| {
                assert_eq!(0, slice[0]);
                assert_eq!(0, slice[65535]);
                slice[1024] = 42;
                assert_eq!(42, slice[1024]);
            });
        }

        // Grow the heap: earlier writes persist and the extension is zeroed.
        memfd.set_heap_limit(128 << 10).unwrap();
        let slice = unsafe { mmap.slice(0..1 << 20) };
        assert_eq!(42, slice[1024]);
        assert_eq!(0, slice[131071]);
        // Clear the slot and re-instantiate: previous writes must be gone.
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        assert!(!memfd.is_dirty());
        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(0, slice[1024]);
    }

    #[test]
    fn instantiate_image() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 4 << 30,
            ..Tunables::default_miri()
        };
        // 4 MiB of reserved but inaccessible memory, with a slot on top.
        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
        // Create an image with some data and instantiate with it.
        let image = Arc::new(create_memfd_with_data(page_size, &[1, 2, 3, 4]).unwrap());
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        assert!(memfd.has_image());

        unsafe {
            with_slice_mut(&mmap, 0..65536, move |slice| {
                assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
                slice[page_size] = 5;
            });
        }

        // Clear and re-instantiate with the same image: the original data
        // should be back, undoing the write above.
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        // Clear and re-instantiate without an image: everything is zeroed.
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        assert!(!memfd.has_image());
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[0, 0, 0, 0], &slice[page_size..][..4]);

        // Clear and re-instantiate with the image again.
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        // Switch to a different image with different data.
        let image2 = Arc::new(create_memfd_with_data(page_size, &[10, 11, 12, 13]).unwrap());
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(128 << 10, Some(&image2), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[10, 11, 12, 13], &slice[page_size..][..4]);

        // Switch back to the original image; its data should be visible, not
        // image2's.
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn memset_instead_of_madvise() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 100 << 16,
            ..Tunables::default_miri()
        };
        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);

        // Test the basics with the image at various offsets and various
        // amounts of the slot kept resident across resets.
        for image_off in [0, page_size, page_size * 2] {
            let image = Arc::new(create_memfd_with_data(image_off, &[1, 2, 3, 4]).unwrap());
            for amt_to_memset in [0, page_size, page_size * 10, 1 << 20, 10 << 20] {
                let amt_to_memset = HostAlignedByteCount::new(amt_to_memset).unwrap();
                memfd
                    .instantiate(64 << 10, Some(&image), &ty, &tunables)
                    .unwrap();
                assert!(memfd.has_image());

                unsafe {
                    with_slice_mut(&mmap, 0..64 << 10, move |slice| {
                        if image_off > 0 {
                            assert_eq!(slice[image_off - 1], 0);
                        }
                        assert_eq!(slice[image_off + 5], 0);
                        assert_eq!(&[1, 2, 3, 4], &slice[image_off..][..4]);
                        slice[image_off] = 5;
                        assert_eq!(&[5, 2, 3, 4], &slice[image_off..][..4]);
                    })
                };

                memfd
                    .clear_and_remain_ready(None, amt_to_memset, |ptr, len| unsafe {
                        decommit_pages(ptr, len).unwrap()
                    })
                    .unwrap();
            }
        }

        // Test without an image, with the same range of resident amounts.
        for amt_to_memset in [0, page_size, page_size * 10, 1 << 20, 10 << 20] {
            let amt_to_memset = HostAlignedByteCount::new(amt_to_memset).unwrap();
            memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();

            unsafe {
                with_slice_mut(&mmap, 0..64 << 10, |slice| {
                    for chunk in slice.chunks_mut(1024) {
                        assert_eq!(chunk[0], 0);
                        chunk[0] = 5;
                    }
                });
            }
            memfd
                .clear_and_remain_ready(None, amt_to_memset, |ptr, len| unsafe {
                    decommit_pages(ptr, len).unwrap()
                })
                .unwrap();
        }
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn dynamic() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 0,
            memory_reservation_for_growth: 200,
            ..Tunables::default_miri()
        };

        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
        let image = Arc::new(create_memfd_with_data(page_size, &[1, 2, 3, 4]).unwrap());
        let initial = 64 << 10;

        // Instantiate the image and check that its data is visible.
        memfd
            .instantiate(initial, Some(&image), &ty, &tunables)
            .unwrap();
        assert!(memfd.has_image());

        unsafe {
            with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
                assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
                slice[page_size] = 5;
                assert_eq!(&[5, 2, 3, 4], &slice[page_size..][..4]);
            });
        }

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        let slice = unsafe { mmap.slice(0..(64 << 10) + page_size) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        // Re-instantiate, then grow the heap and verify the grown region is
        // zeroed and writable.
        memfd
            .instantiate(initial, Some(&image), &ty, &tunables)
            .unwrap();
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        memfd.set_heap_limit(initial * 2).unwrap();

        unsafe {
            with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
                assert_eq!(&[0, 0], &slice[initial..initial + 2]);
                slice[initial] = 100;
                assert_eq!(&[100, 0], &slice[initial..initial + 2]);
            });
        }

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();

        // After the reset, the previously grown region reads as zeroes.
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);

        // Instantiate and grow again: the grown region must once more start
        // out zeroed.
        memfd
            .instantiate(initial, Some(&image), &ty, &tunables)
            .unwrap();
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
        memfd.set_heap_limit(initial * 2).unwrap();

        unsafe {
            with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
                assert_eq!(&[0, 0], &slice[initial..initial + 2]);
                slice[initial] = 100;
                assert_eq!(&[100, 0], &slice[initial..initial + 2]);
            });
        }

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();

        // Instantiate without an image: everything reads as zeroes.
        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        assert!(!memfd.has_image());
        assert_eq!(&[0, 0, 0, 0], &slice[page_size..][..4]);
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
    }

    #[test]
    fn reset_with_pagemap() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 100 << 16,
            ..Tunables::default_miri()
        };
        let mmap = mmap_4mib_inaccessible();
        let mmap_len = page_size * 9;
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, mmap_len);
        let pagemap = PageMap::new();
        let pagemap = pagemap.as_ref();

        // Build a 3-page image mapped 3 pages into the slot, where page `i`
        // of the image is filled with the byte value `i + 1`.
        let mut data = vec![0; 3 * page_size];
        for (i, chunk) in data.chunks_mut(page_size).enumerate() {
            for slot in chunk {
                *slot = u8::try_from(i + 1).unwrap();
            }
        }
        let image = Arc::new(create_memfd_with_data(3 * page_size, &data).unwrap());

        memfd
            .instantiate(mmap_len, Some(&image), &ty, &tunables)
            .unwrap();

        let keep_resident = HostAlignedByteCount::new(mmap_len).unwrap();
        let assert_pristine_after_reset = |memfd: &mut MemoryImageSlot| unsafe {
            // Reset the slot while keeping everything resident.
            memfd
                .clear_and_remain_ready(pagemap, keep_resident, |ptr, len| {
                    decommit_pages(ptr, len).unwrap()
                })
                .unwrap();

            // Pages 0..3 and 6..9 should be zero; pages 3..6 should hold the
            // image bytes again.
            with_slice_mut(&mmap, 0..mmap_len, move |slice| {
                for (i, chunk) in slice.chunks(page_size).enumerate() {
                    let expected = match i {
                        0..3 => 0,
                        3..6 => u8::try_from(i).unwrap() - 2,
                        6..9 => 0,
                        _ => unreachable!(),
                    };
                    for slot in chunk {
                        assert_eq!(*slot, expected);
                    }
                }
            });

            // Cycle the slot through a full decommit so the next check
            // starts from a freshly instantiated state.
            memfd
                .instantiate(mmap_len, Some(&image), &ty, &tunables)
                .unwrap();
            memfd
                .clear_and_remain_ready(pagemap, HostAlignedByteCount::ZERO, |ptr, len| {
                    decommit_pages(ptr, len).unwrap()
                })
                .unwrap();

            memfd
                .instantiate(mmap_len, Some(&image), &ty, &tunables)
                .unwrap();
        };

        let write_page = |_memfd: &mut MemoryImageSlot, page: usize| unsafe {
            with_slice_mut(
                &mmap,
                page * page_size..(page + 1) * page_size,
                move |slice| slice.fill(0xff),
            );
        };

        // Dirty various combinations of pages before, inside, and after the
        // image, asserting the slot is pristine after each reset.
        assert_pristine_after_reset(&mut memfd);

        for i in 0..9 {
            write_page(&mut memfd, i);
            assert_pristine_after_reset(&mut memfd);
        }
        write_page(&mut memfd, 0);
        write_page(&mut memfd, 1);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 1);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 2);
        write_page(&mut memfd, 3);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 3);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 0);
        write_page(&mut memfd, 1);
        write_page(&mut memfd, 2);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 0);
        write_page(&mut memfd, 3);
        write_page(&mut memfd, 6);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 2);
        write_page(&mut memfd, 3);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        write_page(&mut memfd, 6);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        write_page(&mut memfd, 6);
        write_page(&mut memfd, 7);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        write_page(&mut memfd, 8);
        assert_pristine_after_reset(&mut memfd);
    }
}