wasmtime/runtime/vm/instance/allocator.rs

use crate::prelude::*;
use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
use crate::runtime::vm::imports::Imports;
use crate::runtime::vm::instance::{Instance, InstanceHandle};
use crate::runtime::vm::memory::Memory;
use crate::runtime::vm::mpk::ProtectionKey;
use crate::runtime::vm::table::Table;
use crate::runtime::vm::{CompiledModuleId, ModuleRuntimeInfo};
use crate::store::{InstanceId, StoreOpaque, StoreResourceLimiter};
use crate::{OpaqueRootScope, Val};
use core::{mem, ptr};
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization,
    MemoryInitializer, Module, PrimaryMap, SizeOverflow, TableInitialValue, Trap, VMOffsets,
};

#[cfg(feature = "gc")]
use crate::runtime::vm::{GcHeap, GcRuntime};

#[cfg(feature = "component-model")]
use wasmtime_environ::{
    StaticModuleIndex,
    component::{Component, VMComponentOffsets},
};

mod on_demand;
pub use self::on_demand::OnDemandInstanceAllocator;

#[cfg(feature = "pooling-allocator")]
mod pooling;
#[cfg(feature = "pooling-allocator")]
pub use self::pooling::{
    InstanceLimits, PoolConcurrencyLimitError, PoolingAllocatorMetrics, PoolingInstanceAllocator,
    PoolingInstanceAllocatorConfig,
};
/// Represents a request for a new runtime instance.
pub struct InstanceAllocationRequest<'a, 'b> {
    /// The instance id that this will be assigned within the store once the
    /// allocation has finished.
    pub id: InstanceId,

    /// The info related to the compiled version of this module, needed for
    /// instantiation: function metadata, JIT code addresses, precomputed
    /// images for lazy memory and table initialization, and the like. This
    /// `Arc` is cloned and held for the lifetime of the instance.
    pub runtime_info: &'a ModuleRuntimeInfo,

    /// The imports to use for the instantiation.
    pub imports: Imports<'a>,

    /// The store that this instance is being allocated into.
    pub store: &'a StoreOpaque,

    /// The store's resource limiter, if configured by the embedder.
    pub limiter: Option<&'a mut StoreResourceLimiter<'b>>,
}

/// The index of a memory allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct MemoryAllocationIndex(u32);

impl Default for MemoryAllocationIndex {
    fn default() -> Self {
        // A default `MemoryAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        MemoryAllocationIndex(u32::MAX)
    }
}

impl MemoryAllocationIndex {
    /// Get the underlying index of this `MemoryAllocationIndex`.
    #[cfg(feature = "pooling-allocator")]
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// The index of a table allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct TableAllocationIndex(u32);

impl Default for TableAllocationIndex {
    fn default() -> Self {
        // A default `TableAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        TableAllocationIndex(u32::MAX)
    }
}

impl TableAllocationIndex {
    /// Get the underlying index of this `TableAllocationIndex`.
    #[cfg(feature = "pooling-allocator")]
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// The index of a GC heap allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct GcHeapAllocationIndex(u32);

impl Default for GcHeapAllocationIndex {
    fn default() -> Self {
        // A default `GcHeapAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        GcHeapAllocationIndex(u32::MAX)
    }
}

impl GcHeapAllocationIndex {
    /// Get the underlying index of this `GcHeapAllocationIndex`.
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// Trait that represents the hooks needed to implement an instance allocator.
///
/// Implement this trait when writing a new instance allocator. Consumers
/// should not invoke these hooks directly, however; instead use the helper
/// methods defined on `dyn InstanceAllocator` below, such as
/// `allocate_module`, which compose these hooks correctly.
///
/// # Safety
///
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime
/// internals to implement correctly.
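///
/// # Example
///
/// A minimal sketch of how an allocator is driven through those helper
/// methods (marked `ignore` since it relies on crate-internal types; the
/// construction of `request` is elided and assumed valid, and
/// `OnDemandInstanceAllocator::default()` is assumed available here):
///
/// ```ignore
/// let allocator: &dyn InstanceAllocator = &OnDemandInstanceAllocator::default();
/// // SAFETY: `request` carries correctly sized/typed imports.
/// let mut handle = unsafe { allocator.allocate_module(request).await? };
/// // ... `.initialize(..)` and use the instance, then reclaim its resources:
/// unsafe { allocator.deallocate_module(&mut handle) };
/// ```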
#[async_trait::async_trait]
pub unsafe trait InstanceAllocator: Send + Sync {
    /// Validate whether a component (including all of its contained core
    /// modules) is allocatable by this instance allocator.
    #[cfg(feature = "component-model")]
    fn validate_component<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()>;

    /// Validate whether a module is allocatable by this instance allocator.
    fn validate_module(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()>;

    /// Validate whether a memory is allocatable by this instance allocator.
    #[cfg(feature = "gc")]
    fn validate_memory(&self, memory: &wasmtime_environ::Memory) -> Result<()>;

    /// Increment the count of concurrent component instances that are currently
    /// allocated, if applicable.
    ///
    /// Not all instance allocators will have limits for the maximum number of
    /// concurrent component instances that can be live at the same time, and
    /// these allocators may implement this method with a no-op.
    //
    // Note: It would be nice to have an associated type that on construction
    // does the increment and on drop does the decrement but there are two
    // problems with this:
    //
    // 1. This trait's implementations are always used as trait objects, and
    //    associated types are not object safe.
    //
    // 2. We would want a parameterized `Drop` implementation so that we could
    //    pass in the `InstanceAllocator` on drop, but this doesn't exist in
    //    Rust. Therefore, we would be forced to add reference counting and
    //    stuff like that to keep a handle on the instance allocator from this
    //    theoretical type. That's a bummer.
    #[cfg(feature = "component-model")]
    fn increment_component_instance_count(&self) -> Result<()>;

    /// The dual of `increment_component_instance_count`.
    #[cfg(feature = "component-model")]
    fn decrement_component_instance_count(&self);

    /// Increment the count of concurrent core module instances that are
    /// currently allocated, if applicable.
    ///
    /// Not all instance allocators will have limits for the maximum number of
    /// concurrent core module instances that can be live at the same time, and
    /// these allocators may implement this method with a no-op.
    fn increment_core_instance_count(&self) -> Result<()>;

    /// The dual of `increment_core_instance_count`.
    fn decrement_core_instance_count(&self);

    /// Allocate a memory for an instance.
    async fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest<'_, '_>,
        ty: &wasmtime_environ::Memory,
        memory_index: Option<DefinedMemoryIndex>,
    ) -> Result<(MemoryAllocationIndex, Memory)>;

    /// Deallocate an instance's previously allocated memory.
    ///
    /// # Safety
    ///
    /// The memory must have previously been allocated by
    /// `Self::allocate_memory`, be at the given index, and must currently be
    /// allocated. It must never be used again.
    unsafe fn deallocate_memory(
        &self,
        memory_index: Option<DefinedMemoryIndex>,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    );

    /// Allocate a table for an instance.
    async fn allocate_table(
        &self,
        req: &mut InstanceAllocationRequest<'_, '_>,
        table: &wasmtime_environ::Table,
        table_index: DefinedTableIndex,
    ) -> Result<(TableAllocationIndex, Table)>;

    /// Deallocate an instance's previously allocated table.
    ///
    /// # Safety
    ///
    /// The table must have previously been allocated by `Self::allocate_table`,
    /// be at the given index, and must currently be allocated. It must never be
    /// used again.
    unsafe fn deallocate_table(
        &self,
        table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        table: Table,
    );

    /// Allocates a fiber stack for calling async functions on.
    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack>;

    /// Deallocates a fiber stack that was previously allocated with
    /// `allocate_fiber_stack`.
    ///
    /// # Safety
    ///
    /// The provided stack is required to have been allocated with
    /// `allocate_fiber_stack`.
    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, stack: wasmtime_fiber::FiberStack);

    /// Allocate a GC heap for allocating Wasm GC objects within.
    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        engine: &crate::Engine,
        gc_runtime: &dyn GcRuntime,
        memory_alloc_index: MemoryAllocationIndex,
        memory: Memory,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)>;

    /// Deallocate a GC heap that was previously allocated with
    /// `allocate_gc_heap`.
    #[cfg(feature = "gc")]
    #[must_use = "it is the caller's responsibility to deallocate the GC heap's underlying memory \
                  storage after the GC heap is deallocated"]
    fn deallocate_gc_heap(
        &self,
        allocation_index: GcHeapAllocationIndex,
        gc_heap: Box<dyn GcHeap>,
    ) -> (MemoryAllocationIndex, Memory);

    /// Purges all lingering resources related to `module` from within this
    /// allocator.
    ///
    /// Primarily present for the pooling allocator to remove mappings of
    /// this module from slots in linear memory.
    fn purge_module(&self, module: CompiledModuleId);

    /// Use the next available protection key.
    ///
    /// The pooling allocator can use memory protection keys (MPK) for
    /// compressing the guard regions protecting against OOB. Each
    /// pool-allocated store needs its own key.
    fn next_available_pkey(&self) -> Option<ProtectionKey>;

    /// Restrict access to memory regions protected by `pkey`.
    ///
    /// This is useful for the pooling allocator, which can use memory
    /// protection keys (MPK). Note: this may still allow access to other
    /// protection keys, such as the default kernel key; see implementations of
    /// this.
    fn restrict_to_pkey(&self, pkey: ProtectionKey);

    /// Allow access to memory regions protected by any protection key.
    fn allow_all_pkeys(&self);

    /// Returns `Some(&PoolingInstanceAllocator)` if this is one.
    #[cfg(feature = "pooling-allocator")]
    fn as_pooling(&self) -> Option<&PoolingInstanceAllocator> {
        None
    }
}

impl dyn InstanceAllocator + '_ {
    /// Allocates a fresh `InstanceHandle` for the given `request`.
    ///
    /// This will allocate memories and tables internally from this allocator
    /// and weave them together into a final and complete `InstanceHandle`
    /// ready to be registered with a store.
    ///
    /// Note that the returned instance must still have `.initialize(..)` called
    /// on it to complete the instantiation process.
    ///
    /// # Safety
    ///
    /// The `request` provided must be valid, e.g. the imports within are
    /// correctly sized/typed for the instance being created.
    pub(crate) async unsafe fn allocate_module(
        &self,
        mut request: InstanceAllocationRequest<'_, '_>,
    ) -> Result<InstanceHandle> {
        let module = request.runtime_info.env_module();

        if cfg!(debug_assertions) {
            InstanceAllocator::validate_module(self, module, request.runtime_info.offsets())
                .expect("module should have already been validated before allocation");
        }

        self.increment_core_instance_count()?;

        let num_defined_memories = module.num_defined_memories();
        let num_defined_tables = module.num_defined_tables();

        let mut guard = DeallocateOnDrop {
            run_deallocate: true,
            memories: PrimaryMap::with_capacity(num_defined_memories),
            tables: PrimaryMap::with_capacity(num_defined_tables),
            allocator: self,
        };

        self.allocate_memories(&mut request, &mut guard.memories)
            .await?;
        self.allocate_tables(&mut request, &mut guard.tables)
            .await?;
        guard.run_deallocate = false;
        // SAFETY: memories/tables were just allocated from the store within
        // `request` and this function's own contract requires that the
        // imports are valid.
        return unsafe {
            Ok(Instance::new(
                request,
                mem::take(&mut guard.memories),
                mem::take(&mut guard.tables),
                &module.memories,
            ))
        };

        struct DeallocateOnDrop<'a> {
            run_deallocate: bool,
            memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
            tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
            allocator: &'a (dyn InstanceAllocator + 'a),
        }

        impl Drop for DeallocateOnDrop<'_> {
            fn drop(&mut self) {
                if !self.run_deallocate {
                    return;
                }
                // SAFETY: these were previously allocated by this allocator.
                unsafe {
                    self.allocator.deallocate_memories(&mut self.memories);
                    self.allocator.deallocate_tables(&mut self.tables);
                }
                self.allocator.decrement_core_instance_count();
            }
        }
    }

    /// Deallocates the provided instance.
    ///
    /// This will null out the pointer within `handle` and otherwise reclaim
    /// resources such as tables, memories, and the instance memory itself.
    ///
    /// # Safety
    ///
    /// The instance must have previously been allocated by
    /// `Self::allocate_module`.
    pub(crate) unsafe fn deallocate_module(&self, handle: &mut InstanceHandle) {
        // SAFETY: the contract of `deallocate_*` is itself a contract of this
        // function, that the memories/tables were previously allocated from
        // here.
        unsafe {
            self.deallocate_memories(handle.get_mut().memories_mut());
            self.deallocate_tables(handle.get_mut().tables_mut());
        }

        self.decrement_core_instance_count();
    }

    /// Allocate the memories for the given instance allocation request, pushing
    /// them into `memories`.
    async fn allocate_memories(
        &self,
        request: &mut InstanceAllocationRequest<'_, '_>,
        memories: &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
    ) -> Result<()> {
        let module = request.runtime_info.env_module();

        if cfg!(debug_assertions) {
            InstanceAllocator::validate_module(self, module, request.runtime_info.offsets())
                .expect("module should have already been validated before allocation");
        }

        for (memory_index, ty) in module.memories.iter().skip(module.num_imported_memories) {
            let memory_index = module
                .defined_memory_index(memory_index)
                .expect("should be a defined memory since we skipped imported ones");

            let memory = self
                .allocate_memory(request, ty, Some(memory_index))
                .await?;
            memories.push(memory);
        }

        Ok(())
    }

    /// Deallocate all the memories in the given primary map.
    ///
    /// # Safety
    ///
    /// The memories must have previously been allocated by
    /// `Self::allocate_memories`.
    unsafe fn deallocate_memories(
        &self,
        memories: &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
    ) {
        for (memory_index, (allocation_index, memory)) in mem::take(memories) {
            // Because deallocating memory is infallible, we don't need to worry
            // about leaking subsequent memories if the first memory failed to
            // deallocate. If deallocating memory ever becomes fallible, we will
            // need to be careful here!
            //
            // SAFETY: the unsafe contract here is the same as the unsafe
            // contract of this function, that the memories were previously
            // allocated by this allocator.
            unsafe {
                self.deallocate_memory(Some(memory_index), allocation_index, memory);
            }
        }
    }

    /// Allocate tables for the given instance allocation request, pushing them
    /// into `tables`.
    async fn allocate_tables(
        &self,
        request: &mut InstanceAllocationRequest<'_, '_>,
        tables: &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) -> Result<()> {
        let module = request.runtime_info.env_module();

        if cfg!(debug_assertions) {
            InstanceAllocator::validate_module(self, module, request.runtime_info.offsets())
                .expect("module should have already been validated before allocation");
        }

        for (index, table) in module.tables.iter().skip(module.num_imported_tables) {
            let def_index = module
                .defined_table_index(index)
                .expect("should be a defined table since we skipped imported ones");

            let table = self.allocate_table(request, table, def_index).await?;
            tables.push(table);
        }

        Ok(())
    }

    /// Deallocate all the tables in the given primary map.
    ///
    /// # Safety
    ///
    /// The tables must have previously been allocated by
    /// `Self::allocate_tables`.
    unsafe fn deallocate_tables(
        &self,
        tables: &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) {
        for (table_index, (allocation_index, table)) in mem::take(tables) {
            // SAFETY: the tables here were allocated from this allocator per
            // the contract on this function itself.
            unsafe {
                self.deallocate_table(table_index, allocation_index, table);
            }
        }
    }
}

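/// Checks that each of the module's element segments fits within the bounds
/// of its target table, without applying any of them.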
fn check_table_init_bounds(
    store: &mut StoreOpaque,
    instance: InstanceId,
    module: &Module,
) -> Result<()> {
    let mut const_evaluator = ConstExprEvaluator::default();
    let mut store = OpaqueRootScope::new(store);

    for segment in module.table_initialization.segments.iter() {
        let mut context = ConstEvalContext::new(instance);
        let start = const_evaluator
            .eval_int(&mut store, &mut context, &segment.offset)
            .expect("const expression should be valid");
        let start = usize::try_from(start.unwrap_i32().cast_unsigned()).unwrap();
        let end = start.checked_add(usize::try_from(segment.elements.len()).unwrap());

        let table = store.instance_mut(instance).get_table(segment.table_index);
        match end {
            Some(end) if end <= table.size() => {
                // Initializer is in bounds
            }
            _ => {
                bail!("table out of bounds: elements segment does not fit")
            }
        }
    }

    Ok(())
}

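/// Applies the module's table initializers: first any whole-table initial
/// values, then each active element segment (or, in `FuncTable` mode, each
/// leftover segment not covered by the precomputed image).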
async fn initialize_tables(
    store: &mut StoreOpaque,
    mut limiter: Option<&mut StoreResourceLimiter<'_>>,
    context: &mut ConstEvalContext,
    const_evaluator: &mut ConstExprEvaluator,
    module: &Module,
) -> Result<()> {
    let mut store = OpaqueRootScope::new(store);
    for (table, init) in module.table_initialization.initial_values.iter() {
        match init {
            // Tables are always initially null-initialized at this time
            TableInitialValue::Null { precomputed: _ } => {}

            TableInitialValue::Expr(expr) => {
                let init = const_evaluator
                    .eval(&mut store, limiter.as_deref_mut(), context, expr)
                    .await?;
                let idx = module.table_index(table);
                let id = store.id();
                let table = store
                    .instance_mut(context.instance)
                    .get_exported_table(id, idx);
                let size = table._size(&store);
                table._fill(&mut store, 0, init.ref_().unwrap(), size)?;
            }
        }
    }

    // Note: if the module's table initializer state is in
    // FuncTable mode, we will lazily initialize tables based on
    // any statically-precomputed image of FuncIndexes, but there
    // may still be "leftover segments" that could not be
    // incorporated. So we have a unified handler here that
    // iterates over all segments (Segments mode) or leftover
    // segments (FuncTable mode) to initialize.
    for segment in module.table_initialization.segments.iter() {
        let start = const_evaluator
            .eval_int(&mut store, context, &segment.offset)
            .expect("const expression should be valid");
        let start = get_index(
            start,
            store.instance(context.instance).env_module().tables[segment.table_index].idx_type,
        );
        Instance::table_init_segment(
            &mut store,
            limiter.as_deref_mut(),
            context.instance,
            const_evaluator,
            segment.table_index,
            &segment.elements,
            start,
            0,
            segment.elements.len(),
        )
        .await?;
    }

    Ok(())
}

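/// Converts an evaluated constant offset into a `u64` index, zero-extending
/// the value when the memory or table uses a 32-bit index type.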
fn get_index(val: &Val, ty: wasmtime_environ::IndexType) -> u64 {
    match ty {
        wasmtime_environ::IndexType::I32 => val.unwrap_i32().cast_unsigned().into(),
        wasmtime_environ::IndexType::I64 => val.unwrap_i64().cast_unsigned(),
    }
}

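/// Evaluates the constant offset expression of `init` and returns it as a
/// `u64` index into the target memory.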
fn get_memory_init_start(
    store: &mut StoreOpaque,
    init: &MemoryInitializer,
    instance: InstanceId,
) -> Result<u64> {
    let mut context = ConstEvalContext::new(instance);
    let mut const_evaluator = ConstExprEvaluator::default();
    let mut store = OpaqueRootScope::new(store);
    const_evaluator
        .eval_int(&mut store, &mut context, &init.offset)
        .map(|v| {
            get_index(
                v,
                store.instance(instance).env_module().memories[init.memory_index].idx_type,
            )
        })
}

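/// Checks that each data segment in `initializers` fits within the current
/// bounds of its target memory, without writing any of them.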
fn check_memory_init_bounds(
    store: &mut StoreOpaque,
    instance: InstanceId,
    initializers: &[MemoryInitializer],
) -> Result<()> {
    for init in initializers {
        let memory = store.instance_mut(instance).get_memory(init.memory_index);
        let start = get_memory_init_start(store, init, instance)?;
        let end = usize::try_from(start)
            .ok()
            .and_then(|start| start.checked_add(init.data.len()));

        match end {
            Some(end) if end <= memory.current_length() => {
                // Initializer is in bounds
            }
            _ => {
                bail!("memory out of bounds: data segment does not fit")
            }
        }
    }

    Ok(())
}

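/// Applies the module's data segments to its memories, trapping with
/// `Trap::MemoryOutOfBounds` on the first out-of-bounds segment.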
fn initialize_memories(
    store: &mut StoreOpaque,
    context: &mut ConstEvalContext,
    const_evaluator: &mut ConstExprEvaluator,
    module: &Module,
) -> Result<()> {
    // Delegates to the `init_memory` method, which largely duplicates
    // `instance.memory_init_segment` but is also used at compile time in
    // other contexts, so it is shared here to keep a single implementation
    // of memory initialization.
    //
    // Notably, `init_memory` implements all the bells and whistles, so errors
    // only happen if an out-of-bounds segment is found, in which case a trap
    // is returned.

    struct InitMemoryAtInstantiation<'a> {
        module: &'a Module,
        store: &'a mut StoreOpaque,
        context: &'a mut ConstEvalContext,
        const_evaluator: &'a mut ConstExprEvaluator,
    }

    impl InitMemory for InitMemoryAtInstantiation<'_> {
        fn memory_size_in_bytes(
            &mut self,
            memory: wasmtime_environ::MemoryIndex,
        ) -> Result<u64, SizeOverflow> {
            let len = self
                .store
                .instance(self.context.instance)
                .get_memory(memory)
                .current_length();
            let len = u64::try_from(len).unwrap();
            Ok(len)
        }

        fn eval_offset(
            &mut self,
            memory: wasmtime_environ::MemoryIndex,
            expr: &wasmtime_environ::ConstExpr,
        ) -> Option<u64> {
            let mut store = OpaqueRootScope::new(&mut *self.store);
            let val = self
                .const_evaluator
                .eval_int(&mut store, self.context, expr)
                .expect("const expression should be valid");
            Some(get_index(
                val,
                store.instance(self.context.instance).env_module().memories[memory].idx_type,
            ))
        }

        fn write(
            &mut self,
            memory_index: wasmtime_environ::MemoryIndex,
            init: &wasmtime_environ::StaticMemoryInitializer,
        ) -> bool {
            // If this initializer applies to a defined memory but that memory
            // doesn't need initialization, due to something like copy-on-write
            // pre-initializing it via mmap magic, then this initializer can be
            // skipped entirely.
            let instance = self.store.instance_mut(self.context.instance);
            if let Some(memory_index) = self.module.defined_memory_index(memory_index) {
                if !instance.memories[memory_index].1.needs_init() {
                    return true;
                }
            }
            let memory = instance.get_memory(memory_index);

            unsafe {
                let src = instance.wasm_data(init.data.clone());
                let offset = usize::try_from(init.offset).unwrap();
                let dst = memory.base.as_ptr().add(offset);

                assert!(offset + src.len() <= memory.current_length());

                // FIXME audit whether this is safe in the presence of shared
                // memory
                // (https://github.com/bytecodealliance/wasmtime/issues/4203).
                ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len())
            }
            true
        }
    }

    let ok = module
        .memory_initialization
        .init_memory(&mut InitMemoryAtInstantiation {
            module,
            store,
            context,
            const_evaluator,
        });
    if !ok {
        return Err(Trap::MemoryOutOfBounds.into());
    }

    Ok(())
}

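/// Bounds-checks all element and data segments up front, for use when bulk
/// memory's partial-initialization semantics are not in effect.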
fn check_init_bounds(store: &mut StoreOpaque, instance: InstanceId, module: &Module) -> Result<()> {
    check_table_init_bounds(store, instance, module)?;

    match &module.memory_initialization {
        MemoryInitialization::Segmented(initializers) => {
            check_memory_init_bounds(store, instance, initializers)?;
        }
        // Statically validated already to have everything in-bounds.
        MemoryInitialization::Static { .. } => {}
    }

    Ok(())
}

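/// Evaluates each of the module's global initializers and writes the results
/// into the instance's globals.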
async fn initialize_globals(
    store: &mut StoreOpaque,
    mut limiter: Option<&mut StoreResourceLimiter<'_>>,
    context: &mut ConstEvalContext,
    const_evaluator: &mut ConstExprEvaluator,
    module: &Module,
) -> Result<()> {
    assert!(core::ptr::eq(
        &**store.instance(context.instance).env_module(),
        module
    ));

    let mut store = OpaqueRootScope::new(store);

    for (index, init) in module.global_initializers.iter() {
        // Attempt a simple, synchronous evaluation before hitting the
        // general-purpose `.await` point below. This benchmarks ~15% faster
        // for instantiation than falling through to the `.await` every time.
        let val = if let Some(val) = const_evaluator.try_simple(init) {
            val
        } else {
            const_evaluator
                .eval(&mut store, limiter.as_deref_mut(), context, init)
                .await?
        };

        let id = store.id();
        let index = module.global_index(index);
        let mut instance = store.instance_mut(context.instance);

        #[cfg(feature = "wmemcheck")]
        if index.as_u32() == 0
            && module.globals[index].wasm_ty == wasmtime_environ::WasmValType::I32
        {
            if let Some(wmemcheck) = instance.as_mut().wmemcheck_state_mut() {
                let size = usize::try_from(val.unwrap_i32()).unwrap();
                wmemcheck.set_stack_size(size);
            }
        }

        let global = instance.as_mut().get_exported_global(id, index);

        // Note that mutability is bypassed here because this is, by
        // definition, initialization of globals: if the global is immutable,
        // this is its one and only write.
        //
        // SAFETY: this is a valid module so `val` should have the correct type
        // for this global, and it's safe to write to a global for the first
        // time as is happening here.
        unsafe {
            global.set_unchecked(&mut store, &val)?;
        }
    }
    Ok(())
}

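/// Runs all of `module`'s initializers (globals, then tables, then memories)
/// against the freshly-allocated `instance` within `store`.
///
/// When `is_bulk_memory` is `false`, every segment is bounds-checked before
/// any changes are made, so a failed instantiation has no visible side
/// effects.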
pub async fn initialize_instance(
    store: &mut StoreOpaque,
    mut limiter: Option<&mut StoreResourceLimiter<'_>>,
    instance: InstanceId,
    module: &Module,
    is_bulk_memory: bool,
) -> Result<()> {
    // If bulk memory is not enabled, bounds-check the data and element
    // segments before making any changes. With bulk memory enabled,
    // initializers are processed in order and side effects are observed up to
    // the point of an out-of-bounds initializer, so the early checking is not
    // desired.
    if !is_bulk_memory {
        check_init_bounds(store, instance, module)?;
    }

    let mut context = ConstEvalContext::new(instance);
    let mut const_evaluator = ConstExprEvaluator::default();

    initialize_globals(
        store,
        limiter.as_deref_mut(),
        &mut context,
        &mut const_evaluator,
        module,
    )
    .await?;
    initialize_tables(
        store,
        limiter.as_deref_mut(),
        &mut context,
        &mut const_evaluator,
        module,
    )
    .await?;
    initialize_memories(store, &mut context, &mut const_evaluator, module)?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn allocator_traits_are_object_safe() {
        fn _instance_allocator(_: &dyn InstanceAllocator) {}
    }
}