// wasmtime/runtime/type_registry.rs

1//! Implement a registry of types: function, struct, and array definitions.
2//!
3//! Helps implement fast indirect call signature checking, reference type
4//! downcasting, and etc...
5
6use crate::Engine;
7use crate::hash_set::HashSet;
8use crate::prelude::*;
9use crate::sync::RwLock;
10use crate::vm::GcRuntime;
11use alloc::borrow::Cow;
12use alloc::sync::Arc;
13use core::iter;
14use core::{
15    borrow::Borrow,
16    fmt::{self, Debug},
17    hash::{Hash, Hasher},
18    ops::Range,
19    sync::atomic::{
20        AtomicBool, AtomicUsize,
21        Ordering::{AcqRel, Acquire, Release},
22    },
23};
24use wasmtime_environ::{
25    EngineOrModuleTypeIndex, GcLayout, ModuleInternedTypeIndex, ModuleTypes, PrimaryMap,
26    SecondaryMap, TypeTrace, VMSharedTypeIndex, WasmRecGroup, WasmSubType, iter_entity_range,
27    packed_option::{PackedOption, ReservedValue},
28};
29use wasmtime_slab::{Id as SlabId, Slab};
30
31// ### Notes on the Lifetime Management of Types
32//
33// All defined types from all Wasm modules loaded into Wasmtime are interned
34// into their engine's `TypeRegistry`.
35//
36// With Wasm MVP, managing type lifetimes within the registry was easy: we only
37// cared about canonicalizing types so that `call_indirect` was fast and we
38// didn't waste memory on many copies of the same function type definition.
39// Function types could only take and return simple scalars (i32/f64/etc...) and
40// there were no type-to-type references. We could simply deduplicate function
41// types and reference count their entries in the registry.
42//
43// The typed function references and GC proposals change everything. The former
44// introduced function types that take a reference to a function of another
45// specific type. This is a type-to-type reference. The latter introduces struct
46// and array types that can have references to other struct, array, and function
47// types, as well as recursion groups that allow cyclic references between
48// types. Now type canonicalization additionally enables fast type checks and
49// downcasts *across* modules: so that two modules which define the same struct
50// type, for example, can pass instances of that struct type to each other, and
51// we can quickly check that those instances are in fact of the expected types.
52//
53// But how do we manage the lifetimes of types that can reference other types as
54// Wasm modules are dynamically loaded and unloaded from the engine? These
55// modules can define subsets of the same types and there can be cyclic type
// references. Dynamic lifetimes, sharing, and cycles are a classic combination
57// of constraints that push a design towards a tracing garbage collector (or,
58// equivalently, a reference-counting collector with a cycle collector).
59//
60// However, we can rely on the following properties:
61//
62// 1. The unit of type canonicalization is a whole recursion group.
63//
64// 2. Type-to-type reference cycles may only happen within a recursion group and
65//    therefore type-to-type references across recursion groups are acyclic.
66//
67// Therefore, our type registry makes the following design decisions:
68//
69// * We manage the lifetime of whole recursion groups, not individual
70//   types. That is, every type in the recursion group stays alive as long as
71//   any type in the recursion group is kept alive. This is effectively mandated
72//   by property (1) and the hash consing it implies.
73//
74// * We still use naive reference counting to manage the lifetimes of recursion
75//   groups. A type-to-type reference that crosses the boundary from recursion
76//   group A to recursion group B will increment B's reference count when A is
77//   first registered and decrement B's reference count when A is removed from
78//   the registry. Because of property (2) we don't need to worry about cycles,
79//   which are the classic weakness of reference counting.
80
/// Represents a collection of shared types.
///
/// This is used to register shared types with a shared type registry.
///
/// The collection will unregister any contained types with the registry
/// when dropped.
pub struct TypeCollection {
    /// The engine whose `TypeRegistry` these types are registered in; used on
    /// drop to unregister them.
    engine: Engine,
    /// The registry entries (one per rec group) whose registrations this
    /// collection holds.
    rec_groups: Vec<RecGroupEntry>,
    /// Map from each module-level interned type index to its engine-level
    /// shared type index.
    types: PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
    /// Map from an engine-level function type index to the module-level index
    /// of its wasm-to-native trampoline, if any.
    trampolines: SecondaryMap<VMSharedTypeIndex, PackedOption<ModuleInternedTypeIndex>>,
}
93
94impl Debug for TypeCollection {
95    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
96        let TypeCollection {
97            engine: _,
98            rec_groups,
99            types,
100            trampolines,
101        } = self;
102        f.debug_struct("TypeCollection")
103            .field("rec_groups", rec_groups)
104            .field("types", types)
105            .field("trampolines", trampolines)
106            .finish_non_exhaustive()
107    }
108}
109
impl Engine {
    /// Registers the given types in this engine, re-canonicalizing them for
    /// runtime usage.
    ///
    /// Both `module_types` and every module in `env_modules` are rewritten in
    /// place so that their type references use engine-level
    /// `VMSharedTypeIndex`es rather than `ModuleInternedTypeIndex`es.
    ///
    /// The returned `TypeCollection` keeps the types registered; they are
    /// unregistered when it is dropped.
    #[must_use = "types are only registered as long as the `TypeCollection` is live"]
    pub(crate) fn register_and_canonicalize_types<'a, I>(
        &self,
        module_types: &mut ModuleTypes,
        env_modules: I,
    ) -> TypeCollection
    where
        I: IntoIterator<Item = &'a mut wasmtime_environ::Module>,
        I::IntoIter: ExactSizeIterator,
    {
        // Incoming types must still be module-level canonical, i.e. reference
        // other types only via module interned indices, never via engine- or
        // rec-group-level indices.
        if cfg!(debug_assertions) {
            module_types
                .trace(&mut |idx| match idx {
                    EngineOrModuleTypeIndex::Module(_) => Ok(()),
                    EngineOrModuleTypeIndex::Engine(_) | EngineOrModuleTypeIndex::RecGroup(_) => {
                        Err(idx)
                    }
                })
                .expect("should only have module type indices");
        }

        let engine = self.clone();
        let registry = engine.signatures();
        let gc_runtime = engine.gc_runtime().map(|rt| &**rt);

        // First, register these types in this engine's registry.
        let (rec_groups, types) = registry
            .0
            .write()
            .register_module_types(gc_runtime, module_types);

        // Then build our map from each function type's engine index to the
        // module-index of its trampoline. Trampoline functions are queried by
        // module-index in a compiled module, and doing this engine-to-module
        // resolution now means we don't need to do it on the function call hot
        // path.
        let mut trampolines = SecondaryMap::with_capacity(types.len());
        for (module_ty, module_trampoline_ty) in module_types.trampoline_types() {
            let shared_ty = types[module_ty];
            let trampoline_shared_ty = registry.trampoline_type(shared_ty);
            trampolines[trampoline_shared_ty] = Some(module_trampoline_ty).into();
        }

        // Finally, to ensure that no matter which API from which layer
        // (`wasmtime::runtime::vm` vs `wasmtime_environ`, etc...) we use to
        // grab an entity's type, we will always end up with a type that has
        // `VMSharedTypeIndex` rather than `ModuleInternedTypeIndex` type
        // references, we canonicalize both the `ModuleTypes` and
        // `wasmtime_environ::Module`s for runtime usage. All our type-of-X APIs
        // ultimately use one of these two structures.
        module_types.canonicalize_for_runtime_usage(&mut |idx| types[idx]);
        for module in env_modules {
            module.canonicalize_for_runtime_usage(&mut |idx| types[idx]);
        }

        TypeCollection {
            engine,
            rec_groups,
            types,
            trampolines,
        }
    }
}
176
177impl TypeCollection {
178    /// Treats the type collection as a map from a module type index to
179    /// registered shared type indexes.
180    ///
181    /// This is used for looking up module shared type indexes during module
182    /// instantiation.
183    pub fn as_module_map(&self) -> &PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex> {
184        &self.types
185    }
186
187    /// Gets the shared type index given a module type index.
188    #[inline]
189    pub fn shared_type(&self, index: ModuleInternedTypeIndex) -> Option<VMSharedTypeIndex> {
190        let shared_ty = self.types.get(index).copied();
191        log::trace!("TypeCollection::shared_type({index:?}) -> {shared_ty:?}");
192        shared_ty
193    }
194
195    /// Get the module-level type index of the trampoline type for the given
196    /// engine-level function type, if any.
197    ///
198    /// This allows callers to look up the pre-compiled wasm-to-native
199    /// trampoline in this type collection's associated module.
200    ///
201    /// See the docs for `WasmFuncType::trampoline_type` for details on
202    /// trampoline types.
203    #[inline]
204    pub fn trampoline_type(&self, ty: VMSharedTypeIndex) -> Option<ModuleInternedTypeIndex> {
205        let trampoline_ty = self.trampolines[ty].expand();
206        log::trace!("TypeCollection::trampoline_type({ty:?}) -> {trampoline_ty:?}");
207        trampoline_ty
208    }
209}
210
211impl Drop for TypeCollection {
212    fn drop(&mut self) {
213        if !self.rec_groups.is_empty() {
214            self.engine
215                .signatures()
216                .0
217                .write()
218                .unregister_type_collection(self);
219        }
220    }
221}
222
/// Convert a `VMSharedTypeIndex` into the registry slab's allocator ID.
///
/// Shared type indices and slab IDs share the same 32-bit representation (see
/// `slab_id_to_shared_type_index` for the inverse), so this is a checked
/// bit-cast.
#[inline]
fn shared_type_index_to_slab_id(index: VMSharedTypeIndex) -> SlabId {
    // The reserved index value is never handed out for a registered type, so
    // it must never reach this conversion.
    assert!(!index.is_reserved_value());
    SlabId::from_raw(index.bits())
}
228
/// Convert a registry slab allocator ID back into a `VMSharedTypeIndex`.
///
/// Inverse of `shared_type_index_to_slab_id`.
#[inline]
fn slab_id_to_shared_type_index(id: SlabId) -> VMSharedTypeIndex {
    let index = VMSharedTypeIndex::new(id.into_raw());
    // A slab ID's raw bits must never collide with the reserved index value,
    // otherwise the round-trip through `shared_type_index_to_slab_id` breaks.
    assert!(!index.is_reserved_value());
    index
}
235
/// A Wasm type that has been registered in the engine's `TypeRegistry`.
///
/// Prevents its associated type from being unregistered while it is alive.
///
/// Automatically unregisters the type on drop. (Unless other `RegisteredTypes`
/// are keeping the type registered).
///
/// Dereferences to its underlying `WasmSubType`.
pub struct RegisteredType {
    /// The engine whose registry holds this type.
    engine: Engine,
    /// The rec group entry on which this handle holds one registration
    /// (incremented on clone, decremented on drop).
    entry: RecGroupEntry,
    /// The underlying type definition; this is what `Deref` yields.
    ty: Arc<WasmSubType>,
    /// This type's engine-level shared index.
    index: VMSharedTypeIndex,
    /// The type's GC layout, if any (see `RegisteredType::layout`).
    layout: Option<GcLayout>,
}
251
252impl Debug for RegisteredType {
253    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
254        let RegisteredType {
255            engine: _,
256            entry: _,
257            ty,
258            index,
259            layout,
260        } = self;
261        f.debug_struct("RegisteredType")
262            .field("index", index)
263            .field("ty", ty)
264            .field("layout", layout)
265            .finish_non_exhaustive()
266    }
267}
268
269impl Clone for RegisteredType {
270    fn clone(&self) -> Self {
271        self.engine.signatures().debug_assert_contains(self.index);
272        self.entry.incref("RegisteredType::clone");
273        RegisteredType {
274            engine: self.engine.clone(),
275            entry: self.entry.clone(),
276            ty: self.ty.clone(),
277            index: self.index,
278            layout: self.layout.clone(),
279        }
280    }
281}
282
283impl Drop for RegisteredType {
284    fn drop(&mut self) {
285        self.engine.signatures().debug_assert_contains(self.index);
286        if self.entry.decref("RegisteredType::drop") {
287            self.engine
288                .signatures()
289                .0
290                .write()
291                .unregister_entry(self.entry.clone());
292        }
293    }
294}
295
296impl core::ops::Deref for RegisteredType {
297    type Target = WasmSubType;
298
299    fn deref(&self) -> &Self::Target {
300        &self.ty
301    }
302}
303
304impl PartialEq for RegisteredType {
305    fn eq(&self, other: &Self) -> bool {
306        self.engine.signatures().debug_assert_contains(self.index);
307        other.engine.signatures().debug_assert_contains(other.index);
308
309        let eq = self.index == other.index && Engine::same(&self.engine, &other.engine);
310
311        if cfg!(debug_assertions) && eq {
312            // If they are the same, then their rec group entries and
313            // `WasmSubType`s had better also be the same.
314            assert!(Arc::ptr_eq(&self.entry.0, &other.entry.0));
315            assert_eq!(self.ty, other.ty);
316        }
317
318        eq
319    }
320}
321
322impl Eq for RegisteredType {}
323
324impl Hash for RegisteredType {
325    fn hash<H: Hasher>(&self, state: &mut H) {
326        self.engine.signatures().debug_assert_contains(self.index);
327        let ptr = Arc::as_ptr(&self.entry.0);
328        ptr.hash(state);
329    }
330}
331
impl RegisteredType {
    /// Constructs a new `RegisteredType`, registering the given type with the
    /// engine's `TypeRegistry`.
    pub fn new(engine: &Engine, ty: WasmSubType) -> RegisteredType {
        let (entry, index, ty, layout) = {
            log::trace!("RegisteredType::new({ty:?})");

            let gc_runtime = engine.gc_runtime().map(|rt| &**rt);
            let mut inner = engine.signatures().0.write();

            // It shouldn't be possible for users to construct non-canonical
            // types via the embedding API, and the only other types they can
            // get are already-canonicalized types from modules, so we shouldn't
            // ever get non-canonical types here. Furthermore, this is only
            // called internally to Wasmtime, so we shouldn't ever have an
            // engine mismatch; those should be caught earlier.
            inner.assert_canonicalized_for_runtime_usage_in_this_registry(&ty);

            // NB: the returned entry's registration count has already been
            // incremented on our behalf (see `register_rec_group`'s docs),
            // which satisfies `from_parts`'s contract below.
            let entry = inner.register_singleton_rec_group(gc_runtime, ty);

            // A singleton rec group contains exactly one type, at offset zero.
            let index = entry.0.shared_type_indices[0];
            let id = shared_type_index_to_slab_id(index);
            let ty = inner.types[id].clone().unwrap();
            let layout = inner.type_to_gc_layout.get(index).and_then(|l| l.clone());

            (entry, index, ty, layout)
        };

        RegisteredType::from_parts(engine.clone(), entry, index, ty, layout)
    }

    /// Create an owning handle to the given index's associated type.
    ///
    /// This will prevent the associated type from being unregistered as long as
    /// the returned `RegisteredType` is kept alive.
    pub fn root(engine: &Engine, index: VMSharedTypeIndex) -> RegisteredType {
        engine.signatures().debug_assert_contains(index);

        let (entry, ty, layout) = {
            let id = shared_type_index_to_slab_id(index);
            // A read lock suffices here: `incref` is atomic and we never
            // mutate the registry's own maps.
            let inner = engine.signatures().0.read();

            let ty = inner.types[id].clone().unwrap();
            let entry = inner.type_to_rec_group[index].clone().unwrap();
            let layout = inner.type_to_gc_layout.get(index).and_then(|l| l.clone());

            // NB: make sure to incref while the lock is held to prevent:
            //
            // * This thread: read locks registry, gets entry E, unlocks registry
            // * Other thread: drops `RegisteredType` for entry E, decref
            //   reaches zero, write locks registry, unregisters entry
            // * This thread: increfs entry, but it isn't in the registry anymore
            entry.incref("RegisteredType::root");

            (entry, ty, layout)
        };

        RegisteredType::from_parts(engine.clone(), entry, index, ty, layout)
    }

    /// Construct a new `RegisteredType`.
    ///
    /// It is the caller's responsibility to ensure that the entry's reference
    /// count has already been incremented.
    fn from_parts(
        engine: Engine,
        entry: RecGroupEntry,
        index: VMSharedTypeIndex,
        ty: Arc<WasmSubType>,
        layout: Option<GcLayout>,
    ) -> Self {
        log::trace!(
            "RegisteredType::from_parts({engine:?}, {entry:?}, {index:?}, {ty:?}, {layout:?})"
        );
        engine.signatures().debug_assert_contains(index);
        // Catch callers that forgot to take a registration for this handle.
        debug_assert!(
            entry.0.registrations.load(Acquire) != 0,
            "entry should have a non-zero registration count"
        );
        RegisteredType {
            engine,
            entry,
            ty,
            index,
            layout,
        }
    }

    /// Get the engine whose registry this type is registered within.
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    /// Get this registered type's index.
    pub fn index(&self) -> VMSharedTypeIndex {
        self.index
    }

    /// Get this registered type's GC layout, if any.
    ///
    /// Only struct and array types have GC layouts; function types do not have
    /// layouts.
    #[cfg(feature = "gc")]
    pub fn layout(&self) -> Option<&GcLayout> {
        self.layout.as_ref()
    }
}
439
/// An entry in the type registry.
///
/// Implements `Borrow`, `Eq`, and `Hash` by forwarding to the underlying Wasm
/// rec group, so that this can be a hash consing key. (We can't use
/// `Arc<RecGroupEntryInner>` directly for this purpose because `Arc<T>` doesn't
/// implement `Borrow<U>` when `T: Borrow<U>`).
///
/// Cloning is just an `Arc` clone; it does not, by itself, take a new
/// registration on the entry (that is `incref`/`decref`'s job).
#[derive(Clone)]
struct RecGroupEntry(Arc<RecGroupEntryInner>);
448
449impl Debug for RecGroupEntry {
450    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
451        struct FormatAsPtr<'a, P>(&'a P);
452        impl<P: fmt::Pointer> Debug for FormatAsPtr<'_, P> {
453            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
454                write!(f, "{:#p}", *self.0)
455            }
456        }
457
458        f.debug_tuple("RecGroupEntry")
459            .field(&FormatAsPtr(&self.0))
460            .finish()
461    }
462}
463
struct RecGroupEntryInner {
    /// The Wasm rec group, canonicalized for hash consing.
    hash_consing_key: WasmRecGroup,

    /// The shared type indices for each type in this rec group.
    ///
    /// Parallel to the types in `hash_consing_key`.
    shared_type_indices: Box<[VMSharedTypeIndex]>,

    /// The number of times that this entry has been registered in the
    /// `TypeRegistryInner`.
    ///
    /// This is an atomic counter so that cloning a `RegisteredType`, and
    /// temporarily keeping a type registered, doesn't require locking the full
    /// registry.
    ///
    /// Manipulated via `RecGroupEntry::{incref,decref}`.
    registrations: AtomicUsize,

    /// Whether this entry has already been unregistered from the
    /// `TypeRegistryInner`.
    ///
    /// This flag exists to detect and avoid double-unregistration bugs that
    /// could otherwise occur in rare cases. See the comments in
    /// `TypeRegistryInner::unregister_type` for details.
    unregistered: AtomicBool,
}
487
488impl PartialEq for RecGroupEntry {
489    fn eq(&self, other: &Self) -> bool {
490        self.0.hash_consing_key == other.0.hash_consing_key
491    }
492}
493
494impl Eq for RecGroupEntry {}
495
496impl Hash for RecGroupEntry {
497    fn hash<H: Hasher>(&self, state: &mut H) {
498        self.0.hash_consing_key.hash(state);
499    }
500}
501
502impl Borrow<WasmRecGroup> for RecGroupEntry {
503    #[inline]
504    fn borrow(&self) -> &WasmRecGroup {
505        &self.0.hash_consing_key
506    }
507}
508
impl RecGroupEntry {
    /// Increment the registration count.
    ///
    /// Atomic, so callers only need shared access to the registry (e.g. a read
    /// lock) while taking a new registration; see `RegisteredType::root`.
    fn incref(&self, why: &str) {
        let old_count = self.0.registrations.fetch_add(1, AcqRel);
        log::trace!("incref({self:?}) -> count {}: {why}", old_count + 1);
    }

    /// Decrement the registration count and return `true` if the registration
    /// count reached zero and this entry should be removed from the registry.
    #[must_use = "caller must remove entry from registry if `decref` returns `true`"]
    fn decref(&self, why: &str) -> bool {
        let old_count = self.0.registrations.fetch_sub(1, AcqRel);
        // Decrementing past zero would mean some registration was released
        // twice.
        debug_assert_ne!(old_count, 0);
        log::trace!("decref({self:?}) -> count {}: {why}", old_count - 1);
        old_count == 1
    }
}
526
#[derive(Debug, Default)]
struct TypeRegistryInner {
    // A hash map from a canonicalized-for-hash-consing rec group to its
    // `VMSharedTypeIndex`es.
    //
    // There is an entry in this map for every rec group we have already
    // registered. Before registering new rec groups, we first check this map to
    // see if we've already registered an identical rec group that we should
    // reuse instead.
    //
    // Lookups by `WasmRecGroup` go through `RecGroupEntry`'s `Borrow` impl.
    hash_consing_map: HashSet<RecGroupEntry>,

    // A map from `VMSharedTypeIndex::bits()` to the type index's associated
    // Wasm type.
    //
    // These types are always canonicalized for runtime usage.
    //
    // These are only `None` during the process of inserting a new rec group
    // into the registry, where we need registered `VMSharedTypeIndex`es for
    // forward type references within the rec group, but have not actually
    // inserted all the types within the rec group yet.
    types: Slab<Option<Arc<WasmSubType>>>,

    // A map that lets you walk backwards from a `VMSharedTypeIndex` to its
    // `RecGroupEntry`.
    type_to_rec_group: SecondaryMap<VMSharedTypeIndex, Option<RecGroupEntry>>,

    // A map from a registered type to its complete list of supertypes.
    //
    // The supertypes are ordered from super- to subtype, i.e. the immediate
    // parent supertype is the last element and the least-upper-bound of all
    // supertypes is the first element.
    //
    // Types without any supertypes are omitted from this map. This means that
    // we never allocate any backing storage for this map when Wasm GC is not in
    // use.
    type_to_supertypes: SecondaryMap<VMSharedTypeIndex, Option<Box<[VMSharedTypeIndex]>>>,

    // A map from each registered function type to its trampoline type.
    //
    // Note that when a function type is its own trampoline type, then we omit
    // the entry in this map as a memory optimization. This means that if only
    // core Wasm function types are ever used, then we will never allocate any
    // backing storage for this map. As a nice bonus, this also avoids cycles (a
    // function type referencing itself) that our naive reference counting
    // doesn't play well with.
    type_to_trampoline: SecondaryMap<VMSharedTypeIndex, PackedOption<VMSharedTypeIndex>>,

    // A map from each registered GC type to its layout.
    //
    // Function types do not have an entry in this map. Similar to the
    // `type_to_{supertypes,trampoline}` maps, we completely omit the `None`
    // entries for these types as a memory optimization.
    type_to_gc_layout: SecondaryMap<VMSharedTypeIndex, Option<GcLayout>>,

    // An explicit stack of entries that we are in the middle of dropping. Used
    // to avoid recursion when dropping a type that is holding the last
    // reference to another type, etc...
    drop_stack: Vec<RecGroupEntry>,
}
586
587impl TypeRegistryInner {
    /// Debug-assert that `index` refers to a fully-registered type in this
    /// registry: a real (non-reserved) index whose slab slot is occupied and
    /// which has an associated rec group entry.
    ///
    /// A no-op in release builds.
    #[inline]
    #[track_caller]
    fn debug_assert_registered(&self, index: VMSharedTypeIndex) {
        debug_assert!(
            !index.is_reserved_value(),
            "should have an actual VMSharedTypeIndex, not the reserved value"
        );
        debug_assert!(
            self.types.contains(shared_type_index_to_slab_id(index)),
            "registry's slab should contain {index:?}",
        );
        // A `None` slot means the type's rec group is still mid-insertion.
        debug_assert!(
            self.types[shared_type_index_to_slab_id(index)].is_some(),
            "registry's slab should actually contain a type for {index:?}",
        );
        debug_assert!(
            self.type_to_rec_group[index].is_some(),
            "{index:?} should have an associated rec group entry"
        );
    }
608
609    #[inline]
610    #[track_caller]
611    fn debug_assert_all_registered(&self, indices: impl IntoIterator<Item = VMSharedTypeIndex>) {
612        if cfg!(debug_assertions) {
613            for index in indices {
614                self.debug_assert_registered(index);
615            }
616        }
617    }
618
    /// Register all of a module's types in this registry.
    ///
    /// Returns the registry entries (one per rec group, each already holding a
    /// registration on the caller's behalf) and the map from each module-level
    /// type index to its engine-level shared index.
    fn register_module_types(
        &mut self,
        gc_runtime: Option<&dyn GcRuntime>,
        types: &ModuleTypes,
    ) -> (
        Vec<RecGroupEntry>,
        PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
    ) {
        log::trace!("Start registering module types");

        // The engine's type registry entries for these module types.
        let mut entries = Vec::with_capacity(types.rec_groups().len());

        // The map from a module type index to an engine type index for these
        // module types.
        let mut map = PrimaryMap::<ModuleInternedTypeIndex, VMSharedTypeIndex>::with_capacity(
            types.wasm_types().len(),
        );

        for (_rec_group_index, module_group) in types.rec_groups() {
            let entry = self.register_rec_group(
                gc_runtime,
                &map,
                module_group.clone(),
                iter_entity_range(module_group.clone()).map(|ty| types[ty].clone()),
            );

            // Update the module-to-engine map with this rec group's
            // newly-registered types.
            for (module_ty, engine_ty) in
                iter_entity_range(module_group).zip(entry.0.shared_type_indices.iter())
            {
                // `map` is filled in rec-group order, so each push must land on
                // exactly the module index it is the translation of.
                let module_ty2 = map.push(*engine_ty);
                assert_eq!(module_ty, module_ty2);
            }

            entries.push(entry);
        }

        log::trace!("End registering module types");

        (entries, map)
    }
662
663    /// Register a rec group in this registry.
664    ///
665    /// The rec group may be either module-level canonical (i.e. straight from
666    /// `wasmparser`) or engine-level canonicalized for runtime usage in this
667    /// registry. It may *not* be engine-level canonicalized for hash consing or
668    /// engine-level canonicalized for a different type registry instance.
669    ///
670    /// If this rec group is determined to be a duplicate of an
671    /// already-registered rec group, the existing rec group is reused.
672    ///
673    /// Parameters:
674    ///
675    /// * `map`: A map that we use to canonicalize inter-group type references
676    ///   from module-canonical to engine-canonical indices. This must contain
677    ///   entries for each inter-group type reference that this rec group
678    ///   contains.
679    ///
680    /// * `range`: The range of (module-level) types defined by this rec
681    ///   group. This is used to determine which type references inside this rec
682    ///   group are inter- vs intra-group.
683    ///
684    /// * `types`: The types defined within this rec group. Must have the same
685    ///   length as `range`.
686    ///
687    /// The returned entry will have already had its reference count incremented
688    /// on behalf of callers.
689    fn register_rec_group(
690        &mut self,
691        gc_runtime: Option<&dyn GcRuntime>,
692        map: &PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
693        range: Range<ModuleInternedTypeIndex>,
694        types: impl ExactSizeIterator<Item = WasmSubType>,
695    ) -> RecGroupEntry {
696        log::trace!("registering rec group of length {}", types.len());
697        debug_assert_eq!(iter_entity_range(range.clone()).len(), types.len());
698
699        // We need two different canonicalization of this rec group: one for
700        // hash-consing and another for runtime usage within this
701        // engine. However, we only need the latter if this is a new rec group
702        // that hasn't been registered before. Therefore, we only eagerly create
703        // the hash-consing canonicalized version, and while we lazily
704        // canonicalize for runtime usage in this engine, we must still eagerly
705        // clone and set aside the original, non-canonicalized types for that
706        // potential engine canonicalization eventuality.
707        let mut non_canon_types = Vec::with_capacity(types.len());
708        let hash_consing_key = WasmRecGroup {
709            types: types
710                .zip(iter_entity_range(range.clone()))
711                .map(|(mut ty, module_index)| {
712                    non_canon_types.push((module_index, ty.clone()));
713                    ty.canonicalize_for_hash_consing(range.clone(), &mut |idx| {
714                        debug_assert!(idx < range.clone().start);
715                        map[idx]
716                    });
717                    ty
718                })
719                .collect::<Box<[_]>>(),
720        };
721
722        // Any references in the hash-consing key to types outside of this rec
723        // group may only be to fully-registered types.
724        if cfg!(debug_assertions) {
725            hash_consing_key
726                .trace_engine_indices::<_, ()>(&mut |index| Ok(self.debug_assert_registered(index)))
727                .unwrap();
728        }
729
730        // If we've already registered this rec group before, reuse it.
731        if let Some(entry) = self.hash_consing_map.get(&hash_consing_key) {
732            log::trace!("hash-consing map hit: reusing {entry:?}");
733            assert_eq!(entry.0.unregistered.load(Acquire), false);
734            self.debug_assert_all_registered(entry.0.shared_type_indices.iter().copied());
735            entry.incref("hash-consing map hit");
736            return entry.clone();
737        }
738
739        log::trace!("hash-consing map miss: making new registration");
740
741        // Inter-group edges: increment the referenced group's ref
742        // count, because these other rec groups shouldn't be dropped
743        // while this rec group is still alive.
744        hash_consing_key
745            .trace_engine_indices::<_, ()>(&mut |index| {
746                self.debug_assert_registered(index);
747                let other_entry = self.type_to_rec_group[index].as_ref().unwrap();
748                assert_eq!(other_entry.0.unregistered.load(Acquire), false);
749                other_entry.incref("new rec group's type references");
750                Ok(())
751            })
752            .unwrap();
753
754        // Register the individual types.
755        //
756        // Note that we can't update the reverse type-to-rec-group map until
757        // after we've constructed the `RecGroupEntry`, since that map needs the
758        // fully-constructed entry for its values.
759        let module_rec_group_start = range.start;
760        let shared_type_indices: Box<[_]> = non_canon_types
761            .iter()
762            .map(|(module_index, ty)| {
763                let engine_index = slab_id_to_shared_type_index(self.types.alloc(None));
764                log::trace!(
765                    "reserved {engine_index:?} for {module_index:?} = non-canonical {ty:?}"
766                );
767                engine_index
768            })
769            .collect();
770        for (engine_index, (module_index, mut ty)) in
771            shared_type_indices.iter().copied().zip(non_canon_types)
772        {
773            log::trace!("canonicalizing {engine_index:?} for runtime usage");
774            ty.canonicalize_for_runtime_usage(&mut |module_index| {
775                if module_index < module_rec_group_start {
776                    let engine_index = map[module_index];
777                    log::trace!("    cross-group {module_index:?} becomes {engine_index:?}");
778                    self.debug_assert_registered(engine_index);
779                    engine_index
780                } else {
781                    assert!(module_index < range.end);
782                    let rec_group_offset = module_index.as_u32() - module_rec_group_start.as_u32();
783                    let rec_group_offset = usize::try_from(rec_group_offset).unwrap();
784                    let engine_index = shared_type_indices[rec_group_offset];
785                    log::trace!("    intra-group {module_index:?} becomes {engine_index:?}");
786                    assert!(!engine_index.is_reserved_value());
787                    assert!(
788                        self.types
789                            .contains(shared_type_index_to_slab_id(engine_index))
790                    );
791                    engine_index
792                }
793            });
794            self.insert_one_type_from_rec_group(gc_runtime, module_index, engine_index, ty);
795        }
796
797        // Although we haven't finished registering all their metadata, the
798        // types themselves should all be filled in now.
799        if cfg!(debug_assertions) {
800            for index in &shared_type_indices {
801                let id = shared_type_index_to_slab_id(*index);
802                debug_assert!(self.types.contains(id));
803                debug_assert!(self.types[id].is_some());
804            }
805        }
806        debug_assert_eq!(
807            shared_type_indices.len(),
808            shared_type_indices
809                .iter()
810                .copied()
811                .collect::<crate::hash_set::HashSet<_>>()
812                .len(),
813            "should not have any duplicate type indices",
814        );
815
816        let entry = RecGroupEntry(Arc::new(RecGroupEntryInner {
817            hash_consing_key,
818            shared_type_indices,
819            registrations: AtomicUsize::new(1),
820            unregistered: AtomicBool::new(false),
821        }));
822        log::trace!("new {entry:?} -> count 1");
823
824        let is_new_entry = self.hash_consing_map.insert(entry.clone());
825        debug_assert!(is_new_entry);
826
827        // Now that we've constructed the entry, we can update the reverse
828        // type-to-rec-group map.
829        for ty in entry.0.shared_type_indices.iter().copied() {
830            debug_assert!(self.type_to_rec_group[ty].is_none());
831            self.type_to_rec_group[ty] = Some(entry.clone());
832        }
833        self.debug_assert_all_registered(entry.0.shared_type_indices.iter().copied());
834
835        // Finally, make sure to register the trampoline type for each function
836        // type in the rec group.
837        for shared_type_index in entry.0.shared_type_indices.iter().copied() {
838            let slab_id = shared_type_index_to_slab_id(shared_type_index);
839            let sub_ty = self.types[slab_id].as_ref().unwrap();
840            if let Some(f) = sub_ty.as_func() {
841                let trampoline = f.trampoline_type();
842                match &trampoline {
843                    Cow::Borrowed(_) if sub_ty.is_final && sub_ty.supertype.is_none() => {
844                        // The function type is its own trampoline type. Leave
845                        // its entry in `type_to_trampoline` empty to signal
846                        // this.
847                        log::trace!(
848                            "trampoline_type({shared_type_index:?}) = {shared_type_index:?}",
849                        );
850                    }
851                    Cow::Borrowed(_) | Cow::Owned(_) => {
852                        // This will recursively call into rec group
853                        // registration, but at most once since trampoline
854                        // function types are their own trampoline type.
855                        let trampoline_entry = self.register_singleton_rec_group(
856                            gc_runtime,
857                            WasmSubType {
858                                is_final: true,
859                                supertype: None,
860                                composite_type: wasmtime_environ::WasmCompositeType {
861                                    shared: sub_ty.composite_type.shared,
862                                    inner: wasmtime_environ::WasmCompositeInnerType::Func(
863                                        trampoline.into_owned(),
864                                    ),
865                                },
866                            },
867                        );
868                        assert_eq!(trampoline_entry.0.shared_type_indices.len(), 1);
869                        let trampoline_index = trampoline_entry.0.shared_type_indices[0];
870                        log::trace!(
871                            "trampoline_type({shared_type_index:?}) = {trampoline_index:?}",
872                        );
873                        self.debug_assert_registered(trampoline_index);
874                        debug_assert_ne!(shared_type_index, trampoline_index);
875                        self.type_to_trampoline[shared_type_index] = Some(trampoline_index).into();
876                    }
877                }
878            }
879        }
880
881        entry
882    }
883
884    /// Is the given type canonicalized for runtime usage this registry?
885    fn assert_canonicalized_for_runtime_usage_in_this_registry(&self, ty: &WasmSubType) {
886        ty.trace::<_, ()>(&mut |index| match index {
887            EngineOrModuleTypeIndex::RecGroup(_) | EngineOrModuleTypeIndex::Module(_) => {
888                panic!("not canonicalized for runtime usage: {ty:?}")
889            }
890            EngineOrModuleTypeIndex::Engine(idx) => {
891                self.debug_assert_registered(idx);
892                Ok(())
893            }
894        })
895        .unwrap();
896    }
897
898    /// Insert a new type as part of registering a new rec group.
899    ///
900    /// The type must be canonicalized for runtime usage in this registry and
901    /// its rec group must be a new one that we are currently registering, not
902    /// an already-registered rec group.
903    fn insert_one_type_from_rec_group(
904        &mut self,
905        gc_runtime: Option<&dyn GcRuntime>,
906        module_index: ModuleInternedTypeIndex,
907        engine_index: VMSharedTypeIndex,
908        ty: WasmSubType,
909    ) {
910        // Despite being canonicalized for runtime usage, this type may still
911        // have forward references to other types in the rec group we haven't
912        // yet registered. Therefore, we can't use our usual
913        // `assert_canonicalized_for_runtime_usage_in_this_registry` helper here
914        // as that will see the forward references and think they must be
915        // references to types in other registries.
916        assert!(
917            ty.is_canonicalized_for_runtime_usage(),
918            "type is not canonicalized for runtime usage: {ty:?}"
919        );
920
921        assert!(!ty.composite_type.shared);
922        let gc_layout = match &ty.composite_type.inner {
923            wasmtime_environ::WasmCompositeInnerType::Func(_) => None,
924            wasmtime_environ::WasmCompositeInnerType::Array(a) => Some(
925                gc_runtime
926                    .expect("must have a GC runtime to register array types")
927                    .layouts()
928                    .array_layout(a)
929                    .into(),
930            ),
931            wasmtime_environ::WasmCompositeInnerType::Struct(s) => Some(
932                gc_runtime
933                    .expect("must have a GC runtime to register struct types")
934                    .layouts()
935                    .struct_layout(s)
936                    .into(),
937            ),
938            wasmtime_environ::WasmCompositeInnerType::Exn(e) => Some(
939                gc_runtime
940                    .expect("must have a GC runtime to register exception types")
941                    .layouts()
942                    .exn_layout(e)
943                    .into(),
944            ),
945            wasmtime_environ::WasmCompositeInnerType::Cont(_) => None, // FIXME: #10248 stack switching support.
946        };
947
948        // Add the type to our slab.
949        let id = shared_type_index_to_slab_id(engine_index);
950        assert!(self.types.contains(id));
951        assert!(self.types[id].is_none());
952        self.types[id] = Some(Arc::new(ty));
953
954        // Create the supertypes list for this type.
955        if let Some(supertype) = self.types[id].as_ref().unwrap().supertype {
956            let supertype = supertype.unwrap_engine_type_index();
957            let supers_supertypes = self.supertypes(supertype);
958            let mut supertypes = Vec::with_capacity(supers_supertypes.len() + 1);
959            supertypes.extend(
960                supers_supertypes
961                    .iter()
962                    .copied()
963                    .chain(iter::once(supertype)),
964            );
965            self.type_to_supertypes[engine_index] = Some(supertypes.into_boxed_slice());
966        }
967
968        // Only write the type-to-gc-layout entry if we have a GC layout, so
969        // that the map can avoid any heap allocation for backing storage in the
970        // case where Wasm GC is disabled.
971        if let Some(layout) = gc_layout {
972            self.type_to_gc_layout[engine_index] = Some(layout);
973        }
974
975        log::trace!(
976            "finished registering type {module_index:?} as {engine_index:?} = runtime-canonical {:?}",
977            self.types[id].as_ref().unwrap()
978        );
979    }
980
981    /// Get the supertypes list for the given type.
982    ///
983    /// The supertypes are listed in super-to-sub order. `ty` itself is not
984    /// included in the list.
985    fn supertypes(&self, ty: VMSharedTypeIndex) -> &[VMSharedTypeIndex] {
986        self.type_to_supertypes
987            .get(ty)
988            .and_then(|s| s.as_deref())
989            .unwrap_or(&[])
990    }
991
992    /// Register a rec group consisting of a single type.
993    ///
994    /// The type must already be canonicalized for runtime usage in this
995    /// registry.
996    ///
997    /// The returned entry will have already had its reference count incremented
998    /// on behalf of callers.
999    fn register_singleton_rec_group(
1000        &mut self,
1001        gc_runtime: Option<&dyn GcRuntime>,
1002        ty: WasmSubType,
1003    ) -> RecGroupEntry {
1004        self.assert_canonicalized_for_runtime_usage_in_this_registry(&ty);
1005
1006        // This type doesn't have any module-level type references, since it is
1007        // already canonicalized for runtime usage in this registry, so an empty
1008        // map suffices.
1009        let map = PrimaryMap::default();
1010
1011        // This must have `range.len() == 1`, even though we know this type
1012        // doesn't have any intra-group type references, to satisfy
1013        // `register_rec_group`'s preconditions.
1014        let range = ModuleInternedTypeIndex::from_bits(u32::MAX - 1)
1015            ..ModuleInternedTypeIndex::from_bits(u32::MAX);
1016
1017        self.register_rec_group(gc_runtime, &map, range, iter::once(ty))
1018    }
1019
1020    /// Unregister all of a type collection's rec groups.
1021    fn unregister_type_collection(&mut self, collection: &TypeCollection) {
1022        log::trace!("Begin unregistering `TypeCollection`");
1023        for entry in &collection.rec_groups {
1024            self.debug_assert_all_registered(entry.0.shared_type_indices.iter().copied());
1025            if entry.decref("TypeRegistryInner::unregister_type_collection") {
1026                self.unregister_entry(entry.clone());
1027            }
1028        }
1029        log::trace!("Finished unregistering `TypeCollection`");
1030    }
1031
    /// Remove a zero-refcount entry from the registry.
    ///
    /// This does *not* decrement the entry's registration count, it should
    /// instead be invoked only after a previous decrement operation observed
    /// zero remaining registrations.
    ///
    /// Transitively unregisters (via the explicit drop stack) any other rec
    /// groups that were kept alive only by this entry's references.
    fn unregister_entry(&mut self, entry: RecGroupEntry) {
        log::trace!("Attempting to unregister {entry:?}");
        debug_assert!(self.drop_stack.is_empty());

        // There are two races to guard against before we can unregister the
        // entry, even though it was on the drop stack:
        //
        // 1. Although an entry has to reach zero registrations before it is
        //    enqueued in the drop stack, we need to double check whether the
        //    entry is *still* at zero registrations. This is because someone
        //    else can resurrect the entry in between when the
        //    zero-registrations count was first observed and when we actually
        //    acquire the lock to unregister it. In this example, we have
        //    threads A and B, an existing rec group entry E, and a rec group
        //    entry E' that is a duplicate of E:
        //
        //    Thread A                        | Thread B
        //    --------------------------------+-----------------------------
        //    acquire(type registry lock)     |
        //                                    |
        //                                    | decref(E) --> 0
        //                                    |
        //                                    | block_on(type registry lock)
        //                                    |
        //    register(E') == incref(E) --> 1 |
        //                                    |
        //    release(type registry lock)     |
        //                                    |
        //                                    | acquire(type registry lock)
        //                                    |
        //                                    | unregister(E)         !!!!!!
        //
        //    If we aren't careful, we can unregister a type while it is still
        //    in use!
        //
        //    The fix in this case is that we skip unregistering the entry if
        //    its reference count is non-zero, since that means it was
        //    concurrently resurrected and is now in use again.
        //
        // 2. In a slightly more convoluted version of (1), where an entry is
        //    resurrected but then dropped *again*, someone might attempt to
        //    unregister an entry a second time:
        //
        //    Thread A                        | Thread B
        //    --------------------------------|-----------------------------
        //    acquire(type registry lock)     |
        //                                    |
        //                                    | decref(E) --> 0
        //                                    |
        //                                    | block_on(type registry lock)
        //                                    |
        //    register(E') == incref(E) --> 1 |
        //                                    |
        //    release(type registry lock)     |
        //                                    |
        //    decref(E) --> 0                 |
        //                                    |
        //    acquire(type registry lock)     |
        //                                    |
        //    unregister(E)                   |
        //                                    |
        //    release(type registry lock)     |
        //                                    |
        //                                    | acquire(type registry lock)
        //                                    |
        //                                    | unregister(E)         !!!!!!
        //
        //    If we aren't careful, we can unregister a type twice, which leads
        //    to panics and registry corruption!
        //
        //    To detect this scenario and avoid the double-unregistration bug,
        //    we maintain an `unregistered` flag on entries. We set this flag
        //    once an entry is unregistered and therefore, even if it is
        //    enqueued in the drop stack multiple times, we only actually
        //    unregister the entry the first time.
        //
        // A final note: we don't need to worry about any concurrent
        // modifications during the middle of this function's execution, only
        // between (a) when we first observed a zero-registrations count and
        // decided to unregister the type, and (b) when we acquired the type
        // registry's lock so that we could perform that unregistration. This is
        // because this method has exclusive access to `&mut self` -- that is,
        // we have a write lock on the whole type registry -- and therefore no
        // one else can create new references to this zero-registration entry
        // and bring it back to life (which would require finding it in
        // `self.hash_consing_map`, which no one else has access to, because we
        // now have an exclusive lock on `self`).

        // Handle scenario (1) from above.
        //
        // NOTE(review): the `Acquire` load presumably pairs with a `Release`
        // decrement inside `decref` -- confirm against that definition.
        let registrations = entry.0.registrations.load(Acquire);
        if registrations != 0 {
            log::trace!(
                "    {entry:?} was concurrently resurrected and no longer has \
                 zero registrations (registrations -> {registrations})",
            );
            assert_eq!(entry.0.unregistered.load(Acquire), false);
            return;
        }

        // Handle scenario (2) from above.
        if entry.0.unregistered.load(Acquire) {
            log::trace!(
                "    {entry:?} was concurrently resurrected, dropped again, \
                 and already unregistered"
            );
            return;
        }

        // Okay, we are really going to unregister this entry. Enqueue it on the
        // drop stack.
        self.drop_stack.push(entry);

        // Keep unregistering entries until the drop stack is empty. This is
        // logically a recursive process where if we unregister a type that was
        // the only thing keeping another type alive, we then recursively
        // unregister that other type as well. However, we use this explicit
        // drop stack to avoid recursion and the potential stack overflows that
        // recursion implies.
        while let Some(entry) = self.drop_stack.pop() {
            log::trace!("Begin unregistering {entry:?}");
            self.debug_assert_all_registered(entry.0.shared_type_indices.iter().copied());

            // All entries on the drop stack should *really* be ready for
            // unregistration, since no one can resurrect entries once we've
            // locked the registry.
            assert_eq!(entry.0.registrations.load(Acquire), 0);
            assert_eq!(entry.0.unregistered.load(Acquire), false);

            // We are taking responsibility for unregistering this entry, so
            // prevent anyone else from attempting to do it again.
            entry.0.unregistered.store(true, Release);

            // Decrement any other types that this type was shallowly
            // (i.e. non-transitively) referencing and keeping alive. If this
            // was the last thing keeping them registered, its okay to
            // unregister them as well now.
            debug_assert!(entry.0.hash_consing_key.is_canonicalized_for_hash_consing());
            entry
                .0
                .hash_consing_key
                .trace_engine_indices::<_, ()>(&mut |other_index| {
                    self.debug_assert_registered(other_index);
                    let other_entry = self.type_to_rec_group[other_index].as_ref().unwrap();
                    if other_entry.decref("dropping rec group's type references") {
                        self.drop_stack.push(other_entry.clone());
                    }
                    Ok(())
                })
                .unwrap();

            // Remove the entry from the hash-consing map. If we register a
            // duplicate definition of this rec group again in the future, it
            // will be as if it is the first time it has ever been registered,
            // and it will be inserted into the hash-consing map again at that
            // time.
            let was_in_map = self.hash_consing_map.remove(&entry);
            debug_assert!(was_in_map);

            // Similarly, remove the rec group's types from the registry, as
            // well as their entries from the reverse type-to-rec-group
            // map. Additionally, stop holding a strong reference from each
            // function type in the rec group to that function type's trampoline
            // type.
            debug_assert_eq!(
                entry.0.shared_type_indices.len(),
                entry
                    .0
                    .shared_type_indices
                    .iter()
                    .copied()
                    .collect::<crate::hash_set::HashSet<_>>()
                    .len(),
                "should not have any duplicate type indices",
            );
            for ty in entry.0.shared_type_indices.iter().copied() {
                log::trace!("removing {ty:?} from registry");

                let removed_entry = self.type_to_rec_group[ty].take();
                debug_assert_eq!(removed_entry.unwrap(), entry);

                // Remove the associated trampoline type, if any. Trampoline
                // types are kept alive by their function type, so this may in
                // turn enqueue the trampoline's rec group for unregistration.
                if let Some(trampoline_ty) =
                    self.type_to_trampoline.get(ty).and_then(|x| x.expand())
                {
                    self.debug_assert_registered(trampoline_ty);
                    self.type_to_trampoline[ty] = None.into();
                    let trampoline_entry = self.type_to_rec_group[trampoline_ty].as_ref().unwrap();
                    if trampoline_entry.decref("dropping rec group's trampoline-type references") {
                        self.drop_stack.push(trampoline_entry.clone());
                    }
                }

                // Remove the type's supertypes list, if any. Take care to guard
                // this assignment so that we don't accidentally force the
                // secondary map to allocate even when we never actually use
                // Wasm GC.
                if self.type_to_supertypes.get(ty).is_some() {
                    self.type_to_supertypes[ty] = None;
                }

                // Same as above, but for the type's GC layout.
                if self.type_to_gc_layout.get(ty).is_some() {
                    self.type_to_gc_layout[ty] = None;
                }

                // Finally, return the type's slot in the slab allocator.
                let id = shared_type_index_to_slab_id(ty);
                let deallocated_ty = self.types.dealloc(id);
                assert!(deallocated_ty.is_some());
            }

            log::trace!("End unregistering {entry:?}");
        }
    }
1250}
1251
// `TypeRegistryInner` implements `Drop` in debug builds to assert that
// all types have been unregistered for the registry.
#[cfg(debug_assertions)]
impl Drop for TypeRegistryInner {
    fn drop(&mut self) {
        // Exhaustively destructure `self` so that adding a new field to
        // `TypeRegistryInner` is a compile error here until this list of
        // emptiness assertions is updated to cover it.
        let TypeRegistryInner {
            hash_consing_map,
            types,
            type_to_rec_group,
            type_to_supertypes,
            type_to_trampoline,
            type_to_gc_layout,
            drop_stack,
        } = self;
        assert!(
            hash_consing_map.is_empty(),
            "type registry not empty: hash consing map is not empty: {hash_consing_map:#?}"
        );
        assert!(
            types.is_empty(),
            "type registry not empty: types slab is not empty: {types:#?}"
        );
        // The secondary maps below may still have backing storage after all
        // entries were cleared, so "empty" here means either no storage at
        // all or every slot reset to `None`.
        assert!(
            type_to_rec_group.is_empty() || type_to_rec_group.values().all(|x| x.is_none()),
            "type registry not empty: type-to-rec-group map is not empty: {type_to_rec_group:#?}"
        );
        assert!(
            type_to_supertypes.is_empty() || type_to_supertypes.values().all(|x| x.is_none()),
            "type registry not empty: type-to-supertypes map is not empty: {type_to_supertypes:#?}"
        );
        assert!(
            type_to_trampoline.is_empty() || type_to_trampoline.values().all(|x| x.is_none()),
            "type registry not empty: type-to-trampoline map is not empty: {type_to_trampoline:#?}"
        );
        assert!(
            type_to_gc_layout.is_empty() || type_to_gc_layout.values().all(|x| x.is_none()),
            "type registry not empty: type-to-gc-layout map is not empty: {type_to_gc_layout:#?}"
        );
        assert!(
            drop_stack.is_empty(),
            "type registry not empty: drop stack is not empty: {drop_stack:#?}"
        );
    }
}
1296
/// Implements a shared type registry.
///
/// WebAssembly requires that the caller and callee types in an indirect
/// call must match. To implement this efficiently, keep a registry of all
/// types, shared by all instances, so that call sites can just do an
/// index comparison.
///
/// This is a thin, thread-safe wrapper: all state lives in
/// `TypeRegistryInner` behind a `RwLock`, so lookups take a read lock while
/// registration/unregistration take the write lock.
#[derive(Debug)]
pub struct TypeRegistry(RwLock<TypeRegistryInner>);
1305
1306impl TypeRegistry {
1307    /// Creates a new shared type registry.
1308    pub fn new() -> Self {
1309        Self(RwLock::new(TypeRegistryInner::default()))
1310    }
1311
1312    #[inline]
1313    pub fn debug_assert_contains(&self, index: VMSharedTypeIndex) {
1314        if cfg!(debug_assertions) {
1315            self.0.read().debug_assert_registered(index);
1316        }
1317    }
1318
1319    /// Looks up a function type from a shared type index.
1320    ///
1321    /// This does *NOT* prevent the type from being unregistered while you are
1322    /// still using the resulting value! Use the `RegisteredType::root`
1323    /// constructor if you need to ensure that property and you don't have some
1324    /// other mechanism already keeping the type registered.
1325    pub fn borrow(&self, index: VMSharedTypeIndex) -> Option<Arc<WasmSubType>> {
1326        let id = shared_type_index_to_slab_id(index);
1327        let inner = self.0.read();
1328        inner.types.get(id).and_then(|ty| ty.clone())
1329    }
1330
1331    /// Get the GC layout for the given index's type.
1332    ///
1333    /// Returns `None` for types that do not have GC layouts (i.e. function
1334    /// types).
1335    pub fn layout(&self, index: VMSharedTypeIndex) -> Option<GcLayout> {
1336        let inner = self.0.read();
1337        inner.type_to_gc_layout.get(index).and_then(|l| l.clone())
1338    }
1339
1340    /// Get the trampoline type for the given function type index.
1341    ///
1342    /// Panics for non-function type indices.
1343    pub fn trampoline_type(&self, index: VMSharedTypeIndex) -> VMSharedTypeIndex {
1344        let slab_id = shared_type_index_to_slab_id(index);
1345        let inner = self.0.read();
1346        inner.debug_assert_registered(index);
1347
1348        let ty = inner.types[slab_id].as_ref().unwrap();
1349        debug_assert!(
1350            ty.is_func(),
1351            "cannot get the trampoline type of a non-function type: {index:?} = {ty:?}"
1352        );
1353
1354        match inner.type_to_trampoline.get(index).and_then(|x| x.expand()) {
1355            Some(ty) => ty,
1356            None => {
1357                // The function type is its own trampoline type.
1358                index
1359            }
1360        }
1361    }
1362
1363    /// Is type `sub` a subtype of `sup`?
1364    #[inline]
1365    pub fn is_subtype(&self, sub: VMSharedTypeIndex, sup: VMSharedTypeIndex) -> bool {
1366        if cfg!(debug_assertions) {
1367            self.0.read().debug_assert_registered(sub);
1368            self.0.read().debug_assert_registered(sup);
1369        }
1370
1371        if sub == sup {
1372            return true;
1373        }
1374
1375        self.is_subtype_slow(sub, sup)
1376    }
1377
1378    fn is_subtype_slow(&self, sub: VMSharedTypeIndex, sup: VMSharedTypeIndex) -> bool {
1379        // Do the O(1) subtype checking trick:
1380        //
1381        // In a type system with single inheritance, the subtyping relationships
1382        // between all types form a set of trees. The root of each tree is a
1383        // type that has no supertype; each node's immediate children are the
1384        // types that directly subtype that node.
1385        //
1386        // For example, consider these types:
1387        //
1388        //     class Base {}
1389        //     class A subtypes Base {}
1390        //     class B subtypes Base {}
1391        //     class C subtypes A {}
1392        //     class D subtypes A {}
1393        //     class E subtypes C {}
1394        //
1395        // These types produce the following tree:
1396        //
1397        //                Base
1398        //               /    \
1399        //              A      B
1400        //             / \
1401        //            C   D
1402        //           /
1403        //          E
1404        //
1405        // Note the following properties:
1406        //
1407        // 1. If `sub` is a subtype of `sup` (either directly or transitively)
1408        //    then `sup` *must* be on the path from `sub` up to the root of
1409        //    `sub`'s tree.
1410        //
1411        // 2. Additionally, `sup` *must* be the `i`th node down from the root in
1412        //    that path, where `i` is the length of the path from `sup` to its
1413        //    tree's root.
1414        //
1415        // Therefore, if we have the path to the root for each type (we do) then
1416        // we can simply check if `sup` is at index `supertypes(sup).len()`
1417        // within `supertypes(sub)`.
1418        let inner = self.0.read();
1419        let sub_supertypes = inner.supertypes(sub);
1420        let sup_supertypes = inner.supertypes(sup);
1421        sub_supertypes.get(sup_supertypes.len()) == Some(&sup)
1422    }
1423}