From c5663208c784bd12f44556fac0bc676d224a64d6 Mon Sep 17 00:00:00 2001 From: Jonas Kruckenberg Date: Mon, 20 Jan 2025 10:18:14 +0100 Subject: [PATCH] feat: user address space functionality (#246) * feat: implement `USER` permission * rename `MmapSlice` to `UserMmap` * typo * remove `UnwindSafe` bound on `catch_traps` * remove `copy_from_user` & `copy_to_user` in favor of the more universal `with_user_memory_access` * fix: clear sum when entering trap handler * fixes * fix riscv register field setting and clearing * Update mod.rs * fix: `with_user_memory_access` passthrough return value * refactor: use `VirtualAddress` in error type * fix: kernel counter creation * more helpful assert messages * feat: implement `core::iter::Step` for address types * fix: respect `VMContext` field alignments * feat: exit `array_to_wasm_trampoline` with trap instead of return * refactor: move allocator and addressspace into store * fix `UserMmap` * run wasm tests * correct jump to userspace * fmt & clippy * Update main.rs --- kernel/fib_cpp.wat | 2 +- kernel/src/arch/riscv64/mod.rs | 97 ++----- kernel/src/arch/riscv64/trap_handler.rs | 17 +- kernel/src/arch/riscv64/vm.rs | 22 +- kernel/src/main.rs | 3 +- kernel/src/metrics.rs | 18 +- kernel/src/trap_handler.rs | 27 +- kernel/src/vm/address.rs | 36 +++ kernel/src/vm/address_space.rs | 44 ++- kernel/src/vm/address_space_region.rs | 60 +++++ kernel/src/vm/error.rs | 4 +- kernel/src/vm/mmap.rs | 155 ----------- kernel/src/vm/mod.rs | 27 +- kernel/src/vm/trap_handler.rs | 8 +- kernel/src/vm/user_mmap.rs | 255 ++++++++++++++++++ kernel/src/vm/vmo/paged.rs | 4 +- kernel/src/wasm/cranelift/compiler.rs | 5 +- kernel/src/wasm/errors.rs | 5 +- kernel/src/wasm/func.rs | 24 +- kernel/src/wasm/instance.rs | 7 +- kernel/src/wasm/instance_allocator.rs | 46 ++-- kernel/src/wasm/linker.rs | 6 +- kernel/src/wasm/mod.rs | 29 +- kernel/src/wasm/module.rs | 16 +- kernel/src/wasm/runtime/code_memory.rs | 15 +- kernel/src/wasm/runtime/code_registry.rs | 8 +- kernel/src/wasm/runtime/instance.rs | 59 ++-- kernel/src/wasm/runtime/instance_allocator.rs | 52 +--- kernel/src/wasm/runtime/memory.rs | 25 +- kernel/src/wasm/runtime/mmap_vec.rs | 70 +++-- kernel/src/wasm/runtime/owned_vmcontext.rs | 53 +++- kernel/src/wasm/runtime/table.rs | 2 +- kernel/src/wasm/runtime/vmoffsets.rs | 30 ++- kernel/src/wasm/store.rs | 5 + kernel/src/wasm/trap.rs | 13 + libs/riscv/src/register/mod.rs | 49 ++-- libs/riscv/src/register/satp.rs | 6 +- libs/riscv/src/register/scause.rs | 8 +- libs/riscv/src/register/scounteren.rs | 14 +- libs/riscv/src/register/sepc.rs | 4 +- libs/riscv/src/register/sie.rs | 6 +- libs/riscv/src/register/sscratch.rs | 4 +- libs/riscv/src/register/sstatus.rs | 65 ++--- libs/riscv/src/register/stval.rs | 4 +- libs/riscv/src/register/stvec.rs | 4 +- libs/wavltree/src/lib.rs | 5 +- 46 files changed, 856 insertions(+), 562 deletions(-) delete mode 100644 kernel/src/vm/mmap.rs create mode 100644 kernel/src/vm/user_mmap.rs diff --git a/kernel/fib_cpp.wat b/kernel/fib_cpp.wat index c7af8109..772625f7 100644 --- a/kernel/fib_cpp.wat +++ b/kernel/fib_cpp.wat @@ -97,7 +97,7 @@ local.get 21 return) (table (;0;) 1 1 funcref) - (memory (;0;) 2) + (memory (;0;) 1) (global (;0;) (mut i32) (i32.const 66560)) (export "memory" (memory 0)) (export "fib" (func 0)) diff --git a/kernel/src/arch/riscv64/mod.rs b/kernel/src/arch/riscv64/mod.rs index a1a19a63..17c2a9a2 100644 --- a/kernel/src/arch/riscv64/mod.rs +++ b/kernel/src/arch/riscv64/mod.rs @@ -11,13 +11,9 @@ mod trap_handler; mod utils; mod vm.
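The headline change in this file is the replacement of `copy_from_user`/`copy_to_user` with the single `with_user_memory_access` helper shown below. A minimal usage sketch, assuming the patch's own `catch_traps` and the new `Error::Trap` variant added later in this patch (the `read_user_byte` helper itself is hypothetical, not part of the patch):

fn read_user_byte(addr: *const u8) -> Result<u8, crate::vm::Error> {
    crate::trap_handler::catch_traps(|| {
        // Safety: SUM is enabled only for the duration of the closure; a faulting
        // access is caught by `catch_traps` and surfaces as `Error::Trap` below.
        unsafe { crate::arch::with_user_memory_access(|| addr.read_volatile()) }
    })
    .map_err(crate::vm::Error::Trap)
}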
-use crate::ensure; -use crate::error::Error; use crate::vm::VirtualAddress; use bitflags::bitflags; use core::arch::asm; -use core::panic::RefUnwindSafe; -use core::ptr; use dtb_parser::Strings; use fallible_iterator::FallibleIterator; use riscv::sstatus::FS; @@ -228,87 +224,24 @@ pub fn rmb() { } } -/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. `src` must be a valid pointer in userspace, `dst` -/// must be a valid pointer in kernelspace. -/// -/// # Errors -/// -/// Returns and error if the pointers are invalid or the copy operation failed. -pub fn copy_from_user<T>(src: *const T, dst: *mut T, count: usize) -> crate::Result<()> -where - T: Clone + RefUnwindSafe, -{ - check_ranges(src, dst, count)?; - - // Safety: checked above - unsafe { copy_inner(src, dst, count) } -} - -/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. `src` must be a valid pointer in kernelspace, `dst` -/// must be a valid pointer in userspace. -/// -/// # Errors -/// -/// Returns and error if the pointers are invalid or the copy operation failed. -pub fn copy_to_user<T>(src: *const T, dst: *mut T, count: usize) -> crate::Result<()> +#[inline] +pub unsafe fn with_user_memory_access<F, R>(f: F) -> R where - T: Clone + RefUnwindSafe, + F: FnOnce() -> R, { - check_ranges(dst, src, count)?; - - // Safety: checked above - unsafe { copy_inner(src, dst, count) } -} - -fn check_ranges<T>(user: *const T, kernel: *const T, count: usize) -> crate::Result<()> { - // ensure slice is in user space - ensure!( - VirtualAddress::new(user as usize).is_some_and(|addr| addr.is_user_accessible()), - Error::InvalidArgument - ); - ensure!( - VirtualAddress::new(user as usize) - .and_then(|addr| addr.checked_add(count * size_of::<T>())) - .is_some_and(|addr| addr.is_user_accessible()), - Error::InvalidArgument - ); - - // ensure src slice is in kernel space - ensure!( - VirtualAddress::new(kernel as usize).is_some_and(is_kernel_address), - Error::InvalidArgument - ); - ensure!( - VirtualAddress::new(kernel as usize) - .and_then(|addr| addr.checked_add(count * size_of::<T>())) - .is_some_and(is_kernel_address), - Error::InvalidArgument - ); - - Ok(()) -} + // Allow supervisor access to user memory + // Safety: register access + unsafe { + sstatus::set_sum(); + } -unsafe fn copy_inner<T>(src: *const T, dst: *mut T, count: usize) -> crate::Result<()> -where - T: Clone + RefUnwindSafe, -{ - crate::trap_handler::catch_traps(|| { - // Allow supervisor access to user memory - // Safety: register access - unsafe { - sstatus::set_sum(); - } + let r = f(); - // Safety: checked by caller and `catch_traps` - unsafe { - ptr::copy_nonoverlapping(src, dst, count); - } + // Disable supervisor access to user memory + // Safety: register access + unsafe { + sstatus::clear_sum(); + } - // Disable supervisor access to user memory - // Safety: register access - unsafe { - sstatus::clear_sum(); - } - }) - .map_err(|_| Error::AccessDenied) + r } diff --git a/kernel/src/arch/riscv64/trap_handler.rs b/kernel/src/arch/riscv64/trap_handler.rs index 3d88be12..eb06379c 100644 --- a/kernel/src/arch/riscv64/trap_handler.rs +++ b/kernel/src/arch/riscv64/trap_handler.rs @@ -8,6 +8,7 @@ use super::utils::{define_op, load_fp, load_gp, save_fp, save_gp}; use crate::arch::PAGE_SIZE; use crate::trap_handler::TrapReason; +use crate::vm::VirtualAddress; use crate::TRAP_STACK_SIZE_PAGES; use core::arch::{asm, naked_asm}; use riscv::scause::{Exception, Interrupt, Trap}; @@ -84,7 +85,7 @@ unsafe extern "C" fn default_trap_entry() { unsafe { naked_asm!
{ ".align 2", - // "mv t0, sp", // save the correct stack pointer + "csrrw sp, sscratch, sp", // sp points to the TrapFrame "add sp, sp, -0x210", @@ -251,12 +252,18 @@ fn default_trap_handler( a6: usize, a7: usize, ) -> *mut TrapFrame { + // Clear the SUM bit to prevent userspace memory access in case we interrupted the kernel + // Safety: register access + unsafe { + sstatus::clear_sum(); + } + let cause = scause::read().cause(); - log::trace!("{:?}", sstatus::read()); log::trace!("trap_handler cause {cause:?}, a1 {a1:#x} a2 {a2:#x} a3 {a3:#x} a4 {a4:#x} a5 {a5:#x} a6 {a6:#x} a7 {a7:#x}"); let epc = sepc::read(); let tval = stval::read(); + log::trace!("{:?};epc={epc:#x};tval={tval:#x}", sstatus::read()); let reason = match cause { Trap::Interrupt(Interrupt::SupervisorSoft | Interrupt::VirtualSupervisorSoft) => { @@ -286,9 +293,9 @@ fn default_trap_handler( }; crate::trap_handler::begin_trap(crate::trap_handler::Trap { - pc: epc, - fp: 0, - faulting_address: tval, + pc: VirtualAddress::new(epc).unwrap(), + fp: VirtualAddress::default(), + faulting_address: VirtualAddress::new(tval).unwrap(), reason, }); diff --git a/kernel/src/arch/riscv64/vm.rs b/kernel/src/arch/riscv64/vm.rs index dad2795c..dc5aae11 100644 --- a/kernel/src/arch/riscv64/vm.rs +++ b/kernel/src/arch/riscv64/vm.rs @@ -587,13 +587,31 @@ impl PageTableEntry { bitflags! { #[derive(Debug, Copy, Clone, Eq, PartialEq, Default)] pub struct PTEFlags: usize { + /// Indicates the page table entry is initialized const VALID = 1 << 0; + /// Whether the page is readable const READ = 1 << 1; + /// Whether the page is writable const WRITE = 1 << 2; + /// Whether the page is executable const EXECUTE = 1 << 3; + /// Whether the page is accessible to user mode. + /// + /// By default, pages are only accessible in supervisor mode, but marking a page as user-accessible + /// allows user mode code to access the page too. const USER = 1 << 4; + /// Designates a global mapping. + /// + /// Global mappings exist in all address spaces. + /// + /// Note that as stated in the RISC-V privileged spec, forgetting to mark a global mapping as global + /// is *fine* since it just results in slower performance. However, marking a non-global mapping as + /// global by accident will result in undefined behaviour (the CPU might use any of the competing + /// mappings for the address). const GLOBAL = 1 << 5; + /// Indicates the page has been read from, written to, or executed. const ACCESSED = 1 << 6; + /// Indicates the page has been written to.
const DIRTY = 1 << 7; } } @@ -602,13 +620,15 @@ impl From<crate::vm::Permissions> for PTEFlags { fn from(flags: crate::vm::Permissions) -> Self { use crate::vm::Permissions; - let mut out = Self::VALID | Self::DIRTY | Self::ACCESSED; + // we currently don't use the accessed & dirty bits, and it's recommended to set them if unused + let mut out = Self::VALID | Self::ACCESSED | Self::DIRTY; for flag in flags { match flag { Permissions::READ => out.insert(Self::READ), Permissions::WRITE => out.insert(Self::WRITE), Permissions::EXECUTE => out.insert(Self::EXECUTE), + Permissions::USER => out.insert(Self::USER), _ => unreachable!(), } } diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 02e4d77d..1367d787 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -14,6 +14,7 @@ #![feature(debug_closure_helpers)] #![expect(internal_features, reason = "panic internals")] #![feature(std_internals, panic_can_unwind, fmt_internals)] +#![feature(step_trait)] #![expect(dead_code, reason = "TODO")] // TODO remove #![expect(edition_2024_expr_fragment_specifier, reason = "vetted")] @@ -59,7 +60,7 @@ pub const TRAP_STACK_SIZE_PAGES: usize = 64; // TODO find a lower more appropria /// doesn't cause startup slowdown & inefficient mapping, but large enough so we can bootstrap /// our own virtual memory subsystem. At that point we are no longer reliant on this initial heap /// size and can dynamically grow the heap as needed. -pub const INITIAL_HEAP_SIZE_PAGES: usize = 4096; // 32 MiB +pub const INITIAL_HEAP_SIZE_PAGES: usize = 4096 * 2; // 32 MiB pub type Result = core::result::Result; diff --git a/kernel/src/metrics.rs b/kernel/src/metrics.rs index f17f6957..55f62ffd 100644 --- a/kernel/src/metrics.rs +++ b/kernel/src/metrics.rs @@ -32,26 +32,26 @@ use core::sync::atomic::{AtomicU64, Ordering}; #[macro_export] macro_rules! counter { ($name:expr) => {{ - let name: &str = $name; - - #[unsafe(link_section = concat!(".bss.kcounter.", name))] - static ARENA: $crate::thread_local::ThreadLocal<AtomicU64> = + #[unsafe(link_section = concat!(".bss.kcounter.", $name))] + static ARENA: $crate::thread_local::ThreadLocal<::core::sync::atomic::AtomicU64> = $crate::thread_local::ThreadLocal::new(); - Counter { - arena: &ARENA, - name, - } + Counter::new(&ARENA, $name) }}; } /// A kernel counter. -struct Counter { +pub struct Counter { arena: &'static ThreadLocal<AtomicU64>, name: &'static str, } impl Counter { + #[doc(hidden)] + pub const fn new(arena: &'static ThreadLocal<AtomicU64>, name: &'static str) -> Self { + Self { arena, name } + } + /// Increment the counter. pub fn increment(&self, value: u64) { self.arena diff --git a/kernel/src/trap_handler.rs b/kernel/src/trap_handler.rs index 75f888d0..1a479f46 100644 --- a/kernel/src/trap_handler.rs +++ b/kernel/src/trap_handler.rs @@ -6,12 +6,12 @@ // copied, modified, or distributed except according to those terms. use crate::arch::longjmp; +use crate::vm::VirtualAddress; use crate::{arch, vm}; use core::cell::Cell; use core::fmt::Write; use core::mem::{ManuallyDrop, MaybeUninit}; use core::ops::ControlFlow; -use core::panic::UnwindSafe; use core::ptr; use core::ptr::addr_of_mut; use thread_local::thread_local; @@ -23,9 +23,9 @@ thread_local! { #[derive(Debug, Copy, Clone)] pub struct Trap { - pub pc: usize, - pub fp: usize, - pub faulting_address: usize, + pub pc: VirtualAddress, + pub fp: VirtualAddress, + pub faulting_address: VirtualAddress, pub reason: TrapReason, } @@ -103,18 +103,10 @@ pub fn resume_trap(trap: Trap) -> ! { /// result if the closure didn't trigger a trap, and will return `Err(trap)` if it did.
The `trap` object /// holds further information about the trap's instruction pointer, faulting address and trap reason. /// -/// # `UnwindSafe` -/// -/// This function borrows the [`UnwindSafe`] trait bound from [`catch_unwind`][1] for the same reasons. -/// A hardware trap might happen while a data structure is in a temporarily invalid state (i.e. during -/// mutation) and continuing to access such data would lead to hard to debug bugs. If in the future we -/// determine the restrictions implied by `UnwindSafe` aren't enough for the purposes of -/// signal safety we can introduce a new trait. -/// /// [1]: [crate::panic::catch_unwind] pub fn catch_traps<F, R>(f: F) -> Result<R, Trap> where - F: FnOnce() -> R + UnwindSafe, + F: FnOnce() -> R, { union Data { // when the closure completed successfully, this will hold the return @@ -160,8 +152,13 @@ where } } -fn fault_resume_panic(reason: TrapReason, pc: usize, fp: usize, faulting_address: usize) -> ! { - panic!("UNCAUGHT KERNEL TRAP {reason:?} pc={pc:#x};fp={fp:#x};faulting_address={faulting_address:#x};"); +fn fault_resume_panic( + reason: TrapReason, + pc: VirtualAddress, + fp: VirtualAddress, + faulting_address: VirtualAddress, +) -> ! { + panic!("UNCAUGHT KERNEL TRAP {reason:?} pc={pc};fp={fp};faulting_address={faulting_address};"); } /// Begins processing a trap. diff --git a/kernel/src/vm/address.rs b/kernel/src/vm/address.rs index d9b57a92..29eb1684 100644 --- a/kernel/src/vm/address.rs +++ b/kernel/src/vm/address.rs @@ -225,6 +225,38 @@ macro_rules! address_impl { .finish() } } + + impl core::iter::Step for $addr { + fn steps_between(start: &Self, end: &Self) -> Option<usize> { + core::iter::Step::steps_between(&start.0, &end.0) + } + + fn forward_checked(start: Self, count: usize) -> Option<Self> { + core::iter::Step::forward_checked(start.0, count).map(Self) + } + + fn forward(start: Self, count: usize) -> Self { + Self(core::iter::Step::forward(start.0, count)) + } + + unsafe fn forward_unchecked(start: Self, count: usize) -> Self { + // Safety: checked by the caller + Self(unsafe { core::iter::Step::forward_unchecked(start.0, count) }) + } + + fn backward_checked(start: Self, count: usize) -> Option<Self> { + core::iter::Step::backward_checked(start.0, count).map(Self) + } + + fn backward(start: Self, count: usize) -> Self { + Self(core::iter::Step::backward(start.0, count)) + } + + unsafe fn backward_unchecked(start: Self, count: usize) -> Self { + // Safety: checked by the caller + Self(unsafe { core::iter::Step::backward_unchecked(start.0, count) }) + } + } }; } @@ -291,6 +323,9 @@ macro_rules!
address_range_impl! { let b = Range::from(other.end..self.end); ((!a.is_empty()).then_some(a), (!b.is_empty()).then_some(b)) } + fn clamp(&self, range: Self) -> Self { + Range::from(self.start.max(range.start)..self.end.min(range.end)) + } }; } @@ -363,6 +398,7 @@ pub trait AddressRangeExt { fn difference(&self, other: Self) -> (Option<Self>, Option<Self>) where Self: Sized; + fn clamp(&self, range: Self) -> Self; } impl AddressRangeExt for Range { diff --git a/kernel/src/vm/address_space.rs b/kernel/src/vm/address_space.rs index c23712db..5c1683aa 100644 --- a/kernel/src/vm/address_space.rs +++ b/kernel/src/vm/address_space.rs @@ -29,6 +29,7 @@ use rand_chacha::ChaCha20Rng; // const VIRT_ALLOC_ENTROPY: u8 = u8::try_from((arch::VIRT_ADDR_BITS - arch::PAGE_SHIFT as u32) + 1).unwrap(); const VIRT_ALLOC_ENTROPY: u8 = 27; +#[derive(Debug, Clone, Copy)] pub enum AddressSpaceKind { User, Kernel, } /// Represents the address space of a process (or the kernel). pub struct AddressSpace { /// A binary search tree of regions that make up this address space. - pub(super) regions: wavltree::WAVLTree, + pub(crate) regions: wavltree::WAVLTree, /// The hardware address space backing this "logical" address space that changes need to be /// materialized into in order to take effect. - pub(super) arch: arch::AddressSpace, + pub arch: arch::AddressSpace, /// The maximum range this address space can encompass. /// /// This is used to check new mappings against and speed up page fault handling. @@ -80,6 +81,10 @@ impl AddressSpace { } } + pub fn kind(&self) -> AddressSpaceKind { + self.kind + } + /// Create a new region in this address space. /// /// The mapping will be placed at a chosen spot in the address space that @@ -356,7 +361,7 @@ impl AddressSpace { /// - replace old frame with new frame /// - update MMU page table pub fn page_fault(&mut self, addr: VirtualAddress, flags: PageFaultFlags) -> Result<(), Error> { - assert!(flags.is_valid(), "invalid page fault flags {flags:?}"); // make sure addr is even a valid address for this address space match self.kind { @@ -463,6 +468,36 @@ impl AddressSpace { Ok(region) } + pub fn ensure_mapped( + &mut self, + range: Range<VirtualAddress>, + will_write: bool, + ) -> Result<(), Error> { + ensure!( + range.start.is_aligned_to(arch::PAGE_SIZE), + Error::MisalignedStart + ); + ensure!( + range.end.is_aligned_to(arch::PAGE_SIZE), + Error::MisalignedEnd + ); + ensure!(range.size() <= self.max_range.size(), Error::SizeTooLarge); + + let mut batch = Batch::new(&mut self.arch); + // walk the affected regions one by one, clamping the request to each region's range + let mut current = range.start; + while current < range.end { + let mut c = self.regions.find_mut(&current); + let region = c.get_mut().unwrap(); + let clamped = range.clamp(region.range); + region.ensure_mapped(&mut batch, clamped, will_write)?; + + current = clamped.end; + } + batch.flush()?; + + Ok(()) + } + #[expect(clippy::unnecessary_wraps, reason = "TODO")] fn map_internal( &mut self, @@ -594,8 +629,7 @@ impl AddressSpace { let mut candidate_spot_count = 0; - debug_assert!(!self.regions.is_empty()); - // // if the tree is empty, treat max_range as the gap + // if the tree is empty, treat max_range as the gap if self.regions.is_empty() { let aligned_gap = self.max_range.checked_align_in(layout.align()).unwrap(); let spot_count = spots_in_range(layout, aligned_gap); diff --git a/kernel/src/vm/address_space_region.rs b/kernel/src/vm/address_space_region.rs index 48cddcd3..22aa0983 100644 ---
a/kernel/src/vm/address_space_region.rs +++ b/kernel/src/vm/address_space_region.rs @@ -90,6 +90,66 @@ impl AddressSpaceRegion { Ok(()) } + pub fn ensure_mapped( + self: Pin<&mut Self>, + batch: &mut Batch, + range: Range<VirtualAddress>, + will_write: bool, + ) -> Result<(), Error> { + let vmo_relative_range = Range { + start: range.start.checked_sub_addr(self.range.start).unwrap(), + end: range.end.checked_sub_addr(self.range.start).unwrap(), + }; + + match self.vmo.as_ref() { + Vmo::Wired(vmo) => { + let range_phys = vmo + .lookup_contiguous(vmo_relative_range) + .expect("contiguous lookup for wired VMOs should never fail"); + + batch.append( + range.start, + range_phys.start, + range_phys.size(), + self.permissions.into(), + )?; + } + Vmo::Paged(vmo) => { + if will_write { + let mut vmo = vmo.write(); + + for addr in range.iter().step_by(arch::PAGE_SIZE) { + debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE)); + let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap(); + let frame = vmo.require_owned_frame(vmo_relative_offset)?; + batch.append( + addr, + frame.addr(), + arch::PAGE_SIZE, + self.permissions.into(), + )?; + } + } else { + let vmo = vmo.read(); + + for addr in range.iter().step_by(arch::PAGE_SIZE) { + debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE)); + let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap(); + let frame = vmo.require_read_frame(vmo_relative_offset)?; + batch.append( + addr, + frame.addr(), + arch::PAGE_SIZE, + self.permissions.difference(Permissions::WRITE).into(), + )?; + } + } + } + } + + Ok(()) + } + pub fn page_fault( self: Pin<&mut Self>, batch: &mut Batch, diff --git a/kernel/src/vm/error.rs b/kernel/src/vm/error.rs index 1ec5323d..f48ad60b 100644 --- a/kernel/src/vm/error.rs +++ b/kernel/src/vm/error.rs @@ -32,6 +32,7 @@ pub enum Error { Sbi(riscv::sbi::Error), KernelFaultInUserSpace(VirtualAddress), UserFaultInKernelSpace(VirtualAddress), + Trap(crate::trap_handler::Trap), } impl From for Error { @@ -66,7 +67,8 @@ impl Display for Error { #[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))] Error::Sbi(err) => write!(f, "SBI call failed: {err}"), Error::KernelFaultInUserSpace(addr) => write!(f, "non-user address fault in user address space addr={addr:?}"), - Error::UserFaultInKernelSpace(addr) => write!(f, "non-kernel address fault in kernel address space addr={addr:?}") + Error::UserFaultInKernelSpace(addr) => write!(f, "non-kernel address fault in kernel address space addr={addr:?}"), + Error::Trap(trap) => write!(f, "operation failed with trap {trap:?}") } } } diff --git a/kernel/src/vm/mmap.rs b/kernel/src/vm/mmap.rs deleted file mode 100644 index a181e806..00000000 --- a/kernel/src/vm/mmap.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2025 Jonas Kruckenberg -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms.
- -use crate::arch; -use crate::vm::address::AddressRangeExt; -use crate::vm::address_space_region::AddressSpaceRegion; -use crate::vm::vmo::Vmo; -use crate::vm::{ - AddressSpace, ArchAddressSpace, Error, Permissions, VirtualAddress, THE_ZERO_FRAME, -}; -use core::alloc::Layout; -use core::num::NonZeroUsize; -use core::pin::Pin; -use core::ptr::NonNull; -use core::range::Range; -use core::{iter, ptr, slice}; - -/// A memory mapping, essentially handle to an `AddressSpaceRegion` -#[derive(Debug)] -pub struct MmapSlice { - ptr: *mut AddressSpaceRegion, - range: Range, -} - -// Safety: All mutations of the `*mut AddressSpaceRegion` are happening through a `&mut AddressSpace` -unsafe impl Send for MmapSlice {} -// Safety: All mutations of the `*mut AddressSpaceRegion` are happening through a `&mut AddressSpace` -unsafe impl Sync for MmapSlice {} - -impl MmapSlice { - pub unsafe fn from_raw(ptr: *mut AddressSpaceRegion) -> Self { - Self { - ptr, - // Safety: caller has to ensure safety - range: unsafe { ptr.as_ref().unwrap().range }, - } - } - - /// Creates a new empty `Mmap`. - /// - /// Note that the size of this cannot be changed after the fact, all accessors will return empty - /// slices and permission changing methods will always fail. - pub fn new_empty() -> Self { - Self { - ptr: ptr::null_mut(), - range: Range::default(), - } - } - - /// Creates a new read-write (`RW`) memory mapping in the given address space. - pub fn new_zeroed(aspace: &mut AddressSpace, len: usize) -> Result { - let layout = Layout::from_size_align(len, arch::PAGE_SIZE).unwrap(); - let vmo = Vmo::new_paged(iter::repeat_n( - THE_ZERO_FRAME.clone(), - layout.size().div_ceil(arch::PAGE_SIZE), - )); - - let region = aspace.map(layout, vmo, 0, Permissions::READ | Permissions::WRITE, None)?; - - Ok(Self { - range: region.range, - // Safety: we only use the ptr as an identifier in the WAVLTree - ptr: ptr::from_mut(unsafe { Pin::into_inner_unchecked(region) }), - }) - } - - /// Returns a slice to the memory mapped by this `Mmap`. - #[inline] - pub unsafe fn slice(&self, range: Range) -> &[u8] { - assert!(range.end <= self.len()); - let len = range.end.checked_sub(range.start).unwrap(); - // Safety: constructors ensure invariants are maintained - unsafe { slice::from_raw_parts(self.as_ptr().add(range.start), len) } - } - - /// Returns a mutable slice to the memory mapped by this `Mmap`. - #[inline] - pub unsafe fn slice_mut(&mut self, range: Range) -> &mut [u8] { - assert!(range.end <= self.len()); - let len = range.end.checked_sub(range.start).unwrap(); - // Safety: constructors ensure invariants are maintained - unsafe { slice::from_raw_parts_mut(self.as_mut_ptr().add(range.start), len) } - } - - /// Returns a pointer to the start of the memory mapped by this `Mmap`. - #[inline] - pub fn as_ptr(&self) -> *const u8 { - self.range.start.as_ptr() - } - - /// Returns a mutable pointer to the start of the memory mapped by this `Mmap`. - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut u8 { - self.range.start.as_mut_ptr() - } - - /// Returns the size in bytes of this memory mapping. - #[inline] - pub fn len(&self) -> usize { - // Safety: the constructor ensures that the NonNull is valid. - self.range.size() - } - - /// Whether this is a mapping of zero bytes - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Mark this memory mapping as executable (`RX`) this will by-design make it not-writable too. 
- pub fn make_executable( - &mut self, - aspace: &mut AddressSpace, - _branch_protection: bool, - ) -> Result<(), Error> { - self.protect(aspace, Permissions::READ | Permissions::EXECUTE) - } - - /// Mark this memory mapping as read-only (`R`) essentially removing the write permission. - pub fn make_readonly(&mut self, aspace: &mut AddressSpace) -> Result<(), Error> { - self.protect(aspace, Permissions::READ) - } - - fn protect( - &mut self, - aspace: &mut AddressSpace, - new_permissions: Permissions, - ) -> Result<(), Error> { - if let Some(ptr) = NonNull::new(self.ptr) { - // Safety: constructors ensure ptr is valid - let mut c = unsafe { aspace.regions.cursor_mut_from_ptr(ptr) }; - let mut region = c.get_mut().unwrap(); - - region.permissions = new_permissions; - - let mut flush = aspace.arch.new_flush(); - // Safety: constructors ensure invariants are maintained - unsafe { - aspace.arch.update_flags( - self.range.start, - NonZeroUsize::new(self.range.size()).unwrap(), - new_permissions.into(), - &mut flush, - )?; - }; - flush.flush()?; - } - - Ok(()) - } -} diff --git a/kernel/src/vm/mod.rs b/kernel/src/vm/mod.rs index 1d7ba7b2..1e3cdb3b 100644 --- a/kernel/src/vm/mod.rs +++ b/kernel/src/vm/mod.rs @@ -13,28 +13,31 @@ mod error; pub mod flush; pub mod frame_alloc; mod frame_list; -mod mmap; mod trap_handler; +mod user_mmap; mod vmo; use crate::arch; use crate::machine_info::MachineInfo; use crate::vm::flush::Flush; use crate::vm::frame_alloc::Frame; -pub use address::{PhysicalAddress, VirtualAddress}; +pub use address::{AddressRangeExt, PhysicalAddress, VirtualAddress}; pub use address_space::AddressSpace; +pub use address_space::Batch; use alloc::format; use alloc::string::ToString; use core::num::NonZeroUsize; use core::range::Range; use core::{fmt, slice}; pub use error::Error; +pub use frame_list::FrameList; use loader_api::BootInfo; -pub use mmap::MmapSlice; use rand::SeedableRng; use rand_chacha::ChaCha20Rng; use sync::{LazyLock, Mutex, OnceLock}; pub use trap_handler::trap_handler; +pub use user_mmap::UserMmap; +pub use vmo::Vmo; use xmas_elf::program::Type; pub static KERNEL_ASPACE: OnceLock<Mutex<AddressSpace>> = OnceLock::new(); @@ -72,17 +75,6 @@ pub fn init(boot_info: &BootInfo, minfo: &MachineInfo) -> crate::Result<()> { Ok(Mutex::new(aspace)) })?; - // let mut aspace = vm::AddressSpace::new_user(2, Some(ChaCha20Rng::from_seed( - // minfo.rng_seed.unwrap()[0..32].try_into().unwrap(), - // ))).unwrap(); - // unsafe { aspace.arch.activate(); } - // log::trace!("everything is fine?!"); - // - // let layout = Layout::from_size_align(5 * arch::PAGE_SIZE, arch::PAGE_SIZE).unwrap(); - // let vmo = Vmo::new_paged(iter::repeat_n(THE_ZERO_FRAME.clone(), 5)); - // let range = aspace.map(layout, vmo, 0, Permissions::READ | Permissions::WRITE, None).unwrap().range; - // log::trace!("{region:?}"); - Ok(()) } @@ -184,7 +176,7 @@ impl fmt::Display for PageFaultFlags { impl PageFaultFlags { pub fn is_valid(self) -> bool { - self.contains(PageFaultFlags::LOAD) != self.contains(PageFaultFlags::STORE) + !self.contains(PageFaultFlags::LOAD | PageFaultFlags::STORE) } pub fn cause_is_read(self) -> bool { @@ -201,9 +193,14 @@ impl PageFaultFlags { bitflags::bitflags!
{ #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct Permissions: u8 { + /// Allow reads from the memory region const READ = 1 << 0; + /// Allow writes to the memory region const WRITE = 1 << 1; + /// Allow code execution from the memory region const EXECUTE = 1 << 2; + /// Allow userspace to access the memory region + const USER = 1 << 3; } } diff --git a/kernel/src/vm/trap_handler.rs b/kernel/src/vm/trap_handler.rs index 8fb3c581..2fa3aab2 100644 --- a/kernel/src/vm/trap_handler.rs +++ b/kernel/src/vm/trap_handler.rs @@ -7,16 +7,12 @@ use crate::error::Error; use crate::trap_handler::{Trap, TrapReason}; -use crate::vm::{PageFaultFlags, VirtualAddress, KERNEL_ASPACE}; +use crate::vm::{PageFaultFlags, KERNEL_ASPACE}; use core::ops::ControlFlow; pub fn trap_handler(trap: Trap) -> ControlFlow<Result<(), Error>> { let mut aspace = KERNEL_ASPACE.get().unwrap().lock(); - let Some(addr) = VirtualAddress::new(trap.faulting_address) else { - return ControlFlow::Break(Err(Error::AccessDenied)); - }; - let flags = match trap.reason { TrapReason::InstructionPageFault => PageFaultFlags::INSTRUCTION, TrapReason::LoadPageFault => PageFaultFlags::LOAD, TrapReason::StorePageFault => PageFaultFlags::STORE, _ => return ControlFlow::Continue(()), }; - if let Err(_err) = aspace.page_fault(addr, flags) { + if let Err(_err) = aspace.page_fault(trap.faulting_address, flags) { ControlFlow::Break(Err(Error::AccessDenied)) } else { ControlFlow::Break(Ok(())) diff --git a/kernel/src/vm/user_mmap.rs b/kernel/src/vm/user_mmap.rs new file mode 100644 index 00000000..ec71ce6b --- /dev/null +++ b/kernel/src/vm/user_mmap.rs @@ -0,0 +1,255 @@ +// Copyright 2025 Jonas Kruckenberg +// +// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +use crate::arch; +use crate::arch::with_user_memory_access; +use crate::vm::address::AddressRangeExt; +use crate::vm::address_space::{AddressSpaceKind, Batch}; +use crate::vm::vmo::Vmo; +use crate::vm::{ + AddressSpace, ArchAddressSpace, Error, Permissions, VirtualAddress, THE_ZERO_FRAME, +}; +use core::alloc::Layout; +use core::num::NonZeroUsize; +use core::range::Range; +use core::{iter, slice}; + +/// A userspace memory mapping. +/// +/// This is essentially a handle to an [`AddressSpaceRegion`][crate::vm::address_space_region::AddressSpaceRegion] with convenience methods for userspace-specific +/// needs such as copying memory to and from it. +#[derive(Debug)] +pub struct UserMmap { + range: Range<VirtualAddress>, } + +// Safety: `UserMmap` only stores an address range; all mutations of the underlying region happen through a `&mut AddressSpace` +unsafe impl Send for UserMmap {} +// Safety: `UserMmap` only stores an address range; all mutations of the underlying region happen through a `&mut AddressSpace` +unsafe impl Sync for UserMmap {} + +impl UserMmap { + /// Creates a new empty `UserMmap`. + /// + /// Note that the size of this cannot be changed after the fact; all accessors will return empty + /// slices and permission-changing methods will always fail. + pub fn new_empty() -> Self { + Self { + range: Range::default(), + } + } + + /// Creates a new read-write (`RW`) memory mapping in the given address space.
+ pub fn new_zeroed(aspace: &mut AddressSpace, len: usize, align: usize) -> Result<Self, Error> { + debug_assert!( + matches!(aspace.kind(), AddressSpaceKind::User), + "cannot create UserMmap in kernel address space" + ); + debug_assert!( + align >= arch::PAGE_SIZE, + "alignment must be at least a page" + ); + + let layout = Layout::from_size_align(len, align).unwrap(); + + let vmo = Vmo::new_paged(iter::repeat_n( + THE_ZERO_FRAME.clone(), + layout.size().div_ceil(arch::PAGE_SIZE), + )); + + let region = aspace.map( + layout, + vmo, + 0, + Permissions::READ | Permissions::WRITE | Permissions::USER, + None, + )?; + + log::trace!("new_zeroed: {len} {:?}", region.range); + + Ok(Self { + range: region.range, + }) + } + + pub fn range(&self) -> Range<VirtualAddress> { + self.range + } + + pub fn copy_from_userspace( + &self, + aspace: &mut AddressSpace, + src_range: Range<usize>, + dst: &mut [u8], + ) -> Result<(), Error> { + self.with_user_slice(aspace, src_range, |src| dst.clone_from_slice(src)) + } + + pub fn copy_to_userspace( + &mut self, + aspace: &mut AddressSpace, + src: &[u8], + dst_range: Range<usize>, + ) -> Result<(), Error> { + self.with_user_slice_mut(aspace, dst_range, |dst| { + dst.copy_from_slice(src); + }) + } + + pub fn with_user_slice<F>( + &self, + aspace: &mut AddressSpace, + range: Range<usize>, + f: F, + ) -> Result<(), Error> + where + F: FnOnce(&[u8]), + { + self.ensure_mapped(aspace, range, false)?; + + #[expect(tail_expr_drop_order, reason = "")] + crate::trap_handler::catch_traps(|| { + // Safety: checked by caller and `catch_traps` + unsafe { + with_user_memory_access(|| { + let slice = + slice::from_raw_parts(self.range.start.as_ptr(), self.range().size()); + + f(&slice[range]); + }); + } + }) + .map_err(Error::Trap) + } + + pub fn with_user_slice_mut<F>( + &mut self, + aspace: &mut AddressSpace, + range: Range<usize>, + f: F, + ) -> Result<(), Error> + where + F: FnOnce(&mut [u8]), + { + self.ensure_mapped(aspace, range, true)?; + // Safety: user aspace also includes kernel mappings in higher half + unsafe { + aspace.arch.activate(); + } + + #[expect(tail_expr_drop_order, reason = "")] + crate::trap_handler::catch_traps(|| { + // Safety: checked by caller and `catch_traps` + unsafe { + with_user_memory_access(|| { + let slice = slice::from_raw_parts_mut( + self.range.start.as_mut_ptr(), + self.range().size(), + ); + f(&mut slice[range]); + }); + } + }) + .map_err(Error::Trap) + } + + /// Returns a pointer to the start of the memory mapped by this `UserMmap`. + #[inline] + pub fn as_ptr(&self) -> *const u8 { + self.range.start.as_ptr() + } + + /// Returns a mutable pointer to the start of the memory mapped by this `UserMmap`. + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut u8 { + self.range.start.as_mut_ptr() + } + + /// Returns the size in bytes of this memory mapping. + #[inline] + pub fn len(&self) -> usize { + // the constructors ensure that `range` is valid + self.range.size() + } + + /// Whether this is a mapping of zero bytes + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Mark this memory mapping as executable (`RX`); by design this also makes it non-writable. + pub fn make_executable( + &mut self, + aspace: &mut AddressSpace, + _branch_protection: bool, + ) -> Result<(), Error> { + log::trace!("UserMmap::make_executable: {:?}", self.range); + self.protect( + aspace, + Permissions::READ | Permissions::EXECUTE | Permissions::USER, + ) + } + + /// Mark this memory mapping as read-only (`R`), essentially removing the write permission.
+ pub fn make_readonly(&mut self, aspace: &mut AddressSpace) -> Result<(), Error> { + log::trace!("UserMmap::make_readonly: {:?}", self.range); + self.protect(aspace, Permissions::READ | Permissions::USER) + } + + fn protect( + &mut self, + aspace: &mut AddressSpace, + new_permissions: Permissions, + ) -> Result<(), Error> { + if !self.range.is_empty() { + let mut cursor = aspace.regions.find_mut(&self.range.start); + let mut region = cursor.get_mut().unwrap(); + + region.permissions = new_permissions; + + let mut flush = aspace.arch.new_flush(); + // Safety: constructors ensure invariants are maintained + unsafe { + aspace.arch.update_flags( + self.range.start, + NonZeroUsize::new(self.range.size()).unwrap(), + new_permissions.into(), + &mut flush, + )?; + }; + flush.flush()?; + } + + Ok(()) + } + + fn ensure_mapped( + &self, + aspace: &mut AddressSpace, + range: Range<usize>, + will_write: bool, + ) -> Result<(), Error> { + if !self.range.is_empty() { + let mut cursor = aspace.regions.find_mut(&self.range.start); + + let src_range = Range { + start: self.range.start.checked_add(range.start).unwrap(), + end: self.range.start.checked_add(range.end).unwrap(), + }; + + let mut batch = Batch::new(&mut aspace.arch); + cursor + .get_mut() + .unwrap() + .ensure_mapped(&mut batch, src_range, will_write)?; + batch.flush()?; + } + + Ok(()) + } +} diff --git a/kernel/src/vm/vmo/paged.rs b/kernel/src/vm/vmo/paged.rs index 3addaf93..563d56f2 100644 --- a/kernel/src/vm/vmo/paged.rs +++ b/kernel/src/vm/vmo/paged.rs @@ -55,7 +55,7 @@ impl PagedVmo { let new_frame = self.frames.insert(at_offset, new_frame.clone()); Ok(new_frame) } else { - todo!("TODO request bytes from source (later when we actually have sources)"); + todo!("TODO request bytes from source (later when we actually have sources) requested_offset={at_offset};size={}", self.frames.size()); } } @@ -64,7 +64,7 @@ impl PagedVmo { if let Some(frame) = self.frames.get(at_offset) { Ok(frame) } else { - todo!("TODO request bytes from source (later when we actually have sources)"); + todo!("TODO request bytes from source (later when we actually have sources) requested_offset={at_offset};size={}", self.frames.size()); } } diff --git a/kernel/src/wasm/cranelift/compiler.rs b/kernel/src/wasm/cranelift/compiler.rs index b0114791..0bf539f7 100644 --- a/kernel/src/wasm/cranelift/compiler.rs +++ b/kernel/src/wasm/cranelift/compiler.rs @@ -9,7 +9,7 @@ use crate::wasm::runtime::{StaticVMOffsets, VMCONTEXT_MAGIC}; use crate::wasm::translate::{ FunctionBodyData, ModuleTranslation, ModuleTypes, WasmFuncType, WasmValType, }; -use crate::wasm::trap::TRAP_INTERNAL_ASSERT; +use crate::wasm::trap::{TRAP_EXIT, TRAP_INTERNAL_ASSERT}; use crate::wasm::utils::{array_call_signature, value_type, wasm_call_signature}; use alloc::boxed::Box; use alloc::vec::Vec; @@ -191,7 +191,8 @@ impl Compiler for CraneliftCompiler { values_vec_len, ); - builder.ins().return_(&[]); + builder.ins().trap(TRAP_EXIT); + // builder.ins().return_(&[]); builder.finalize(); compiler.finish(None) diff --git a/kernel/src/wasm/errors.rs b/kernel/src/wasm/errors.rs index a1b460a2..bcd7938f 100644 --- a/kernel/src/wasm/errors.rs +++ b/kernel/src/wasm/errors.rs @@ -1,3 +1,4 @@ +use crate::vm::VirtualAddress; use crate::wasm::backtrace::RawWasmBacktrace; use crate::wasm::translate::EntityType; use crate::wasm::trap::Trap; @@ -47,9 +48,9 @@ pub enum Error { /// A WebAssembly trap occurred. Trap { /// The program counter where this trap originated.
- pc: usize, + pc: VirtualAddress, /// The address of the inaccessible data or zero if trap wasn't caused by data access. - faulting_addr: usize, + faulting_addr: VirtualAddress, /// The trap that occurred. trap: Trap, /// A human-readable description of the trap. diff --git a/kernel/src/wasm/func.rs b/kernel/src/wasm/func.rs index ba072948..64efb8ef 100644 --- a/kernel/src/wasm/func.rs +++ b/kernel/src/wasm/func.rs @@ -8,6 +8,7 @@ use crate::wasm::type_registry::RegisteredType; use crate::wasm::values::Val; use crate::wasm::{runtime, Error, Store, MAX_WASM_STACK}; use alloc::string::ToString; +use core::arch::asm; use core::ffi::c_void; use core::mem; @@ -66,7 +67,9 @@ impl Func { // do the actual call // Safety: caller has to ensure safety unsafe { - self.call_unchecked_raw(store, values_vec.as_mut_ptr(), values_vec_size)?; + arch::with_user_memory_access(|| { + self.call_unchecked_raw(store, values_vec.as_mut_ptr(), values_vec_size) + })?; } // copy the results out of the storage @@ -100,18 +103,31 @@ impl Func { if let Err(trap) = crate::trap_handler::catch_traps(|| { // Safety: caller has to ensure safety unsafe { - (func_ref.array_call)(vmctx, vmctx, args_results_ptr, args_results_len); + riscv::sstatus::set_spp(riscv::sstatus::SPP::User); + riscv::sepc::set(func_ref.array_call as usize); + asm! { + "sret", + in("a0") vmctx, + in("a1") vmctx, + in("a2") args_results_ptr, + in("a3") args_results_len, + options(noreturn) + } } }) { // construct wasm trap - let (code, text_offset) = code_registry::lookup_code(trap.pc).unwrap(); + let (code, text_offset) = code_registry::lookup_code(trap.pc.get()).unwrap(); + log::trace!( + "Trap at offset: pc={};text_offset={text_offset:#x}", + trap.pc + ); let trap_code = code.lookup_trap_code(text_offset).unwrap(); let backtrace = RawWasmBacktrace::new_with_vmctx( vmctx, &module.offsets().static_, - Some((trap.pc, trap.fp)), + Some((trap.pc.get(), trap.fp.get())), ); return Err(Error::Trap { diff --git a/kernel/src/wasm/instance.rs b/kernel/src/wasm/instance.rs index 610183eb..4de5b588 100644 --- a/kernel/src/wasm/instance.rs +++ b/kernel/src/wasm/instance.rs @@ -29,16 +29,13 @@ impl Instance { /// compatibility with the `module` being instantiated. pub(crate) unsafe fn new_unchecked( store: &mut Store, - alloc: &dyn InstanceAllocator, - aspace: &mut AddressSpace, const_eval: &mut ConstExprEvaluator, module: Module, imports: Imports, ) -> crate::wasm::Result { // Safety: caller has to ensure safety - let instance = unsafe { - runtime::Instance::new_unchecked(alloc, aspace, const_eval, module, imports)? - }; + let instance = + unsafe { runtime::Instance::new_unchecked(store, const_eval, module, imports)? }; let handle = store.push_instance(instance); Ok(Self(handle)) } diff --git a/kernel/src/wasm/instance_allocator.rs b/kernel/src/wasm/instance_allocator.rs index 54c8aece..53a192ec 100644 --- a/kernel/src/wasm/instance_allocator.rs +++ b/kernel/src/wasm/instance_allocator.rs @@ -3,25 +3,38 @@ use crate::wasm::indices::{DefinedMemoryIndex, DefinedTableIndex}; use crate::wasm::runtime::{InstanceAllocator, Memory, Table}; use crate::wasm::runtime::{OwnedVMContext, VMOffsets}; use crate::wasm::translate::{MemoryDesc, TableDesc, TranslatedModule}; +use core::fmt; +use sync::Mutex; /// A placeholder allocator impl that just delegates to runtime types `new` methods. 
-pub struct PlaceholderAllocatorDontUse; +pub struct PlaceholderAllocatorDontUse(pub(super) Mutex<AddressSpace>); + +impl fmt::Debug for PlaceholderAllocatorDontUse { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PlaceholderAllocatorDontUse").finish() + } +} + +impl Default for PlaceholderAllocatorDontUse { + fn default() -> Self { + Self(Mutex::new(AddressSpace::new_user(2, None).unwrap())) + } +} impl InstanceAllocator for PlaceholderAllocatorDontUse { unsafe fn allocate_vmctx( &self, - aspace: &mut AddressSpace, _module: &TranslatedModule, plan: &VMOffsets, ) -> crate::wasm::Result<OwnedVMContext> { - OwnedVMContext::try_new(aspace, plan) + let mut aspace = self.0.lock(); + OwnedVMContext::try_new(&mut aspace, plan) } - unsafe fn deallocate_vmctx(&self, _aspace: &mut AddressSpace, _vmctx: OwnedVMContext) {} + unsafe fn deallocate_vmctx(&self, _vmctx: OwnedVMContext) {} unsafe fn allocate_memory( &self, - aspace: &mut AddressSpace, _module: &TranslatedModule, memory_desc: &MemoryDesc, _memory_index: DefinedMemoryIndex, @@ -51,20 +64,14 @@ impl InstanceAllocator for PlaceholderAllocatorDontUse { .ok() .and_then(|m| usize::try_from(m).ok()); - Memory::try_new(aspace, memory_desc, minimum, maximum) + let mut aspace = self.0.lock(); + Memory::try_new(&mut aspace, memory_desc, minimum, maximum) } - unsafe fn deallocate_memory( - &self, - _aspace: &mut AddressSpace, - _memory_index: DefinedMemoryIndex, - _memory: Memory, - ) { - } + unsafe fn deallocate_memory(&self, _memory_index: DefinedMemoryIndex, _memory: Memory) {} unsafe fn allocate_table( &self, - aspace: &mut AddressSpace, _module: &TranslatedModule, table_desc: &TableDesc, _table_index: DefinedTableIndex, @@ -74,14 +81,9 @@ impl InstanceAllocator for PlaceholderAllocatorDontUse { // memory consumption let maximum = table_desc.maximum.and_then(|m| usize::try_from(m).ok()); - Table::try_new(aspace, table_desc, maximum) + let mut aspace = self.0.lock(); + Table::try_new(&mut aspace, table_desc, maximum) } - unsafe fn deallocate_table( - &self, - _aspace: &mut AddressSpace, - _table_index: DefinedTableIndex, - _table: Table, - ) { - } + unsafe fn deallocate_table(&self, _table_index: DefinedTableIndex, _table: Table) {} } diff --git a/kernel/src/wasm/linker.rs b/kernel/src/wasm/linker.rs index 1aadec92..dbc016f1 100644 --- a/kernel/src/wasm/linker.rs +++ b/kernel/src/wasm/linker.rs @@ -115,8 +115,6 @@ impl Linker { pub fn instantiate( &self, store: &mut Store, - alloc: &dyn InstanceAllocator, - aspace: &mut AddressSpace, const_eval: &mut ConstExprEvaluator, module: &Module, ) -> crate::wasm::Result<Instance> { @@ -148,9 +146,7 @@ impl Linker { } // Safety: we have typechecked the imports above.
- unsafe { - Instance::new_unchecked(store, alloc, aspace, const_eval, module.clone(), imports) - } + unsafe { Instance::new_unchecked(store, const_eval, module.clone(), imports) } } fn insert(&mut self, key: ImportKey, item: Extern) -> crate::wasm::Result<()> { diff --git a/kernel/src/wasm/mod.rs b/kernel/src/wasm/mod.rs index 88df0e5d..e6469f87 100644 --- a/kernel/src/wasm/mod.rs +++ b/kernel/src/wasm/mod.rs @@ -32,9 +32,9 @@ mod values; pub use errors::Error; use wasmparser::Validator; pub(crate) type Result = core::result::Result; -use crate::vm::{AddressSpace, KERNEL_ASPACE}; +use crate::vm::{AddressSpace, ArchAddressSpace, VirtualAddress, KERNEL_ASPACE}; use crate::wasm::instance_allocator::PlaceholderAllocatorDontUse; -use crate::{enum_accessors, owned_enum_accessors}; +use crate::{arch, enum_accessors, owned_enum_accessors}; pub use engine::Engine; pub use func::Func; pub use global::Global; @@ -128,27 +128,20 @@ pub fn test() { let mut linker = Linker::new(&engine); let mut store = Store::new(&engine); let mut const_eval = ConstExprEvaluator::default(); - let mut aspace = AddressSpace::new_user(2, None).unwrap(); + // let mut aspace = AddressSpace::new_user(2, None).unwrap(); // instantiate & define the fib_cpp module { let module = Module::from_bytes( &engine, - &mut aspace, + &mut store, &mut validator, include_bytes!("../../fib_cpp.wasm"), ) .unwrap(); - log::debug!("here"); let instance = linker - .instantiate( - &mut store, - &PlaceholderAllocatorDontUse, - &mut aspace, - &mut const_eval, - &module, - ) + .instantiate(&mut store, &mut const_eval, &module) .unwrap(); instance.debug_vmctx(&store); @@ -161,20 +154,14 @@ pub fn test() { { let module = Module::from_bytes( &engine, - &mut aspace, + &mut store, &mut validator, - include_bytes!("../../fib_cpp.wasm"), + include_bytes!("../../fib_test.wasm"), ) .unwrap(); let instance = linker - .instantiate( - &mut store, - &PlaceholderAllocatorDontUse, - &mut aspace, - &mut const_eval, - &module, - ) + .instantiate(&mut store, &mut const_eval, &module) .unwrap(); instance.debug_vmctx(&store); diff --git a/kernel/src/wasm/module.rs b/kernel/src/wasm/module.rs index fba721de..ff03623b 100644 --- a/kernel/src/wasm/module.rs +++ b/kernel/src/wasm/module.rs @@ -5,7 +5,7 @@ use crate::wasm::runtime::{code_registry, CodeMemory}; use crate::wasm::runtime::{MmapVec, VMOffsets}; use crate::wasm::translate::{Import, TranslatedModule}; use crate::wasm::type_registry::RuntimeTypeCollection; -use crate::wasm::{Engine, ModuleTranslator}; +use crate::wasm::{Engine, ModuleTranslator, Store}; use alloc::sync::Arc; use core::mem; use cranelift_entity::PrimaryMap; @@ -57,7 +57,7 @@ impl Module { #[expect(tail_expr_drop_order, reason = "")] pub fn from_bytes( engine: &Engine, - aspace: &mut AddressSpace, + store: &mut Store, validator: &mut Validator, bytes: &[u8], ) -> crate::wasm::Result { @@ -77,10 +77,14 @@ impl Module { let type_collection = engine.type_registry().register_module_types(engine, types); log::debug!("Allocating new memory map..."); - let vec = MmapVec::from_slice(aspace, &code)?; - let mut code = CodeMemory::new(vec, trap_offsets, traps); - code.publish(aspace)?; - let code = Arc::new(code); + let code = { + let mut aspace = store.alloc.0.lock(); + let vec = MmapVec::from_slice(&mut aspace, &code)?; + let mut code = CodeMemory::new(vec, trap_offsets, traps); + code.publish(&mut aspace)?; + drop(aspace); + Arc::new(code) + }; // register this code memory with the trap handler, so we can correctly unwind from traps 
code_registry::register_code(&code); diff --git a/kernel/src/wasm/runtime/code_memory.rs b/kernel/src/wasm/runtime/code_memory.rs index 622d9987..1583196d 100644 --- a/kernel/src/wasm/runtime/code_memory.rs +++ b/kernel/src/wasm/runtime/code_memory.rs @@ -1,4 +1,4 @@ -use crate::vm::{AddressSpace, MmapSlice, KERNEL_ASPACE}; +use crate::vm::{AddressRangeExt, AddressSpace, UserMmap, VirtualAddress, KERNEL_ASPACE}; use crate::wasm::compile::FunctionLoc; use crate::wasm::runtime::MmapVec; use crate::wasm::trap::Trap; @@ -8,7 +8,7 @@ use core::range::Range; #[derive(Debug)] pub struct CodeMemory { - mmap: MmapSlice, + mmap: UserMmap, len: usize, published: bool, @@ -46,15 +46,16 @@ impl CodeMemory { } #[inline] - pub fn text(&self) -> &[u8] { - // Safety: The constructor has to ensure that `self.len` is valid. - unsafe { self.mmap.slice(Range::from(0..self.len)) } + pub fn text_range(&self) -> Range<VirtualAddress> { + let start = self.mmap.range().start; + + Range::from(start..start.checked_add(self.len).unwrap()) } pub fn resolve_function_loc(&self, func_loc: FunctionLoc) -> usize { let text_range = { - let r = self.text().as_ptr_range(); - r.start as usize..r.end as usize + let r = self.text_range(); + r.start.get()..r.end.get() }; let addr = text_range.start + func_loc.start as usize; diff --git a/kernel/src/wasm/runtime/code_registry.rs b/kernel/src/wasm/runtime/code_registry.rs index a340ef67..05b4edb7 100644 --- a/kernel/src/wasm/runtime/code_registry.rs +++ b/kernel/src/wasm/runtime/code_registry.rs @@ -35,12 +35,12 @@ pub fn lookup_code(pc: usize) -> Option<(Arc<CodeMemory>, usize)> { /// This is used by trap handling to determine which region of code a faulting /// address belongs to. pub fn register_code(code: &Arc<CodeMemory>) { - let text = code.text(); + let text = code.text_range(); if text.is_empty() { return; } - let start = text.as_ptr() as usize; - let end = start + text.len() - 1; - let prev = global_code().write().insert(end, (start, code.clone())); + let prev = global_code() + .write() + .insert(text.end.get(), (text.start.get(), code.clone())); assert!(prev.is_none()); } diff --git a/kernel/src/wasm/runtime/instance.rs b/kernel/src/wasm/runtime/instance.rs index b6a65697..7c1f17d2 100644 --- a/kernel/src/wasm/runtime/instance.rs +++ b/kernel/src/wasm/runtime/instance.rs @@ -3,6 +3,7 @@ reason = "too many trivial unsafe blocks" )] +use crate::arch; use crate::vm::AddressSpace; use crate::wasm::indices::{ DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, EntityIndex, @@ -21,10 +22,11 @@ use crate::wasm::runtime::{ VMTableDefinition, VMTableImport, VMCONTEXT_MAGIC, }; use crate::wasm::translate::{TableInitialValue, TableSegmentElements}; -use crate::wasm::{Extern, Module}; +use crate::wasm::{Extern, Module, Store}; use alloc::vec; use alloc::vec::Vec; use core::ptr::NonNull; +use core::range::Range; use core::{fmt, mem, ptr, slice}; use cranelift_entity::packed_option::ReservedValue; use cranelift_entity::{EntityRef, EntitySet, PrimaryMap}; @@ -45,26 +47,33 @@ pub struct Instance { impl Instance { #[expect(tail_expr_drop_order, reason = "")] pub unsafe fn new_unchecked( - alloc: &dyn InstanceAllocator, - aspace: &mut AddressSpace, + store: &mut Store, const_eval: &mut ConstExprEvaluator, module: Module, imports: Imports, ) -> crate::wasm::Result<Self> { - let (mut vmctx, mut tables, mut memories) = alloc.allocate_module(aspace, &module)?; + let (mut vmctx, mut tables, mut memories) = store.alloc.allocate_module(&module)?; + log::trace!("initializing instance"); unsafe { - initialize_vmctx(
const_eval, - &mut vmctx, - &mut tables, - &mut memories, - &module, - imports, - )?; - initialize_tables(const_eval, &mut tables, &module)?; - initialize_memories(const_eval, &mut memories, &module)?; + arch::with_user_memory_access(|| -> crate::wasm::Result<()> { + initialize_vmctx( + const_eval, + &mut vmctx, + &mut tables, + &mut memories, + &module, + imports, + )?; + initialize_tables(const_eval, &mut tables, &module)?; + + let mut aspace = store.alloc.0.lock(); + initialize_memories(&mut aspace, const_eval, &mut memories, &module)?; + + Ok(()) + })?; } + log::trace!("done initializing instance"); let exports = vec![None; module.exports().len()]; @@ -414,36 +423,44 @@ unsafe fn initialize_vmctx( let offsets = module.offsets(); // initialize vmctx magic + log::trace!("initializing vmctx magic"); *vmctx.plus_offset_mut(u32::from(offsets.static_.vmctx_magic())) = VMCONTEXT_MAGIC; // Initialize the built-in functions + log::trace!("initializing built-in functions array ptr"); *vmctx.plus_offset_mut::<*const VMBuiltinFunctionsArray>(u32::from( offsets.static_.vmctx_builtin_functions(), )) = ptr::from_ref(&VMBuiltinFunctionsArray::INIT); // initialize the type ids array ptr + log::trace!("initializing type ids array ptr"); let type_ids = module.type_ids(); *vmctx.plus_offset_mut(u32::from(offsets.static_.vmctx_type_ids())) = type_ids.as_ptr(); // initialize func_refs array + log::trace!("initializing func refs array"); initialize_vmfunc_refs(vmctx, &module, &imports, offsets); // initialize the imports + log::trace!("initializing function imports"); ptr::copy_nonoverlapping( imports.functions.as_ptr(), vmctx.plus_offset_mut::<VMFunctionImport>(offsets.vmctx_imported_functions_begin()), imports.functions.len(), ); + log::trace!("initializing table imports"); ptr::copy_nonoverlapping( imports.tables.as_ptr(), vmctx.plus_offset_mut::<VMTableImport>(offsets.vmctx_imported_tables_begin()), imports.tables.len(), ); + log::trace!("initializing memory imports"); ptr::copy_nonoverlapping( imports.memories.as_ptr(), vmctx.plus_offset_mut::<VMMemoryImport>(offsets.vmctx_imported_memories_begin()), imports.memories.len(), ); + log::trace!("initializing global imports"); ptr::copy_nonoverlapping( imports.globals.as_ptr(), vmctx.plus_offset_mut::<VMGlobalImport>(offsets.vmctx_imported_globals_begin()), imports.globals.len(), ); // Initialize the defined tables + log::trace!("initializing defined tables"); for def_index in module .translated() .tables @@ -463,6 +481,7 @@ unsafe fn initialize_vmctx( } // Initialize the `defined_memories` table. + log::trace!("initializing defined memories"); for (def_index, plan) in module .translated() .memories @@ -480,6 +499,8 @@ unsafe fn initialize_vmctx( ptr.write(memories[def_index].as_vmmemory_definition()); } + // Initialize the `defined_globals` table.
+ log::trace!("initializing defined globals"); for (def_index, init_expr) in &module.translated().global_initializers { let val = const_eval.eval(init_expr); let ptr = vmctx.plus_offset_mut::( @@ -604,6 +625,7 @@ unsafe fn initialize_tables( #[expect(clippy::unnecessary_wraps, reason = "TODO")] unsafe fn initialize_memories( + aspace: &mut AddressSpace, const_eval: &mut ConstExprEvaluator, memories: &mut PrimaryMap, module: &Module, @@ -613,8 +635,13 @@ unsafe fn initialize_memories( let offset = usize::try_from(offset.get_u64()).unwrap(); if let Some(def_index) = module.translated().defined_memory_index(init.memory_index) { - memories[def_index].as_slice_mut()[offset..offset + init.data.len()] - .copy_from_slice(&init.data); + memories[def_index].with_user_slice_mut( + aspace, + Range::from(offset..offset + init.data.len()), + |slice| { + slice.copy_from_slice(&init.data); + }, + ); } else { todo!("initializing imported table") } diff --git a/kernel/src/wasm/runtime/instance_allocator.rs b/kernel/src/wasm/runtime/instance_allocator.rs index 9b2817d0..ff33275e 100644 --- a/kernel/src/wasm/runtime/instance_allocator.rs +++ b/kernel/src/wasm/runtime/instance_allocator.rs @@ -21,7 +21,6 @@ pub trait InstanceAllocator { /// The safety of the entire VM depends on the correct implementation of this method. unsafe fn allocate_vmctx( &self, - aspace: &mut AddressSpace, module: &TranslatedModule, offsets: &VMOffsets, ) -> crate::wasm::Result; @@ -32,7 +31,7 @@ pub trait InstanceAllocator { /// /// The `VMContext` must have previously been allocated by /// `Self::allocate_vmctx` - unsafe fn deallocate_vmctx(&self, aspace: &mut AddressSpace, vmctx: OwnedVMContext); + unsafe fn deallocate_vmctx(&self, vmctx: OwnedVMContext); /// Allocate a memory for an instance. /// @@ -45,7 +44,6 @@ pub trait InstanceAllocator { /// The safety of the entire VM depends on the correct implementation of this method. unsafe fn allocate_memory( &self, - aspace: &mut AddressSpace, module: &TranslatedModule, memory_desc: &MemoryDesc, memory_index: DefinedMemoryIndex, @@ -58,12 +56,7 @@ pub trait InstanceAllocator { /// The memory must have previously been allocated by /// `Self::allocate_memory`, be at the given index, and must currently be /// allocated. It must never be used again. - unsafe fn deallocate_memory( - &self, - aspace: &mut AddressSpace, - memory_index: DefinedMemoryIndex, - memory: Memory, - ); + unsafe fn deallocate_memory(&self, memory_index: DefinedMemoryIndex, memory: Memory); /// Allocate a table for an instance. /// @@ -76,7 +69,6 @@ pub trait InstanceAllocator { /// The safety of the entire VM depends on the correct implementation of this method. unsafe fn allocate_table( &self, - aspace: &mut AddressSpace, module: &TranslatedModule, table_desc: &TableDesc, table_index: DefinedTableIndex, @@ -89,12 +81,7 @@ pub trait InstanceAllocator { /// The table must have previously been allocated by `Self::allocate_table`, /// be at the given index, and must currently be allocated. It must never be /// used again. - unsafe fn deallocate_table( - &self, - aspace: &mut AddressSpace, - table_index: DefinedTableIndex, - table: Table, - ); + unsafe fn deallocate_table(&self, table_index: DefinedTableIndex, table: Table); /// Allocate multiple memories at once. /// @@ -109,7 +96,6 @@ pub trait InstanceAllocator { /// The safety of the entire VM depends on the correct implementation of this method. 
     unsafe fn allocate_memories(
         &self,
-        aspace: &mut AddressSpace,
         module: &TranslatedModule,
         memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
     ) -> crate::wasm::Result<()> {
@@ -117,7 +103,7 @@ pub trait InstanceAllocator {
             if let Some(def_index) = module.defined_memory_index(index) {
                 let new_def_index =
                     // Safety: caller has to ensure safety
-                    memories.push(unsafe { self.allocate_memory(aspace, module, plan, def_index)? });
+                    memories.push(unsafe { self.allocate_memory(module, plan, def_index)? });
                 debug_assert_eq!(def_index, new_def_index);
             }
         }
@@ -137,7 +123,6 @@ pub trait InstanceAllocator {
     /// The safety of the entire VM depends on the correct implementation of this method.
     unsafe fn allocate_tables(
         &self,
-        aspace: &mut AddressSpace,
         module: &TranslatedModule,
         tables: &mut PrimaryMap<DefinedTableIndex, Table>,
     ) -> crate::wasm::Result<()> {
@@ -145,7 +130,7 @@ pub trait InstanceAllocator {
             if let Some(def_index) = module.defined_table_index(index) {
                 let new_def_index =
                     // Safety: caller has to ensure safety
-                    tables.push(unsafe { self.allocate_table(aspace, module, plan, def_index)? });
+                    tables.push(unsafe { self.allocate_table(module, plan, def_index)? });
                 debug_assert_eq!(def_index, new_def_index);
             }
         }
@@ -160,11 +145,7 @@ pub trait InstanceAllocator {
     ///
     /// Just like `Self::deallocate_memory` all memories must have been allocated by
     /// `Self::allocate_memories`/`Self::allocate_memory` and must never be used again.
-    unsafe fn deallocate_memories(
-        &self,
-        aspace: &mut AddressSpace,
-        memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
-    ) {
+    unsafe fn deallocate_memories(&self, memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>) {
         for (memory_index, memory) in mem::take(memories) {
             // Because deallocating memory is infallible, we don't need to worry
             // about leaking subsequent memories if the first memory failed to
             // deallocate. If deallocating memory ever becomes fallible, we will
             // need to be careful here!
             // Safety: caller has to ensure safety
             unsafe {
-                self.deallocate_memory(aspace, memory_index, memory);
+                self.deallocate_memory(memory_index, memory);
             }
         }
     }
@@ -185,15 +166,11 @@ pub trait InstanceAllocator {
     ///
     /// Just like `Self::deallocate_table` all tables must have been allocated by
     /// `Self::allocate_tables`/`Self::allocate_table` and must never be used again.
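+    /// Deallocation is infallible; the default implementation drains `tables`
+    /// and calls `Self::deallocate_table` on each entry.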
-    unsafe fn deallocate_tables(
-        &self,
-        aspace: &mut AddressSpace,
-        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
-    ) {
+    unsafe fn deallocate_tables(&self, tables: &mut PrimaryMap<DefinedTableIndex, Table>) {
         for (table_index, table) in mem::take(tables) {
             // Safety: caller has to ensure safety
             unsafe {
-                self.deallocate_table(aspace, table_index, table);
+                self.deallocate_table(table_index, table);
             }
         }
     }
@@ -213,7 +190,6 @@ pub trait InstanceAllocator {
     )]
     fn allocate_module(
         &self,
-        aspace: &mut AddressSpace,
         module: &Module,
     ) -> crate::wasm::Result<(
         OwnedVMContext,
@@ -231,15 +207,15 @@ pub trait InstanceAllocator {
 
         // Safety: TODO
         match (|| unsafe {
-            self.allocate_tables(aspace, module.translated(), &mut tables)?;
-            self.allocate_memories(aspace, module.translated(), &mut memories)?;
-            self.allocate_vmctx(aspace, module.translated(), module.offsets())
+            self.allocate_tables(module.translated(), &mut tables)?;
+            self.allocate_memories(module.translated(), &mut memories)?;
+            self.allocate_vmctx(module.translated(), module.offsets())
         })() {
             Ok(vmctx) => Ok((vmctx, tables, memories)),
             // Safety: memories and tables have just been allocated and will not be handed out
             Err(e) => unsafe {
-                self.deallocate_memories(aspace, &mut memories);
-                self.deallocate_tables(aspace, &mut tables);
+                self.deallocate_memories(&mut memories);
+                self.deallocate_tables(&mut tables);
                 Err(e)
             },
         }
diff --git a/kernel/src/wasm/runtime/memory.rs b/kernel/src/wasm/runtime/memory.rs
index 167d9ebc..1858227f 100644
--- a/kernel/src/wasm/runtime/memory.rs
+++ b/kernel/src/wasm/runtime/memory.rs
@@ -1,4 +1,4 @@
-use crate::vm::{AddressSpace, MmapSlice};
+use crate::vm::{AddressSpace, UserMmap};
 use crate::wasm::runtime::VMMemoryDefinition;
 use crate::wasm::translate::MemoryDesc;
 use crate::wasm::utils::round_usize_up_to_host_pages;
@@ -8,7 +8,7 @@ use core::range::Range;
 #[derive(Debug)]
 pub struct Memory {
     /// The underlying allocation backing this memory
-    mmap: MmapSlice,
+    mmap: UserMmap,
     /// The current length of this Wasm memory, in bytes.
     len: usize,
     /// The optional maximum accessible size, in bytes, for this linear memory.
@@ -24,8 +24,9 @@ pub struct Memory {
 }
 
 impl Memory {
+    #[expect(clippy::unnecessary_wraps, reason = "TODO")]
     pub fn try_new(
-        aspace: &mut AddressSpace,
+        _aspace: &mut AddressSpace,
         desc: &MemoryDesc,
         actual_minimum_bytes: usize,
         actual_maximum_bytes: Option<usize>,
@@ -34,12 +35,14 @@ impl Memory {
         // Ensure that our guard regions are multiples of the host page size.
         let offset_guard_bytes = round_usize_up_to_host_pages(offset_guard_bytes);
 
-        let bound_bytes = round_usize_up_to_host_pages(MEMORY_MAX);
-        let allocation_bytes = bound_bytes.min(actual_maximum_bytes.unwrap_or(usize::MAX));
-        let request_bytes = allocation_bytes + offset_guard_bytes;
+        // let bound_bytes = round_usize_up_to_host_pages(MEMORY_MAX);
+        // let allocation_bytes = bound_bytes.min(actual_maximum_bytes.unwrap_or(usize::MAX));
+        // let request_bytes = allocation_bytes + offset_guard_bytes;
+
+        // let mmap = UserMmap::new_zeroed(aspace, request_bytes, 2 * 1048576).map_err(|_| Error::MmapFailed)?;
 
         Ok(Self {
-            mmap: MmapSlice::new_zeroed(aspace, request_bytes).map_err(|_| Error::MmapFailed)?,
+            mmap: UserMmap::new_empty(),
             len: actual_minimum_bytes,
             maximum: actual_maximum_bytes,
             page_size_log2: desc.page_size_log2,
@@ -47,9 +50,11 @@ impl Memory {
         })
     }
 
-    pub(crate) fn as_slice_mut(&mut self) -> &mut [u8] {
-        // Safety: The constructor has to ensure that `self.len` is valid.
-        unsafe { self.mmap.slice_mut(Range::from(0..self.len)) }
+    pub fn with_user_slice_mut<F>(&mut self, aspace: &mut AddressSpace, range: Range<usize>, f: F)
+    where
+        F: FnOnce(&mut [u8]),
+    {
+        self.mmap.with_user_slice_mut(aspace, range, f).unwrap();
     }
 
     pub(crate) fn as_vmmemory_definition(&mut self) -> VMMemoryDefinition {
diff --git a/kernel/src/wasm/runtime/mmap_vec.rs b/kernel/src/wasm/runtime/mmap_vec.rs
index d9971eab..6112cb53 100644
--- a/kernel/src/wasm/runtime/mmap_vec.rs
+++ b/kernel/src/wasm/runtime/mmap_vec.rs
@@ -1,12 +1,15 @@
-use crate::vm::{AddressSpace, MmapSlice};
+use crate::arch;
+use crate::vm::{AddressSpace, UserMmap};
 use crate::wasm::Error;
+use core::cmp::max;
 use core::marker::PhantomData;
 use core::ops::Deref;
+use core::range::Range;
 use core::slice;
 
 #[derive(Debug)]
 pub struct MmapVec<T> {
-    mmap: MmapSlice,
+    mmap: UserMmap,
     len: usize,
     _m: PhantomData<T>,
 }
 
 impl<T> MmapVec<T> {
     pub fn new_empty() -> Self {
         Self {
-            mmap: MmapSlice::new_empty(),
+            mmap: UserMmap::new_empty(),
             len: 0,
             _m: PhantomData,
         }
     }
 
-    pub fn new_zeroed(aspace: &mut AddressSpace, len: usize) -> crate::wasm::Result<Self> {
+    pub fn new_zeroed(aspace: &mut AddressSpace, capacity: usize) -> crate::wasm::Result<Self> {
         Ok(Self {
-            mmap: MmapSlice::new_zeroed(aspace, len).map_err(|_| Error::MmapFailed)?,
+            mmap: UserMmap::new_zeroed(aspace, capacity, max(align_of::<T>(), arch::PAGE_SIZE))
+                .map_err(|_| Error::MmapFailed)?,
             len: 0,
             _m: PhantomData,
         })
     }
@@ -36,12 +40,16 @@ impl<T> MmapVec<T> {
             Ok(Self::new_empty())
         } else {
             let mut this = Self::new_zeroed(aspace, slice.len())?;
-            this.extend_from_slice(slice);
+            this.extend_from_slice(aspace, slice);
             Ok(this)
         }
     }
 
+    pub fn capacity(&self) -> usize {
+        self.mmap.len() / size_of::<T>()
+    }
+
     pub fn len(&self) -> usize {
         self.len
     }
@@ -78,37 +86,45 @@ impl<T> MmapVec<T> {
         self.mmap.as_mut_ptr().cast()
     }
 
-    pub fn extend_from_slice(&mut self, other: &[T])
+    pub fn extend_from_slice(&mut self, aspace: &mut AddressSpace, other: &[T])
     where
         T: Clone,
     {
-        let start = self.len;
-        let end = self.len + other.len();
-
-        assert!(end * size_of::<T>() < self.mmap.len());
-        // Safety: checked above
-        unsafe {
-            slice::from_raw_parts_mut(self.mmap.as_mut_ptr().cast::<T>().add(start), other.len())
-                .clone_from_slice(other);
-        }
+        assert!(self.len() + other.len() <= self.capacity());
+
+        // View `other`'s elements as raw bytes for the copy into user memory
+        // Safety: `u8` has no validity invariants and `size_of_val(other)` is exactly
+        // the byte length of `other`
+        let src = unsafe { slice::from_raw_parts(other.as_ptr().cast::<u8>(), size_of_val(other)) };
+        self.mmap
+            .copy_to_userspace(
+                aspace,
+                src,
+                Range::from(self.len * size_of::<T>()..(self.len + other.len()) * size_of::<T>()),
+            )
+            .unwrap();
+
+        self.len += other.len();
     }
 
-    pub(crate) fn extend_with(&mut self, count: usize, elem: T)
+    pub(crate) fn extend_with(&mut self, aspace: &mut AddressSpace, count: usize, elem: T)
     where
         T: Clone,
     {
-        let start = self.len;
-        let end = self.len + count;
-
-        assert!(end * size_of::<T>() < self.mmap.len());
-        // Safety: checked above
-        unsafe {
-            slice::from_raw_parts_mut(self.mmap.as_mut_ptr().cast::<T>().add(start), count)
-                .fill(elem);
-        }
+        assert!(self.len() + count <= self.capacity());
+
+        let byte_range =
+            Range::from(self.len * size_of::<T>()..(self.len + count) * size_of::<T>());
+        self.mmap
+            .with_user_slice_mut(aspace, byte_range, |dst| {
+                // Reinterpret the destination bytes as `count` elements of `T`
+                // Safety: the byte range above spans exactly `count` properly-aligned `T`s
+                let dst =
+                    unsafe { slice::from_raw_parts_mut(dst.as_mut_ptr().cast::<T>(), count) };
+
+                dst.fill(elem);
+            })
+            .unwrap();
+
+        self.len += count;
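+        // Note: `len`/`capacity` count `T` elements while `UserMmap` works in bytes,
+        // so every range handed to the mmap above is scaled by `size_of::<T>()`.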
     }
 
-    pub(crate) fn into_parts(self) -> (MmapSlice, usize) {
+    pub(crate) fn into_parts(self) -> (UserMmap, usize) {
         (self.mmap, self.len)
     }
 }
diff --git a/kernel/src/wasm/runtime/owned_vmcontext.rs b/kernel/src/wasm/runtime/owned_vmcontext.rs
index 105c23ea..9c8a0c76 100644
--- a/kernel/src/wasm/runtime/owned_vmcontext.rs
+++ b/kernel/src/wasm/runtime/owned_vmcontext.rs
@@ -1,22 +1,63 @@
-use crate::vm::AddressSpace;
+use crate::arch;
+use crate::vm::{
+    frame_alloc, AddressRangeExt, AddressSpace, ArchAddressSpace, Batch, FrameList, VirtualAddress,
+    Vmo,
+};
 use crate::wasm::runtime::{MmapVec, VMContext, VMOffsets};
+use alloc::string::ToString;
+use core::alloc::Layout;
+use core::num::NonZeroUsize;
+use core::range::Range;
 
 #[derive(Debug)]
-pub struct OwnedVMContext(MmapVec<u8>);
+pub struct OwnedVMContext {
+    frames: FrameList,
+    range: Range<VirtualAddress>,
+}
 
 impl OwnedVMContext {
+    #[expect(tail_expr_drop_order, reason = "")]
+    #[expect(clippy::unnecessary_wraps, reason = "TODO")]
     pub fn try_new(
         aspace: &mut AddressSpace,
         offsets: &VMOffsets,
     ) -> crate::wasm::Result<Self> {
-        let vec = MmapVec::new_zeroed(aspace, offsets.size() as usize)?;
-        Ok(Self(vec))
+        let layout = Layout::from_size_align(offsets.size() as usize, arch::PAGE_SIZE).unwrap();
+        let frames = frame_alloc::alloc_contiguous(layout.pad_to_align()).unwrap();
+
+        log::trace!("{frames:?}");
+        let phys_range = {
+            let start = frames.first().unwrap().addr();
+            Range::from(start..start.checked_add(layout.pad_to_align().size()).unwrap())
+        };
+        let vmo = Vmo::new_wired(phys_range);
+
+        let virt_range = aspace
+            .map(
+                layout,
+                vmo,
+                0,
+                crate::vm::Permissions::READ | crate::vm::Permissions::WRITE,
+                Some("VMContext".to_string()),
+            )
+            .unwrap()
+            .range;
+        aspace.ensure_mapped(virt_range, true).unwrap();
+
+        // let mut batch = Batch::new(&mut aspace.arch);
+        // region.ensure_mapped(&mut batch, virt_range, true).unwrap();
+        // batch.flush().unwrap();
+
+        Ok(Self {
+            frames,
+            range: virt_range,
+        })
     }
 
     pub fn as_ptr(&self) -> *const VMContext {
-        self.0.as_ptr().cast()
+        self.range.start.as_ptr().cast()
     }
 
     pub fn as_mut_ptr(&mut self) -> *mut VMContext {
-        self.0.as_mut_ptr().cast()
+        self.range.start.as_mut_ptr().cast()
     }
 
     pub unsafe fn plus_offset<T>(&self, offset: u32) -> *const T {
         // Safety: caller has to ensure offset is valid
diff --git a/kernel/src/wasm/runtime/table.rs b/kernel/src/wasm/runtime/table.rs
index e38eeb89..292f2614 100644
--- a/kernel/src/wasm/runtime/table.rs
+++ b/kernel/src/wasm/runtime/table.rs
@@ -25,7 +25,7 @@ impl Table {
             MmapVec::new_empty()
         } else {
             let mut elements = MmapVec::new_zeroed(aspace, reserve_size)?;
-            elements.extend_with(usize::try_from(desc.minimum).unwrap(), None);
+            elements.extend_with(aspace, usize::try_from(desc.minimum).unwrap(), None);
             elements
         };
diff --git a/kernel/src/wasm/runtime/vmoffsets.rs b/kernel/src/wasm/runtime/vmoffsets.rs
index 159120e5..b84a6734 100644
--- a/kernel/src/wasm/runtime/vmoffsets.rs
+++ b/kernel/src/wasm/runtime/vmoffsets.rs
@@ -150,9 +150,9 @@ impl VMOffsets {
         let static_ = StaticVMOffsets::new(ptr_size);
 
         let mut offset = u32::from(static_.size());
-        let mut member_offset = |size_of_member: u32| -> u32 {
-            let out = offset;
-            offset += size_of_member;
+        let mut member_offset = |size_of_member: u32, align_of_member: u32| -> u32 {
+            // Round up to the member's alignment before reserving its space.
+            let out = offset.next_multiple_of(align_of_member);
+            offset = out + size_of_member;
             out
         };
 
@@ -168,25 +168,37 @@ impl VMOffsets {
             // offsets
             static_,
-            func_refs: member_offset(u32_size_of::<VMFuncRef>() * module.num_escaped_funcs()),
+            func_refs: member_offset(
+                u32_size_of::<VMFuncRef>() * module.num_escaped_funcs(),
+                u32_align_of::<VMFuncRef>(),
+            ),
             imported_functions: member_offset(
                 u32_size_of::<VMFunctionImport>() * module.num_imported_functions(),
+                u32_align_of::<VMFunctionImport>(),
             ),
             imported_tables: member_offset(
                 u32_size_of::<VMTableImport>() * module.num_imported_tables(),
+                u32_align_of::<VMTableImport>(),
             ),
             imported_memories: member_offset(
                 u32_size_of::<VMMemoryImport>() * module.num_imported_memories(),
+                u32_align_of::<VMMemoryImport>(),
             ),
             imported_globals: member_offset(
                 u32_size_of::<VMGlobalImport>() * module.num_imported_globals(),
+                u32_align_of::<VMGlobalImport>(),
+            ),
+            tables: member_offset(
+                u32_size_of::<VMTableDefinition>() * module.num_defined_tables(),
+                u32_align_of::<VMTableDefinition>(),
             ),
-            tables: member_offset(u32_size_of::<VMTableDefinition>() * module.num_defined_tables()),
             memories: member_offset(
                 u32_size_of::<VMMemoryDefinition>() * module.num_defined_memories(),
+                u32_align_of::<VMMemoryDefinition>(),
             ),
             globals: member_offset(
                 u32_size_of::<VMGlobalDefinition>() * module.num_defined_globals(),
+                u32_align_of::<VMGlobalDefinition>(),
             ),
 
             size: offset,
@@ -395,3 +407,11 @@ impl VMOffsets {
 fn u32_size_of<T: Sized>() -> u32 {
     u32::try_from(size_of::<T>()).unwrap()
 }
+
+/// Like `mem::align_of` but returns `u32` instead of `usize`.
+///
+/// # Panics
+///
+/// Panics if the alignment of `T` is greater than `u32::MAX`.
+fn u32_align_of<T: Sized>() -> u32 {
+    u32::try_from(align_of::<T>()).unwrap()
+}
diff --git a/kernel/src/wasm/store.rs b/kernel/src/wasm/store.rs
index 79bdb36b..8ff220b1 100644
--- a/kernel/src/wasm/store.rs
+++ b/kernel/src/wasm/store.rs
@@ -1,3 +1,4 @@
+use crate::wasm::instance_allocator::PlaceholderAllocatorDontUse;
 use crate::wasm::runtime::{VMContext, VMOpaqueContext, VMVal};
 use crate::wasm::{runtime, Engine};
 use alloc::vec::Vec;
@@ -17,6 +18,8 @@ pub struct Store {
     wasm_vmval_storage: Vec<VMVal>,
     vmctx2instance: HashMap<*mut VMOpaqueContext, Stored<runtime::Instance>>,
+
+    pub(super) alloc: PlaceholderAllocatorDontUse,
 }
 
 impl Store {
@@ -32,6 +35,8 @@ impl Store {
             wasm_vmval_storage: Vec::new(),
             vmctx2instance: HashMap::new(),
+
+            alloc: PlaceholderAllocatorDontUse::default(),
         }
     }
diff --git a/kernel/src/wasm/trap.rs b/kernel/src/wasm/trap.rs
index 6892f92e..fdf6b107 100644
--- a/kernel/src/wasm/trap.rs
+++ b/kernel/src/wasm/trap.rs
@@ -18,6 +18,7 @@ pub const TRAP_NULL_REFERENCE: TrapCode =
     TrapCode::unwrap_user(Trap::NullReference as u8 + TRAP_OFFSET);
 pub const TRAP_I31_NULL_REFERENCE: TrapCode =
     TrapCode::unwrap_user(Trap::NullI31Ref as u8 + TRAP_OFFSET);
+pub const TRAP_EXIT: TrapCode = TrapCode::unwrap_user(Trap::Exit as u8 + TRAP_OFFSET);
 
 #[derive(Debug, Copy, Clone)]
 pub enum Trap {
@@ -48,6 +49,8 @@ pub enum Trap {
     IntegerDivisionByZero,
     /// Failed float-to-int conversion.
     BadConversionToInteger,
+
+    Exit,
 }
 
 impl fmt::Display for Trap {
@@ -67,6 +70,8 @@ impl fmt::Display for Trap {
             Trap::IntegerOverflow => f.write_str("integer overflow"),
             Trap::IntegerDivisionByZero => f.write_str("integer divide by zero"),
             Trap::BadConversionToInteger => f.write_str("invalid conversion to integer"),
+
+            Trap::Exit => f.write_str("exit"),
         }
     }
 }
@@ -90,6 +95,9 @@ impl Trap {
             TRAP_UNREACHABLE => Some(Trap::UnreachableCodeReached),
             TRAP_NULL_REFERENCE => Some(Trap::NullReference),
             TRAP_I31_NULL_REFERENCE => Some(Trap::NullI31Ref),
+
+            TRAP_EXIT => Some(Trap::Exit),
+
             c => {
                 log::warn!("unknown trap code {c}");
                 None
@@ -115,6 +123,8 @@ impl From<Trap> for u8 {
             Trap::IntegerOverflow => 10,
             Trap::IntegerDivisionByZero => 11,
             Trap::BadConversionToInteger => 12,
+
+            Trap::Exit => 13,
         }
     }
 }
@@ -138,6 +148,9 @@ impl TryFrom<u8> for Trap {
             10 => Ok(Self::IntegerOverflow),
             11 => Ok(Self::IntegerDivisionByZero),
             12 => Ok(Self::BadConversionToInteger),
+
+            13 => Ok(Self::Exit),
+
             _ => Err(()),
         }
     }
diff --git a/libs/riscv/src/register/mod.rs b/libs/riscv/src/register/mod.rs
index 45c808a9..b4585f12 100644
--- a/libs/riscv/src/register/mod.rs
+++ b/libs/riscv/src/register/mod.rs
@@ -99,11 +99,11 @@ macro_rules! read_composite_csr {
     };
 }
 
-macro_rules! set {
+macro_rules! write_csr {
     ($csr_number: literal) => {
-        /// Sets the CSR
+        /// Writes the CSR
         #[inline]
-        unsafe fn _set(bits: usize) {
+        unsafe fn _write(bits: usize) {
             cfg_if::cfg_if! {
                 if #[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))] {
                     unsafe {
@@ -117,21 +117,39 @@ macro_rules! write_csr {
     };
 }
 
-macro_rules! set_csr_as_usize {
+macro_rules! write_csr_as_usize {
     ($csr_number:literal) => {
-        $crate::set!($csr_number);
+        $crate::write_csr!($csr_number);
 
         /// Sets the CSR.
         ///
         /// **WARNING**: panics on non-`riscv` targets.
         #[inline]
         pub fn set(bits: usize) {
-            unsafe { _set(bits) }
+            unsafe { _write(bits) }
         }
     };
 }
 
+macro_rules! set_csr {
+    ($csr_number: literal) => {
+        /// Sets the CSR
+        #[inline]
+        unsafe fn _set(bits: usize) {
+            cfg_if::cfg_if! {
+                if #[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))] {
+                    unsafe {
+                        ::core::arch::asm!(concat!("csrrs x0, ", stringify!($csr_number), ", {0}"), in(reg) bits);
+                    }
+                } else {
+                    unimplemented!()
+                }
+            }
+        }
+    };
+}
+
-macro_rules! clear {
+macro_rules! clear_csr {
     ($csr_number: literal) => {
-        /// Writes the CSR
+        /// Clears the CSR
         #[inline]
@@ -149,8 +167,7 @@ macro_rules! clear_csr {
     };
 }
 
-#[macro_export]
-macro_rules! set_csr {
+macro_rules! set_csr_field {
     ($(#[$attr:meta])*, $set_field:ident, $e:expr) => {
         $(#[$attr])*
         #[inline]
@@ -161,9 +178,7 @@ macro_rules! set_csr_field {
     };
 }
 
-/// Convenience macro to define field clear functions for a CSR type.
-#[macro_export]
-macro_rules! clear_csr {
+macro_rules! clear_csr_field {
     ($(#[$attr:meta])*, $clear_field:ident, $e:expr) => {
         $(#[$attr])*
         #[inline]
@@ -174,14 +189,14 @@ macro_rules! clear_csr_field {
     };
 }
 
-#[macro_export]
-macro_rules! set_clear_csr {
+macro_rules!
set_clear_csr_field { ($(#[$attr:meta])*, $set_field:ident, $clear_field:ident, $e:expr) => { - $crate::set_csr!($(#[$attr])*, $set_field, $e); - $crate::clear_csr!($(#[$attr])*, $clear_field, $e); + $crate::set_csr_field!($(#[$attr])*, $set_field, $e); + $crate::clear_csr_field!($(#[$attr])*, $clear_field, $e); } } pub(crate) use { - clear, read_composite_csr, read_csr, read_csr_as, read_csr_as_usize, set, set_csr_as_usize, + clear_csr, clear_csr_field, read_composite_csr, read_csr, read_csr_as, read_csr_as_usize, + set_clear_csr_field, set_csr, set_csr_field, write_csr, write_csr_as_usize, }; diff --git a/libs/riscv/src/register/satp.rs b/libs/riscv/src/register/satp.rs index 30846619..b47d34c8 100644 --- a/libs/riscv/src/register/satp.rs +++ b/libs/riscv/src/register/satp.rs @@ -7,7 +7,7 @@ //! Supervisor Address Translation and Protection Register -use super::{read_csr_as, set}; +use super::{read_csr_as, write_csr}; use crate::Error; use core::fmt; @@ -18,7 +18,7 @@ pub struct Satp { } read_csr_as!(Satp, 0x180); -set!(0x180); +write_csr!(0x180); /// Sets the register to corresponding page table mode, physical page number and address space id. /// @@ -83,7 +83,7 @@ pub unsafe fn try_set(mode: Mode, asid: u16, ppn: usize) -> crate::Result<()> { } else { let bits = (mode as usize) << 60 | ((asid as usize) << 44) | ppn; unsafe { - _set(bits); + _write(bits); } Ok(()) } diff --git a/libs/riscv/src/register/scause.rs b/libs/riscv/src/register/scause.rs index bae48ffd..1e8a2186 100644 --- a/libs/riscv/src/register/scause.rs +++ b/libs/riscv/src/register/scause.rs @@ -7,7 +7,7 @@ //! Supervisor Cause Register -use super::{read_csr_as, set}; +use super::{read_csr_as, write_csr}; use core::fmt; use core::fmt::Formatter; @@ -18,14 +18,14 @@ pub struct Scause { } read_csr_as!(Scause, 0x142); -set!(0x142); +write_csr!(0x142); pub unsafe fn set(trap: Trap) { match trap { Trap::Interrupt(interrupt) => unsafe { - _set(1 << (usize::BITS as usize - 1) | interrupt as usize); + _write(1 << (usize::BITS as usize - 1) | interrupt as usize); }, - Trap::Exception(exception) => unsafe { _set(exception as usize) }, + Trap::Exception(exception) => unsafe { _write(exception as usize) }, } } diff --git a/libs/riscv/src/register/scounteren.rs b/libs/riscv/src/register/scounteren.rs index 04be000b..f538e4d8 100644 --- a/libs/riscv/src/register/scounteren.rs +++ b/libs/riscv/src/register/scounteren.rs @@ -5,8 +5,8 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -use crate::register::{clear, read_csr_as, set}; -use crate::{set_clear_csr, Error}; +use crate::register::{clear_csr, read_csr_as, set_clear_csr_field, set_csr}; +use crate::Error; /// Scounteren register #[derive(Clone, Copy)] @@ -15,18 +15,18 @@ pub struct Scounteren { } read_csr_as!(Scounteren, 0x106); -set!(0x106); -clear!(0x106); +set_csr!(0x106); +clear_csr!(0x106); -set_clear_csr!( +set_clear_csr_field!( /// User cycle Enable , set_cy, clear_cy, 1 << 0_i32); -set_clear_csr!( +set_clear_csr_field!( /// User time Enable , set_tm, clear_tm, 1 << 1_i32); -set_clear_csr!( +set_clear_csr_field!( /// User instret Enable , set_ir, clear_ir, 1 << 2_i32); diff --git a/libs/riscv/src/register/sepc.rs b/libs/riscv/src/register/sepc.rs index bec8daa8..e5346f54 100644 --- a/libs/riscv/src/register/sepc.rs +++ b/libs/riscv/src/register/sepc.rs @@ -7,7 +7,7 @@ //! 
Supervisor Exception Program Counter Register -use super::{read_csr_as_usize, set_csr_as_usize}; +use super::{read_csr_as_usize, write_csr_as_usize}; read_csr_as_usize!(0x141); -set_csr_as_usize!(0x141); +write_csr_as_usize!(0x141); diff --git a/libs/riscv/src/register/sie.rs b/libs/riscv/src/register/sie.rs index bcd34551..eb26ffe6 100644 --- a/libs/riscv/src/register/sie.rs +++ b/libs/riscv/src/register/sie.rs @@ -7,7 +7,7 @@ //! Supervisor Interrupt Enable Register -use super::{clear, read_csr_as, set}; +use super::{clear_csr, read_csr_as, set_csr}; use core::fmt; use core::fmt::Formatter; @@ -18,8 +18,8 @@ pub struct Sie { } read_csr_as!(Sie, 0x104); -set!(0x104); -clear!(0x104); +set_csr!(0x104); +clear_csr!(0x104); pub unsafe fn set_ssie() { unsafe { diff --git a/libs/riscv/src/register/sscratch.rs b/libs/riscv/src/register/sscratch.rs index ede4fb3b..14628307 100644 --- a/libs/riscv/src/register/sscratch.rs +++ b/libs/riscv/src/register/sscratch.rs @@ -7,7 +7,7 @@ //! sscratch register -use crate::register::{read_csr_as_usize, set_csr_as_usize}; +use crate::register::{read_csr_as_usize, write_csr_as_usize}; read_csr_as_usize!(0x140); -set_csr_as_usize!(0x140); +write_csr_as_usize!(0x140); diff --git a/libs/riscv/src/register/sstatus.rs b/libs/riscv/src/register/sstatus.rs index 755ce2b2..7a1ef346 100644 --- a/libs/riscv/src/register/sstatus.rs +++ b/libs/riscv/src/register/sstatus.rs @@ -7,7 +7,8 @@ //! Supervisor Status Register -use super::{clear, read_csr_as, set}; +use super::{clear_csr, read_csr_as, set_clear_csr_field, set_csr}; +use crate::set_csr_field; use core::fmt; use core::fmt::Formatter; @@ -17,37 +18,35 @@ pub struct Sstatus { bits: usize, } -read_csr_as!(Sstatus, 0x100); -set!(0x100); -clear!(0x100); +set_csr!(0x100); +clear_csr!(0x100); -/// Supervisor Interrupt Enable -pub unsafe fn set_sie() { - unsafe { - _set(1 << 1); - } -} - -/// Supervisor Interrupt Enable -pub unsafe fn clear_sie() { - unsafe { - _clear(1 << 1); - } -} - -/// Supervisor Previous Interrupt Enable -pub unsafe fn set_spie() { - unsafe { - _set(1 << 5); - } -} +read_csr_as!(Sstatus, 0x100); +set_clear_csr_field!( + /// User Interrupt Enable + , set_uie, clear_uie, 1 << 0_i32); +set_clear_csr_field!( + /// Supervisor Interrupt Enable + , set_sie, clear_sie, 1 << 1_i32); +set_csr_field!( + /// User Previous Interrupt Enable + , set_upie, 1 << 4_i32); +set_csr_field!( + /// Supervisor Previous Interrupt Enable + , set_spie, 1 << 5_i32); +set_clear_csr_field!( + /// Permit Supervisor User Memory access + , set_sum, clear_sum, 1 << 18_i32); +set_clear_csr_field!( + /// Make eXecutable Readable + , set_mxr, clear_mxr, 1 << 19_i32); /// Supervisor Previous Privilege Mode #[inline] pub unsafe fn set_spp(spp: SPP) { match spp { - SPP::Supervisor => unsafe { _set(1 << 8) }, - SPP::User => unsafe { _clear(1 << 8) }, + SPP::Supervisor => unsafe { _set(1 << 8_i32) }, + SPP::User => unsafe { _clear(1 << 8_i32) }, } } @@ -61,20 +60,6 @@ pub unsafe fn set_fs(fs: FS) { } } -/// Permit Supervisor User Memory access -pub unsafe fn set_sum() { - unsafe { - _set(1 << 18); - } -} - -/// Permit Supervisor User Memory access -pub unsafe fn clear_sum() { - unsafe { - _clear(1 << 18); - } -} - #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SPP { Supervisor = 1, diff --git a/libs/riscv/src/register/stval.rs b/libs/riscv/src/register/stval.rs index be22b367..705b4491 100644 --- a/libs/riscv/src/register/stval.rs +++ b/libs/riscv/src/register/stval.rs @@ -7,7 +7,7 @@ //! 
Supervisor Trap Value Register
 
-use super::{read_csr_as_usize, set_csr_as_usize};
+use super::{read_csr_as_usize, write_csr_as_usize};
 
 read_csr_as_usize!(0x143);
-set_csr_as_usize!(0x143);
+write_csr_as_usize!(0x143);
diff --git a/libs/riscv/src/register/stvec.rs b/libs/riscv/src/register/stvec.rs
index 4da9a63a..a975318f 100644
--- a/libs/riscv/src/register/stvec.rs
+++ b/libs/riscv/src/register/stvec.rs
@@ -7,7 +7,7 @@
 
 //! Supervisor Trap Vector Base Address Register
 
-use super::{read_csr_as, set};
+use super::{read_csr_as, set_csr};
 use core::fmt;
 use core::fmt::Formatter;
 
@@ -16,7 +16,7 @@
 pub struct Stvec {
     bits: usize,
 }
 read_csr_as!(Stvec, 0x105);
-set!(0x105);
+set_csr!(0x105);
 
 pub unsafe fn write(base: usize, mode: Mode) {
     unsafe {
diff --git a/libs/wavltree/src/lib.rs b/libs/wavltree/src/lib.rs
index 744677ba..2128b322 100644
--- a/libs/wavltree/src/lib.rs
+++ b/libs/wavltree/src/lib.rs
@@ -667,7 +667,10 @@ where
     /// Caller has to ensure the pointer points to a valid node in the tree.
     #[inline]
     pub unsafe fn cursor_mut_from_ptr(&mut self, ptr: NonNull<T>) -> CursorMut<'_, T> {
-        debug_assert!(unsafe { T::links(ptr).as_ref() }.is_linked());
+        debug_assert!(
+            unsafe { T::links(ptr).as_ref() }.is_linked(),
+            "ptr {ptr:?} is not a linked element"
+        );
         CursorMut {
             current: Some(ptr),
             _tree: self,