feat: user address space functionality (#246)
* feat: implement `USER` permission

* rename `MmapSlice` to `UserMmap`

* typo

* remove `UnwindSafe` bound on `catch_traps`

* remove `copy_from_user` & `copy_to_user` in favor of the more universal `with_user_memory_access`

* fix: clear sum when entering trap handler

* fixes

* fix riscv register field setting and clearing

* Update mod.rs

* fix: `with_user_memory_access` passthrough return value

* refactor: use `VirtualAddress` in error type

* fix: kernel counter creation

* more helpful assert messages

* feat: implement `core::iter::Step` for address types

* fix: respect `VMContext` field alignments

* feat: exit `array_to_wasm_trampoline` with trap instead of return

* refactor: move allocator and address space into store

* fix `UserMmap`

* run wasm tests

* correct jump to userspace

* fmt & clippy

* Update main.rs
JonasKruckenberg authored Jan 20, 2025
1 parent c1f7992 commit c566320
Showing 46 changed files with 856 additions and 562 deletions.
2 changes: 1 addition & 1 deletion kernel/fib_cpp.wat
@@ -97,7 +97,7 @@
     local.get 21
     return)
   (table (;0;) 1 1 funcref)
-  (memory (;0;) 2)
+  (memory (;0;) 1)
   (global (;0;) (mut i32) (i32.const 66560))
   (export "memory" (memory 0))
   (export "fib" (func 0))
97 changes: 15 additions & 82 deletions kernel/src/arch/riscv64/mod.rs
@@ -11,13 +11,9 @@ mod trap_handler
mod utils;
mod vm;

use crate::ensure;
use crate::error::Error;
use crate::vm::VirtualAddress;
use bitflags::bitflags;
use core::arch::asm;
use core::panic::RefUnwindSafe;
use core::ptr;
use dtb_parser::Strings;
use fallible_iterator::FallibleIterator;
use riscv::sstatus::FS;
@@ -228,87 +224,24 @@ pub fn rmb() {
 }
 }
 
-/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. `src` must be a valid pointer in userspace, `dst`
-/// must be a valid pointer in kernelspace.
-///
-/// # Errors
-///
-/// Returns an error if the pointers are invalid or the copy operation failed.
-pub fn copy_from_user<T>(src: *const T, dst: *mut T, count: usize) -> crate::Result<()>
-where
-    T: Clone + RefUnwindSafe,
-{
-    check_ranges(src, dst, count)?;
-
-    // Safety: checked above
-    unsafe { copy_inner(src, dst, count) }
-}
-
-/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. `src` must be a valid pointer in kernelspace, `dst`
-/// must be a valid pointer in userspace.
-///
-/// # Errors
-///
-/// Returns an error if the pointers are invalid or the copy operation failed.
-pub fn copy_to_user<T>(src: *const T, dst: *mut T, count: usize) -> crate::Result<()>
-where
-    T: Clone + RefUnwindSafe,
-{
-    check_ranges(dst, src, count)?;
-
-    // Safety: checked above
-    unsafe { copy_inner(src, dst, count) }
-}
-
-fn check_ranges<T>(user: *const T, kernel: *const T, count: usize) -> crate::Result<()> {
-    // ensure slice is in user space
-    ensure!(
-        VirtualAddress::new(user as usize).is_some_and(|addr| addr.is_user_accessible()),
-        Error::InvalidArgument
-    );
-    ensure!(
-        VirtualAddress::new(user as usize)
-            .and_then(|addr| addr.checked_add(count * size_of::<T>()))
-            .is_some_and(|addr| addr.is_user_accessible()),
-        Error::InvalidArgument
-    );
-
-    // ensure src slice is in kernel space
-    ensure!(
-        VirtualAddress::new(kernel as usize).is_some_and(is_kernel_address),
-        Error::InvalidArgument
-    );
-    ensure!(
-        VirtualAddress::new(kernel as usize)
-            .and_then(|addr| addr.checked_add(count * size_of::<T>()))
-            .is_some_and(is_kernel_address),
-        Error::InvalidArgument
-    );
-
-    Ok(())
-}
-
-unsafe fn copy_inner<T>(src: *const T, dst: *mut T, count: usize) -> crate::Result<()>
-where
-    T: Clone + RefUnwindSafe,
-{
-    crate::trap_handler::catch_traps(|| {
-        // Allow supervisor access to user memory
-        // Safety: register access
-        unsafe {
-            sstatus::set_sum();
-        }
-
-        // Safety: checked by caller and `catch_traps`
-        unsafe {
-            ptr::copy_nonoverlapping(src, dst, count);
-        }
-
-        // Disable supervisor access to user memory
-        // Safety: register access
-        unsafe {
-            sstatus::clear_sum();
-        }
-    })
-    .map_err(|_| Error::AccessDenied)
-}
+#[inline]
+pub unsafe fn with_user_memory_access<F, R>(f: F) -> R
+where
+    F: FnOnce() -> R,
+{
+    // Allow supervisor access to user memory
+    // Safety: register access
+    unsafe {
+        sstatus::set_sum();
+    }
+
+    let r = f();
+
+    // Disable supervisor access to user memory
+    // Safety: register access
+    unsafe {
+        sstatus::clear_sum();
+    }
+
+    r
+}
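Note: `with_user_memory_access` just brackets the closure with SUM set/clear and passes the return value through; fault handling now lives with the caller via `catch_traps`. A minimal usage sketch — the helper and the `crate::arch::with_user_memory_access` re-export path are assumptions, not part of this diff:

```rust
use crate::arch::with_user_memory_access;

/// Hypothetical helper: read one `u64` from a userspace pointer that the
/// caller has already range-checked.
///
/// # Safety
///
/// `src` must be valid, aligned, and point into user-accessible memory.
unsafe fn read_user_u64(src: *const u64) -> u64 {
    // SUM is set only for the duration of the closure and cleared afterwards;
    // the closure's return value is passed straight through.
    unsafe { with_user_memory_access(|| src.read()) }
}
```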
17 changes: 12 additions & 5 deletions kernel/src/arch/riscv64/trap_handler.rs
@@ -8,6 +8,7 @@
 use super::utils::{define_op, load_fp, load_gp, save_fp, save_gp};
 use crate::arch::PAGE_SIZE;
 use crate::trap_handler::TrapReason;
+use crate::vm::VirtualAddress;
 use crate::TRAP_STACK_SIZE_PAGES;
 use core::arch::{asm, naked_asm};
 use riscv::scause::{Exception, Interrupt, Trap};
@@ -84,7 +85,7 @@ unsafe extern "C" fn default_trap_entry() {
     unsafe {
         naked_asm! {
             ".align 2",
-            // "mv t0, sp", // save the correct stack pointer
+
             "csrrw sp, sscratch, sp", // sp points to the TrapFrame
             "add sp, sp, -0x210",
@@ -251,12 +252,18 @@ fn default_trap_handler(
     a6: usize,
     a7: usize,
 ) -> *mut TrapFrame {
+    // Clear the SUM bit to prevent userspace memory access in case we interrupted the kernel
+    // Safety: register access
+    unsafe {
+        sstatus::clear_sum();
+    }
+
     let cause = scause::read().cause();
 
-    log::trace!("{:?}", sstatus::read());
     log::trace!("trap_handler cause {cause:?}, a1 {a1:#x} a2 {a2:#x} a3 {a3:#x} a4 {a4:#x} a5 {a5:#x} a6 {a6:#x} a7 {a7:#x}");
     let epc = sepc::read();
     let tval = stval::read();
+    log::trace!("{:?};epc={epc:#x};tval={tval:#x}", sstatus::read());

let reason = match cause {
Trap::Interrupt(Interrupt::SupervisorSoft | Interrupt::VirtualSupervisorSoft) => {
@@ -286,9 +293,9 @@
};

crate::trap_handler::begin_trap(crate::trap_handler::Trap {
-        pc: epc,
-        fp: 0,
-        faulting_address: tval,
+        pc: VirtualAddress::new(epc).unwrap(),
+        fp: VirtualAddress::default(),
+        faulting_address: VirtualAddress::new(tval).unwrap(),
reason,
});

22 changes: 21 additions & 1 deletion kernel/src/arch/riscv64/vm.rs
@@ -587,13 +587,31 @@ impl PageTableEntry {
bitflags! {
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub struct PTEFlags: usize {
/// Indicates the page table entry is initialized
const VALID = 1 << 0;
/// Whether the page is readable
const READ = 1 << 1;
/// Whether the page is writable
const WRITE = 1 << 2;
/// Whether the page is executable
const EXECUTE = 1 << 3;
/// Whether the page is accessible to user mode.
///
/// By default, pages are only accessible in supervisor mode but marking a page as user-accessible
/// allows user mode code to access the page too.
const USER = 1 << 4;
/// Designates a global mapping.
///
/// Global mappings exist in all address spaces.
///
/// Note that as stated in the RISC-V privileged spec, forgetting to mark a global mapping as global
/// is *fine* since it just results in slower performance. However, marking a non-global mapping as
/// global by accident will result in undefined behaviour (the CPU might use any of the competing
/// mappings for the address).
const GLOBAL = 1 << 5;
/// Indicates the page has been read, written, or executed from.
const ACCESSED = 1 << 6;
/// Indicates the page has been written to.
const DIRTY = 1 << 7;
}
}
@@ -602,13 +620,15 @@ impl From<crate::vm::Permissions> for PTEFlags {
     fn from(flags: crate::vm::Permissions) -> Self {
         use crate::vm::Permissions;
 
-        let mut out = Self::VALID | Self::DIRTY | Self::ACCESSED;
+        // we currently don't use the accessed & dirty bits, and it's recommended to set them if unused
+        let mut out = Self::VALID | Self::ACCESSED | Self::DIRTY;
 
         for flag in flags {
             match flag {
                 Permissions::READ => out.insert(Self::READ),
                 Permissions::WRITE => out.insert(Self::WRITE),
                 Permissions::EXECUTE => out.insert(Self::EXECUTE),
+                Permissions::USER => out.insert(Self::USER),
                 _ => unreachable!(),
             }
         }
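To see the new `USER` bit end to end, a sketch under the assumption that `crate::vm::Permissions` is a bitflags-style type with a matching `USER` flag (per the `feat: implement USER permission` bullet):

```rust
use crate::vm::Permissions;

// A user-accessible read+write mapping. The conversion always sets VALID and
// pre-sets ACCESSED/DIRTY, since the kernel doesn't use A/D tracking and the
// RISC-V spec recommends setting those bits when they are unused.
let pte = PTEFlags::from(Permissions::READ | Permissions::WRITE | Permissions::USER);
assert!(pte.contains(PTEFlags::VALID | PTEFlags::READ | PTEFlags::WRITE | PTEFlags::USER));
assert!(!pte.contains(PTEFlags::GLOBAL)); // user mappings are per address space
```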
3 changes: 2 additions & 1 deletion kernel/src/main.rs
@@ -14,6 +14,7 @@
 #![feature(debug_closure_helpers)]
 #![expect(internal_features, reason = "panic internals")]
 #![feature(std_internals, panic_can_unwind, fmt_internals)]
+#![feature(step_trait)]
 #![expect(dead_code, reason = "TODO")] // TODO remove
 #![expect(edition_2024_expr_fragment_specifier, reason = "vetted")]
@@ -59,7 +60,7 @@ pub const TRAP_STACK_SIZE_PAGES: usize = 64; // TODO find a lower more appropria
/// doesn't cause startup slowdown & inefficient mapping, but large enough so we can bootstrap
/// our own virtual memory subsystem. At that point we are no longer reliant on this initial heap
/// size and can dynamically grow the heap as needed.
-pub const INITIAL_HEAP_SIZE_PAGES: usize = 4096; // 32 MiB
+pub const INITIAL_HEAP_SIZE_PAGES: usize = 4096 * 2; // 32 MiB

pub type Result<T> = core::result::Result<T, Error>;

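The doubling makes the constant agree with its comment, assuming 4 KiB base pages (this arithmetic is an editorial check, not part of the diff):

```rust
const PAGE_SIZE: usize = 4096; // assumption: 4 KiB base pages
const INITIAL_HEAP_SIZE_PAGES: usize = 4096 * 2;
// 8192 pages * 4096 bytes = 33_554_432 bytes = 32 MiB. The old value of
// 4096 pages was only 16 MiB despite the "32 MiB" comment.
const _: () = assert!(INITIAL_HEAP_SIZE_PAGES * PAGE_SIZE == 32 * 1024 * 1024);
```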
18 changes: 9 additions & 9 deletions kernel/src/metrics.rs
@@ -32,26 +32,26 @@ use core::sync::atomic::{AtomicU64, Ordering};
 #[macro_export]
 macro_rules! counter {
     ($name:expr) => {{
-        let name: &str = $name;
-
-        #[unsafe(link_section = concat!(".bss.kcounter.", name))]
-        static ARENA: $crate::thread_local::ThreadLocal<AtomicU64> =
+        #[unsafe(link_section = concat!(".bss.kcounter.", $name))]
+        static ARENA: $crate::thread_local::ThreadLocal<::core::sync::atomic::AtomicU64> =
             $crate::thread_local::ThreadLocal::new();
 
-        Counter {
-            arena: &ARENA,
-            name,
-        }
+        Counter::new(&ARENA, $name)
     }};
 }
 
 /// A kernel counter.
-struct Counter {
+pub struct Counter {
     arena: &'static ThreadLocal<AtomicU64>,
     name: &'static str,
 }
 
 impl Counter {
+    #[doc(hidden)]
+    pub const fn new(arena: &'static ThreadLocal<AtomicU64>, name: &'static str) -> Self {
+        Self { arena, name }
+    }
+
     /// Increment the counter.
     pub fn increment(&self, value: u64) {
         self.arena
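The macro fix matters because `concat!` only accepts literals: splicing `$name` directly into the `link_section` attribute is what gives each counter its own `.bss.kcounter.*` section, which the old runtime `let name` indirection could not do. A usage sketch with a made-up counter name:

```rust
use crate::metrics::Counter;

// Expands to a dedicated per-thread atomic placed in its own
// `.bss.kcounter.traps_taken` linker section.
let traps_taken = crate::counter!("traps_taken");
traps_taken.increment(1);
```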
27 changes: 12 additions & 15 deletions kernel/src/trap_handler.rs
@@ -6,12 +6,12 @@
 // copied, modified, or distributed except according to those terms.
 
 use crate::arch::longjmp;
+use crate::vm::VirtualAddress;
 use crate::{arch, vm};
 use core::cell::Cell;
 use core::fmt::Write;
 use core::mem::{ManuallyDrop, MaybeUninit};
 use core::ops::ControlFlow;
-use core::panic::UnwindSafe;
 use core::ptr;
 use core::ptr::addr_of_mut;
 use thread_local::thread_local;
@@ -23,9 +23,9 @@ thread_local! {
 
 #[derive(Debug, Copy, Clone)]
 pub struct Trap {
-    pub pc: usize,
-    pub fp: usize,
-    pub faulting_address: usize,
+    pub pc: VirtualAddress,
+    pub fp: VirtualAddress,
+    pub faulting_address: VirtualAddress,
     pub reason: TrapReason,
 }

@@ -103,18 +103,10 @@ pub fn resume_trap(trap: Trap) -> ! {
 /// result if the closure didn't trigger a trap, and will return `Err(trap)` if it did. The `trap` object
 /// holds further information about the traps instruction pointer, faulting address and trap reason.
 ///
-/// # `UnwindSafe`
-///
-/// This function borrows the [`UnwindSafe`] trait bound from [`catch_unwind`][1] for the same reasons.
-/// A hardware trap might happen while a data structure is in a temporarily invalid state (i.e. during
-/// mutation) and continuing to access such data would lead to hard to debug bugs. If in the future we
-/// determine the restrictions implied by `UnwindSafe` aren't enough for the purposes of
-/// signal safety we can introduce a new trait.
-///
-/// [1]: [crate::panic::catch_unwind]
 pub fn catch_traps<F, R>(f: F) -> Result<R, Trap>
 where
-    F: FnOnce() -> R + UnwindSafe,
+    F: FnOnce() -> R,
 {
union Data<R> {
// when the closure completed successfully, this will hold the return
@@ -160,8 +152,13 @@
}
}

-fn fault_resume_panic(reason: TrapReason, pc: usize, fp: usize, faulting_address: usize) -> ! {
-    panic!("UNCAUGHT KERNEL TRAP {reason:?} pc={pc:#x};fp={fp:#x};faulting_address={faulting_address:#x};");
+fn fault_resume_panic(
+    reason: TrapReason,
+    pc: VirtualAddress,
+    fp: VirtualAddress,
+    faulting_address: VirtualAddress,
+) -> ! {
+    panic!("UNCAUGHT KERNEL TRAP {reason:?} pc={pc};fp={fp};faulting_address={faulting_address};");
}

/// Begins processing a trap.
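Dropping the `UnwindSafe` bound lets `catch_traps` wrap closures that capture raw pointers and other non-unwind-safe state, which is what the new user-memory path needs. A sketch of the combined flow (the user address is made up):

```rust
// Fallibly read a `u64` from userspace: a bad pointer faults, and
// `catch_traps` converts the fault into `Err(Trap)` instead of a kernel panic.
let user_ptr = 0x1000 as *const u64; // hypothetical user address
let result = crate::trap_handler::catch_traps(|| {
    // Safety: SUM is enabled only while the closure runs.
    unsafe { crate::arch::with_user_memory_access(|| user_ptr.read()) }
});
match result {
    Ok(value) => log::trace!("read {value:#x} from userspace"),
    Err(trap) => log::trace!("faulted at {} ({:?})", trap.faulting_address, trap.reason),
}
```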
36 changes: 36 additions & 0 deletions kernel/src/vm/address.rs
@@ -225,6 +225,38 @@ macro_rules! address_impl {
.finish()
}
}

impl core::iter::Step for $addr {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
core::iter::Step::steps_between(&start.0, &end.0)
}

fn forward_checked(start: Self, count: usize) -> Option<Self> {
core::iter::Step::forward_checked(start.0, count).map(Self)
}

fn forward(start: Self, count: usize) -> Self {
Self(core::iter::Step::forward(start.0, count))
}

unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
// Safety: checked by the caller
Self(unsafe { core::iter::Step::forward_unchecked(start.0, count) })
}

fn backward_checked(start: Self, count: usize) -> Option<Self> {
core::iter::Step::backward_checked(start.0, count).map(Self)
}

fn backward(start: Self, count: usize) -> Self {
Self(core::iter::Step::backward(start.0, count))
}

unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
// Safety: checked by the caller
Self(unsafe { core::iter::Step::backward_unchecked(start.0, count) })
}
}
};
}

Expand Down Expand Up @@ -291,6 +323,9 @@ macro_rules! address_range_impl {
let b = Range::from(other.end..self.end);
((!a.is_empty()).then_some(a), (!b.is_empty()).then_some(b))
}
fn clamp(&self, range: Self) -> Self {
Range::from(self.start.max(range.start)..self.end.min(range.end))
}
};
}

@@ -363,6 +398,7 @@ pub trait AddressRangeExt {
fn difference(&self, other: Self) -> (Option<Self>, Option<Self>)
where
Self: Sized;
fn clamp(&self, range: Self) -> Self;
}

impl AddressRangeExt for Range<PhysicalAddress> {
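With `core::iter::Step` implemented, address ranges iterate like integer ranges, which pairs naturally with the new `clamp` helper. A sketch (the addresses and page size are illustrative assumptions):

```rust
use crate::vm::VirtualAddress;

const PAGE_SIZE: usize = 4096; // assumption: 4 KiB base pages
let start = VirtualAddress::new(0x1000).unwrap();
let end = start.checked_add(4 * PAGE_SIZE).unwrap();
// `Range<VirtualAddress>` is now an `Iterator`, so `step_by` visits page starts.
for page in (start..end).step_by(PAGE_SIZE) {
    log::trace!("visiting page at {page}");
}
```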