MEMORY {
BOOT2 (r) : ORIGIN = 0x10000000, LENGTH = 0x100
FLASH (rx) : ORIGIN = 0x10000100, LENGTH = 1024K - 0x100
- ARAM (rw) : ORIGIN = 0x20000000, LENGTH = 256K
+ /* 252K = 0x3F000: ARAM now ends exactly at 0x2003F000, where the kernel
+    RAM region begins, instead of overlapping it by 4K. Must stay in sync
+    with APPRAM_LEN in the mpu module. */
+ ARAM (rw) : ORIGIN = 0x20000000, LENGTH = 252K
RAM (rw) : ORIGIN = 0x2003F000, LENGTH = 8K
RAM2 (rw) : ORIGIN = 0x20041000, LENGTH = 4K
}
fn cat(_base: u32, _size: u32) -> ! {
stdlib::sleep(1000);
unsafe {
- let p = 0x4000 as *const u32;
+ // Try to read kernel memory
+ let p = 0x20040000 as *const u32;
let b = *p;
stdlib::sleep(b);
}
mod capabilities;
mod console;
mod hardfault;
+mod mpu;
mod panic;
mod peripherals;
mod stdlib;
kprintf!("Kernel started: {} ticks\r\n", ticks);
with_task_manager(|tm| {
- tm.add_task(apps::shell::shell, &[CapType::ConsoleRead, CapType::ConsoleWrite], 0).expect("launch task one");
+ tm.add_task("shell", apps::shell::shell, &[CapType::ConsoleRead, CapType::ConsoleWrite], 0).expect("launch task one");
});
unsafe {
- task::launch_idle_task()
+ task::launch_scheduler()
}
}
+use core::cell::RefCell;
+use core::mem::MaybeUninit;
+
+use critical_section::Mutex;
+
+const APPRAM_LEN: usize = 0x3F000;
+pub const PAGE_SIZE: u32 = 0x1000; // 4K
+pub const N_PAGES: usize = APPRAM_LEN / PAGE_SIZE as usize;
+const MAX_SEGMENTS: usize = 4;
+const MAX_REGIONS: usize = 4;
+/// MPU regions shared by every task, programmed into the lowest-numbered
+/// hardware slots ahead of each task's own regions and segments.
+const DEFAULT_MEMORY_REGIONS: &[MemoryRegion] = &[
+ // ROM and Flash: the whole 512M starting at 0x00000000, read-only but
+ // executable, so unprivileged code can run from flash and ROM routines.
+ MemoryRegion {
+ start: 0,
+ size: MemoryRegionSize::Sz512M,
+ subregions: 0,
+ write: false,
+ execute: true,
+ },
+ // Kernel RAM RO - needed temporarily because of ROM routine address caching
+ // (4K at 0x2003F000 — the RAM region from the linker map — readable but
+ // not writable or executable from tasks).
+ MemoryRegion {
+ start: 0x2003F000,
+ size: MemoryRegionSize::Sz4K,
+ subregions: 0,
+ write: false,
+ execute: false,
+ },
+];
+
+// Backing store for all task pages. Placed in the linker's ARAM region via
+// the .appram section; APPRAM_LEN (0x3F000 = 252K) matches ARAM's LENGTH.
+// MaybeUninit so startup does not have to zero-initialize the whole 252K.
+#[link_section = ".appram"]
+static mut APPRAM: MaybeUninit<[u8; APPRAM_LEN]> = MaybeUninit::uninit();
+
+// The MPU peripheral, stashed here by init_mpu() so configure_mpu() can
+// reprogram it on context switches. Guarded by a critical section.
+static MPU_DEV: Mutex<RefCell<Option<cortex_m::peripheral::MPU>>> = Mutex::new(RefCell::new(None));
+
+/// A contiguous run of 4K pages inside APPRAM granted to one task,
+/// together with its access permissions.
+#[derive(Debug)]
+pub struct MemorySegment {
+ page: usize, // first APPRAM page index
+ size: usize, // length in pages (1..=8)
+ write: bool, // task may write
+ execute: bool, // task may execute
+}
+
+impl MemorySegment {
+ /// Creates a segment of `size` pages starting at APPRAM page `page`.
+ /// Panics if `size` exceeds 8 (the MPU's subregion count).
+ pub fn new(page: usize, size: usize, write: bool, execute: bool) -> MemorySegment {
+ assert!(size <= 8);
+ MemorySegment {
+ page,
+ size,
+ write,
+ execute,
+ }
+ }
+
+ /// Absolute start address of the segment inside APPRAM.
+ // NOTE(review): region programming assumes this is 4K-aligned, i.e. that
+ // the linker places .appram on a PAGE_SIZE boundary — TODO confirm.
+ pub fn start_address(&self) -> u32 {
+ let appram_start = unsafe { APPRAM.as_ptr() as u32 };
+ appram_start + (self.page as u32) * PAGE_SIZE
+ }
+
+ /// One past the last byte of the segment (used as the initial stack top).
+ pub fn end_address(&self) -> u32 {
+ self.start_address() + self.size_bytes() as u32
+ }
+
+ /// Segment length in bytes.
+ pub fn size_bytes(&self) -> usize {
+ self.size * PAGE_SIZE as usize
+ }
+
+ /// Converts the segment to a raw MPU region.
+ // NOTE(review): the region is hardcoded to Sz4K with all subregions
+ // enabled, so only size == 1 segments are represented faithfully; a
+ // multi-page segment would get a region covering just its first page.
+ // The commented-out SRD computation below hints at the intended fix
+ // (a larger region with subregion disable bits) — TODO complete it.
+ pub fn as_region(&self) -> MemoryRegion {
+ //let subregions = !(!(0xFF << self.size) << self.page);
+ let subregions = 0;
+ MemoryRegion {
+ start: self.start_address(),
+ size: MemoryRegionSize::Sz4K,
+ subregions,
+ write: self.write,
+ execute: self.execute,
+ }
+ }
+}
+
+/// MPU region sizes. Each discriminant is the raw value of the RASR SIZE
+/// field: a region spans 2^(SIZE+1) bytes, so Sz256 = 7 (2^8) up to
+/// Sz4G = 31 (2^32).
+#[allow(dead_code)]
+#[derive(Debug, Clone, Copy)]
+pub enum MemoryRegionSize {
+ Sz256 = 7,
+ Sz512 = 8,
+ Sz1K = 9,
+ Sz2K = 10,
+ Sz4K = 11,
+ Sz8K = 12,
+ Sz16K = 13,
+ Sz32K = 14,
+ Sz64K = 15,
+ Sz128K = 16,
+ Sz256K = 17,
+ Sz512K = 18,
+ Sz1M = 19,
+ Sz2M = 20,
+ Sz4M = 21,
+ Sz8M = 22,
+ Sz16M = 23,
+ Sz32M = 24,
+ Sz64M = 25,
+ Sz128M = 26,
+ Sz256M = 27,
+ Sz512M = 28,
+ Sz1G = 29,
+ Sz2G = 30,
+ Sz4G = 31,
+}
+
+/// One raw MPU region: base address, power-of-two size, subregion disable
+/// mask, and access permissions.
+#[derive(Debug)]
+pub struct MemoryRegion {
+ start: u32, // base address; hardware requires alignment to the region size
+ size: MemoryRegionSize,
+ subregions: u8, // SRD mask: a set bit DISABLES that 1/8th subregion
+ write: bool, // unprivileged write allowed
+ execute: bool, // instruction fetch allowed
+}
+
+impl MemoryRegion {
+ /// Creates a region; fields are written verbatim into RBAR/RASR.
+ pub fn new(
+ start: u32,
+ size: MemoryRegionSize,
+ subregions: u8,
+ write: bool,
+ execute: bool,
+ ) -> MemoryRegion {
+ MemoryRegion {
+ start,
+ size,
+ subregions,
+ write,
+ execute,
+ }
+ }
+
+ /// RBAR value: base address with the low 8 bits masked off (the minimum
+ /// region size is 256 bytes). VALID/REGION bits are unused; the region
+ /// number is selected via RNR instead.
+ fn rbar(&self) -> u32 {
+ self.start & 0xFFFFFF00
+ }
+
+ /// RASR value. Bit layout: XN (bit 28), AP (bits 26:24 — 0b011 = RW for
+ /// privileged and unprivileged, 0b010 = privileged RW / unprivileged RO),
+ /// TEX/S/C/B (bits 21:16), SRD (bits 15:8), SIZE (bits 5:1), ENABLE (bit 0).
+ fn rasr(&self) -> u32 {
+ (if self.execute { 0u32 } else { 1u32 << 28 } ) // XN
+ // the shift applies to the whole if/else result, not just the else arm
+ | (if self.write { 0b011 } else { 0b010 } << 24) // AP
+ | (0b000111 << 16) // TEX, S, C, B (normal writeback, shareable)
+ | ((self.subregions as u32) << 8) // SRD
+ | ((self.size as u32) << 1) // SIZE
+ | 1 // ENABLE
+ }
+}
+
+/// A task's memory map: APPRAM page segments plus extra raw MPU regions.
+/// Reprogrammed into the MPU on every context switch via `configure_mpu`.
+#[derive(Debug)]
+pub struct MemoryMap {
+    segments: heapless::Vec<MemorySegment, MAX_SEGMENTS>,
+    regions: heapless::Vec<MemoryRegion, MAX_REGIONS>,
+}
+
+impl MemoryMap {
+    /// Creates an empty map (only DEFAULT_MEMORY_REGIONS would apply).
+    pub fn new() -> MemoryMap {
+        MemoryMap {
+            segments: heapless::Vec::new(),
+            regions: heapless::Vec::new(),
+        }
+    }
+
+    /// Adds an APPRAM segment. Panics if more than MAX_SEGMENTS are added.
+    pub fn add_segment(&mut self, ms: MemorySegment) {
+        self.segments.push(ms).expect("segments full");
+    }
+
+    /// Adds a raw MPU region. Panics if more than MAX_REGIONS are added.
+    pub fn add_region(&mut self, mr: MemoryRegion) {
+        self.regions.push(mr).expect("regions full");
+    }
+
+    /// Programs the MPU: the default kernel regions first, then this map's
+    /// regions and segments, then explicitly disables every remaining
+    /// hardware slot. Disabling the leftovers matters on context switch —
+    /// otherwise regions programmed for the previously scheduled task would
+    /// keep granting it access under the new task.
+    ///
+    /// Panics — before touching the hardware — if the map needs more than
+    /// the 8 region slots the MPU provides.
+    pub fn configure_mpu(&self) {
+        // Number of region slots in the Cortex-M MPU used here.
+        const N_HW_REGIONS: u32 = 8;
+        critical_section::with(|cs| unsafe {
+            let mut mpu_dev = MPU_DEV.borrow_ref_mut(cs);
+            if let Some(ref mut mpu) = *mpu_dev {
+                // Validate the count up front so we never panic with the
+                // MPU half-programmed.
+                let total = DEFAULT_MEMORY_REGIONS.len() + self.regions.len() + self.segments.len();
+                if total as u32 > N_HW_REGIONS {
+                    panic!("Too many mpu regions: {}", total);
+                }
+
+                let mut ri = 0;
+                for region in DEFAULT_MEMORY_REGIONS {
+                    mpu.rnr.write(ri);
+                    mpu.rbar.write(region.rbar());
+                    mpu.rasr.write(region.rasr());
+                    ri += 1;
+                }
+                for region in &self.regions {
+                    mpu.rnr.write(ri);
+                    mpu.rbar.write(region.rbar());
+                    mpu.rasr.write(region.rasr());
+                    ri += 1;
+                }
+                for segment in &self.segments {
+                    let region = segment.as_region();
+                    mpu.rnr.write(ri);
+                    mpu.rbar.write(region.rbar());
+                    mpu.rasr.write(region.rasr());
+                    ri += 1;
+                }
+                // Clear RASR.ENABLE on the unused slots so nothing stale
+                // from the previous task survives this reprogramming.
+                while ri < N_HW_REGIONS {
+                    mpu.rnr.write(ri);
+                    mpu.rasr.write(0);
+                    mpu.rbar.write(0);
+                    ri += 1;
+                }
+            }
+        })
+    }
+}
+
+/// Enables the MPU and stores the peripheral for later reprogramming by
+/// `MemoryMap::configure_mpu`.
+pub fn init_mpu(mpu: cortex_m::peripheral::MPU) {
+ unsafe {
+ // CTRL = 0b101: ENABLE (bit 0) + PRIVDEFENA (bit 2, privileged
+ // background map); HFNMIENA (bit 1) clear, so the MPU is
+ // bypassed while HardFault and NMI handlers run.
+ // NOTE(review): no DSB/ISB follows this write — confirm the caller
+ // doesn't rely on the new map before the next exception boundary.
+ mpu.ctrl.write(0b101); // enable MPU; disabled during HardFault and NMI; background memory map enabled
+ }
+ critical_section::with(|cs| {
+ let mut mpu_dev = MPU_DEV.borrow_ref_mut(cs);
+ *mpu_dev = Some(mpu);
+ })
+}
use core::cell::RefCell;
use crate::bsp;
+use crate::mpu::init_mpu;
use crate::timer::init_timer;
use bsp::hal::clocks::init_clocks_and_plls;
use bsp::hal::gpio;
use bsp::hal::pac;
-use bsp::hal::timer::CountDown;
use bsp::hal::watchdog::Watchdog;
use bsp::hal::Sio;
use bsp::hal::Timer;
init_timer(core.SYST);
let timer = Timer::new(platform.TIMER, &mut platform.RESETS);
+ init_mpu(core.MPU);
+
critical_section::with(|cs| {
let mut peripherals = PERIPHERALS.borrow_ref_mut(cs);
*peripherals = Some(Peripherals {
panic!("Peripherals not initialized");
}
})
-}
-
-pub fn make_count_down() -> CountDown<'static> {
- with_peripherals(|p| {
- unsafe {
- // This is not interrupt safe but it will work for now
- core::mem::transmute(p.timer.count_down())
- }
- })
}
\ No newline at end of file
let entry = regs.r1 as *const ();
let entry = core::mem::transmute(entry);
let r = with_task_manager(|tm| {
- tm.add_task(entry, &[], 10)
+ tm.add_task("TODO", entry, &[], 10)
});
match r {
Ok(tid) => regs.r0 = tid,
#[no_mangle]
unsafe fn pendsv_handler(regs: Option<&mut TaskRegisters>) {
- let regs = regs.expect("PendSV from handler mode!?");
+ let Some(regs) = regs else {
+ // If we wind up here from handler mode; just bail. This should
+ // only happen during debugging.
+ return;
+ };
let t = ticks();
with_task_manager(|tm| {
tm.schedule(t);
use crate::bsp::pac::SCB;
use crate::capabilities::{with_cap_registry, CapToken, CapType, MAX_CAPS};
use crate::kprintf;
+use crate::mpu::{MemoryMap, MemorySegment, N_PAGES};
use crate::peripherals::with_peripherals;
use crate::timer::{ticks, Ticks};
const MAX_TASKS: usize = 10;
-#[link_section = ".appram"]
-static mut APPRAM: [u8; 0x10000] = [0u8; 0x10000];
-
pub type TaskId = u32;
static TASK_MANAGER: Mutex<RefCell<TaskManager>> = Mutex::new(RefCell::new(TaskManager::new()));
pub struct TaskManager {
- region_map: [Option<u32>; 10],
+ page_map: [Option<u32>; N_PAGES],
next_tid: u32,
tasks: heapless::Vec<TaskEntry, MAX_TASKS>,
current_task: usize,
impl TaskManager {
const fn new() -> TaskManager {
TaskManager {
- region_map: [None; 10],
+ page_map: [None; N_PAGES],
next_tid: 0,
tasks: heapless::Vec::new(),
current_task: 0,
pub fn add_task(
&mut self,
+ name: &str,
entry: fn(u32, u32) -> !,
requested_caps: &[CapType],
priority: u8,
with_cap_registry(|cr| cr.take(requested_caps, tid))
.ok_or(TaskError::CapabilityUnavailable)?;
- let region = self.region_map.iter().position(|v| v.is_none());
- let Some(region) = region else {
+ let entry = entry as *const () as u32;
+ let data_page = self.page_map.iter().position(|v| v.is_none());
+ let Some(data_page) = data_page else {
return Err(TaskError::Allocation);
};
- self.region_map[region] = Some(tid);
- let entry = entry as *const () as u32;
- let data: u32 = (unsafe { &mut APPRAM } as *mut u8 as u32) + (region as u32 * 0x1000);
- let data_size = 0x1000;
+ self.page_map[data_page] = Some(tid);
+ let data_segment = MemorySegment::new(data_page, 1, true, false);
let mut regs = TaskRegisters::new();
regs.pc = entry;
// subtract 0x20 for exception register stack because
// returning from scheduling will unstack this
- regs.sp = data + data_size - 0x20;
+ regs.sp = data_segment.end_address() - 0x20;
// Set up initial info about memory
- regs.r0 = data;
- regs.r1 = data_size;
+ regs.r0 = data_segment.start_address();
+ regs.r1 = data_segment.size_bytes() as u32;
+
+ let mut mem_map = MemoryMap::new();
+ mem_map.add_segment(data_segment);
let t = TaskEntry {
id: tid,
+ name: name.into(),
regs,
- data,
- data_size: data_size as usize,
+ mem_map,
state: TaskState::Running,
- priority: priority,
+ priority,
io_ready: false,
ticks_ran: 0,
caps,
let mut t = self.tasks.remove(self.current_task);
kprintf!("Task {} exited. Ran for {} ticks\r\n", t.id, t.ticks_ran);
// Deallocate memory regions
- for m in &mut self.region_map {
+ for m in &mut self.page_map {
if let Some(tid) = m {
if *tid == t.id {
*m = None;
// Restore the registers for that task
let t = &mut self.tasks[self.current_task];
+ t.mem_map.configure_mpu();
*regs = t.regs.clone();
t.ticks_ran += 1;
}
}
pub fn print_tasks(&self) {
- kprintf!("TID DATA ADDR STATE PRIO TICKS MAPS\r\n");
+ kprintf!("TID NAME STATE PRIO TICKS MAPS\r\n");
for t in &self.tasks {
let mut format_buffer: heapless::Vec<u8, 20> = heapless::Vec::new();
write!(&mut format_buffer, "{}", t.state).expect("write!");
kprintf!(
- "{:<7} {:<10X} {:<10} {:<4}{} {:<6} ",
+ "{:<7} {:<8} {:<10} {:<4}{} {:<6} ",
t.id,
- t.data,
+ t.name,
core::str::from_utf8(&format_buffer).unwrap(),
t.priority,
if t.io_ready { "+" } else { " " },
t.ticks_ran,
);
- for (i, m) in self.region_map.iter().enumerate() {
+ for (i, m) in self.page_map.iter().enumerate() {
if let Some(tid) = m {
if *tid == t.id {
kprintf!("{} ", i);
ldr r0, [r0, #0] // load r0
add sp, #88 // erase the TaskRegisters struct from the stack
- // set up PSP and drop privileges
+ adds r3, #0x20 // Remove the stack space initially reserved
+ // for register unstacking since we aren't
+ // doing that here.
+ msr PSP, r3 // set up PSP. Do this before entering thread
+ // mode so that an immediate fault will still
+ // have some place to stack registers.
movs r4, #3
- msr CONTROL, r4
+ msr CONTROL, r4 // and drop privileges
+ mov r0, r0
isb
dsb
- mov sp, r3 // set up stack
- add sp, #0x20 // Remove the space reserved for register
- // unstacking since we aren't doing that here.
// And away we go!
bx r2
"#
);
-pub unsafe fn launch_idle_task() -> ! {
+pub unsafe fn launch_scheduler() -> ! {
let regs = with_task_manager(|tm| {
- tm.add_task(idle, &[], 255).ok();
+ tm.add_task("idle", idle, &[], 255).ok();
tm.start();
- tm.tasks[0].regs.clone()
+ // FIXME: this is only task 1 because we start the shell
+ // beforehand in main(). Don't hardcode this.
+ tm.current_task = 1;
+ tm.tasks[1].mem_map.configure_mpu();
+ tm.tasks[1].regs.clone()
});
_launch(®s)
use core::fmt;
use crate::capabilities::{CapToken, MAX_CAPS, CapType};
+use crate::mpu::MemoryMap;
use crate::task::TaskId;
use crate::timer::Ticks;
#[derive(Debug)]
pub(crate) struct TaskEntry {
pub id: TaskId,
+ pub name: heapless::String<8>,
pub regs: TaskRegisters,
- pub data: u32,
- pub data_size: usize,
+ pub mem_map: MemoryMap,
pub state: TaskState,
pub priority: u8,
pub io_ready: bool,
#[exception]
fn SysTick() {
- let (t, pending) = critical_section::with(|cs| {
+ let pending = critical_section::with(|cs| {
let mut timer = TIMER.borrow_ref_mut(cs);
if let Some(ref mut timer) = *timer {
timer.tick();
- (timer.t, timer.gather_pending())
+ timer.gather_pending()
} else {
panic!("Timer not initialized");
}