/src/task.rs
mod entry;
mod error;
use core::arch::global_asm;
use core::cell::RefCell;
use core::fmt::Write;
use cortex_m::asm::wfi;
use critical_section::Mutex;
use embedded_hal::digital::v2::OutputPin;
pub use entry::TaskState;
pub(crate) use entry::{TaskEntry, TaskRegisters};
pub use error::TaskError;
use crate::bsp::pac::SCB;
use crate::capabilities::{with_cap_registry, CapToken, CapType, MAX_CAPS};
use crate::mpu::{MemoryMap, MemorySegment, N_PAGES};
use crate::peripherals::with_peripherals;
use crate::timer::{ticks, Ticks};
use crate::{apps, kprint, kprintf};
const MAX_TASKS: usize = 10;
pub type TaskId = u32;
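/// The single global task manager; access it through
/// [`with_task_manager`], which wraps borrows in a critical section.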
static TASK_MANAGER: Mutex<RefCell<TaskManager>> = Mutex::new(RefCell::new(TaskManager::new()));
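/// Owns the task table, the page allocation map, and the scheduler
/// bookkeeping: which task is running now (`current_task`) and which
/// should run next (`pending_task`).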
pub struct TaskManager {
page_map: [Option<u32>; N_PAGES],
next_tid: u32,
tasks: heapless::Vec<TaskEntry, MAX_TASKS>,
current_task: usize,
pending_task: usize,
active: bool,
}
impl TaskManager {
const fn new() -> TaskManager {
TaskManager {
page_map: [None; N_PAGES],
next_tid: 0,
tasks: heapless::Vec::new(),
current_task: 0,
pending_task: 0,
active: false,
}
}
pub(crate) fn start(&mut self) {
self.active = true;
}
pub(crate) fn stop(&mut self) {
self.active = false;
}
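/// Create a new task and return its id.
///
/// Grants the requested capabilities, allocates one data page, and
/// builds an initial register frame so the task starts at `entry`
/// with `r0` = data base address and `r1` = data size in bytes.
/// Lower `priority` values are scheduled first.
///
/// For example, `schedule` relaunches the shell with:
///
/// ```ignore
/// tm.add_task(
///     "shell",
///     apps::shell::shell,
///     &[CapType::ConsoleRead, CapType::ConsoleWrite],
///     0,
/// )
/// .expect("cannot relaunch shell");
/// ```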
pub fn add_task(
&mut self,
name: &str,
entry: fn(u32, u32) -> !,
requested_caps: &[CapType],
priority: u8,
) -> Result<TaskId, TaskError> {
// Fail fast if the task table is full so the capabilities and
// data page allocated below are not leaked.
if self.tasks.is_full() {
return Err(TaskError::TooManyTasks);
}
let tid = self.next_tid;
self.next_tid += 1;
let caps: heapless::Vec<CapToken, MAX_CAPS> =
with_cap_registry(|cr| cr.take(requested_caps, tid))
.ok_or(TaskError::CapabilityUnavailable)?;
let entry = entry as *const () as u32;
let data_page = self.page_map.iter().position(|v| v.is_none());
let Some(data_page) = data_page else {
return Err(TaskError::Allocation);
};
self.page_map[data_page] = Some(tid);
let data_segment = MemorySegment::new(data_page, 1, true, false);
let mut regs = TaskRegisters::new();
regs.pc = entry;
// Reserve 0x20 bytes below the top of the data segment for the
// hardware exception frame (r0-r3, r12, lr, pc, xPSR): returning
// from the scheduler will unstack these eight words.
regs.sp = data_segment.end_address() - 0x20;
// Set up initial info about memory
regs.r0 = data_segment.start_address();
regs.r1 = data_segment.size_bytes() as u32;
let mut mem_map = MemoryMap::new();
mem_map.add_segment(data_segment);
let t = TaskEntry {
id: tid,
name: name.into(),
regs,
mem_map,
state: TaskState::Running,
priority,
io_ready: false,
ticks_ran: 0,
caps,
};
self.tasks.push(t).map_err(|_| TaskError::TooManyTasks)?;
Ok(tid)
}
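/// Pick the next task to run and record it in `pending_task`.
///
/// Wakes any sleeping task whose deadline has passed, then orders
/// the runnable tasks: I/O-ready tasks first, then ascending
/// `priority` value. The context switch itself happens later, in
/// `task_swap`.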
pub(crate) fn schedule(&mut self, now: Ticks) {
if !self.active {
return;
}
if self.tasks.is_empty() {
panic!("Scheduling with no tasks!");
}
if self.tasks.len() == 1 && self.tasks[0].name == "idle" {
kprint!("Only idle task remains - relaunching shell\r\n");
self.add_task(
"shell",
apps::shell::shell,
&[CapType::ConsoleRead, CapType::ConsoleWrite],
0,
)
.expect("cannot relaunch shell");
}
// Check if any tasks are done sleeping
for t in &mut self.tasks {
if let TaskState::Sleeping(t1) = t.state {
if now >= t1 {
t.state = TaskState::Running;
}
}
}
let mut prio_list: heapless::Vec<usize, MAX_TASKS> = self
.tasks
.iter()
.enumerate()
.filter(|(_, t)| t.state == TaskState::Running)
.map(|(i, _)| i)
.collect();
if prio_list.is_empty() {
panic!("No scheduleable tasks!");
}
prio_list.sort_unstable_by(|a, b| {
let ta = &self.tasks[*a];
let tb = &self.tasks[*b];
// Tasks that are I/O ready jump to the head of the queue.
// Otherwise, order by ascending priority value.
tb.io_ready
.cmp(&ta.io_ready)
.then(ta.priority.cmp(&tb.priority))
});
// Remove any I/O ready boosts
for i in &prio_list {
self.tasks[*i].io_ready = false;
}
// TODO: fairness for tasks of the same priority
self.pending_task = prio_list[0];
}
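/// Swap register state from the current task to the pending one.
///
/// Called from the scheduling interrupt with the interrupted task's
/// register frame. Exiting tasks are reaped here: their pages and
/// capabilities are returned before the switch.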
pub(crate) fn task_swap(&mut self, regs: &mut TaskRegisters) {
if self.pending_task == self.current_task {
self.tasks[self.current_task].ticks_ran += 1;
return;
}
// Store the registers of the current task if it is not exiting
let t = &mut self.tasks[self.current_task];
if t.state == TaskState::Exiting {
let mut t = self.tasks.remove(self.current_task);
kprintf!("Task {} exited. Ran for {} ticks\r\n", t.id, t.ticks_ran);
// Deallocate memory regions
for m in &mut self.page_map {
if let Some(tid) = m {
if *tid == t.id {
*m = None;
}
}
}
// Reclaim capabilities
with_cap_registry(|cr| {
let caps = &mut t.caps;
while !caps.is_empty() {
let c = caps.pop().unwrap();
cr.give(c, t.id);
}
});
// The removal shifted later entries down by one, so fix up
// the pending index if it pointed past the removed slot.
if self.pending_task > self.current_task {
self.pending_task -= 1;
}
} else {
t.regs = regs.clone();
}
// Set new task
self.current_task = self.pending_task;
// Restore the registers for that task
let t = &mut self.tasks[self.current_task];
t.mem_map.configure_mpu();
*regs = t.regs.clone();
t.ticks_ran += 1;
}
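/// Put the current task to sleep for `t` ticks. It becomes runnable
/// again once `schedule` sees that the deadline has passed.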
pub fn sleep(&mut self, t: Ticks) {
if self.tasks.is_empty() {
panic!("Cannot sleep with no tasks");
}
let t1 = ticks() + t;
self.tasks[self.current_task].state = TaskState::Sleeping(t1);
}
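/// Return true if the currently running task holds a token for `cap`.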
pub fn current_process_has_capability(&self, cap: CapType) -> bool {
let task = &self.tasks[self.current_task];
task.caps.iter().any(|token| token.captype() == cap)
}
/// Block the current task if it holds the capability. Panic if it doesn't.
pub(crate) fn block_current_task(&mut self, cap: CapType) {
let task = &mut self.tasks[self.current_task];
if !task.has_capability(cap) {
panic!("task {} does not have {:?}", task.id, cap);
}
with_peripherals(|p| p.led().set_low()).ok();
task.state = TaskState::Blocked(cap);
}
/// Unblock any task holding the given capability. Returns true if
/// a task was found, otherwise false.
pub fn unblock_task_with_capability(&mut self, cap: CapType) -> bool {
let task = self
.tasks
.iter_mut()
.find(|t| t.state == TaskState::Blocked(cap));
if let Some(t) = task {
// Rewind the PC by two bytes (the width of a Thumb SVC
// instruction) so the SVC call that blocked gets re-run.
t.regs.pc -= 2;
t.state = TaskState::Running;
// set I/O ready boost
t.io_ready = true;
// Set PendSV to schedule immediately after exiting this
// interrupt.
SCB::set_pendsv();
with_peripherals(|p| p.led().set_high()).ok();
true
} else {
false
}
}
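/// Mark the current task as exiting and trigger PendSV so the
/// scheduler reaps it in `task_swap`.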
pub fn exit_current_task(&mut self) {
let t = &mut self.tasks[self.current_task];
t.state = TaskState::Exiting;
SCB::set_pendsv();
}
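/// Print one line per task: id, name, state, priority (suffixed
/// with `+` while the task holds an I/O-ready boost), ticks run,
/// and the pages it has mapped.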
pub fn print_tasks(&self) {
kprintf!("TID NAME STATE PRIO TICKS MAPS\r\n");
for t in &self.tasks {
let mut format_buffer: heapless::Vec<u8, 20> = heapless::Vec::new();
write!(&mut format_buffer, "{}", t.state).expect("write!");
kprintf!(
"{:<7} {:<8} {:<10} {:<4}{} {:<6} ",
t.id,
t.name,
core::str::from_utf8(&format_buffer).unwrap(),
t.priority,
if t.io_ready { "+" } else { " " },
t.ticks_ran,
);
for (i, m) in self.page_map.iter().enumerate() {
if let Some(tid) = m {
if *tid == t.id {
kprintf!("{} ", i);
}
}
}
kprintf!("\r\n");
}
}
}
pub fn init_tasks() {
// Does nothing since TaskManager can be statically initialized
}
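/// Run `f` with exclusive access to the global [`TaskManager`].
///
/// The closure executes inside a critical section, so keep it
/// short. A minimal use, mirroring `current_task_has_capability`
/// below:
///
/// ```ignore
/// with_task_manager(|tm| tm.print_tasks());
/// ```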
pub fn with_task_manager<F, R>(mut f: F) -> R
where
F: FnMut(&mut TaskManager) -> R,
{
critical_section::with(|cs| {
let mut task_manager = TASK_MANAGER.borrow_ref_mut(cs);
f(&mut task_manager)
})
}
pub fn current_task_has_capability(cap: CapType) -> bool {
with_task_manager(|tm| tm.current_process_has_capability(cap))
}
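// `_launch` starts the first task from the register frame built by
// `add_task`. The assembly below assumes the `TaskRegisters` layout:
// r0 at offset 0, r1 at offset 4, pc at offset 24, sp at offset 32.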
extern "C" {
fn _launch(regs: &TaskRegisters) -> !;
}
global_asm!(
r#"
.global _launch
.type _launch,function
_launch:
ldr r3, [r0, #32] // load SP
ldr r2, [r0, #24] // load PC
ldr r1, [r0, #4] // load r1
ldr r0, [r0, #0] // load r0
add sp, #88 // erase the TaskRegisters struct from the stack
adds r3, #0x20 // Remove the stack space initially reserved
// for register unstacking since we aren't
// doing that here.
msr PSP, r3 // set up PSP. Do this before entering thread
// mode so that an immediate fault will still
// have some place to stack registers.
movs r4, #3
msr CONTROL, r4 // CONTROL = 3: unprivileged thread mode, SP = PSP
mov r0, r0
isb
dsb
// And away we go!
bx r2
"#
);
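/// Hand control to the scheduler. Never returns.
///
/// # Safety
///
/// Must be called exactly once, after the shell task has been added
/// in `main()`; the first task to run is hardcoded to index 1 (see
/// the FIXME below).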
pub unsafe fn launch_scheduler() -> ! {
let regs = with_task_manager(|tm| {
tm.add_task("idle", idle, &[], 255).ok();
tm.start();
// FIXME: this is only task 1 because we start the shell
// beforehand in main(). Don't hardcode this.
tm.current_task = 1;
tm.tasks[1].mem_map.configure_mpu();
tm.tasks[1].regs.clone()
});
_launch(&regs)
}
fn idle(_base: u32, _size: u32) -> ! {
loop {
wfi();
}
}