/src/mpu.rs
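//! MPU (memory protection unit) configuration for a Cortex-M (ARMv7-M) core:
//! a fixed set of default regions plus caller-defined regions and APPRAM page
//! segments.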
use core::cell::RefCell;
use core::mem::MaybeUninit;

use critical_section::Mutex;

const APPRAM_LEN: usize = 0x3F000; // 252 KiB of application RAM
pub const PAGE_SIZE: u32 = 0x1000; // 4 KiB
pub const N_PAGES: usize = APPRAM_LEN / PAGE_SIZE as usize; // 63 pages
const MAX_SEGMENTS: usize = 4;
const MAX_REGIONS: usize = 4;
const DEFAULT_MEMORY_REGIONS: &[MemoryRegion] = &[
    // ROM and Flash
    MemoryRegion {
        start: 0,
        size: MemoryRegionSize::Sz512M,
        subregions: 0,
        write: false,
        execute: true,
    },
    // Kernel RAM RO - needed temporarily because of ROM routine address caching
    MemoryRegion {
        start: 0x2003F000,
        size: MemoryRegionSize::Sz4K,
        subregions: 0,
        write: false,
        execute: false,
    },
];

#[link_section = ".appram"]
static mut APPRAM: MaybeUninit<[u8; APPRAM_LEN]> = MaybeUninit::uninit();

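/// The MPU peripheral, stashed at init so it can be reprogrammed later from a
/// critical section.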
static MPU_DEV: Mutex<RefCell<Option<cortex_m::peripheral::MPU>>> = Mutex::new(RefCell::new(None));

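/// A run of consecutive 4 KiB pages inside APPRAM sharing one set of access
/// permissions.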
#[derive(Debug)]
pub struct MemorySegment {
    page: usize,
    size: usize,
    write: bool,
    execute: bool,
}

impl MemorySegment {
    pub fn new(page: usize, size: usize, write: bool, execute: bool) -> MemorySegment {
        // A segment is mapped through the eight 4 KiB subregions of a single
        // 32 KiB MPU region (see `as_region`), so it can span at most 8
        // pages and must not cross a 32 KiB boundary.
        assert!(size >= 1 && size <= 8);
        assert!(page % 8 + size <= 8);
        assert!(page + size <= N_PAGES);
        MemorySegment {
            page,
            size,
            write,
            execute,
        }
    }

    pub fn start_address(&self) -> u32 {
        // Use a raw pointer so no reference to the `static mut` is created.
        let appram_start = unsafe { core::ptr::addr_of!(APPRAM) as u32 };
        appram_start + (self.page as u32) * PAGE_SIZE
    }

    pub fn end_address(&self) -> u32 {
        self.start_address() + self.size_bytes() as u32
    }

    pub fn size_bytes(&self) -> usize {
        self.size * PAGE_SIZE as usize
    }

    pub fn as_region(&self) -> MemoryRegion {
        // Map the segment through one 32 KiB region whose eight 4 KiB
        // subregions cover the aligned page group containing `page`; the
        // SRD bits then disable every subregion outside [page, page + size).
        // This assumes APPRAM is 32 KiB aligned, since the MPU requires a
        // region base address aligned to the region size.
        let enabled = (((1u32 << self.size) - 1) << (self.page % 8)) as u8;
        let appram_start = unsafe { core::ptr::addr_of!(APPRAM) as u32 };
        MemoryRegion {
            start: appram_start + (self.page as u32 / 8) * 8 * PAGE_SIZE,
            size: MemoryRegionSize::Sz32K,
            subregions: !enabled, // SRD: a set bit disables that subregion
            write: self.write,
            execute: self.execute,
        }
    }
}

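/// Region size encoding for the MPU RASR SIZE field: a value of `n` selects a
/// region of 2^(n+1) bytes (e.g. `Sz4K` = 11 selects 2^12 bytes).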
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub enum MemoryRegionSize {
    Sz256 = 7,
    Sz512 = 8,
    Sz1K = 9,
    Sz2K = 10,
    Sz4K = 11,
    Sz8K = 12,
    Sz16K = 13,
    Sz32K = 14,
    Sz64K = 15,
    Sz128K = 16,
    Sz256K = 17,
    Sz512K = 18,
    Sz1M = 19,
    Sz2M = 20,
    Sz4M = 21,
    Sz8M = 22,
    Sz16M = 23,
    Sz32M = 24,
    Sz64M = 25,
    Sz128M = 26,
    Sz256M = 27,
    Sz512M = 28,
    Sz1G = 29,
    Sz2G = 30,
    Sz4G = 31,
}

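/// One ARMv7-M MPU region: base address, size, subregion-disable mask (SRD),
/// and access/execute permissions.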
#[derive(Debug)]
pub struct MemoryRegion {
    start: u32,
    size: MemoryRegionSize,
    subregions: u8,
    write: bool,
    execute: bool,
}

impl MemoryRegion {
    pub fn new(
        start: u32,
        size: MemoryRegionSize,
        subregions: u8,
        write: bool,
        execute: bool,
    ) -> MemoryRegion {
        MemoryRegion {
            start,
            size,
            subregions,
            write,
            execute,
        }
    }

    fn rbar(&self) -> u32 {
        // ADDR field only; VALID and REGION are left zero because RNR
        // selects the region. `start` must be aligned to the region size.
        self.start & 0xFFFFFF00
    }

    fn rasr(&self) -> u32 {
        (if self.execute { 0u32 } else { 1u32 << 28 })      // XN: disable instruction fetch
        | (if self.write { 0b011 } else { 0b010 } << 24)    // AP: 0b011 = RW, 0b010 = unprivileged read-only
        | (0b000111 << 16)                                  // TEX, S, C, B (normal writeback, shareable)
        | ((self.subregions as u32) << 8)                   // SRD
        | ((self.size as u32) << 1)                         // SIZE
        | 1 // ENABLE
    }
}

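/// The complete MPU configuration: the default regions plus any regions and
/// APPRAM segments added by the caller.
///
/// Illustrative usage (the page index, length, and permissions here are
/// made-up values, not ones taken from the kernel):
///
/// ```ignore
/// let mut map = MemoryMap::new();
/// // Map pages 0..4 of APPRAM read/write, no-execute.
/// map.add_segment(MemorySegment::new(0, 4, true, false));
/// map.configure_mpu();
/// ```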
#[derive(Debug)]
pub struct MemoryMap {
    segments: heapless::Vec<MemorySegment, MAX_SEGMENTS>,
    regions: heapless::Vec<MemoryRegion, MAX_REGIONS>,
}

impl MemoryMap {
    pub fn new() -> MemoryMap {
        MemoryMap {
            segments: heapless::Vec::new(),
            regions: heapless::Vec::new(),
        }
    }

    /// Add a segment; panics if more than `MAX_SEGMENTS` have been added.
    pub fn add_segment(&mut self, ms: MemorySegment) {
        self.segments.push(ms).expect("segments full");
    }

    /// Add a raw region; panics if more than `MAX_REGIONS` have been added.
    pub fn add_region(&mut self, mr: MemoryRegion) {
        self.regions.push(mr).expect("regions full");
    }

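    /// Program the MPU: defaults first, then added regions, then segments.
    /// Leftover hardware regions are disabled so entries from an earlier
    /// configuration cannot linger.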
    pub fn configure_mpu(&self) {
        critical_section::with(|cs| unsafe {
            let mut mpu_dev = MPU_DEV.borrow_ref_mut(cs);
            if let Some(ref mut mpu) = *mpu_dev {
                // Check the total up front, before touching any region slot.
                let total =
                    DEFAULT_MEMORY_REGIONS.len() + self.regions.len() + self.segments.len();
                assert!(total <= 8, "Too many MPU regions: {}", total);

                let mut ri = 0;
                for region in DEFAULT_MEMORY_REGIONS.iter().chain(&self.regions) {
                    mpu.rnr.write(ri);
                    mpu.rbar.write(region.rbar());
                    mpu.rasr.write(region.rasr());
                    ri += 1;
                }
                for segment in &self.segments {
                    let region = segment.as_region();
                    mpu.rnr.write(ri);
                    mpu.rbar.write(region.rbar());
                    mpu.rasr.write(region.rasr());
                    ri += 1;
                }
                // Disable any slots left over from a previous configuration.
                while ri < 8 {
                    mpu.rnr.write(ri);
                    mpu.rasr.write(0);
                    ri += 1;
                }

                // Barriers so the new protection map takes effect before any
                // subsequent memory access.
                cortex_m::asm::dsb();
                cortex_m::asm::isb();
            }
        })
    }
}

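/// Enable the MPU and stash the peripheral so [`MemoryMap::configure_mpu`]
/// can reprogram it later.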
pub fn init_mpu(mpu: cortex_m::peripheral::MPU) {
    unsafe {
        // ENABLE = 1, HFNMIENA = 0 (MPU disabled during HardFault and NMI),
        // PRIVDEFENA = 1 (privileged code falls back to the default memory
        // map where no region matches).
        mpu.ctrl.write(0b101);
        cortex_m::asm::dsb();
        cortex_m::asm::isb();
    }
    critical_section::with(|cs| {
        let mut mpu_dev = MPU_DEV.borrow_ref_mut(cs);
        *mpu_dev = Some(mpu);
    })
}