author    Tianhao Wang <shrik3@mailbox.org>  2024-05-29 19:53:52 +0200
committer Tianhao Wang <shrik3@mailbox.org>  2024-06-11 15:17:11 +0200
commit    cd658673a35df8b0da3551e819e26d35c18b89f2 (patch)
tree      74c209b519290eb43545e800b88ddf9dfa796a7a /src/mm
parent    9cf85e88211512b0410f9bb9f2f19ea4ce9a8190 (diff)
mm: add stack based PMA
use 8MiB reserved array to manage up to 4GiB of physical memory
(4K pages only)

Signed-off-by: Tianhao Wang <shrik3@mailbox.org>
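The 8MiB figure follows directly from the parameters above: 4GiB of physical
memory holds 2^20 4K frames, and the stack stores one u64 (8 bytes) per free
frame, so 2^20 * 8B = 8MiB. A minimal sketch of that arithmetic, with
illustrative constant names (the patch itself only defines STACK_SIZE in
pma.rs):

    // Sizing sketch: why an 8 MiB reserved array covers 4 GiB of 4K frames.
    const PAGE_SIZE: u64 = 0x1000; // 4 KiB per frame
    const PHYS_MAX: u64 = 0x1_0000_0000; // 4 GiB of physical memory
    const SLOTS: u64 = PHYS_MAX / PAGE_SIZE; // 0x100000 = 2^20 frames
    const STACK_BYTES: u64 = SLOTS * 8; // one u64 address per slot

    fn main() {
        assert_eq!(SLOTS, 0x100000); // matches STACK_SIZE in pma.rs
        assert_eq!(STACK_BYTES, 8 * 1024 * 1024); // 8 MiB
    }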
Diffstat (limited to 'src/mm')
-rw-r--r--  src/mm/heap_allocator.rs |  1
-rw-r--r--  src/mm/mod.rs            | 51
-rw-r--r--  src/mm/pma.rs            | 99
3 files changed, 102 insertions(+), 49 deletions(-)
diff --git a/src/mm/heap_allocator.rs b/src/mm/heap_allocator.rs
deleted file mode 100644
index 9eb12a3..0000000
--- a/src/mm/heap_allocator.rs
+++ /dev/null
@@ -1 +0,0 @@
-struct HeapAllocator;
diff --git a/src/mm/mod.rs b/src/mm/mod.rs
index b030dfb..27737e8 100644
--- a/src/mm/mod.rs
+++ b/src/mm/mod.rs
@@ -1,2 +1,51 @@
-pub mod heap_allocator;
+use crate::defs::*;
+use crate::io::*;
+use crate::machine::multiboot;
 pub mod pma;
+
+use lazy_static::lazy_static;
+use spin::Mutex;
+lazy_static! {
+    pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
+        Mutex::new(pma::PageStackAllocator::new());
+}
+
+pub fn init() {
+    let mbi = multiboot::get_mb_info().unwrap();
+    let mmapinfo = unsafe { mbi.get_mmap() }.unwrap();
+    let buf_start = mmapinfo.mmap_addr;
+    let buf_len = mmapinfo.mmap_length;
+    let buf_end = buf_start + buf_len;
+    let mut curr = buf_start as u64;
+    let mut inserted = 0;
+    loop {
+        if curr >= buf_end as u64 {
+            break;
+        }
+        let mblock = unsafe { &*(curr as *const multiboot::MultibootMmap) };
+        curr += mblock.size as u64;
+        curr += 4;
+        if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM {
+            continue;
+        }
+        if mblock.get_end() <= pmap_kernel_start() {
+            continue;
+        }
+        // TODO early break if the array is already full
+        if mblock.get_range().contains(pmap_kernel_end()) {
+            let r = Range {
+                addr: pmap_kernel_end(),
+                len: mblock.get_end() - pmap_kernel_end(),
+            };
+            inserted += GLOBAL_PMA.lock().insert_range(r);
+        } else {
+            inserted += GLOBAL_PMA.lock().insert_range(mblock.get_range());
+        }
+        println!(
+            "pma init: {:#X}KiB free memory, {:#X} pages inserted from block {:#X?}",
+            inserted * 0x4,
+            inserted,
+            mblock,
+        );
+    }
+}
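A note on the entry walk in init() above: in the Multiboot (v1) memory map,
each entry's size field counts the rest of the entry but not the field
itself, which is why the cursor advances by mblock.size + 4. A hypothetical
layout sketch; the real definition lives in this repo's multiboot module,
and only the size and mtype names appear in the patch:

    // Hypothetical Multiboot v1 mmap entry layout (field names assumed).
    // `size` excludes itself, so the next entry sits at curr + size + 4.
    #[repr(C, packed)]
    struct MmapEntrySketch {
        size: u32,      // bytes in the rest of this entry
        base_addr: u64, // physical start of the region
        length: u64,    // region length in bytes
        mtype: u32,     // 1 == available RAM (MTYPE_RAM)
    }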
diff --git a/src/mm/pma.rs b/src/mm/pma.rs
index f9a6811..a4c27c4 100644
--- a/src/mm/pma.rs
+++ b/src/mm/pma.rs
@@ -1,67 +1,72 @@
-use crate::defs::{self, Mem};
-use core::ffi::c_void;
-use core::{ptr, slice};
-// this is POC code, it will be ugly
+use crate::defs::*;
+use crate::io::*;
+use crate::machine::multiboot::MultibootMmap;
+use core::slice;
 
 extern "C" {
-    pub fn ___KERNEL_END__();
+    fn ___FREE_PAGE_STACK__();
 }
 
-type BitU8 = u8;
-/// Bitmap for physical frames
-pub struct FMap {
-    pub bm: &'static mut [BitU8],
-    // skip over the kernel image and the bitmap itself.
-    pub skip_byte: usize,
+/// There should only be one global instance of this.
+pub struct PageStackAllocator {
+    page_stack: &'static mut [u64],
+    size: usize,
+    head: usize,
 }
 
-pub enum PMAError {
-    DoubleFree,
-}
+impl PageStackAllocator {
+    // covering 4GiB physical memory of 4K frames
+    const STACK_SIZE: usize = 0x100000;
 
-impl FMap {
     pub fn new() -> Self {
-        let map_start = ___KERNEL_END__ as usize;
-        let fmap = Self {
-            bm: unsafe { slice::from_raw_parts_mut(map_start as *mut u8, Mem::PHY_BM_SIZE) },
-            // looks ugly, perhaps FIXME
-            // We'll waste several frames for the sake of easy alignment
-            skip_byte: 1 + ((map_start >> Mem::PAGE_SHIFT) / 8),
+        let ps = Self {
+            page_stack: unsafe {
+                slice::from_raw_parts_mut(
+                    ___FREE_PAGE_STACK__ as usize as *mut u64,
+                    Self::STACK_SIZE,
+                )
+            },
+            size: Self::STACK_SIZE,
+            head: 0,
         };
-        fmap
+        return ps;
     }
 
-    /// return : index to the bitmap u8 , bit mask to retrive the bit.
-    fn locate_bit(addr: usize) -> Option<(usize, u8)> {
-        if addr >= Mem::PHY_TOP {
-            return None;
+    /// push an addr into the free page stack
+    /// MUST be atomic or bad things happen...
+    pub fn free_page(&mut self, addr: u64) -> bool {
+        if self.head >= self.size {
+            return false;
         }
-        let pn = addr >> Mem::PAGE_SHIFT;
-        let idx = pn / 8;
-        let mask: u8 = 1 << (pn % 8);
-        Some((idx, mask))
+        self.page_stack[self.head] = addr;
+        self.head += 1;
+        return true;
     }
 
-    pub fn alloc_frame(&mut self) -> usize {
-        for i in self.skip_byte..self.bm.len() {
-            if self.bm[i] == 0xff {
-                continue;
-            }
-            todo!()
+    pub fn alloc_page(&mut self) -> Option<u64> {
+        if self.head == 0 {
+            return None;
         }
-        0
+        self.head -= 1;
+        Some(self.page_stack[self.head])
     }
 
-    pub fn dealloc_frame(&mut self) -> Result<(), PMAError> {
-        Ok(())
-    }
-
-    pub fn init(&mut self) {
-        for i in 0..self.skip_byte {
-            self.bm[i] = 0xff;
-        }
-        for i in self.skip_byte..self.bm.len() {
-            self.bm[i] = 0;
+    /// 4k page only
+    pub fn insert_range(&mut self, r: Range) -> u64 {
+        let mut inserted = 0;
+        let mut page = roundup_4k(r.addr);
+        loop {
+            if !r.contains(page) {
+                break;
+            }
+            if !self.free_page(page) {
+                break;
+            } else {
+                println!("inserted: {:#X}", page);
+                inserted += 1;
+            }
+            page += 0x1000;
         }
+        return inserted;
     }
 }
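Taken together, PageStackAllocator is a LIFO free list: insert_range seeds it
one 4K page at a time, alloc_page pops the most recently freed frame, and
free_page pushes one back. A usage sketch against the GLOBAL_PMA defined in
mod.rs, assuming the locking shown there (not part of the patch):

    // Usage sketch: pop a frame, use it, push it back.
    fn with_one_frame() {
        if let Some(frame) = GLOBAL_PMA.lock().alloc_page() {
            // ... map `frame` into a page table, zero it, etc. ...
            let ok = GLOBAL_PMA.lock().free_page(frame);
            debug_assert!(ok); // false only if the stack is somehow full
        }
    }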