Diffstat (limited to 'src/mm')
-rw-r--r--  src/mm/mod.rs  58
-rw-r--r--  src/mm/pma.rs   6
2 files changed, 47 insertions, 17 deletions
diff --git a/src/mm/mod.rs b/src/mm/mod.rs
index 3ed6b19..a483a90 100644
--- a/src/mm/mod.rs
+++ b/src/mm/mod.rs
@@ -2,15 +2,17 @@ use crate::defs::*;
use crate::io::*;
use crate::machine::multiboot;
use core::ops::Range;
+use linked_list_allocator::LockedHeap;
+
pub mod pma;
use lazy_static::lazy_static;
use spin::Mutex;
-lazy_static! {
-    pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
-        Mutex::new(pma::PageStackAllocator::new());
-}
+#[global_allocator]
+static ALLOCATOR: LockedHeap = LockedHeap::empty();
+
+/// half measure: simply initialize the linked-list allocator
pub fn init() {
    let mbi = multiboot::get_mb_info().unwrap();
    let mmapinfo = unsafe { mbi.get_mmap() }.unwrap();
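
Note (not part of this commit): once ALLOCATOR has been initialized in init() below,
the #[global_allocator] above lets the kernel use the alloc crate. A minimal sketch of
that usage, assuming `extern crate alloc` in the crate root and an allocation error
handler supplied by the toolchain or elsewhere in the kernel:

    // Sketch only: illustrative use of the kernel heap after ALLOCATOR.lock().init(..).
    extern crate alloc;
    use alloc::{boxed::Box, vec::Vec};

    fn heap_smoke_test() {
        let b = Box::new(0xdead_beef_u64); // a single boxed value on the kernel heap
        let mut v: Vec<u64> = Vec::with_capacity(8); // growable buffer backed by the heap
        v.push(*b);
        assert_eq!(v[0], 0xdead_beef);
    }
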
@@ -18,7 +20,8 @@ pub fn init() {
    let buf_len = mmapinfo.mmap_length;
    let buf_end = buf_start + buf_len;
    let mut curr = buf_start as u64;
-    let mut inserted = 0;
+    // initialize the heap allocator with the largest physical memory block
+    let mut largest_phy_range: Option<Range<u64>> = None;
    loop {
        if curr >= buf_end as u64 {
            break;
        }
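
Note (outside these hunks): the loop above walks the multiboot memory map as a raw
buffer; the advance step is not shown in this diff. For multiboot1-style maps each
entry carries a `size` field that does not count itself, so the walk typically looks
like the sketch below. The struct and field names here are illustrative, not the
definitions from crate::machine::multiboot.

    // Illustrative sketch of iterating a multiboot1 memory map buffer.
    #[repr(C, packed)]
    struct MmapEntrySketch {
        size: u32,      // bytes in the rest of this entry, excluding this field
        base_addr: u64, // physical start of the region
        length: u64,    // region length in bytes
        mtype: u32,     // 1 == available RAM
    }

    unsafe fn walk_mmap_sketch(buf_start: u64, buf_len: u64) {
        let buf_end = buf_start + buf_len;
        let mut curr = buf_start;
        while curr < buf_end {
            let entry = &*(curr as *const MmapEntrySketch);
            // ... filter on entry.mtype and collect entry.base_addr .. +entry.length ...
            curr += entry.size as u64 + 4; // stored size excludes the size field itself
        }
    }
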
@@ -29,25 +32,46 @@ pub fn init() {
        if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM {
            continue;
        }
-        if mblock.get_end() <= pmap_kernel_start() {
+        if mblock.get_end() <= unsafe { pmap_kernel_start() } {
            continue;
        }
        // TODO early break if the array is already full
        let mut r = mblock.get_range();
-        if mblock.get_range().contains(&pmap_kernel_end()) {
-            r.start = pmap_kernel_end();
+        if r.contains(&unsafe { pmap_kernel_end() }) {
+            assert!(
+                r.contains(&unsafe { pmap_kernel_start() }),
+                "FATAL: kernel physical map crosses physical blocks, how?"
+            );
+            r.start = unsafe { pmap_kernel_end() };
+        }
+        match largest_phy_range {
+            None => largest_phy_range = Some(r),
+            Some(ref lr) => {
+                if (r.end - r.start) > (lr.end - lr.start) {
+                    largest_phy_range = Some(r);
+                }
+            }
        }
-        inserted += GLOBAL_PMA.lock().insert_range(&r);
    }
+    let pr = &largest_phy_range.expect("Can't find any available physical block");
+    assert!((pr.end - pr.start) >= Mem::MIN_PHY_MEM, "TOO LITTLE RAM ...");
+    // init heap allocator on id map
+    unsafe {
+        ALLOCATOR.lock().init(
+            P2V(pr.start).unwrap() as *mut u8,
+            (pr.end - pr.start) as usize,
+        );
+    }
    println!(
-        "[init] pma: kernel loaded at phy: {:#X} - {:#X}",
-        pmap_kernel_start(),
-        pmap_kernel_end()
-    );
-    println!(
-        "[init] pma: {:#X} KiB free memory, {:#X} frames inserted",
-        inserted * 0x4,
-        inserted,
+        "[init] mm: heap alloc initialized @ {:#X} - {:#X}",
+        P2V(pr.start).unwrap(),
+        P2V(pr.end).unwrap()
    );
}
+
+/// populate the physical frame pool. This conflicts with the kernel heap allocator (kmalloc),
+/// which operates on the id map regions.
+pub fn _init_pma() {
+    todo!()
+}
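
Note (not part of this commit): init() now places the heap on the identity/offset map,
so P2V(pr.start) is the kernel-virtual view of the single physical block that backs all
heap allocations. The largest-block selection itself is an open-coded max scan; if the
mmap entries were exposed as an iterator of ranges, the same choice could be written
with Iterator::max_by_key. A sketch under that assumption:

    use core::ops::Range;

    // Sketch: pick the longest range; returns None for an empty iterator.
    fn largest_range<I: Iterator<Item = Range<u64>>>(ranges: I) -> Option<Range<u64>> {
        ranges.max_by_key(|r| r.end - r.start)
    }

On ties, max_by_key keeps the last candidate while the open-coded loop keeps the first;
for choosing a heap region either behavior is acceptable.
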
diff --git a/src/mm/pma.rs b/src/mm/pma.rs
index 7111137..359e406 100644
--- a/src/mm/pma.rs
+++ b/src/mm/pma.rs
@@ -8,6 +8,12 @@ extern "C" {
    fn ___FREE_PAGE_STACK__();
}
+// disabled for now
+// lazy_static! {
+// pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
+// Mutex::new(pma::PageStackAllocator::new());
+// }
+
/// There should only be one global instance of this.
pub struct PageStackAllocator {
    page_stack: &'static mut [u64],
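
Note (hypothetical, not the pma.rs API): a page-stack allocator of this shape usually
hands out 4 KiB frames by popping physical addresses off the stack and takes them back
by pushing. The method and field names below are assumptions for illustration only.

    // Hypothetical sketch of a page-stack frame allocator; not code from pma.rs.
    pub struct PageStackSketch {
        stack: &'static mut [u64], // each slot holds the physical address of a free frame
        len: usize,                // number of valid entries currently on the stack
    }

    impl PageStackSketch {
        /// Pop a free frame, or None once the pool is exhausted.
        pub fn alloc_page(&mut self) -> Option<u64> {
            if self.len == 0 {
                return None;
            }
            self.len -= 1;
            Some(self.stack[self.len])
        }

        /// Push a frame back; returns false if the backing array is already full.
        pub fn free_page(&mut self, frame: u64) -> bool {
            if self.len == self.stack.len() {
                return false;
            }
            self.stack[self.len] = frame;
            self.len += 1;
            true
        }
    }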