path: root/src/mm
author    Tianhao Wang <shrik3@mailbox.org>  2024-06-05 23:01:19 +0200
committer Tianhao Wang <shrik3@mailbox.org>  2024-06-11 15:17:14 +0200
commit    38883485c80841f15365d0502418dcc224f01d45 (patch)
tree      70f49473adccf65d7057570663c095fed8940165 /src/mm
parent    bfe92f51f79f367354a933b78ec2b4e9d5336119 (diff)
mm: use linked-list-allocator as kmalloc
I'll implement my own allocator later. For now, use the linked-list allocator [1]
to manage the kernel heap (as in kmalloc, not vmalloc). It manages the ID-mapped
region (from VA 0xffff_8000_0000_0000). The allocator is initialized to use the
_largest_ physical memory block; if the kernel image (text and data) lives in
this block, the occupied part is skipped.

Key differences between kmalloc and vmalloc:

- kmalloc essentially manages physical memory: the allocated addresses lie
  within the ID-mapped region (see above), so the allocated memory is also
  contiguous in physical memory. Such memory MUST NOT page fault. This is
  prone to fragmentation, so do not use kmalloc to allocate big objects
  (e.g. bigger than one 4K page).
- vmalloc manages kernel heap memory whose mapping is handled by paging.
  Such memory may trigger page faults in kernel mode.

Note that kmalloc conflicts with the previously used stack-based PMA, as they
operate on the same VM zone.

References:
[1] https://github.com/rust-osdev/linked-list-allocator

Signed-off-by: Tianhao Wang <shrik3@mailbox.org>
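[Editor's note] A minimal sketch (not part of the commit) of the heap setup the
message above describes, using the linked_list_allocator crate's LockedHeap.
The function init_kernel_heap and its parameters are placeholders; the actual
init() in the diff below derives the region from the multiboot memory map and
translates it with P2V.

	use linked_list_allocator::LockedHeap;

	#[global_allocator]
	static ALLOCATOR: LockedHeap = LockedHeap::empty();

	/// Register the largest ID-mapped physical block as the kernel heap.
	/// `heap_start` / `heap_size` stand in for the values computed from the
	/// multiboot memory map in the real init().
	pub fn init_kernel_heap(heap_start: *mut u8, heap_size: usize) {
		// Safety: the caller guarantees [heap_start, heap_start + heap_size)
		// is valid, ID-mapped RAM owned by nothing else (kernel image excluded).
		unsafe {
			ALLOCATOR.lock().init(heap_start, heap_size);
		}
	}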
Diffstat (limited to 'src/mm')
-rw-r--r--  src/mm/mod.rs  58
-rw-r--r--  src/mm/pma.rs   6
2 files changed, 47 insertions, 17 deletions
diff --git a/src/mm/mod.rs b/src/mm/mod.rs
index 3ed6b19..a483a90 100644
--- a/src/mm/mod.rs
+++ b/src/mm/mod.rs
@@ -2,15 +2,17 @@ use crate::defs::*;
use crate::io::*;
use crate::machine::multiboot;
use core::ops::Range;
+use linked_list_allocator::LockedHeap;
+
pub mod pma;
use lazy_static::lazy_static;
use spin::Mutex;
-lazy_static! {
- pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
- Mutex::new(pma::PageStackAllocator::new());
-}
+#[global_allocator]
+static ALLOCATOR: LockedHeap = LockedHeap::empty();
+
+/// half measure: simply initialize the linkedlist allocator
pub fn init() {
let mbi = multiboot::get_mb_info().unwrap();
let mmapinfo = unsafe { mbi.get_mmap() }.unwrap();
@@ -18,7 +20,8 @@ pub fn init() {
let buf_len = mmapinfo.mmap_length;
let buf_end = buf_start + buf_len;
let mut curr = buf_start as u64;
- let mut inserted = 0;
+ // initialize the heap allocator with the largest physical memory block
+ let mut largest_phy_range: Option<Range<u64>> = None;
loop {
if curr >= buf_end as u64 {
break;
@@ -29,25 +32,46 @@ pub fn init() {
if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM {
continue;
}
- if mblock.get_end() <= pmap_kernel_start() {
+ if mblock.get_end() <= unsafe { pmap_kernel_start() } {
continue;
}
// TODO early break if the array is already full
let mut r = mblock.get_range();
- if mblock.get_range().contains(&pmap_kernel_end()) {
- r.start = pmap_kernel_end();
+ if r.contains(&unsafe { pmap_kernel_end() }) {
+ assert!(
+ r.contains(&unsafe { pmap_kernel_start() }),
+ "FATAL: kernel physical map cross physical blocks, how?"
+ );
+ r.start = unsafe { pmap_kernel_end() };
+ }
+ match largest_phy_range {
+ None => largest_phy_range = Some(r),
+ Some(ref lr) => {
+ if (r.end - r.start) > (lr.end - lr.start) {
+ largest_phy_range = Some(r);
+ }
+ }
}
- inserted += GLOBAL_PMA.lock().insert_range(&r);
}
+ let pr = &largest_phy_range.expect("Can't find any available physical block");
+ assert!((pr.end - pr.start) >= Mem::MIN_PHY_MEM, "TOO LITTLE RAM ...");
+ // init heap allocator on id map
+ unsafe {
+ ALLOCATOR.lock().init(
+ P2V(pr.start).unwrap() as *mut u8,
+ (pr.end - pr.start) as usize,
+ );
+ }
println!(
- "[init] pma: kernel loaded at phy: {:#X} - {:#X}",
- pmap_kernel_start(),
- pmap_kernel_end()
- );
- println!(
- "[init] pma: {:#X} KiB free memory, {:#X} frames inserted",
- inserted * 0x4,
- inserted,
+ "[init] mm: heap alloc initialized @ {:#X} - {:#X}",
+ P2V(pr.start).unwrap(),
+ P2V(pr.end).unwrap()
);
}
+
+/// populate the physical frame pool. This conflicts with the kernel heap allocator (kmalloc),
+/// which operates on the id map regions.
+pub fn _init_pma() {
+ todo!()
+}
diff --git a/src/mm/pma.rs b/src/mm/pma.rs
index 7111137..359e406 100644
--- a/src/mm/pma.rs
+++ b/src/mm/pma.rs
@@ -8,6 +8,12 @@ extern "C" {
fn ___FREE_PAGE_STACK__();
}
+// disabled for now
+// lazy_static! {
+// pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
+// Mutex::new(pma::PageStackAllocator::new());
+// }
+
/// There should only be one global instance of this.
pub struct PageStackAllocator {
page_stack: &'static mut [u64],
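
[Editor's note] For context, once a #[global_allocator] like the one above is
initialized, heap types from the alloc crate become usable in the no_std
kernel. A small usage sketch (assuming `extern crate alloc` in the crate
root); per the commit message, kmalloc allocations should stay well below one
4K page:

	extern crate alloc;
	use alloc::boxed::Box;
	use alloc::vec::Vec;

	fn kmalloc_smoke_test() {
		// Small allocations only: kmalloc memory is ID-mapped and physically
		// contiguous, so large requests worsen fragmentation.
		let answer = Box::new(42u64);
		let mut buf: Vec<u8> = Vec::with_capacity(64);
		buf.extend_from_slice(b"kernel heap works");
		assert_eq!(*answer, 42);
		assert_eq!(buf.len(), 17);
	}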