From 38883485c80841f15365d0502418dcc224f01d45 Mon Sep 17 00:00:00 2001 From: Tianhao Wang Date: Wed, 5 Jun 2024 23:01:19 +0200 Subject: mm: use linked-list-allocator as kmalloc I'll implement my own allocator later. Currently using the linked-list allocator [1] to manage the kernel heap (as in kmalloc, not vmalloc). It manages the ID-mapped region (from VA 0xffff_8000_0000_0000). This allocator is initialized to use the _largest_ physical memory block. If the kernel image (text and data) lives in this zone, then skip the occupied part. Key differences between kmalloc and vmalloc: - kmalloc pretty much manages the physical memory: the allocated addresses are within the id-mapped region (see above), therefore the allocated memory must also be contiguous in physical memory. Such memory MUST NOT page fault. This is prone to fragmentation, so do not use kmalloc to allocate big objects (e.g. bigger than one 4k page). - vmalloc manages kernel heap memory and the mapping is managed by paging. Such memory could trigger page faults in kernel mode. Note that kmalloc conflicts with the previously used stack-based PMA as they operate on the same VM zone. 
References: [1] https://github.com/rust-osdev/linked-list-allocator Signed-off-by: Tianhao Wang --- Cargo.lock | 19 ++++++++++++++++ Cargo.toml | 2 ++ README.md | 6 +++-- boot/startup-x86_64.s | 8 +++---- defs/x86_64-hm-linker.ld | 1 - src/defs.rs | 29 ++++++++++++++---------- src/lib.rs | 34 +++++++++++++++------------- src/mm/mod.rs | 58 ++++++++++++++++++++++++++++++++++-------------- src/mm/pma.rs | 6 +++++ 9 files changed, 112 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0bf2326..cfc304a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,6 +23,15 @@ dependencies = [ "spin 0.5.2", ] +[[package]] +name = "linked_list_allocator" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" +dependencies = [ + "spinning_top", +] + [[package]] name = "lock_api" version = "0.4.12" @@ -39,6 +48,7 @@ version = "0.1.0" dependencies = [ "bitflags", "lazy_static", + "linked_list_allocator", "spin 0.9.8", ] @@ -62,3 +72,12 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" dependencies = [ "lock_api", ] + +[[package]] +name = "spinning_top" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0" +dependencies = [ + "lock_api", +] diff --git a/Cargo.toml b/Cargo.toml index b858d96..1d96a55 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,8 @@ license = "eupl-1.2" [dependencies] spin = "0.9.8" bitflags = "2.4.2" +# TODO make my own heap allocator +linked_list_allocator = "0.10.5" [dependencies.lazy_static] version = "1.4" diff --git a/README.md b/README.md index 60f4228..c1daa86 100644 --- a/README.md +++ b/README.md @@ -20,8 +20,10 @@ shitty code, I'm a rust beginner. - [X] Setting up CGA display, print something (hello world) - [X] Intigrate print into rust println! etc. 
- [X] Keyboard controller and input handler -- [?] Interrupt handler (WIP) -- [ ] intrrupt sync (pro-/epilogue model) +- [X] Interrupt handler +- [X] kmalloc (using the [linked-list-allocator](https://github.com/rust-osdev/linked-list-allocator)) + TODO: implement my own +- [?] interrupt sync (pro-/epilogue model) - [?] Threading (WIP) - [ ] Scheduler (single CPU) - [ ] Timer Interrupt diff --git a/boot/startup-x86_64.s b/boot/startup-x86_64.s index 2883996..bbd7172 100644 --- a/boot/startup-x86_64.s +++ b/boot/startup-x86_64.s @@ -274,7 +274,7 @@ pdp1: pt_end: ; reserve 8MiB for frame alloc. ; (see linker file) -[SECTION .global_free_page_stack] -free_page_stack: - resb 8388608 - alignb 4096 +;[SECTION .global_free_page_stack] +;free_page_stack: +; resb 8388608 +; alignb 4096 diff --git a/defs/x86_64-hm-linker.ld b/defs/x86_64-hm-linker.ld index c8a213c..fab0699 100644 --- a/defs/x86_64-hm-linker.ld +++ b/defs/x86_64-hm-linker.ld @@ -73,7 +73,6 @@ SECTIONS .t32 : { *(".text32") - *(".text.interrupt_gate") } . = . + KERNEL_OFFSET; diff --git a/src/defs.rs b/src/defs.rs index af05da5..d9a15b5 100644 --- a/src/defs.rs +++ b/src/defs.rs @@ -1,5 +1,3 @@ -// exported symbols from asm/linker. -// They are always unsafe. 
extern "C" { fn ___KERNEL_PM_START__(); fn ___KERNEL_PM_END__(); @@ -7,25 +5,29 @@ extern "C" { fn ___BSS_END__(); } +// ANY ADDRESS FROM PHYSICAL MAPPING IS UNSAFE BECAUSE THE LOW MEMORY MAPPING +// WILL BE DROPPED FOR USERSPACE +// TODO: create VMAs in the MM struct #[inline] -pub fn pmap_kernel_start() -> u64 { +pub unsafe fn pmap_kernel_start() -> u64 { ___KERNEL_PM_START__ as u64 } #[inline] -pub fn pmap_kernel_end() -> u64 { +pub unsafe fn pmap_kernel_end() -> u64 { ___KERNEL_PM_END__ as u64 } #[inline] -pub fn vmap_kernel_start() -> u64 { +pub unsafe fn vmap_kernel_start() -> u64 { pmap_kernel_start() + Mem::KERNEL_OFFSET } #[inline] -pub fn vmap_kernel_end() -> u64 { +pub unsafe fn vmap_kernel_end() -> u64 { pmap_kernel_end() + Mem::KERNEL_OFFSET } +// ABOVE ONLY VALID BEFORE DROPPING LOWER MEMORY MAPPING -----// #[inline] pub fn bss_start() -> u64 { @@ -53,8 +55,6 @@ impl Mem { pub const K: u64 = 1024; pub const M: u64 = 1024 * Mem::K; pub const G: u64 = 1024 * Mem::M; - // physical memory layout: qemu defaults to 128 MiB phy Memory - pub const PHY_TOP: u64 = 128 * Mem::M; // 4 lv 4K paging pub const PAGE_SIZE: u64 = 0x1000; pub const PAGE_SHIFT: u64 = 12; @@ -67,16 +67,21 @@ impl Mem { pub const L2_MASK: u64 = 0x1ff << Mem::L2_SHIFT; pub const L3_SHIFT: u8 = 12; pub const L3_MASK: u64 = 0x1ff << Mem::L3_SHIFT; - pub const PHY_PAGES: u64 = Mem::PHY_TOP >> Mem::PAGE_SHIFT; + // 64 GiB available memory + pub const MAX_PHY_MEM: u64 = 0x1000000000; + // we should have at least 64 MiB free physical memory (excluding the kernel itself) + pub const MIN_PHY_MEM: u64 = 64 * Self::M; // size of frame allocator bitmap: number of physical frames / 8 for 128M // memory (37268) 4k pages, 37268 bits are needed, hence // 4096 bytes, exactly one page! 
- pub const PHY_BM_SIZE: u64 = Mem::PHY_PAGES >> 3; pub const ID_MAP_START: u64 = 0xffff_8000_0000_0000; pub const ID_MAP_END: u64 = 0xffff_8010_0000_0000; + // kernel image:0xffff_8020_0000_0000 ~ 0xffff_802f_0000_0000; pub const KERNEL_OFFSET: u64 = 0xffff_8020_0000_0000; - // 64 GiB available memory - pub const MAX_PHY_MEM: u64 = 0x1000000000; + // kernel heap: 0xffff_8030_0000_0000 ~ 0xffff_803f_0000_0000; + // (64 GiB) + pub const KERNEL_HEAP_START: u64 = 0xffff_8030_0000_0000; + pub const KERNEL_HEAP_END: u64 = 0xffff_8040_0000_0000; } // convert VA <-> PA wrt. the kernel id mapping diff --git a/src/lib.rs b/src/lib.rs index fb68168..99478be 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,6 +11,8 @@ mod machine; mod mm; use crate::machine::key::Modifiers; mod proc; +extern crate alloc; +use alloc::vec::Vec; use arch::x86_64::interrupt; use arch::x86_64::interrupt::pic_8259; use arch::x86_64::interrupt::pic_8259::PicDeviceInt; @@ -32,38 +34,40 @@ pub extern "C" fn _entry() -> ! { io::set_attr(0x1f); io::clear_screen(); assert!(multiboot::check(), "bad multiboot info from grub!"); - let mbi = multiboot::get_mb_info().expect("bad multiboot info flags"); - let mem = unsafe { mbi.get_mem() }.unwrap(); - println!( - "[init] available memory: lower {:#X} KiB, upper:{:#X} KiB", - mem.lower(), - mem.upper() - ); - mm::init(); + // check mbi now. This will be later used to initialize the allocator + let _mbi = multiboot::get_mb_info().expect("bad multiboot info flags"); + // initialize the idt and re-program the pic. Must do this before enabling irq + // also must initialize the idt before mm, because the latter may trigger page faults, which is + // fatal and we want to catch them during system initialization. 
interrupt::init(); - pic_8259::allow(PicDeviceInt::KEYBOARD); - interrupt::interrupt_enable(); + mm::init(); println!( "[init] kernel mapped @ {:#X} - {:#X}", - vmap_kernel_start(), - vmap_kernel_end(), + unsafe { vmap_kernel_start() }, + unsafe { vmap_kernel_end() }, ); println!( "[init] BSS mapped @ {:#X} - {:#X}", bss_start(), bss_end() ); - - // io::print_welcome(); - // busy loop query keyboard + interrupt::interrupt_enable(); + pic_8259::allow(PicDeviceInt::KEYBOARD); + let mut test_vec = Vec::<&str>::new(); + test_vec.push("hello "); + test_vec.push("world"); + for s in test_vec.iter() { + println!("{s}"); + } loop { io::KBCTL_GLOBAL.lock().fetch_key(); if let Some(k) = io::KBCTL_GLOBAL.lock().consume_key() { println! {"key: {:?}", k} } } + // test heap } pub unsafe fn _test_pf() { diff --git a/src/mm/mod.rs b/src/mm/mod.rs index 3ed6b19..a483a90 100644 --- a/src/mm/mod.rs +++ b/src/mm/mod.rs @@ -2,15 +2,17 @@ use crate::defs::*; use crate::io::*; use crate::machine::multiboot; use core::ops::Range; +use linked_list_allocator::LockedHeap; + pub mod pma; use lazy_static::lazy_static; use spin::Mutex; -lazy_static! 
{ - pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> = - Mutex::new(pma::PageStackAllocator::new()); -} +#[global_allocator] +static ALLOCATOR: LockedHeap = LockedHeap::empty(); + +/// half measure: simply initialize the linked-list allocator pub fn init() { let mbi = multiboot::get_mb_info().unwrap(); let mmapinfo = unsafe { mbi.get_mmap() }.unwrap(); @@ -18,7 +20,8 @@ pub fn init() { let buf_len = mmapinfo.mmap_length; let buf_end = buf_start + buf_len; let mut curr = buf_start as u64; - let mut inserted = 0; + // initialize the heap allocator with the largest physical memory block + let mut largest_phy_range: Option<Range<u64>> = None; loop { if curr >= buf_end as u64 { break; } @@ -29,25 +32,46 @@ pub fn init() { if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM { continue; } - if mblock.get_end() <= pmap_kernel_start() { + if mblock.get_end() <= unsafe { pmap_kernel_start() } { continue; } // TODO early break if the array is already full let mut r = mblock.get_range(); - if mblock.get_range().contains(&pmap_kernel_end()) { - r.start = pmap_kernel_end(); + if r.contains(&unsafe { pmap_kernel_end() }) { + assert!( + r.contains(&unsafe { pmap_kernel_start() }), + "FATAL: kernel physical map crosses physical blocks, how?" 
+ ); + r.start = unsafe { pmap_kernel_end() }; + } + match largest_phy_range { + None => largest_phy_range = Some(r), + Some(ref lr) => { + if (r.end - r.start) > (lr.end - lr.start) { + largest_phy_range = Some(r); + } + } } - inserted += GLOBAL_PMA.lock().insert_range(&r); } + let pr = &largest_phy_range.expect("Can't find any available physical block"); + assert!((pr.end - pr.start) >= Mem::MIN_PHY_MEM, "TOO LITTLE RAM ..."); + // init heap allocator on id map + unsafe { + ALLOCATOR.lock().init( + P2V(pr.start).unwrap() as *mut u8, + (pr.end - pr.start) as usize, + ); + } println!( - "[init] pma: kernel loaded at phy: {:#X} - {:#X}", - pmap_kernel_start(), - pmap_kernel_end() - ); - println!( - "[init] pma: {:#X} KiB free memory, {:#X} frames inserted", - inserted * 0x4, - inserted, + "[init] mm: heap alloc initialized @ {:#X} - {:#X}", + P2V(pr.start).unwrap(), + P2V(pr.end).unwrap() ); } + +/// populate the physical frame pool. This conflicts with the kernel heap allocator (kmalloc), +/// which operates on the id map regions. +pub fn _init_pma() { + todo!() +} diff --git a/src/mm/pma.rs b/src/mm/pma.rs index 7111137..359e406 100644 --- a/src/mm/pma.rs +++ b/src/mm/pma.rs @@ -8,6 +8,12 @@ extern "C" { fn ___FREE_PAGE_STACK__(); } +// disabled for now +// lazy_static! { +// pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> = +// Mutex::new(pma::PageStackAllocator::new()); +// } + /// There should only be one global instance of this. pub struct PageStackAllocator { page_stack: &'static mut [u64], -- cgit v1.2.3-70-g09d2