-rw-r--r--  Cargo.lock                19
-rw-r--r--  Cargo.toml                 2
-rw-r--r--  README.md                  6
-rw-r--r--  boot/startup-x86_64.s      8
-rw-r--r--  defs/x86_64-hm-linker.ld   1
-rw-r--r--  src/defs.rs               29
-rw-r--r--  src/lib.rs                34
-rw-r--r--  src/mm/mod.rs             58
-rw-r--r--  src/mm/pma.rs              6
9 files changed, 112 insertions(+), 51 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 0bf2326..cfc304a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -24,6 +24,15 @@ dependencies = [
]
[[package]]
+name = "linked_list_allocator"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286"
+dependencies = [
+ "spinning_top",
+]
+
+[[package]]
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -39,6 +48,7 @@ version = "0.1.0"
dependencies = [
"bitflags",
"lazy_static",
+ "linked_list_allocator",
"spin 0.9.8",
]
@@ -62,3 +72,12 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
dependencies = [
"lock_api",
]
+
+[[package]]
+name = "spinning_top"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0"
+dependencies = [
+ "lock_api",
+]
diff --git a/Cargo.toml b/Cargo.toml
index b858d96..1d96a55 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,6 +8,8 @@ license = "eupl-1.2"
[dependencies]
spin = "0.9.8"
bitflags = "2.4.2"
+# TODO make my own heap allocator
+linked_list_allocator = "0.10.5"
[dependencies.lazy_static]
version = "1.4"
diff --git a/README.md b/README.md
index 60f4228..c1daa86 100644
--- a/README.md
+++ b/README.md
@@ -20,8 +20,10 @@ shitty code, I'm a rust beginner.
- [X] Setting up CGA display, print something (hello world)
- [X] Intigrate print into rust println! etc.
- [X] Keyboard controller and input handler
-- [?] Interrupt handler (WIP)
-- [ ] intrrupt sync (pro-/epilogue model)
+- [X] Interrupt handler
+- [X] kmalloc (using the [linked-list-allocator](https://github.com/rust-osdev/linked-list-allocator))
+  TODO: implement my own allocator
+- [?] interrupt sync (pro-/epilogue model)
- [?] Threading (WIP)
- [ ] Scheduler (single CPU)
- [ ] Timer Interrupt
diff --git a/boot/startup-x86_64.s b/boot/startup-x86_64.s
index 2883996..bbd7172 100644
--- a/boot/startup-x86_64.s
+++ b/boot/startup-x86_64.s
@@ -274,7 +274,7 @@ pdp1:
pt_end:
; reserve 8MiB for frame alloc.
; (see linker file)
-[SECTION .global_free_page_stack]
-free_page_stack:
- resb 8388608
- alignb 4096
+;[SECTION .global_free_page_stack]
+;free_page_stack:
+; resb 8388608
+; alignb 4096
diff --git a/defs/x86_64-hm-linker.ld b/defs/x86_64-hm-linker.ld
index c8a213c..fab0699 100644
--- a/defs/x86_64-hm-linker.ld
+++ b/defs/x86_64-hm-linker.ld
@@ -73,7 +73,6 @@ SECTIONS
.t32 :
{
*(".text32")
- *(".text.interrupt_gate")
}
. = . + KERNEL_OFFSET;
diff --git a/src/defs.rs b/src/defs.rs
index af05da5..d9a15b5 100644
--- a/src/defs.rs
+++ b/src/defs.rs
@@ -1,5 +1,3 @@
-// exported symbols from asm/linker.
-// They are always unsafe.
extern "C" {
fn ___KERNEL_PM_START__();
fn ___KERNEL_PM_END__();
@@ -7,25 +5,29 @@ extern "C" {
fn ___BSS_END__();
}
+// ANY ADDRESS FROM PHYSICAL MAPPING IS UNSAFE BECAUSE THE LOW MEMORY MAPPING
+// WILL BE DROPPED FOR USERSPACE
+// TODO: create VMAs in the MM struct
#[inline]
-pub fn pmap_kernel_start() -> u64 {
+pub unsafe fn pmap_kernel_start() -> u64 {
___KERNEL_PM_START__ as u64
}
#[inline]
-pub fn pmap_kernel_end() -> u64 {
+pub unsafe fn pmap_kernel_end() -> u64 {
___KERNEL_PM_END__ as u64
}
#[inline]
-pub fn vmap_kernel_start() -> u64 {
+pub unsafe fn vmap_kernel_start() -> u64 {
pmap_kernel_start() + Mem::KERNEL_OFFSET
}
#[inline]
-pub fn vmap_kernel_end() -> u64 {
+pub unsafe fn vmap_kernel_end() -> u64 {
pmap_kernel_end() + Mem::KERNEL_OFFSET
}
+// ABOVE ONLY VALID BEFORE DROPPING LOWER MEMORY MAPPING -----//
#[inline]
pub fn bss_start() -> u64 {
@@ -53,8 +55,6 @@ impl Mem {
pub const K: u64 = 1024;
pub const M: u64 = 1024 * Mem::K;
pub const G: u64 = 1024 * Mem::M;
- // physical memory layout: qemu defaults to 128 MiB phy Memory
- pub const PHY_TOP: u64 = 128 * Mem::M;
// 4 lv 4K paging
pub const PAGE_SIZE: u64 = 0x1000;
pub const PAGE_SHIFT: u64 = 12;
@@ -67,16 +67,21 @@ impl Mem {
pub const L2_MASK: u64 = 0x1ff << Mem::L2_SHIFT;
pub const L3_SHIFT: u8 = 12;
pub const L3_MASK: u64 = 0x1ff << Mem::L3_SHIFT;
- pub const PHY_PAGES: u64 = Mem::PHY_TOP >> Mem::PAGE_SHIFT;
+ // 64 GiB available memory
+ pub const MAX_PHY_MEM: u64 = 0x1000000000;
+ // we should have at least 64 MiB of free physical memory (excluding the kernel itself)
+ pub const MIN_PHY_MEM: u64 = 64 * Self::M;
// size of frame allocator bitmap: number of physical frames / 8 for 128M
// memory (37268) 4k pages, 37268 bits are needed, hence
// 4096 bytes, exactly one page!
- pub const PHY_BM_SIZE: u64 = Mem::PHY_PAGES >> 3;
pub const ID_MAP_START: u64 = 0xffff_8000_0000_0000;
pub const ID_MAP_END: u64 = 0xffff_8010_0000_0000;
+ // kernel image: 0xffff_8020_0000_0000 ~ 0xffff_802f_0000_0000;
pub const KERNEL_OFFSET: u64 = 0xffff_8020_0000_0000;
- // 64 GiB available memory
- pub const MAX_PHY_MEM: u64 = 0x1000000000;
+ // kernel heap: 0xffff_8030_0000_0000 ~ 0xffff_803f_0000_0000;
+ // (64 GiB)
+ pub const KERNEL_HEAP_START: u64 = 0xffff_8030_0000_0000;
+ pub const KERNEL_HEAP_END: u64 = 0xffff_8040_0000_0000;
}
// convert VA <-> PA wrt. the kernel id mapping
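Since ID_MAP_END - ID_MAP_START equals MAX_PHY_MEM (64 GiB), converting between physical addresses and the identity-mapped virtual window is just a bounds check plus a constant offset. A sketch consistent with the constants above; the real P2V/V2P in defs.rs may differ in detail:

#[allow(non_snake_case)]
pub fn P2V(pa: u64) -> Option<u64> {
	// only addresses inside the 64 GiB window have an identity mapping
	if pa < Mem::MAX_PHY_MEM {
		Some(pa + Mem::ID_MAP_START)
	} else {
		None
	}
}

#[allow(non_snake_case)]
pub fn V2P(va: u64) -> Option<u64> {
	if (Mem::ID_MAP_START..Mem::ID_MAP_END).contains(&va) {
		Some(va - Mem::ID_MAP_START)
	} else {
		None
	}
}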
diff --git a/src/lib.rs b/src/lib.rs
index fb68168..99478be 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,6 +11,8 @@ mod machine;
mod mm;
use crate::machine::key::Modifiers;
mod proc;
+extern crate alloc;
+use alloc::vec::Vec;
use arch::x86_64::interrupt;
use arch::x86_64::interrupt::pic_8259;
use arch::x86_64::interrupt::pic_8259::PicDeviceInt;
@@ -32,38 +34,40 @@ pub extern "C" fn _entry() -> ! {
io::set_attr(0x1f);
io::clear_screen();
assert!(multiboot::check(), "bad multiboot info from grub!");
- let mbi = multiboot::get_mb_info().expect("bad multiboot info flags");
- let mem = unsafe { mbi.get_mem() }.unwrap();
- println!(
- "[init] available memory: lower {:#X} KiB, upper:{:#X} KiB",
- mem.lower(),
- mem.upper()
- );
- mm::init();
+ // check mbi now. This will later be used to initialize the allocator
+ let _mbi = multiboot::get_mb_info().expect("bad multiboot info flags");
+ // initialize the idt and re-program the pic. Must do this before enabling irqs.
+ // the idt must also be initialized before mm, because the latter may trigger page faults, which
+ // are fatal, and we want to catch them during system initialization.
interrupt::init();
- pic_8259::allow(PicDeviceInt::KEYBOARD);
- interrupt::interrupt_enable();
+ mm::init();
println!(
"[init] kernel mapped @ {:#X} - {:#X}",
- vmap_kernel_start(),
- vmap_kernel_end(),
+ unsafe { vmap_kernel_start() },
+ unsafe { vmap_kernel_end() },
);
println!(
"[init] BSS mapped @ {:#X} - {:#X}",
bss_start(),
bss_end()
);
-
- // io::print_welcome();
-
// busy loop query keyboard
+ interrupt::interrupt_enable();
+ pic_8259::allow(PicDeviceInt::KEYBOARD);
+ let mut test_vec = Vec::<&str>::new();
+ test_vec.push("hello ");
+ test_vec.push("world");
+ for s in test_vec.iter() {
+ println!("{s}");
+ }
loop {
io::KBCTL_GLOBAL.lock().fetch_key();
if let Some(k) = io::KBCTL_GLOBAL.lock().consume_key() {
println! {"key: {:?}", k}
}
}
+ // test heap
}
pub unsafe fn _test_pf() {
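The Vec push/print in _entry is the commit's heap test. A slightly broader smoke test along the same lines, purely illustrative and not part of the commit, would also exercise Box and Vec reallocation through the new global allocator:

fn _heap_smoke_test() {
	use alloc::boxed::Box;
	use alloc::vec::Vec;
	// single small allocation
	let b = Box::new(0xdead_beef_u64);
	assert_eq!(*b, 0xdead_beef);
	// a growing Vec forces several reallocations
	let mut v = Vec::new();
	for i in 0..4096u64 {
		v.push(i);
	}
	assert_eq!(v.iter().sum::<u64>(), 4095 * 4096 / 2);
	println!("[test] heap smoke test passed ({} u64s)", v.len());
}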
diff --git a/src/mm/mod.rs b/src/mm/mod.rs
index 3ed6b19..a483a90 100644
--- a/src/mm/mod.rs
+++ b/src/mm/mod.rs
@@ -2,15 +2,17 @@ use crate::defs::*;
use crate::io::*;
use crate::machine::multiboot;
use core::ops::Range;
+use linked_list_allocator::LockedHeap;
+
pub mod pma;
use lazy_static::lazy_static;
use spin::Mutex;
-lazy_static! {
- pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
- Mutex::new(pma::PageStackAllocator::new());
-}
+#[global_allocator]
+static ALLOCATOR: LockedHeap = LockedHeap::empty();
+
+/// half measure: simply initialize the linked_list_allocator heap
pub fn init() {
let mbi = multiboot::get_mb_info().unwrap();
let mmapinfo = unsafe { mbi.get_mmap() }.unwrap();
@@ -18,7 +20,8 @@ pub fn init() {
let buf_len = mmapinfo.mmap_length;
let buf_end = buf_start + buf_len;
let mut curr = buf_start as u64;
- let mut inserted = 0;
+ // initialize the heap allocator with the largest physical memory block
+ let mut largest_phy_range: Option<Range<u64>> = None;
loop {
if curr >= buf_end as u64 {
break;
@@ -29,25 +32,46 @@ pub fn init() {
if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM {
continue;
}
- if mblock.get_end() <= pmap_kernel_start() {
+ if mblock.get_end() <= unsafe { pmap_kernel_start() } {
continue;
}
// TODO early break if the array is already full
let mut r = mblock.get_range();
- if mblock.get_range().contains(&pmap_kernel_end()) {
- r.start = pmap_kernel_end();
+ if r.contains(&unsafe { pmap_kernel_end() }) {
+ assert!(
+ r.contains(&unsafe { pmap_kernel_start() }),
+ "FATAL: kernel physical map cross physical blocks, how?"
+ );
+ r.start = unsafe { pmap_kernel_end() };
+ }
+ match largest_phy_range {
+ None => largest_phy_range = Some(r),
+ Some(ref lr) => {
+ if (r.end - r.start) > (lr.end - lr.start) {
+ largest_phy_range = Some(r);
+ }
+ }
}
- inserted += GLOBAL_PMA.lock().insert_range(&r);
}
+ let pr = &largest_phy_range.expect("Can't find any available physical block");
+ assert!((pr.end - pr.start) >= Mem::MIN_PHY_MEM, "TOO LITTLE RAM ...");
+ // init heap allocator on id map
+ unsafe {
+ ALLOCATOR.lock().init(
+ P2V(pr.start).unwrap() as *mut u8,
+ (pr.end - pr.start) as usize,
+ );
+ }
println!(
- "[init] pma: kernel loaded at phy: {:#X} - {:#X}",
- pmap_kernel_start(),
- pmap_kernel_end()
- );
- println!(
- "[init] pma: {:#X} KiB free memory, {:#X} frames inserted",
- inserted * 0x4,
- inserted,
+ "[init] mm: heap alloc initialized @ {:#X} - {:#X}",
+ P2V(pr.start).unwrap(),
+ P2V(pr.end).unwrap()
);
}
+
+/// populate the physical frame pool. This conflicts with the kernel heap allocator (kmalloc),
+/// which operates on the id map regions.
+pub fn _init_pma() {
+ todo!()
+}
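The init loop above walks the Multiboot (v1) memory map, a packed array of variable-size entries addressed by mmap_addr/mmap_length. A sketch of the entry layout being iterated, assuming the project's MultibootMmap mirrors the Multiboot specification (names here are illustrative):

#[repr(C, packed)]
struct MmapEntrySketch {
	size: u32,      // entry size, NOT counting this field itself
	base_addr: u64, // physical start of the region
	length: u64,    // region length in bytes
	mtype: u32,     // 1 == available RAM (MTYPE_RAM)
}

// The loop presumably advances with curr += (size + 4) since `size` excludes
// its own four bytes, and get_range() yields base_addr..base_addr + length.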
diff --git a/src/mm/pma.rs b/src/mm/pma.rs
index 7111137..359e406 100644
--- a/src/mm/pma.rs
+++ b/src/mm/pma.rs
@@ -8,6 +8,12 @@ extern "C" {
fn ___FREE_PAGE_STACK__();
}
+// disabled for now
+// lazy_static! {
+// pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
+// Mutex::new(pma::PageStackAllocator::new());
+// }
+
/// There should only be one global instance of this.
pub struct PageStackAllocator {
page_stack: &'static mut [u64],
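PageStackAllocator keeps free frames as raw physical addresses on a stack backed by the region behind ___FREE_PAGE_STACK__ (whose static reservation this commit comments out). A rough sketch of the push/pop discipline such an allocator uses; this standalone type and its fields are assumptions, not the actual API:

struct FrameStackSketch {
	stack: &'static mut [u64], // one slot per free 4 KiB frame
	top: usize,                // number of free frames currently on the stack
}

impl FrameStackSketch {
	/// free a frame: push its page-aligned physical address
	fn push(&mut self, pa: u64) {
		self.stack[self.top] = pa & !0xfff;
		self.top += 1;
	}

	/// allocate a frame: pop the most recently freed address
	fn pop(&mut self) -> Option<u64> {
		if self.top == 0 {
			return None;
		}
		self.top -= 1;
		Some(self.stack[self.top])
	}
}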