author     Tianhao Wang <shrik3@mailbox.org>  2024-06-10 20:47:54 +0200
committer  Tianhao Wang <shrik3@mailbox.org>  2024-06-11 15:17:14 +0200
commit     13e3eca8ac3f6bd8c7a8fddde8b1937757e358ee (patch)
tree       eb2d4992d5692287c232ac729ad18f9690eaa153
parent     8aaad696463004a9e51d35e4c466c131b3402822 (diff)
proc: basic task/stack creation
Now we can do a simple context swap (without a scheduler though)
-rw-r--r--  src/arch/x86_64/arch_regs.rs    15
-rw-r--r--  src/lib.rs                      24
-rw-r--r--  src/mm/mod.rs                   58
-rw-r--r--  src/proc/sched.rs               16
-rw-r--r--  src/proc/task.rs               102
5 files changed, 181 insertions, 34 deletions
diff --git a/src/arch/x86_64/arch_regs.rs b/src/arch/x86_64/arch_regs.rs
index a49926e..6bdccb0 100644
--- a/src/arch/x86_64/arch_regs.rs
+++ b/src/arch/x86_64/arch_regs.rs
@@ -15,6 +15,21 @@ pub struct Context64 {
pub fpu: [u8; 108],
}
+impl Default for Context64 {
+ fn default() -> Context64 {
+ Context64 {
+ rbx: 0,
+ r12: 0,
+ r13: 0,
+ r14: 0,
+ r15: 0,
+ rbp: 0,
+ rsp: 0,
+ fpu: [0; 108],
+ }
+ }
+}
+
/// arch specific registers
#[repr(C)]
#[repr(packed)]
diff --git a/src/lib.rs b/src/lib.rs
index a121d49..41acca2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -21,6 +21,7 @@ use machine::cgascr::CGAScreen;
use machine::key::Modifiers;
use machine::multiboot;
use machine::serial::Serial;
+use proc::task::Task;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
@@ -58,6 +59,7 @@ pub extern "C" fn _entry() -> ! {
let mut test_vec = Vec::<&str>::new();
test_vec.push("hello ");
test_vec.push("world");
+ _test_proc_switch_to();
for s in test_vec.iter() {
println!("{s}");
}
@@ -78,3 +80,25 @@ pub unsafe fn _test_pf() {
let name_buf = slice::from_raw_parts_mut(0xffffffffffff0000 as *mut u64, 10);
asm!("mov [rdi], rax", in("rdi") name_buf.as_mut_ptr());
}
+
+pub fn _test_proc_switch_to() {
+ use crate::arch::x86_64::arch_regs::Context64;
+ use crate::mm::KSTACK_ALLOCATOR;
+ use crate::proc::task::*;
+ let sp = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
+ println!("new task on {:#X}", sp);
+ let new_task = unsafe {
+ Task::settle_on_stack(
+ sp,
+ Task {
+ magic: Mem::KERNEL_STACK_TASK_MAGIC,
+ task_id: 42,
+ kernel_stack: sp,
+ state: TaskState::Meow,
+ context: Context64::default(),
+ },
+ )
+ };
+ new_task.prepare_context(_task_entry as u64);
+ unsafe { context_swap_to(&(new_task.context) as *const _ as u64) }
+}
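
Note: context_swap and context_swap_to live in a separate assembly file that is not part of this diff. A minimal sketch of what the swap-to path is assumed to do (the offsets assume the Context64 field order from the Default impl above; this is an illustration, not the repo's actual swap routine):

    // Hypothetical stand-in for the real assembly routine (Rust global_asm!, Intel syntax).
    // rdi carries the first System V argument: the address of the target Context64.
    use core::arch::global_asm;

    global_asm!(
        ".global context_swap_to",
        "context_swap_to:",
        "mov rbx, [rdi + 0]",   // restore the callee-saved registers...
        "mov r12, [rdi + 8]",
        "mov r13, [rdi + 16]",
        "mov r14, [rdi + 24]",
        "mov r15, [rdi + 32]",
        "mov rbp, [rdi + 40]",
        "mov rsp, [rdi + 48]",  // ...and switch onto the prepared kernel stack
        // (FPU state restore from the 108-byte area omitted for brevity)
        "ret",                  // pops the entry address that prepare_context pushed
    );
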
diff --git a/src/mm/mod.rs b/src/mm/mod.rs
index a483a90..b8f8e07 100644
--- a/src/mm/mod.rs
+++ b/src/mm/mod.rs
@@ -1,6 +1,8 @@
use crate::defs::*;
use crate::io::*;
use crate::machine::multiboot;
+use alloc::alloc::{alloc, dealloc, Layout};
+use alloc::vec::Vec;
use core::ops::Range;
use linked_list_allocator::LockedHeap;
@@ -12,6 +14,10 @@ use spin::Mutex;
#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();
+lazy_static! {
+ pub static ref KSTACK_ALLOCATOR: Mutex<KStackAllocator> = Mutex::new(KStackAllocator::new());
+}
+
/// half measure: simply initialize the linkedlist allocator
pub fn init() {
let mbi = multiboot::get_mb_info().unwrap();
@@ -44,6 +50,8 @@ pub fn init() {
);
r.start = unsafe { pmap_kernel_end() };
}
+ // TODO this is pretty ugly; consider 1. implementing length() for Range and
+ // 2. taking a reference instead of copying.
match largest_phy_range {
None => largest_phy_range = Some(r),
Some(ref lr) => {
@@ -75,3 +83,53 @@ pub fn init() {
pub fn _init_pma() {
todo!()
}
+
+/// wrapper around the global allocator with caching
+pub struct KStackAllocator {
+ pool: Vec<u64>,
+}
+
+/// TODO: the heap allocator is primitive atm and may fail to allocate a new
+/// kernel stack (64K here) due to fragmentation. It may be a good idea to reserve
+/// some memory during system init so that we can at least allocate a few kernel stacks.
+impl KStackAllocator {
+ const KSTACK_ALLOC_POOL_CAP: usize = 16;
+ const KSTACK_LAYOUT: Layout = unsafe {
+ Layout::from_size_align_unchecked(
+ Mem::KERNEL_STACK_SIZE as usize,
+ Mem::KERNEL_STACK_SIZE as usize,
+ )
+ };
+
+ pub fn new() -> Self {
+ let p = Vec::with_capacity(Self::KSTACK_ALLOC_POOL_CAP);
+ Self { pool: p }
+ }
+
+ /// unsafe because this may fail (same as populate)
+ pub unsafe fn allocate(&mut self) -> u64 {
+ if let Some(addr) = self.pool.pop() {
+ return addr;
+ } else {
+ return alloc(Self::KSTACK_LAYOUT) as u64;
+ }
+ }
+
+ /// unsafe because you must make sure you give back something the allocator gave
+ /// you. Otherwise you break the kernel heap allocator.
+ pub unsafe fn free(&mut self, addr: u64) {
+ if self.pool.len() < Self::KSTACK_ALLOC_POOL_CAP {
+ self.pool.push(addr);
+ } else {
+ dealloc(addr as *mut u8, Self::KSTACK_LAYOUT);
+ }
+ }
+
+ /// unsafe because this could OOM if you stress the allocator too much
+ /// (although unlikely)
+ pub unsafe fn populate(&mut self) {
+ for _ in 0..Self::KSTACK_ALLOC_POOL_CAP {
+ self.pool.push(alloc(Self::KSTACK_LAYOUT) as u64);
+ }
+ }
+}
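
A quick usage sketch of the new allocator (it mirrors the call in _test_proc_switch_to above and assumes mm::init() has already set up the kernel heap behind the global allocator; the helper name is made up):

    use crate::mm::KSTACK_ALLOCATOR;

    fn kstack_roundtrip_demo() {
        // allocate() is unsafe because the underlying heap allocation may fail.
        let sp = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
        // ... settle a Task on the new stack via Task::settle_on_stack(sp, ...) ...
        // free() is unsafe: only hand back addresses that allocate() returned,
        // otherwise the kernel heap allocator gets corrupted.
        unsafe { KSTACK_ALLOCATOR.lock().free(sp) };
    }
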
diff --git a/src/proc/sched.rs b/src/proc/sched.rs
index 8e86c7f..81648bf 100644
--- a/src/proc/sched.rs
+++ b/src/proc/sched.rs
@@ -1,17 +1,3 @@
use crate::proc::task::*;
-use alloc::collections::linked_list::LinkedList;
+use alloc::collections::VecDeque;
// TODO the lifetime here is pretty much broken. Fix this later
-pub struct Scheduler<'a> {
- run_list: LinkedList<&'a Task>,
-}
-
-impl<'a> Scheduler<'a> {
- #[inline]
- pub fn pop_front(&mut self) -> Option<&Task> {
- self.run_list.pop_front()
- }
- #[inline]
- pub fn push_back(&mut self, t: &'a Task) {
- self.run_list.push_back(t);
- }
-}
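
The old lifetime-bound Scheduler is removed here and only the VecDeque import stays behind. One possible follow-up shape (not part of this commit) would be a run queue of TaskId values, which sidesteps the borrow/lifetime problem noted in the TODO:

    // Hypothetical sketch only: queue TaskIds (raw task addresses) instead of
    // &Task references, so the run queue holds no borrows with lifetimes.
    use alloc::collections::VecDeque;
    use crate::proc::task::TaskId;

    pub struct Scheduler {
        run_list: VecDeque<TaskId>,
    }

    impl Scheduler {
        pub fn new() -> Self {
            Self { run_list: VecDeque::new() }
        }
        #[inline]
        pub fn pop_front(&mut self) -> Option<TaskId> {
            self.run_list.pop_front()
        }
        #[inline]
        pub fn push_back(&mut self, t: TaskId) {
            self.run_list.push_back(t);
        }
    }
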
diff --git a/src/proc/task.rs b/src/proc/task.rs
index 5cbe2f6..75a6c1a 100644
--- a/src/proc/task.rs
+++ b/src/proc/task.rs
@@ -2,21 +2,45 @@ use crate::arch::x86_64::arch_regs;
use crate::defs::*;
use crate::io::*;
use core::arch::asm;
+use core::ptr;
/// currently only the kernel stack pointer and Context are important.
/// the task struct will be placed on the starting addr (low addr) of the kernel stack.
/// therefore we can retrieve the task struct at any time by masking the stack pointer
-#[repr(C)]
-#[repr(packed)]
+/// NOTE: we don't use repr(C) or repr(packed) here
pub struct Task {
pub magic: u64,
pub task_id: u32,
+ /// note that this points to the stack bottom (low addr)
pub kernel_stack: u64,
// pub user_stack: u64,
- pub context: arch_regs::Context64,
pub state: TaskState,
+ pub context: arch_regs::Context64,
+}
+
+/// Not to be confused with an integer TID: a TaskId identifies a task and
+/// __locates__ it. In this case the TaskId wraps the task struct's address. The
+/// reason the scheduler doesn't directly store Box<Task> (or the like) is that
+/// smart pointer types automatically drop the owned value when its lifetime
+/// ends. For now I want manual control over when, where and how the Task is
+/// dropped, because there could be more plans than just freeing the memory.
+pub struct TaskId(u64);
+
+impl TaskId {
+ pub fn new(addr: u64) -> Self {
+ Self { 0: addr }
+ }
+
+ pub unsafe fn get_task_ref(&self) -> &Task {
+ &*(self.0 as *mut Task)
+ }
+
+ pub unsafe fn get_task_ref_mut(&self) -> &mut Task {
+ &mut *(self.0 as *mut Task)
+ }
}
+#[derive(Debug)]
pub enum TaskState {
Run,
Block,
@@ -30,27 +54,67 @@ pub enum TaskState {
#[no_mangle]
pub extern "C" fn _task_entry() -> ! {
println!("I'm Mr.Meeseeks, look at me~");
- unsafe { asm!("hlt") };
- panic!("shoud not reach");
+ let t = Task::current().unwrap();
+ println!(
+ "I am PID {}, kernel stack {:#X}, task state {:#X?}",
+ t.task_id, t.kernel_stack, t.state
+ );
+ unsafe { asm!("cli; hlt") };
+ panic!("should not reach");
}
extern "C" {
- fn context_swap(from_ctx: u64, to_ctx: u64);
- fn context_swap_to(to_ctx: u64);
+ pub fn context_swap(from_ctx: u64, to_ctx: u64);
+ pub fn context_swap_to(to_ctx: u64);
}
impl Task {
- /// create new task. This is tricky because the task struct sits at the
- /// bottom of the kernel stack, so that you can get the current task by
- /// masking the stack pointer.
- /// 1. allocate the kernel stack with predefined size and make sure the
- /// address is properly aligned
- /// 2. cast a task struct onto the stack
- /// 3. set whatever fields necessary in the task struct, including a magic
- /// 4. create a new stack frame, and update the stack pointer in the context,
- /// so that when first swapped to, the task will immediately "return to"
- /// the _func_ptr
- pub fn new(_id: u32, _func_ptr: u64) -> Self {
- todo!()
+ /// TODO implement a proper ::new. For now use settle_on_stack instead
+ pub fn new(_id: u32, _kstack: u64, _func_ptr: u64) -> Self {
+ panic!(
+ "never ever try to manually create a task struct\n
+ gather the parts and call Task::settle_on_stack() instead"
+ )
+ }
+
+ /// unsafe because you have to make sure the stack pointer is valid
+ /// i.e. allocated through KStackAllocator.
+ #[inline(always)]
+ pub unsafe fn settle_on_stack<'a>(stack_addr: u64, t: Task) -> &'a mut Task {
+ ptr::write_volatile(stack_addr as *mut Task, t);
+ return &mut *(stack_addr as *mut Task);
+ }
+
+ /// settle_on_stack and prepare_context must be called before switching to
+ /// the task. TODO: combine them into one single API
+ #[inline(always)]
+ pub fn prepare_context(&mut self, entry: u64) {
+ // this is like OOStuBS "toc_settle"
+ let mut sp = self.get_init_kernel_sp();
+ // FIXME this is ugly
+ unsafe {
+ sp -= 8;
+ *(sp as *mut u64) = 0;
+ sp -= 8;
+ *(sp as *mut u64) = entry;
+ }
+ self.context.rsp = sp;
+ }
+
+ // the stack pointer may have an alignment requirement; we align down to 8 bytes
+ #[inline(always)]
+ fn get_init_kernel_sp(&self) -> u64 {
+ let mut sp = self.kernel_stack + Mem::KERNEL_STACK_SIZE - 1;
+ sp = sp & (!0b111);
+ sp
+ }
+
+ pub fn current<'a>() -> Option<&'a mut Task> {
+ let addr = arch_regs::get_sp() & !Mem::KERNEL_STACK_MASK;
+ let t = unsafe { &mut *(addr as *mut Task) };
+ if t.magic != Mem::KERNEL_STACK_TASK_MAGIC {
+ return None;
+ }
+ return Some(t);
}
}
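
To make the stack math in prepare_context, get_init_kernel_sp and current() concrete, here is a worked example as a standalone host program. The constants are assumptions (the real values live in defs.rs and are not shown in this diff): KERNEL_STACK_SIZE = 64 KiB, KERNEL_STACK_MASK = KERNEL_STACK_SIZE - 1, and the stack base address is made up.

    const KERNEL_STACK_SIZE: u64 = 64 * 1024;              // assumed
    const KERNEL_STACK_MASK: u64 = KERNEL_STACK_SIZE - 1;  // assumed

    fn main() {
        // hypothetical, 64K-aligned kernel stack base; settle_on_stack writes
        // the Task struct right here at the low end.
        let kernel_stack: u64 = 0xffff_a000_0000_0000;

        // get_init_kernel_sp(): top of the stack, aligned down to 8 bytes.
        let mut sp = (kernel_stack + KERNEL_STACK_SIZE - 1) & !0b111;

        // prepare_context(): build a fake frame so the first swap "returns"
        // into the entry function.
        sp -= 8; // terminating 0, never actually returned to (_task_entry is `-> !`)
        sp -= 8; // entry address; the `ret` in the swap routine pops this

        // Task::current(): any rsp inside this stack masks back to the Task struct.
        let some_rsp = sp - 1234;
        assert_eq!(some_rsp & !KERNEL_STACK_MASK, kernel_stack);
        println!("task struct @ {kernel_stack:#x}, initial rsp {sp:#x}");
    }
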