author    Tianhao Wang <shrik3@mailbox.org>  2024-06-11 03:34:24 +0200
committer Tianhao Wang <shrik3@mailbox.org>  2024-06-11 15:17:14 +0200
commit    0609a75bb016453cd7bb4f8393c31c572d56a06e (patch)
tree      bcc307b736131db06c06a735bee0f63e9d50d54b /src/proc
parent    f7dfc26865c586003d30bab578277e9776b98994 (diff)
proc: VecDeque round-robin scheduler
Signed-off-by: Tianhao Wang <shrik3@mailbox.org>
Diffstat (limited to 'src/proc')
-rw-r--r--  src/proc/sched.rs  85
-rw-r--r--  src/proc/task.rs   44
2 files changed, 119 insertions(+), 10 deletions(-)
diff --git a/src/proc/sched.rs b/src/proc/sched.rs
index 81648bf..0bb155d 100644
--- a/src/proc/sched.rs
+++ b/src/proc/sched.rs
@@ -1,3 +1,88 @@
+use crate::io::*;
use crate::proc::task::*;
use alloc::collections::VecDeque;
+use lazy_static::lazy_static;
+use spin::Mutex;
+// the global scheduler takes a spinlock (will change later). Must be extra
+// careful with it: never do a context swap before releasing the lock on the
+// scheduler, otherwise the next task won't be able to acquire the lock again.
+lazy_static! {
+ pub static ref SCHEDULER: Mutex<Scheduler> = Mutex::new(Scheduler::new());
+}
// TODO the lifetime here is pretty much broken. Fix this later
+// the scheduler should be a per-cpu instance and it should not take a lock,
+// because `do_schedule` does not return to release the lock.
+pub struct Scheduler {
+ pub run_queue: VecDeque<TaskId>,
+}
+
+impl Scheduler {
+ pub const MIN_TASK_CAP: usize = 16;
+ pub fn new() -> Self {
+ Self {
+ // btw. try_with_capacity is an unstable feature.
+ run_queue: VecDeque::try_with_capacity(Self::MIN_TASK_CAP).unwrap(),
+ }
+ }
+
+ // maybe we should reject inserting tasks that are already queued?
+ pub fn insert_task(&mut self, tid: TaskId) {
+ self.run_queue.push_back(tid);
+ }
+
+ pub fn try_remove(&mut self, _tid: TaskId) {
+ // try to remove all occurrences of tid from the run_queue. Maybe do
+ // something special if the task is in a wait queue, but we are not
+ // there yet.
+ todo!("not implemented");
+ }
+
+ // pop front, push back
+ pub fn do_schedule() {
+ if SCHEDULER.is_locked() {
+ panic!("scheduler lock has been taken, something wrong");
+ }
+ let mut lock_guard = SCHEDULER.lock();
+ let t = lock_guard.run_queue.pop_front();
+ match t {
+ None => {
+ panic!("run queue empty, how? what do I do??")
+ }
+ Some(next_tid) => {
+ let next_task = next_tid.get_task_ref_mut();
+ let me = Task::current().unwrap();
+ lock_guard.run_queue.push_back(next_tid);
+ if me.pid == next_task.pid {
+ return;
+ }
+ // make sure we release the scheduler lock before doing context
+ // swap
+ drop(lock_guard);
+ unsafe {
+ context_swap(
+ &(me.context) as *const _ as u64,
+ &(next_task.context) as *const _ as u64,
+ );
+ }
+ }
+ }
+ }
+
+ // like do_schedule, but there is no running context to save
+ pub fn kickoff() {
+ if SCHEDULER.is_locked() {
+ panic!("scheduler lock has been taken, something wrong");
+ }
+ let mut lock_guard = SCHEDULER.lock();
+ let t = lock_guard
+ .run_queue
+ .pop_front()
+ .expect("run queue empty, can't start");
+ let first_task = t.get_task_ref_mut();
+ lock_guard.run_queue.push_back(t);
+ drop(lock_guard);
+ unsafe {
+ context_swap_to(&(first_task.context) as *const _ as u64);
+ }
+ }
+}
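
The try_remove above is left as a todo!(). A minimal sketch of what its comment asks for, dropping every queued occurrence of a task id, could look like the code below; it assumes TaskId also derives PartialEq (which this commit does not add) and it still does nothing about wait queues:

	// sketch only: assumes #[derive(PartialEq)] is added to TaskId
	pub fn try_remove(&mut self, tid: TaskId) {
		// keep every queued entry that is not the task we want removed;
		// tasks sitting in a wait queue are not handled here yet
		self.run_queue.retain(|queued| *queued != tid);
	}
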
diff --git a/src/proc/task.rs b/src/proc/task.rs
index 75a6c1a..2a6626d 100644
--- a/src/proc/task.rs
+++ b/src/proc/task.rs
@@ -1,6 +1,9 @@
use crate::arch::x86_64::arch_regs;
+use crate::arch::x86_64::arch_regs::Context64;
use crate::defs::*;
use crate::io::*;
+use crate::mm::KSTACK_ALLOCATOR;
+use crate::proc::sched::*;
use core::arch::asm;
use core::ptr;
@@ -10,7 +13,7 @@ use core::ptr;
/// NOTE: we don't use repr(C) or repr(packed) here
pub struct Task {
pub magic: u64,
- pub task_id: u32,
+ pub pid: u32,
/// note that this points to the stack bottom (low addr)
pub kernel_stack: u64,
// pub user_stack: u64,
@@ -24,6 +27,7 @@ pub struct Task {
/// the smart pointer types automatically drops the owned values when their
/// lifetime end. For now want to have manual control of when, where and how I
/// drop the Task because there could be more plans than just freeing the memory
+#[derive(Copy, Clone, Debug)]
pub struct TaskId(u64);
impl TaskId {
@@ -31,12 +35,12 @@ impl TaskId {
Self { 0: addr }
}
- pub unsafe fn get_task_ref(&self) -> &Task {
- &*(self.0 as *mut Task)
+ pub fn get_task_ref(&self) -> &Task {
+ return unsafe { &*(self.0 as *mut Task) };
}
- pub unsafe fn get_task_ref_mut(&self) -> &mut Task {
- &mut *(self.0 as *mut Task)
+ pub fn get_task_ref_mut(&self) -> &mut Task {
+ return unsafe { &mut *(self.0 as *mut Task) };
}
}
@@ -53,12 +57,11 @@ pub enum TaskState {
#[no_mangle]
pub extern "C" fn _task_entry() -> ! {
- println!("I'm Mr.Meeseeks, look at me~");
let t = Task::current().unwrap();
- println!(
- "I am PID {}, kernel stack {:#X}, task state {:#X?}",
- t.task_id, t.kernel_stack, t.state
- );
+ println!("I'm Mr.Meeseeks {}, look at me~", t.pid);
+ Scheduler::do_schedule();
+ println!("I'm Mr.Meeseeks {}, look at me~", t.pid);
+ Scheduler::do_schedule();
unsafe { asm!("cli; hlt") };
panic!("should not reach");
}
@@ -117,4 +120,25 @@ impl Task {
}
return Some(t);
}
+
+ /// used for trivial tests
+ pub fn create_dummy(pid: u32) -> TaskId {
+ let sp = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
+ let tid = TaskId::new(sp);
+ println!("new task on {:#X}", sp);
+ let nt = unsafe {
+ Task::settle_on_stack(
+ sp,
+ Task {
+ magic: Mem::KERNEL_STACK_TASK_MAGIC,
+ pid,
+ kernel_stack: sp,
+ state: TaskState::Meow,
+ context: Context64::default(),
+ },
+ )
+ };
+ nt.prepare_context(_task_entry as u64);
+ tid
+ }
}
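
Taken together, a boot-time use of the pieces added in this commit would look roughly like the sketch below. It is illustrative only: the function name demo_scheduling and the task count are made up, and it assumes SCHEDULER, Scheduler and Task are in scope.

	// illustrative sketch, not part of this commit
	fn demo_scheduling() -> ! {
		// create a few throwaway tasks and queue them for round-robin
		for pid in 0..3 {
			let tid = Task::create_dummy(pid);
			SCHEDULER.lock().insert_task(tid);
		}
		// hand the CPU to the first queued task; each task then calls
		// Scheduler::do_schedule() to rotate through the queue
		Scheduler::kickoff();
		unreachable!("kickoff does not return");
	}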