| author | Tianhao Wang <shrik3@mailbox.org> | 2024-06-11 03:34:24 +0200 |
|---|---|---|
| committer | Tianhao Wang <shrik3@mailbox.org> | 2024-06-11 15:17:14 +0200 |
| commit | 0609a75bb016453cd7bb4f8393c31c572d56a06e (patch) | |
| tree | bcc307b736131db06c06a735bee0f63e9d50d54b /src/proc/sched.rs | |
| parent | f7dfc26865c586003d30bab578277e9776b98994 (diff) | |
proc: VecDeque round-robin scheduler
Signed-off-by: Tianhao Wang <shrik3@mailbox.org>
Diffstat (limited to 'src/proc/sched.rs')
| -rw-r--r-- | src/proc/sched.rs | 85 |
1 file changed, 85 insertions, 0 deletions
```diff
diff --git a/src/proc/sched.rs b/src/proc/sched.rs
index 81648bf..0bb155d 100644
--- a/src/proc/sched.rs
+++ b/src/proc/sched.rs
@@ -1,3 +1,88 @@
+use crate::io::*;
 use crate::proc::task::*;
 use alloc::collections::VecDeque;
+use lazy_static::lazy_static;
+use spin::Mutex;
 
+// the global scheduler takes a spinlock (will change later). Must be extra
+// careful with it: never do a context swap before releasing the lock on the
+// scheduler, otherwise the next task won't be able to acquire the lock again.
+lazy_static! {
+	pub static ref SCHEDULER: Mutex<Scheduler> = Mutex::new(Scheduler::new());
+} // TODO the lifetime here is pretty much broken. Fix this later:
+// the scheduler should be a per-cpu instance and it shall not lock,
+// because `do_schedule` does not return to release the lock.
+pub struct Scheduler {
+	pub run_queue: VecDeque<TaskId>,
+}
+
+impl Scheduler {
+	pub const MIN_TASK_CAP: usize = 16;
+	pub fn new() -> Self {
+		Self {
+			// btw. try_with_capacity is an unstable feature.
+			run_queue: VecDeque::try_with_capacity(16).unwrap(),
+		}
+	}
+
+	// maybe we should reject inserting existing tasks?
+	pub fn insert_task(&mut self, tid: TaskId) {
+		self.run_queue.push_back(tid);
+	}
+
+	pub fn try_remove(&mut self, _tid: TaskId) {
+		// try to remove all occurrences of tid in the run_queue; maybe do
+		// something special if the task is in the wait queue, but we are not
+		// there yet.
+		todo!("not implemented");
+	}
+
+	// pop front, push back
+	pub fn do_schedule() {
+		if SCHEDULER.is_locked() {
+			panic!("scheduler lock has been taken, something wrong");
+		}
+		let mut lock_guard = SCHEDULER.lock();
+		let t = lock_guard.run_queue.pop_front();
+		match t {
+			None => {
+				panic!("run queue empty, how? what do I do??")
+			}
+			Some(next_tid) => {
+				let next_task = next_tid.get_task_ref_mut();
+				let me = Task::current().unwrap();
+				lock_guard.run_queue.push_back(next_tid);
+				if me.pid == next_task.pid {
+					return;
+				}
+				// make sure we release the scheduler lock before doing the
+				// context swap
+				drop(lock_guard);
+				unsafe {
+					context_swap(
+						&(me.context) as *const _ as u64,
+						&(next_task.context) as *const _ as u64,
+					);
+				}
+			}
+		}
+	}
+
+	// like do_schedule, but there is no running context to save
+	pub fn kickoff() {
+		if SCHEDULER.is_locked() {
+			panic!("scheduler lock has been taken, something wrong");
+		}
+		let mut lock_guard = SCHEDULER.lock();
+		let t = lock_guard
+			.run_queue
+			.pop_front()
+			.expect("run queue empty, can't start");
+		let first_task = t.get_task_ref_mut();
+		lock_guard.run_queue.push_back(t);
+		drop(lock_guard);
+		unsafe {
+			context_swap_to(&(first_task.context) as *const _ as u64);
+		}
+	}
+}
```
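
A side note on the constructor: as the inline comment says, `VecDeque::try_with_capacity` is gated behind an unstable feature. A sketch of a stable-Rust substitute (my variation, not part of the commit) would drop the fallible allocation and reuse the `MIN_TASK_CAP` constant:

```rust
	pub fn new() -> Self {
		Self {
			// with_capacity is stable; on allocation failure it goes through
			// the global allocation-error handler instead of returning a
			// Result, which is effectively what the .unwrap() above does
			run_queue: VecDeque::with_capacity(Self::MIN_TASK_CAP),
		}
	}
```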
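
`try_remove` is still a `todo!()`. One possible body, assuming `TaskId` is `Copy` and comparable with `PartialEq`, and ignoring wait queues for now (a sketch, not the author's implementation):

```rust
	pub fn try_remove(&mut self, tid: TaskId) {
		// keep only the entries that do not match the task being removed;
		// retain drops every occurrence of tid from the run queue in one pass
		self.run_queue.retain(|&t| t != tid);
		// TODO: once a wait queue exists, the task may also have to be
		// removed or woken from there
	}
```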
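
For orientation, this is roughly how the scheduler would be driven; the surrounding glue (`start_scheduling`, the timer hook) is hypothetical, only `SCHEDULER`, `insert_task`, `kickoff` and `do_schedule` come from the diff:

```rust
// Hypothetical boot/interrupt glue -- only the Scheduler API is from the commit.
fn start_scheduling(initial_tasks: impl IntoIterator<Item = TaskId>) -> ! {
	{
		// register the runnable tasks with the global round-robin queue
		let mut sched = SCHEDULER.lock();
		for tid in initial_tasks {
			sched.insert_task(tid);
		}
	} // guard dropped here; kickoff takes the lock itself
	// no running context to save yet: swap straight into the first task
	Scheduler::kickoff();
	unreachable!("kickoff does not return to the boot context");
}

// later, from the timer interrupt handler or a voluntary yield:
// Scheduler::do_schedule();
```

The explicit scope around the guard mirrors the comment in the diff: the lock must be released before any context swap, and both `kickoff` and `do_schedule` panic if they find the scheduler lock already taken.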
