-rw-r--r--  README.md          13
-rw-r--r--  src/lib.rs         27
-rw-r--r--  src/proc/sched.rs  85
-rw-r--r--  src/proc/task.rs   44
4 files changed, 135 insertions, 34 deletions
diff --git a/README.md b/README.md
index c1daa86..9c272cb 100644
--- a/README.md
+++ b/README.md
@@ -24,19 +24,22 @@ shitty code, I'm a rust beginner.
- [X] kmalloc (using the [linked-list-allocator](https://github.com/rust-osdev/linked-list-allocator))
TODO: implement my own
- [?] interrupt sync (pro-/epilogue model)
-- [?] Threading (WIP)
-- [ ] Scheduler (single CPU)
+- [x] Threading
+- [x] Scheduler (single CPU)
- [ ] Timer Interrupt
- [ ] Synchronization Primitives
- [ ] asm! Wrappers for basic instructions
+MISC
+- [ ] FP and SSE state
+
Beyond StuBS
-- [ ] Task Descriptor structures
+- [X] Upperhalf Kernel
+- [X] Task Descriptor structures
- [X] Paging: PMA and paging structures
- [?] Paging: pagefault handler (WIP)
+- [?] Address Space for each Process
- [ ] user heap and mmap
-- [X] Upperhalf Kernel
-- [ ] Address Space for each Process
- [ ] in memory FS
- [ ] user library
- [ ] syscall
diff --git a/src/lib.rs b/src/lib.rs
index 41acca2..b4b6d66 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,6 +3,7 @@
#![no_std]
#![no_main]
#![feature(const_option)]
+#![feature(try_with_capacity)]
mod arch;
mod defs;
mod ds;
@@ -11,6 +12,7 @@ mod machine;
mod mm;
mod proc;
extern crate alloc;
+use crate::proc::sched::*;
use alloc::vec::Vec;
use arch::x86_64::interrupt;
use arch::x86_64::interrupt::pic_8259;
@@ -82,23 +84,10 @@ pub unsafe fn _test_pf() {
}
pub fn _test_proc_switch_to() {
- use crate::arch::x86_64::arch_regs::Context64;
- use crate::mm::KSTACK_ALLOCATOR;
- use crate::proc::task::*;
- let sp = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
- println!("new task on {:#X}", sp);
- let new_task = unsafe {
- Task::settle_on_stack(
- sp,
- Task {
- magic: Mem::KERNEL_STACK_TASK_MAGIC,
- task_id: 42,
- kernel_stack: sp,
- state: TaskState::Meow,
- context: Context64::default(),
- },
- )
- };
- new_task.prepare_context(_task_entry as u64);
- unsafe { context_swap_to(&(new_task.context) as *const _ as u64) }
+ SCHEDULER.lock().insert_task(Task::create_dummy(1));
+ SCHEDULER.lock().insert_task(Task::create_dummy(2));
+ SCHEDULER.lock().insert_task(Task::create_dummy(3));
+ SCHEDULER.lock().insert_task(Task::create_dummy(4));
+ SCHEDULER.lock().insert_task(Task::create_dummy(5));
+ Scheduler::kickoff();
}
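
Each insert_task call above takes and drops the scheduler spinlock separately, and kickoff() panics if the lock is still held, so the guard must be gone before the first swap. A minimal sketch of the same test that locks only once, assuming the SCHEDULER/Task API from this commit (the _batched name is made up here):

pub fn _test_proc_switch_to_batched() {
	{
		let mut sched = SCHEDULER.lock();
		for pid in 1..=5 {
			// create_dummy only touches KSTACK_ALLOCATOR, not SCHEDULER,
			// so calling it while holding the scheduler lock is fine here.
			sched.insert_task(Task::create_dummy(pid));
		}
	} // guard dropped here; Scheduler::kickoff() asserts the lock is free
	Scheduler::kickoff();
}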
diff --git a/src/proc/sched.rs b/src/proc/sched.rs
index 81648bf..0bb155d 100644
--- a/src/proc/sched.rs
+++ b/src/proc/sched.rs
@@ -1,3 +1,88 @@
+use crate::io::*;
use crate::proc::task::*;
use alloc::collections::VecDeque;
+use lazy_static::lazy_static;
+use spin::Mutex;
+// The global scheduler takes a spinlock (will change later). Be extra
+// careful with it: never do a context swap before releasing the scheduler
+// lock, otherwise the next task won't be able to acquire the lock again.
+lazy_static! {
+ pub static ref SCHEDULER: Mutex<Scheduler> = Mutex::new(Scheduler::new());
+}
// TODO the lifetime here is pretty much broken. Fix this later
+// The scheduler should be a per-CPU instance and should not take a lock,
+// because `do_schedule` does not return to release the lock.
+pub struct Scheduler {
+ pub run_queue: VecDeque<TaskId>,
+}
+
+impl Scheduler {
+ pub const MIN_TASK_CAP: usize = 16;
+ pub fn new() -> Self {
+ Self {
+ // btw. try_with_capacity is an unstable feature.
+ run_queue: VecDeque::try_with_capacity(Self::MIN_TASK_CAP).unwrap(),
+ }
+ }
+
+ // maybe we should reject inserting tasks that are already queued?
+ pub fn insert_task(&mut self, tid: TaskId) {
+ self.run_queue.push_back(tid);
+ }
+
+ pub fn try_remove(&mut self, _tid: TaskId) {
+ // Try to remove every occurrence of tid from the run_queue; maybe do
+ // something special if the task is in the wait queue, but we are not
+ // there yet.
+ todo!("not implemented");
+ }
+
+ // pop front, push back
+ pub fn do_schedule() {
+ if SCHEDULER.is_locked() {
+ panic!("scheduler lock has been taken, something wrong");
+ }
+ let mut lock_guard = SCHEDULER.lock();
+ let t = lock_guard.run_queue.pop_front();
+ match t {
+ None => {
+ panic!("run queue empty, how? what do I do??")
+ }
+ Some(next_tid) => {
+ let next_task = next_tid.get_task_ref_mut();
+ let me = Task::current().unwrap();
+ lock_guard.run_queue.push_back(next_tid);
+ if me.pid == next_task.pid {
+ return;
+ }
+ // make sure we release the scheduler lock before doing context
+ // swap
+ drop(lock_guard);
+ unsafe {
+ context_swap(
+ &(me.context) as *const _ as u64,
+ &(next_task.context) as *const _ as u64,
+ );
+ }
+ }
+ }
+ }
+
+ // like do_schedule, but there is no running context to save
+ pub fn kickoff() {
+ if SCHEDULER.is_locked() {
+ panic!("scheduler lock has been taken, something wrong");
+ }
+ let mut lock_guard = SCHEDULER.lock();
+ let t = lock_guard
+ .run_queue
+ .pop_front()
+ .expect("run queue empty, can't start");
+ let first_task = t.get_task_ref_mut();
+ lock_guard.run_queue.push_back(t);
+ drop(lock_guard);
+ unsafe {
+ context_swap_to(&(first_task.context) as *const _ as u64);
+ }
+ }
+}
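
The try_remove stub above is mostly a queue scan; a minimal sketch, assuming TaskId also derives PartialEq (this commit only derives Copy, Clone and Debug on it):

// Drop every occurrence of tid from the run queue. Wait-queue handling,
// as the comment in the stub notes, is not there yet.
pub fn try_remove(&mut self, tid: TaskId) {
	self.run_queue.retain(|&t| t != tid);
}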
diff --git a/src/proc/task.rs b/src/proc/task.rs
index 75a6c1a..2a6626d 100644
--- a/src/proc/task.rs
+++ b/src/proc/task.rs
@@ -1,6 +1,9 @@
use crate::arch::x86_64::arch_regs;
+use crate::arch::x86_64::arch_regs::Context64;
use crate::defs::*;
use crate::io::*;
+use crate::mm::KSTACK_ALLOCATOR;
+use crate::proc::sched::*;
use core::arch::asm;
use core::ptr;
@@ -10,7 +13,7 @@ use core::ptr;
/// NOTE: we don't use repr(C) or repr(packed) here
pub struct Task {
pub magic: u64,
- pub task_id: u32,
+ pub pid: u32,
/// note that this points to the stack bottom (low addr)
pub kernel_stack: u64,
// pub user_stack: u64,
@@ -24,6 +27,7 @@ pub struct Task {
/// the smart pointer types automatically drop the owned values when their
/// lifetimes end. For now I want to have manual control of when, where and how
/// I drop the Task because there could be more plans than just freeing the memory
+#[derive(Copy, Clone, Debug)]
pub struct TaskId(u64);
impl TaskId {
@@ -31,12 +35,12 @@ impl TaskId {
Self { 0: addr }
}
- pub unsafe fn get_task_ref(&self) -> &Task {
- &*(self.0 as *mut Task)
+ pub fn get_task_ref(&self) -> &Task {
+ return unsafe { &*(self.0 as *mut Task) };
}
- pub unsafe fn get_task_ref_mut(&self) -> &mut Task {
- &mut *(self.0 as *mut Task)
+ pub fn get_task_ref_mut(&self) -> &mut Task {
+ return unsafe { &mut *(self.0 as *mut Task) };
}
}
@@ -53,12 +57,11 @@ pub enum TaskState {
#[no_mangle]
pub extern "C" fn _task_entry() -> ! {
- println!("I'm Mr.Meeseeks, look at me~");
let t = Task::current().unwrap();
- println!(
- "I am PID {}, kernel stack {:#X}, task state {:#X?}",
- t.task_id, t.kernel_stack, t.state
- );
+ println!("I'm Mr.Meeseeks {}, look at me~", t.pid);
+ Scheduler::do_schedule();
+ println!("I'm Mr.Meeseeks {}, look at me~", t.pid);
+ Scheduler::do_schedule();
unsafe { asm!("cli; hlt") };
panic!("should not reach");
}
@@ -117,4 +120,25 @@ impl Task {
}
return Some(t);
}
+
+ /// used for trivial tests
+ pub fn create_dummy(pid: u32) -> TaskId {
+ let sp = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
+ let tid = TaskId::new(sp);
+ println!("new task on {:#X}", sp);
+ let nt = unsafe {
+ Task::settle_on_stack(
+ sp,
+ Task {
+ magic: Mem::KERNEL_STACK_TASK_MAGIC,
+ pid,
+ kernel_stack: sp,
+ state: TaskState::Meow,
+ context: Context64::default(),
+ },
+ )
+ };
+ nt.prepare_context(_task_entry as u64);
+ tid
+ }
}
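
create_dummy makes the layout concrete: the Task struct is settled at the low end of its own kernel stack, and that stack address doubles as the TaskId. This also hints at how Task::current() (only its tail is shown above) can work: mask the current stack pointer down to the stack base and sanity-check the magic field. A sketch under the assumption that kernel stacks are allocated size-aligned, with KSTACK_SIZE as a hypothetical stand-in for the real constant:

fn current_sketch() -> Option<&'static mut Task> {
	// assumed: every kernel stack is KSTACK_SIZE bytes and KSTACK_SIZE-aligned
	const KSTACK_SIZE: u64 = 0x4000;
	let rsp: u64;
	unsafe { core::arch::asm!("mov {}, rsp", out(reg) rsp) };
	// round rsp down to the stack bottom, where settle_on_stack put the Task
	let t = unsafe { &mut *((rsp & !(KSTACK_SIZE - 1)) as *mut Task) };
	// the magic value guards against calling this on a foreign stack
	if t.magic != Mem::KERNEL_STACK_TASK_MAGIC {
		return None;
	}
	Some(t)
}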