path: root/src/proc/sched.rs
use crate::io::*;
use crate::proc::task::*;
use alloc::collections::VecDeque;
use lazy_static::lazy_static;
use spin::Mutex;
// the global scheduler takes a spinlock (will change later). Must be extra
// careful with it: never do a context swap before releasing the scheduler
// lock, otherwise the next task won't be able to acquire the lock again.
lazy_static! {
	pub static ref SCHEDULER: Mutex<Scheduler> = Mutex::new(Scheduler::new());
}
// TODO: the lifetime handling here is pretty much broken; fix this later.
// The scheduler should be a per-CPU instance and it should not lock,
// because `do_schedule` does not return to release the lock.
pub struct Scheduler {
	pub run_queue: VecDeque<TaskId>,
}

impl Scheduler {
	pub const MIN_TASK_CAP: usize = 16;
	pub fn new() -> Self {
		Self {
			// `try_with_capacity` is still an unstable feature, so use the
			// infallible `with_capacity` with the minimum capacity for now
			run_queue: VecDeque::with_capacity(Self::MIN_TASK_CAP),
		}
	}

	// maybe we should reject inserting a task that is already queued?
	pub fn insert_task(&mut self, tid: TaskId) {
		self.run_queue.push_back(tid);
	}

	pub fn try_remove(&mut self, _tid: TaskId) {
		// try to remove all occurrences of tid in the run_queue; maybe do
		// something special if the task is in the wait queue, but we are not
		// there yet.
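		// a minimal sketch (assuming TaskId implements PartialEq) might be:
		//     self.run_queue.retain(|&t| t != tid);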
		todo!("not implemented");
	}

	// round-robin: pop the front of the run queue, push it back, switch to it
	pub fn do_schedule() {
		// TODO: remove this spinlock, because we should protect the scheduler
		// with irq_save/restore
		if SCHEDULER.is_locked() {
			panic!("scheduler lock has been taken, something wrong");
		}
		let mut lock_guard = SCHEDULER.lock();
		let next_tid = lock_guard
			.run_queue
			.pop_front()
			.expect("empty run queue, how?");
		let next_task = next_tid.get_task_ref_mut();
		let me = Task::current().unwrap();
		lock_guard.run_queue.push_back(next_tid);
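		// the task we popped may be the current one (e.g. it is the only
		// runnable task); in that case there is nothing to switch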
		if me.pid == next_task.pid {
			return;
		}
		// make sure we release the scheduler lock before doing context
		// swap
		drop(lock_guard);
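		// context_swap is expected to save the current register state into
		// me.context and restore next_task.context; we only return here once
		// another task switches back to us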
		unsafe {
			context_swap(
				&(me.context) as *const _ as u64,
				&(next_task.context) as *const _ as u64,
			);
		}
	}

	// like do_schedule, but there is no running context to save
	pub fn kickoff() {
		if SCHEDULER.is_locked() {
			panic!("scheduler lock has been taken, something wrong");
		}
		let mut lock_guard = SCHEDULER.lock();
		let t = lock_guard
			.run_queue
			.pop_front()
			.expect("run queue empty, can't start");
		let first_task = t.get_task_ref_mut();
		lock_guard.run_queue.push_back(t);
		drop(lock_guard);
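		// nothing to save on the boot path, so this switch never returns here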
		unsafe {
			context_swap_to(&(first_task.context) as *const _ as u64);
		}
	}
}
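
// usage sketch (hypothetical call sites; task creation lives in proc::task):
//
//     SCHEDULER.lock().insert_task(tid); // enqueue a runnable task
//     Scheduler::kickoff();              // boot path: start the first task
//     Scheduler::do_schedule();          // later: yield from the running task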