// src/mm/mod.rs

use crate::defs::*;
use crate::io::*;
use crate::machine::multiboot;
use alloc::alloc::{alloc, dealloc, Layout};
use alloc::vec::Vec;
use core::ops::Range;
use linked_list_allocator::LockedHeap;

pub mod pma;

use lazy_static::lazy_static;
use spin::Mutex;

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

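// NOTE: `KStackAllocator::new` allocates its pool Vec from the global heap and
// lazy_static only runs it on first use, so KSTACK_ALLOCATOR must not be
// dereferenced before init() has set up ALLOCATOR.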
lazy_static! {
	pub static ref KSTACK_ALLOCATOR: Mutex<KStackAllocator> = Mutex::new(KStackAllocator::new());
}

/// half measure: simply initialize the linked-list heap allocator with the
/// largest available physical memory block
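///
/// A minimal usage sketch (not part of this module; the call site and the
/// module path `crate::mm` are assumptions based on the file location):
///
/// ```ignore
/// // once, early in kernel start-up, after the multiboot info is available:
/// crate::mm::init();
/// // from here on the global allocator is live and `alloc` collections work:
/// let mut v: alloc::vec::Vec<u64> = alloc::vec::Vec::new();
/// v.push(42);
/// ```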
pub fn init() {
	let mbi = multiboot::get_mb_info().unwrap();
	let mmapinfo = unsafe { mbi.get_mmap() }.unwrap();
	let buf_start = mmapinfo.mmap_addr;
	let buf_len = mmapinfo.mmap_length;
	let buf_end = buf_start + buf_len;
	let mut curr = buf_start as u64;
	// initialize the heap allocator with the largest physical memory block
	let mut largest_phy_range: Option<Range<u64>> = None;
	while curr < buf_end as u64 {
		let mblock = unsafe { &*(curr as *const multiboot::MultibootMmap) };
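		// the mmap entry's `size` field does not count the 4-byte size field
		// itself, so the next entry starts at `size + 4` bytes from here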
		curr += mblock.size as u64;
		curr += 4;
		if mblock.mtype != multiboot::MultibootMmap::MTYPE_RAM {
			continue;
		}
		if mblock.get_end() <= unsafe { pmap_kernel_start() } {
			continue;
		}
		// TODO early break if the array is already full
		let mut r = mblock.get_range();
		if r.contains(&unsafe { pmap_kernel_end() }) {
			assert!(
				r.contains(&unsafe { pmap_kernel_start() }),
				"FATAL: kernel physical map cross physical blocks, how?"
			);
			r.start = unsafe { pmap_kernel_end() };
		}
		// TODO: this is pretty ugly; consider (1) implementing a length() helper for
		// Range and (2) taking a reference instead of copying.
		match largest_phy_range {
			None => largest_phy_range = Some(r),
			Some(ref lr) => {
				if (r.end - r.start) > (lr.end - lr.start) {
					largest_phy_range = Some(r);
				}
			}
		}
	}

	let pr = &largest_phy_range.expect("Can't find any available physical block");
	assert!((pr.end - pr.start) >= Mem::MIN_PHY_MEM, "TOO LITTLE RAM ...");
	// init heap allocator on id map
	unsafe {
		ALLOCATOR.lock().init(
			P2V(pr.start).unwrap() as *mut u8,
			(pr.end - pr.start) as usize,
		);
	}
	println!(
		"[init] mm: heap alloc initialized @ {:#X} - {:#X}",
		P2V(pr.start).unwrap(),
		P2V(pr.end).unwrap()
	);
}

/// populate the physical frame pool. This conflicts with the kernel heap allocator (kmalloc),
/// which operates on the id map regions.
pub fn _init_pma() {
	todo!()
}

/// wrapper around the global heap allocator that caches freed kernel stacks in
/// a small pool
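///
/// A minimal usage sketch (doc-only; it assumes the heap has already been set
/// up by `init()`, since both the pool Vec and the stacks themselves come from
/// the global allocator):
///
/// ```ignore
/// let stack = unsafe { KSTACK_ALLOCATOR.lock().allocate() };
/// // ... use the KERNEL_STACK_SIZE bytes starting at `stack` as a task's
/// // kernel stack ...
/// unsafe { KSTACK_ALLOCATOR.lock().free(stack) };
/// ```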
pub struct KStackAllocator {
	pool: Vec<u64>,
}

/// TODO: the heap allocator is primitive at the moment and may fail to allocate
/// a new kernel stack (64K here) due to fragmentation. It may be a good idea to
/// reserve some memory during system init to guarantee that we can at least
/// allocate a few kernel stacks even when the heap is fragmented.
impl KStackAllocator {
	const KSTACK_ALLOC_POOL_CAP: usize = 16;
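	// SAFETY: `from_size_align_unchecked` skips the checks that
	// `Layout::from_size_align` performs, so KERNEL_STACK_SIZE must be a
	// non-zero power of two (a 64K stack size satisfies this); using the stack
	// size as its own alignment keeps every stack naturally aligned.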
	const KSTACK_LAYOUT: Layout = unsafe {
		Layout::from_size_align_unchecked(
			Mem::KERNEL_STACK_SIZE as usize,
			Mem::KERNEL_STACK_SIZE as usize,
		)
	};

	pub fn new() -> Self {
		let p = Vec::with_capacity(Self::KSTACK_ALLOC_POOL_CAP);
		Self { pool: p }
	}

	/// unsafe because the underlying allocation may fail (same as populate); on
	/// failure the returned address is null (0)
	pub unsafe fn allocate(&mut self) -> u64 {
		match self.pool.pop() {
			Some(addr) => addr,
			None => alloc(Self::KSTACK_LAYOUT) as u64,
		}
	}

	/// unsafe because you must make sure you give back something the allocator gave
	/// you. Otherwise you break the kernel heap allocator.
	pub unsafe fn free(&mut self, addr: u64) {
		if self.pool.len() < Self::KSTACK_ALLOC_POOL_CAP {
			self.pool.push(addr);
		} else {
			dealloc(addr as *mut u8, Self::KSTACK_LAYOUT);
		}
	}

	/// unsafe because this could OOM if you stress the allocator too much
	/// (although unlikely)
	pub unsafe fn populate(&mut self) {
		for _ in 0..Self::KSTACK_ALLOC_POOL_CAP {
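			// if the heap is exhausted, `alloc` returns a null pointer and a 0
			// address ends up in the pool; this is the OOM case the doc
			// comment above warns about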
			self.pool.push(alloc(Self::KSTACK_LAYOUT) as u64);
		}
	}
}