use crate::defs::*;
use crate::io::*;
use crate::machine::multiboot::MultibootMmap;
use core::ops::Range;
use core::slice;
extern "C" {
fn ___FREE_PAGE_STACK__();
}
// disabled for now
// lazy_static! {
//     pub static ref GLOBAL_PMA: Mutex<pma::PageStackAllocator> =
//         Mutex::new(pma::PageStackAllocator::new());
// }
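// If re-enabled, callers would take the lock before touching the allocator,
// e.g. (hypothetical): let frame = GLOBAL_PMA.lock().alloc_page();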
/// LIFO stack of free physical 4 KiB frames: `page_stack[0..head]` holds the
/// physical addresses of frames currently free. There should only be one
/// global instance of this.
pub struct PageStackAllocator {
    page_stack: &'static mut [u64],
    size: usize,
    head: usize,
}
impl PageStackAllocator {
    // 0x100000 entries of 4 KiB frames cover 4 GiB of physical memory
    const STACK_SIZE: usize = 0x100000;

    pub fn new() -> Self {
        Self {
            // Translate the linker-reserved physical region into the
            // kernel's virtual address space and use it as the stack storage.
            page_stack: unsafe {
                slice::from_raw_parts_mut(
                    P2V(___FREE_PAGE_STACK__ as u64).unwrap() as *mut u64,
                    Self::STACK_SIZE,
                )
            },
            size: Self::STACK_SIZE,
            head: 0,
        }
    }
    /// Push a freed frame address onto the free page stack; returns false if
    /// the stack is full. MUST be atomic or bad things happen...
    pub fn free_page(&mut self, addr: u64) -> bool {
        if self.head >= self.size {
            return false;
        }
        self.page_stack[self.head] = addr;
        self.head += 1;
        true
    }
    /// Pop a free frame address off the stack, or None if the stack is empty.
    pub fn alloc_page(&mut self) -> Option<u64> {
        if self.head == 0 {
            return None;
        }
        self.head -= 1;
        Some(self.page_stack[self.head])
    }
    /// Push every 4 KiB page whose start address lies in `r` onto the stack;
    /// returns the number of pages inserted. Note that only the page's start
    /// address is checked against the range, not its end.
    pub fn insert_range(&mut self, r: &Range<u64>) -> u64 {
        let mut inserted = 0;
        let mut page = roundup_4k(r.start);
        while r.contains(&page) {
            if !self.free_page(page) {
                break;
            }
            inserted += 1;
            page += 0x1000;
        }
        inserted
    }
}
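
// Usage sketch (not part of the original source): seed the allocator from a
// usable RAM range, then serve and return one frame. The concrete range below
// is made up; in this kernel it would come from the multiboot memory map.
#[allow(dead_code)]
fn pma_usage_sketch() {
    let mut pma = PageStackAllocator::new();
    // hypothetical usable RAM region, 1 MiB..8 MiB
    let inserted = pma.insert_range(&(0x10_0000u64..0x80_0000));
    if let Some(frame) = pma.alloc_page() {
        // ... map or use the frame, then push it back onto the stack
        assert!(pma.free_page(frame));
    }
    let _ = inserted;
}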