From f4b50dd826b81295dc9628b655fc5f360445230b Mon Sep 17 00:00:00 2001 From: Tianhao Wang Date: Mon, 3 Jun 2024 21:31:24 +0200 Subject: mm: provide high memory kernel mapping in asm code. we use the first pml4 entry (+one pdp table) to map phy 0~512G to virt 0~512G for init code. This doesn't change. For the kernel to work in higher half memory, we also need to create mapping for it. We take the 256th entry of pml4 entry (hence one additional pdp table). Entry 0~63 are mapped to the physical memory (with offset 0xffff_8000_0000_0000) Entry 64~127 are not used Entry 128~191 are mapped to the kernel image (text and code) (with offset 0xffff_8020_0000_0000) details in docs/mem_layout.txt --- defs/x86_64-linker.ld | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'defs') diff --git a/defs/x86_64-linker.ld b/defs/x86_64-linker.ld index ac4a86c..6be14cb 100644 --- a/defs/x86_64-linker.ld +++ b/defs/x86_64-linker.ld @@ -30,7 +30,7 @@ SECTIONS header_end = .; } - PROVIDE (___KERNEL_START__ = .); + PROVIDE (___KERNEL_PM_START__ = .); .text : { *(".text") @@ -71,10 +71,10 @@ SECTIONS .bss : { - PROVIDE (___BSS_START__ = .); + PROVIDE (___BSS_PM_START__ = .); *(".bss") *(".bss.*") - PROVIDE (___BSS_END__ = .); + PROVIDE (___BSS_PM_END__ = .); } /* global page table for 64-bit long mode */ @@ -95,5 +95,5 @@ SECTIONS *("..global_free_page_stack") } . = ALIGN(4096); - PROVIDE (___KERNEL_END__ = .); + PROVIDE (___KERNEL_PM_END__ = .); } -- cgit v1.2.3-70-g09d2