diff options
| author | Tianhao Wang <shrik3@mailbox.org> | 2024-06-06 02:18:51 +0200 |
|---|---|---|
| committer | Tianhao Wang <shrik3@mailbox.org> | 2024-06-11 15:17:14 +0200 |
| commit | 1a0d4d56032b0326e415b8d450c601afa63ecd32 (patch) | |
| tree | c5d97b9b70f0a244e59bc05edee111e3d41d3cd2 | |
| parent | d495a1745c83a8186bf1c6c531177887dd245436 (diff) | |
chore: format asm code
mostly fixing indentation and tab stops:
0. line width is 80 chars
1. tab is 8 chars wide (tabstop = 8)
2. leading indentation and only leading indentations are __hard_tabs__
3. non-leading tabs are expanded into spaces
4. instrs are always indented 1 level, and only 1 level
5. the first operand (if any) starts at the next tabstop after the
instruction, but the tab in between is expanded to spaces (per rule 3)
Signed-off-by: Tianhao Wang <shrik3@mailbox.org>
| -rw-r--r-- | boot/startup-x86_64.s | 194 | ||||
| -rw-r--r-- | src/arch/x86_64/asm/misc.s | 8 | ||||
| -rw-r--r-- | src/arch/x86_64/asm/vectors.s | 68 |
3 files changed, 133 insertions, 137 deletions
diff --git a/boot/startup-x86_64.s b/boot/startup-x86_64.s index bbd7172..06aefd3 100644 --- a/boot/startup-x86_64.s +++ b/boot/startup-x86_64.s @@ -53,36 +53,36 @@ startup: ; EBX: 32-bit physical address of the multiboot information struct we store ; them in global variables for future uses in rust code. TODO place them on ; the stack and pass as parameters to _entry - mov dword [mb_magic], eax - mov dword [mb_info_addr], ebx + mov dword [mb_magic], eax + mov dword [mb_info_addr], ebx ; setup GDT by loading GDT descriptor ; see docs/x86_gdt.txt - lgdt [gdt_80] + lgdt [gdt_80] ; use the 3rd gdt entry for protected mode segmentations - mov eax, 3 * 0x8 - mov ds, ax - mov es, ax - mov fs, ax - mov gs, ax + mov eax, 3 * 0x8 + mov ds, ax + mov es, ax + mov fs, ax + mov gs, ax ; define stack - mov ss, ax - lea esp, init_stack+STACKSIZE + mov ss, ax + lea esp, init_stack+STACKSIZE init_longmode: ; activate address extension (PAE) - mov eax, cr4 - or eax, 1 << 5 - mov cr4, eax + mov eax, cr4 + or eax, 1 << 5 + mov cr4, eax setup_paging: ; zero out the initial page tables (3 x 4K pages in total) - mov edi, pml4 + mov edi, pml4 clear_pt: - mov dword [edi], 0 - add edi, 4 - cmp edi, pt_end - jl clear_pt + mov dword [edi], 0 + add edi, 4 + cmp edi, pt_end + jl clear_pt ; Provisional identical page mapping, using 1G huge page, therefore only 2 ; table levels needed. 
see docs/x86_paging.txt We provide two additional @@ -90,44 +90,44 @@ clear_pt: ; PML4 (Page Map Level 4 / 1st level) ; PML4 entry flag: 0xf = PRESENG | R/W | USER | Write Through - mov eax, pdp0 - or eax, 0xf - mov dword [pml4+0], eax - mov dword [pml4+4], 0 + mov eax, pdp0 + or eax, 0xf + mov dword [pml4+0], eax + mov dword [pml4+4], 0 ; PDPE flags 0x87 = PageSize=1G | USER | R/W | PRESENT - mov eax, 0x0 | 0x83 ; start-address bytes bit [30:31] + flags - mov ebx, 0 ; start-address bytes bit [32:38] - mov ecx, 0 + mov eax, 0x0 | 0x83 ; start-address bytes bit [30:31] + flags + mov ebx, 0 ; start-address bytes bit [32:38] + mov ecx, 0 fill_pdp0: ; fill one single PDP table, with 1G pages, 512 PDPE maps to 512 GB - cmp ecx, MAX_MEM - je fill_pdp0_done - mov dword [pdp0 + 8*ecx + 0], eax ; low bytes - mov dword [pdp0 + 8*ecx + 4], ebx ; high bytes - add eax, 0x40000000 - ; increment high half address on carry (overflow) - adc ebx, 0 - inc ecx - ja fill_pdp0 + cmp ecx, MAX_MEM + je fill_pdp0_done + mov dword [pdp0 + 8*ecx + 0], eax ; low bytes + mov dword [pdp0 + 8*ecx + 4], ebx ; high bytes + add eax, 0x40000000 + ; increment high half address on carry (overflow) + adc ebx, 0 + inc ecx + ja fill_pdp0 fill_pdp0_done: ; set base pointer to PML4 - mov eax, pml4 - mov cr3, eax + mov eax, pml4 + mov cr3, eax activate_long_mode: ; activate Long Mode (for now in compatibility mode) ; select EFER (Extended Feature Enable Register) - mov ecx, 0x0C0000080 + mov ecx, 0x0C0000080 rdmsr - or eax, 1 << 8 ; LME (Long Mode Enable) + or eax, 1 << 8 ; LME (Long Mode Enable) wrmsr ; activate paging - mov eax, cr0 - or eax, 1 << 31 - mov cr0, eax + mov eax, cr0 + or eax, 1 << 31 + mov cr0, eax ; use the 2nd gdt entry (see definition below) ; jump to 64-bit code segment -> full activation of Long Mode - jmp 2 * 0x8 : longmode_start + jmp 2 * 0x8 : longmode_start ; ===================================================================== @@ -141,57 +141,57 @@ longmode_start: ; now we set the 
pagetables for higher half memory ; since we have Provisional paging now, why not using 64bit code? ; the 256th entry of pml4 points to memory from 0xffff_8000_0000_0000 - mov rax, pdp1 + mov rax, pdp1 ; privileged, r/w, present - or rax, 0x3 - mov qword [pml4+256*8], rax + or rax, 0x3 + mov qword [pml4+256*8], rax ; entry 0~63 is an identical mapping with offset 0x8000_0000_0000 ; 1G Page | Privileged | R/W | PRESENT ; TODO this should not be executable - mov rax, 0x0 - or rax, 0x83 - mov rdi, 0 + mov rax, 0x0 + or rax, 0x83 + mov rdi, 0 fill_kvma1: - mov qword [pdp1 + 8*rdi], rax - inc rdi - add rax, 0x40000000 - cmp rdi, 64 - jne fill_kvma1 + mov qword [pdp1 + 8*rdi], rax + inc rdi + add rax, 0x40000000 + cmp rdi, 64 + jne fill_kvma1 ; entry 64~127 is a hole (also some sort of protection) ; entry 128~191 are mapping of the kernel image itself - mov rax, 0x0 - or rax, 0x83 - mov rdi, 128 + mov rax, 0x0 + or rax, 0x83 + mov rdi, 128 fill_kvma2: - mov qword [pdp1 + 8*rdi], rax - inc rdi - add rax, 0x40000000 - cmp rdi, 192 - jne fill_kvma2 + mov qword [pdp1 + 8*rdi], rax + inc rdi + add rax, 0x40000000 + cmp rdi, 192 + jne fill_kvma2 ; done :-) ; clear BSS section for the rust code. - mov rdi, ___BSS_START__ - mov rax, ___BSS_END__ + mov rdi, ___BSS_START__ + mov rax, ___BSS_END__ clear_bss: ; clear the BSS section before going to rust code ; TODO: sanity check start < end, otherwise could be endless loop ; TODO speed this up by clearing 8 bytes at once. Alignment should be taken ; care of.. - mov byte [rdi], 0 - inc rdi - cmp rdi, rax - jne clear_bss + mov byte [rdi], 0 + inc rdi + cmp rdi, rax + jne clear_bss ; enable FPU fninit ; NOTE: must NOT use sse target features for rust compiler, if sse not ; enabled here. ; shift the rsp to high memory mapping: - mov rax, KERNEL_OFFSET, - or rsp, rax + mov rax, KERNEL_OFFSET, + or rsp, rax ; finally go to the rust code! 
- mov rax, _entry - jmp rax + mov rax, _entry + jmp rax ; should not reach below cli @@ -209,49 +209,49 @@ gdt: ; see docs/x86_gdt.txt ; GDT[0] should always be NULL descriptor - dw 0,0,0,0 + dw 0,0,0,0 ; 32-bit code segment descriptor ; limit=0xFFFF, base=0 ; Types: P|Ring0|Code/Data|Exec|NonConforming|Readable|NotAccessed ; Flags: 4K|32-bit|Not Long Mode - dw 0xFFFF - dw 0x0000 - dw 0x9A00 - dw 0x00CF + dw 0xFFFF + dw 0x0000 + dw 0x9A00 + dw 0x00CF ; 64-bit code segment descriptor ; limit=0xFFFF, base=0 ; Types: P|Ring0|Code/Data|Exec|NonConforming|Readable|NotAccessed ; Flags: 4K|-|LongMode|- - dw 0xFFFF - dw 0x0000 - dw 0x9A00 - dw 0x00AF + dw 0xFFFF + dw 0x0000 + dw 0x9A00 + dw 0x00AF ; data segment descriptor ; limit=0xFFFF, base=0 ; Types: Present|Ring0|Code/Data|NoExec|GrowUp|Writable|NotAccessed ; Flags: 4K|32-bit|Not Long Mode - dw 0xFFFF - dw 0x0000 - dw 0x9200 - dw 0x00CF + dw 0xFFFF + dw 0x0000 + dw 0x9200 + dw 0x00CF gdt_80: - dw 4*8 - 1 ; GDT limit=24, 4 GDT entries - 1 - dq gdt ; GDT address + dw 4*8 - 1 ; GDT limit=24, 4 GDT entries - 1 + dq gdt ; GDT address ; multiboot info mb_magic: - dd 0x00000000 + dd 0x00000000 mb_info_addr: - dd 0x00000000 + dd 0x00000000 [SECTION .reserved_0.init_stack] global init_stack:data (init_stack.end - init_stack) init_stack: - resb STACKSIZE + resb STACKSIZE .end: [SECTION .global_pagetable] @@ -260,21 +260,21 @@ init_stack: ; the PML4 table the whole of the first first pdp table (512G) and entry 0, 2 of ; the second pdp table all pages here are 1 GiB huge pages pml4: - resb 4096 - alignb 4096 + resb 4096 + alignb 4096 ; the first PDP covers the lower 512GiB memory pdp0: - resb 4096 - alignb 4096 + resb 4096 + alignb 4096 ; pdp1 does the same but in higher half memory with offset ; 0xffff_8000_0000_0000, i.e. the 256th entry of pml4 pdp1: - resb 4096 - alignb 4096 + resb 4096 + alignb 4096 pt_end: ; reserve 8MiB for frame alloc. 
; (see linker file) ;[SECTION .global_free_page_stack] ;free_page_stack: -; resb 8388608 -; alignb 4096 +; resb 8388608 +; alignb 4096 diff --git a/src/arch/x86_64/asm/misc.s b/src/arch/x86_64/asm/misc.s index fc5aa90..532aacd 100644 --- a/src/arch/x86_64/asm/misc.s +++ b/src/arch/x86_64/asm/misc.s @@ -1,11 +1,7 @@ ; place for misc code, before they find better places to live.. - [GLOBAL _delay] - [SECTION .text] - _delay: - jmp .dummy + jmp .dummy .dummy: - ret - + ret diff --git a/src/arch/x86_64/asm/vectors.s b/src/arch/x86_64/asm/vectors.s index cc7a7b7..8b4992e 100644 --- a/src/arch/x86_64/asm/vectors.s +++ b/src/arch/x86_64/asm/vectors.s @@ -11,29 +11,29 @@ ; TODO: use a interrupt stack instead of the current stack. idt: ; reserve space for 256x idt entries (16 bytes each) -resb 16 * 256 + resb 16 * 256 [SECTION .data.idt_descr] idt_descr: - dw 256*8 - 1 ; 256 entries - dq idt + dw 256*8 - 1 ; 256 entries + dq idt [SECTION .text.vectors] %macro trap_without_err 1 align 16 vector_%1: - push 0 - push rax - mov al, %1 - jmp vector_body + push 0 + push rax + mov al, %1 + jmp vector_body %endmacro %macro trap_with_err 1 align 16 vector_e_%1: - push rax - mov al, %1 - jmp vector_body + push rax + mov al, %1 + jmp vector_body %endmacro vectors_start: @@ -84,38 +84,38 @@ vector_body: ; GCC expects the direction flag to be 0 cld ; save volatile registers - push rcx - push rdx - push rdi - push rsi - push r8 - push r9 - push r10 - push r11 + push rcx + push rdx + push rdi + push rsi + push r8 + push r9 + push r10 + push r11 ; the generated wrapper only gives us 8 bits, mask the rest - and rax, 0xff + and rax, 0xff ; the first parameter is the interrupt (exception) number - mov rdi, rax - ; the second parameter is a pointer to the trap frame - mov rsi, rsp + mov rdi, rax + ; the second parameter is a pointer to the trap frame + mov rsi, rsp ; For a long jump, we need to put the (large) address in an register ; here reusing one of the caller clobbered regs (pushed above) 
- mov r11, trap_gate - call r11 + mov r11, trap_gate + call r11 ; restore volatile registers - pop r11 - pop r10 - pop r9 - pop r8 - pop rsi - pop rdi - pop rdx - pop rcx + pop r11 + pop r10 + pop r9 + pop r8 + pop rsi + pop rdi + pop rdx + pop rcx - pop rax + pop rax ; "pop" the error code - add rsp, 8 + add rsp, 8 ; done iretq |
