arch/intel64: format asm files, remove unused debug macros

use tabs at the beginning of lines, remove C++ comments, fix some tabs to spaces

Signed-off-by: p-szafonimateusz <p-szafonimateusz@xiaomi.com>
p-szafonimateusz 2024-02-07 14:48:04 +01:00 committed by Xiang Xiao
parent 1b7ddab317
commit f76017ca8a
4 changed files with 565 additions and 633 deletions


@@ -26,45 +26,13 @@
#include <arch/irq.h>
#include "x86_64_internal.h"
.file "intel64_fullcontextrestore.S"
/**************************************************************************
* Pre-processor Definitions
**************************************************************************/
/**************************************************************************
* Public Data
**************************************************************************/
/****************************************************************************
* Macros
****************************************************************************/
/* Trace macros, use like trace 'i' to print char to serial port. */
.macro chout, addr, ch
#ifdef CONFIG_DEBUG_FEATURES
mov $\addr, %dx
mov $\ch, %al
out %al, %dx
#endif
.endm
.macro trace, ch
#ifdef CONFIG_DEBUG_FEATURES
push %eax
push %edx
chout 0x3f8, \ch
pop %edx
pop %eax
#endif
.endm
.file "intel64_fullcontextrestore.S"
/**************************************************************************
* Public Functions
**************************************************************************/
.text
/**************************************************************************
* Name: x86_64_fullcontextrestore
@@ -74,84 +42,85 @@
*
**************************************************************************/
.globl x86_64_fullcontextrestore
.type x86_64_fullcontextrestore, @function
x86_64_fullcontextrestore:
/* The pointer to the register save array in RDI. */
/* Disable interrupts now (the correct RFLAGS will be restored before we
 * return).
 */
cli
/* Create an interrupt stack frame for the final iret.
*
*
* IRET STACK
* ---------------
* RSP Before ->
* SS
* RSP
* RFLAGS
* CS
* RSP After -> RIP
*
*/
movq (8*REG_SS)(%rdi), %rbx
push %rbx
movq (8*REG_RSP)(%rdi), %rbx
push %rbx
movq (8*REG_RFLAGS)(%rdi), %rbx
push %rbx
movq (8*REG_CS)(%rdi), %rbx
push %rbx
movq (8*REG_RIP)(%rdi), %rbx
push %rbx
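For reference, this is the frame that the final iretq consumes, as a hedged C sketch (the struct and field names are illustrative, not from the NuttX headers); iretq pops these five quadwords from the top of the stack in this order:

#include <stdint.h>

/* Sketch of the interrupt return frame built by the pushes above.
 * The first field sits at the top of the stack (lowest address).
 */
struct iretq_frame
{
  uint64_t rip;    /* popped first: where execution resumes */
  uint64_t cs;     /* code segment selector */
  uint64_t rflags; /* restored flags, including the interrupt enable bit */
  uint64_t rsp;    /* stack pointer for the restored context */
  uint64_t ss;     /* stack segment selector */
};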
/* Save the value of RDI on the stack too */
movq (8*REG_RDI)(%rdi), %rbx
push %rbx
/* Now restore the remaining registers */
movq (8*REG_RSI)(%rdi), %rsi
movq (8*REG_RDX)(%rdi), %rdx
movq (8*REG_RCX)(%rdi), %rcx
movq (8*REG_R8 )(%rdi), %r8
movq (8*REG_R9 )(%rdi), %r9
movq (8*REG_R15)(%rdi), %r15
movq (8*REG_R14)(%rdi), %r14
movq (8*REG_R13)(%rdi), %r13
movq (8*REG_R12)(%rdi), %r12
movq (8*REG_R11)(%rdi), %r11
movq (8*REG_R10)(%rdi), %r10
movq (8*REG_RBP)(%rdi), %rbp
movq (8*REG_RBX)(%rdi), %rbx
movq (8*REG_RAX)(%rdi), %rax
/* Restore the data segment register. I think there is an issue that will
 * need to be addressed here at some time: If the register save area is in
 * one data segment and the stack is in another, then the above would not
 * work (and, conversely, if they are in the same data segment, the
 * following is unnecessary and redundant).
 */
mov (8*REG_DS)(%rdi), %ds
/* mov (8*REG_ES)(%rdi), %es // Not used in 64 bit
 * mov (8*REG_GS)(%rdi), %gs // Disabled, otherwise we will destroy MSR_GS_BASE
 * mov (8*REG_FS)(%rdi), %fs // Disabled, otherwise we will destroy MSR_FS_BASE
 * XXX: Should use wrgsbase and wrfsbase to restore the gs and fs registers
 */
/* restore xmm registers */
fxrstorq (%rdi)
/* Restore the correct value of RDI and then return */
popq %rdi
iretq
.size x86_64_fullcontextrestore, . - x86_64_fullcontextrestore
.end
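The (8*REG_xx)(%rdi) operands index the register save area as an array of 64-bit slots. A minimal C sketch of the same access, assuming the REG_* indices from arch/irq.h (the helper itself is hypothetical):

#include <stdint.h>
#include <arch/irq.h>  /* for the REG_* indices */

/* (8*REG_RSP)(%rdi) in the assembly is simply regs[REG_RSP] in C. */
static inline uint64_t saved_rsp(const uint64_t *regs)
{
  return regs[REG_RSP];
}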


@@ -26,7 +26,7 @@
#include <arch/arch.h>
#include <arch/multiboot2.h>
.file "intel64_head.S"
.file "intel64_head.S"
/****************************************************************************
* Pre-processor definitions
@@ -40,115 +40,114 @@
* heap continues from there until the end of memory. See g_idle_topstack below.
*/
/****************************************************************************
* Public Symbols
****************************************************************************/
.global __pmode_entry /* The 32bit protected mode entry */
.global __nxstart
.global __enable_sse_avx
.global __enable_pcid
.global __revoke_low_memory
.global nx_start /* nx_start is defined elsewhere */
.global up_lowsetup /* up_lowsetup is defined elsewhere */
.global g_idle_topstack /* The end of the idle stack, the start of the heap */
.global mb_info_struct
.global mb_magic
/* These are the page tables */
.global pdpt_low
.global pd_low
.global pt_low
.global ist64_low
/* These are the GDT */
.global gdt64_low
.global gdt64_ist_low
.global gdt64_low_end
/****************************************************************************
* The multiboot2 header
****************************************************************************/
.set HEADER_LENGTH, header_end - header_start
.set CHECKSUM, -(MULTIBOOT2_HEADER_MAGIC + MULTIBOOT_ARCHITECTURE_I386 + HEADER_LENGTH)
.section ".multiboot", "a"
.align 8
.section ".multiboot", "a"
.align 8
header_start:
#ifdef CONFIG_ARCH_MULTIBOOT2
.long MULTIBOOT2_HEADER_MAGIC
.long MULTIBOOT_ARCHITECTURE_I386
.long HEADER_LENGTH
.long CHECKSUM
/* multiboot tags go here */
.short MULTIBOOT_HEADER_TAG_INFORMATION_REQUEST
.short 0 /* flags, none set */
.long 16 /* size, including itself (short + short + long + 2 longs) */
.long MULTIBOOT_TAG_TYPE_EFI64
.long MULTIBOOT_TAG_TYPE_FRAMEBUFFER
.short MULTIBOOT_HEADER_TAG_END
.short 0 /* flags, none set */
.long 8 /* size, including itself (short + short + long) */
#endif
header_end:
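The CHECKSUM value makes the first four 32-bit header words wrap to zero, which is how a multiboot2 loader validates the header. A hedged C sketch of that check (the function name is hypothetical):

#include <stdint.h>

/* magic + architecture + header_length + checksum == 0 (mod 2^32) */
static int mb2_header_ok(const uint32_t hdr[4])
{
  return (uint32_t)(hdr[0] + hdr[1] + hdr[2] + hdr[3]) == 0;
}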
.code16
.section ".realmode", "ax"
/****************************************************************************
* Name: __reset_entry
*
* Description:
* Real mode entry point.
*
****************************************************************************/
.type __reset_entry, @function
__reset_entry:
/* Load a GDT for protected mode */
movl $loader_gdt_ptr, %ebx
lgdtl (%ebx)
/* enable protected mode in CR0 */
mov %cr0, %eax
or $X86_CR0_PE, %al
mov %eax, %cr0
/* Long jump into protected mode. Hardcode the address. */
ljmpl $0x8, $0x100000
/* Loader GDT and GDTR */
.align(16)
.global loader_gdt
loader_gdt:
.quad 0
.quad 0x00cf9a000000ffff
.quad 0x00cf92000000ffff
loader_gdt_ptr:
.short loader_gdt_ptr - loader_gdt - 1
.long loader_gdt
.size __reset_entry, . - __reset_entry
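The loader_gdt quadwords encode flat 4GiB ring-0 segments. A C sketch decoding the base and limit fields of such a descriptor, assuming the standard legacy GDT bit layout (the helper is illustrative):

#include <stdint.h>

static void gdt_decode(uint64_t d, uint32_t *base, uint32_t *limit)
{
  *base  = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
  *limit = (uint32_t)((d & 0xffff) | (((d >> 48) & 0xf) << 16));
}

/* For 0x00cf9a000000ffff: base = 0, limit = 0xfffff in 4KiB units
 * (so the segment spans 4GiB), access byte 0x9a = present ring-0 code.
 */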
/****************************************************************************
* .text
****************************************************************************/
.code32
.section ".loader.text", "ax"
/****************************************************************************
* Name: __pmode_entry
@@ -159,111 +158,108 @@ loader_gdt_ptr:
*
****************************************************************************/
start32_0:
mov $0x10, %ax
mov %ax, %ss
mov %ax, %ds
.type __pmode_entry, @function
__pmode_entry:
start32:
#ifdef CONFIG_ARCH_MULTIBOOT2
movl %ebx, mb_info_struct
movl %eax, mb_magic
#endif
/* initialize rest of the page directory */
lea pd_low, %edi
lea pt_low, %esi
/* Populate the lower 4GB as non-present.
 * for ecx = 0...512 * 4 : Loop and set up the page directories
 */
mov $0x800, %ecx /* 512 * 4 */
epd_loop:
mov %esi, %edx
or $(X86_PAGE_WR | X86_PAGE_PRESENT), %edx
mov %edx, 0(%edi)
add $(X86_PAGE_ENTRY_SIZE), %edi
/* for ebx = 0...1024: Loop and clear the page table of each page directory */
mov $1024, %ebx
ept_loop:
movl $0x0, 0(%esi)
add $4, %esi
/* end for ebx */
dec %ebx
jnz ept_loop
/* end for ecx */
dec %ecx
jnz epd_loop
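The two loops above amount to the following hedged C sketch: each of the 2048 page-directory entries (4 directories x 512 entries, covering 4GiB) is pointed at its own zeroed 4KiB page table, marked writable and present. The function is illustrative; the macro names come from the assembly:

#include <stdint.h>

static void init_low_tables(uint64_t *pd, uint64_t *pt)
{
  int i;
  int j;

  for (i = 0; i < 512 * 4; i++)     /* the ecx loop: 0x800 entries */
    {
      pd[i] = (uintptr_t)pt | X86_PAGE_WR | X86_PAGE_PRESENT;

      for (j = 0; j < 512; j++)     /* the ebx loop clears 4KiB */
        {
          *pt++ = 0;                /* 512 x 8 bytes = one page table */
        }
    }
}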
/* Temporarily populate the lower 128MB with a 1:1 mapping */
lea pd_low, %edi
mov $(X86_PAGE_GLOBAL | X86_PAGE_WR | X86_PAGE_PRESENT | X86_PAGE_HUGE), %eax
/* for ecx = 0...64 : Loop and set up 64x 2MB page directory entries */
mov $64, %ecx
pd_loop:
mov %eax, 0(%edi)
add $(HUGE_PAGE_SIZE), %eax
add $(X86_PAGE_ENTRY_SIZE), %edi
/* end for ecx */
dec %ecx
jnz pd_loop
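In C terms, pd_loop identity-maps the first 128MiB with 64 two-megabyte huge pages (64 x 2MiB = 128MiB). A hedged sketch using the same macro names:

#include <stdint.h>

static void map_low_128mb(uint64_t *pd)
{
  uint64_t entry = X86_PAGE_GLOBAL | X86_PAGE_WR | X86_PAGE_PRESENT |
                   X86_PAGE_HUGE;
  int i;

  for (i = 0; i < 64; i++)
    {
      pd[i]  = entry;              /* physical address + flags */
      entry += HUGE_PAGE_SIZE;     /* advance one 2MiB chunk */
    }
}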
/* Populate the 1GB after the 4GB boundary with a global mapping to the
 * kernel code. This maps the lower 1GB to 4GB~5GB.
 */
lea pdpt_low, %edi
mov $(X86_PAGE_GLOBAL | X86_PAGE_WR | X86_PAGE_PRESENT | X86_PAGE_HUGE), %eax
mov $0x4, %ecx
mov %eax, 0(%edi, %ecx, X86_PAGE_ENTRY_SIZE)
/* Enable PAE */
mov %cr4, %eax
or $(X86_CR4_PAE | X86_CR4_PGE), %eax
mov %eax, %cr4
/* Load the 4-level page table.
 * Levels 1 and 2 were preset at build time in assembly for this loading
 * process. 4KiB page tables are used and the kernel is mapped to 1GB HiMem.
 */
lea pml4, %eax
mov %eax, %cr3
movl $MSR_MTRR_DEF_TYPE, %ecx
rdmsr
or $MTRR_ENABLE, %eax
wrmsr
movl $MSR_EFER, %ecx
rdmsr
or $EFER_LME, %eax
wrmsr
/* Enable paging related bits in CR0 */
mov $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
mov %eax, %cr0
/* Enable FGSBASE */
mov %cr4, %eax
or $X86_CR4_FGSBASE, %eax
mov %eax, %cr4
/* Load a GDT with 64-bit mode set */
lgdt gdt64_ptr
/* Long jump into 64 bit mode, updating cs to new GDT */
ljmpl $(X86_GDT_CODE_SEL), $start64
.code64
start64:
/* Set segment registers for proper iret, etc. operation */
mov $(X86_GDT_DATA_SEL), %ax
mov %ax, %ss
mov %ax, %ds
@@ -271,7 +267,7 @@ start64:
mov %ax, %fs
mov %ax, %gs
/* Finally, we can start the OS */
movabs $__nxstart, %rbx
jmp *%rbx
.size __pmode_entry, . - __pmode_entry
@@ -288,9 +284,11 @@ start64:
.type __nxstart, @function
__nxstart:
/* We are now in high memory; the lower 128MB memory mapping will be
 * revoked in lowsetup.
 */
/* clear out bss section */
movabs $_sbss, %rbx
movabs $_ebss, %rdx
clear_bss:
@@ -299,7 +297,7 @@ clear_bss:
cmp %rbx, %rdx
jne clear_bss
/* Properly set up RSP to the idle stack */
movabs $g_idle_topstack, %rbx
mov (%rbx), %rsp
@@ -318,29 +316,37 @@ hang:
jmp hang
.size __nxstart, . - __nxstart
/****************************************************************************
* Name: __revoke_low_memory
*
* Description:
* Revoke the lower 128MB memory mapping
*
****************************************************************************/
.type __revoke_low_memory, @function
__revoke_low_memory:
/* Revoke the lower 128MB memory mapping */
lea pd_low, %edi
lea pt_low, %esi
/* for ecx = 0...64 : Loop and set up 64x 2MB page directory entries */
mov $64, %ecx
npd_loop:
mov %esi, %edx
or $(X86_PAGE_WR | X86_PAGE_PRESENT), %edx
mov %edx, 0(%edi)
add $(PAGE_SIZE), %esi
add $(X86_PAGE_ENTRY_SIZE), %edi
/* end for ecx */
dec %ecx
jnz npd_loop
ret
.size __revoke_low_memory, . - __revoke_low_memory
/****************************************************************************
* Name: __enable_sse_avx
@ -350,28 +356,28 @@ npd_loop:
*
****************************************************************************/
.type __enable_sse_avx, @function
__enable_sse_avx:
/* Enable SSE */
mov %cr0, %rax
mov $(X86_CR0_EM), %rbx
not %rbx
and %rbx, %rax
or $(X86_CR0_MP), %rax
mov %rax, %cr0
/* Enable Saving XMM context */
mov %cr4, %rax
or $(X86_CR4_OSXFSR | X86_CR4_XMMEXCPT), %rax
mov %rax, %cr4
/* Set up MXCSR, masking all SSE precision exceptions */
ldmxcsr mxcsr_mem
ret
.size __enable_sse_avx, . - __enable_sse_avx
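The MXCSR value 0x1f80 loaded above sets all six SSE exception mask bits (IM, DM, ZM, OM, UM, PM, bits 7-12) and leaves rounding at round-to-nearest. A small C sketch of that bit pattern (names illustrative):

#include <stdint.h>

#define MXCSR_ALL_EXCEPTIONS_MASKED 0x1f80u  /* bits 7..12 */

static int mxcsr_masks_everything(uint32_t mxcsr)
{
  return (mxcsr & MXCSR_ALL_EXCEPTIONS_MASKED) ==
         MXCSR_ALL_EXCEPTIONS_MASKED;
}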
/****************************************************************************
* Name: __enable_pcid
@ -381,97 +387,95 @@ __enable_sse_avx:
*
****************************************************************************/
.type __enable_pcid, @function
__enable_pcid:
/* Enable PCID (FGSBASE was already enabled in __pmode_entry) */
mov %cr4, %rax
or $X86_CR4_PCIDE, %rax
mov %rax, %cr4
ret
.size __enable_pcid, . - __enable_pcid
/****************************************************************************
* .data
****************************************************************************/
.section ".loader.data", "ax"
.section ".loader.data", "ax"
/* IST for 64 bit long mode will be filled in up_irq */
.align(16)
ist64_low:
.long 0
.quad 0xdeadbeefdeadbee0
.quad 0xdeadbeefdeadbee1
.quad 0xdeadbeefdeadbee2
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.word 0
/* GDT for 64 bit long mode */
.align(16)
gdt64_low:
.quad 0
.quad X86_GDT_CODE64_ENTRY
.quad X86_GDT_DATA_ENTRY
.quad X86_GDT_CODE32_ENTRY
.quad X86_GDT_DATA_ENTRY
.quad X86_GDT_CODE64_ENTRY
gdt64_ist_low:
.quad 0x0 /* TSS segment low */
.quad 0x0 /* TSS segment high */
gdt64_low_end:
gdt64_ptr:
.short gdt64_low_end - gdt64_low - 1
.long gdt64_low
mxcsr_mem:
.long 0x00001f80
.align(PAGE_SIZE)
pml4:
.quad pdpt_low + X86_PAGE_PRESENT + X86_PAGE_WR
.align(PAGE_SIZE)
pdpt_low:
.quad pd_low + X86_PAGE_PRESENT + X86_PAGE_WR
.quad pd_2_low + X86_PAGE_PRESENT + X86_PAGE_WR
.quad pd_3_low + X86_PAGE_PRESENT + X86_PAGE_WR
.quad pd_4_low + X86_PAGE_PRESENT + X86_PAGE_WR
.fill X86_NUM_PAGE_ENTRY - 4, X86_PAGE_ENTRY_SIZE, 0
.align(PAGE_SIZE)
pd_low:
.fill X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
.align(PAGE_SIZE)
pd_2_low:
.fill X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
.align(PAGE_SIZE)
pd_3_low:
.fill X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
.align(PAGE_SIZE)
pd_4_low:
.fill X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
.align(PAGE_SIZE)
pt_low:
.fill X86_NUM_PAGE_ENTRY * X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
.fill X86_NUM_PAGE_ENTRY * X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
.fill X86_NUM_PAGE_ENTRY * X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
.fill X86_NUM_PAGE_ENTRY * X86_NUM_PAGE_ENTRY, X86_PAGE_ENTRY_SIZE, 0
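With the pml4 -> pdpt_low -> pd_* -> pt_low chain laid out above, each level consumes 9 bits of the virtual address (512 entries per 4KiB table) plus a 12-bit page offset. A hedged C sketch of the index split (the struct and helper are illustrative):

#include <stdint.h>

struct pt_indices
{
  unsigned pml4;   /* bits 39..47 */
  unsigned pdpt;   /* bits 30..38 */
  unsigned pd;     /* bits 21..29 */
  unsigned pt;     /* bits 12..20 */
  unsigned off;    /* bits  0..11 */
};

static struct pt_indices vaddr_split(uint64_t va)
{
  struct pt_indices ix;

  ix.pml4 = (va >> 39) & 0x1ff;
  ix.pdpt = (va >> 30) & 0x1ff;
  ix.pd   = (va >> 21) & 0x1ff;
  ix.pt   = (va >> 12) & 0x1ff;
  ix.off  = va & 0xfff;
  return ix;
}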


@@ -26,42 +26,15 @@
#include <arch/irq.h>
#include "x86_64_internal.h"
.file "intel64_saveusercontext.S"
/**************************************************************************
* Pre-processor Definitions
**************************************************************************/
/****************************************************************************
* Macros
****************************************************************************/
/* Trace macros, use like trace 'i' to print char to serial port. */
.macro chout, addr, ch
#ifdef CONFIG_DEBUG_FEATURES
mov $\addr, %dx
mov $\ch, %al
out %al, %dx
#endif
.endm
.macro trace, ch
#ifdef CONFIG_DEBUG_FEATURES
push %eax
push %edx
chout 0x3f8, \ch
pop %edx
pop %eax
#endif
.endm
.file "intel64_saveusercontext.S"
/**************************************************************************
* .text
**************************************************************************/
.text
.code64
/**************************************************************************
* Name: up_saveusercontext
*
@@ -84,66 +57,66 @@
*
**************************************************************************/
.globl up_saveusercontext
.type up_saveusercontext, @function
up_saveusercontext:
/* callee saved regs */
movq %rbx, (8*REG_RBX)(%rdi)
movq %r12, (8*REG_R12)(%rdi)
movq %r13, (8*REG_R13)(%rdi)
movq %r14, (8*REG_R14)(%rdi)
movq %r15, (8*REG_R15)(%rdi)
/* save xmm registers */
fxsaveq (%rdi)
/* Save the value of SP as it will be at the time of the IRET that will
 * appear to be the return from this function.
 *
 * CURRENT STACK IRET STACK
 * ------------------------------ -----------------
 * RIP
 * CS
 * RFLAGS
 * RSP
 * RSP -> Return address SS
 * Argument Alignment (16bytes)
 *
 */
leaq 8(%rsp), %rcx
movq %rcx, (8*REG_RSP)(%rdi)
/* Fetch the PC from the stack and save it in the save block */
movq 0(%rsp), %rcx
movq %rcx, (8*REG_RIP)(%rdi)
/* Save the frame pointer */
movq %rbp, (8*REG_RBP)(%rdi)
/* Save RAX=1. This will be the "apparent" return value from this
 * function when the context is switched back to this thread. The non-zero
 * return value is the indication that we have been resumed.
 */
movq $1, (8*REG_RAX)(%rdi)
/* Get and save the interrupt state */
pushf
pop %rcx
movq %rcx, (8*REG_RFLAGS)(%rdi)
/* And return 0 -- The zero return value is the indication that this
 * is the original, "true" return from the function.
 *
 * 'ret' will remove the RIP from the top of the stack.
 */
xor %rax, %rax
ret
.size up_saveusercontext, . - up_saveusercontext
.end
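up_saveusercontext() thus behaves like setjmp(): it returns 0 on the original call and 1 when the thread is later resumed through x86_64_fullcontextrestore(). A hedged usage sketch (the flat uint64_t* prototype is an assumption; NuttX wraps the save area in its own context structure):

#include <stdint.h>

extern int up_saveusercontext(uint64_t *regs);

static void example_suspend(uint64_t *regs)
{
  if (up_saveusercontext(regs) == 0)
    {
      /* Original return: the state is captured, switch away here. */
    }
  else
    {
      /* Non-zero: we were resumed via x86_64_fullcontextrestore(regs). */
    }
}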


@@ -25,200 +25,188 @@
#include <nuttx/config.h>
#include <arch/irq.h>
.file "broadwell_vectors.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
.file "intel64_vectors.S"
/****************************************************************************
* .text
****************************************************************************/
.text
.code64
/****************************************************************************
* Public Symbols
****************************************************************************/
.globl irq_handler
.globl isr_handler
.globl g_interrupt_stack
.globl g_interrupt_stack_end
.globl g_isr_stack
.globl g_isr_stack_end
/* This macro creates a stub for an ISR which does NOT pass its own
 * error code (adds a dummy error code).
 */
.macro ISR_NOERRCODE, intno
.globl vector_isr\intno
vector_isr\intno:
cli /* Disable interrupts firstly. */
/* CPU has switched to the ISR stack using IST */
pushq $0 /* Push a dummy error code. */
/* Save rdi, rsi */
pushq %rdi
pushq %rsi
movq $\intno, %rsi /* INT Number is saved to 2nd parameter of function call */
jmp isr_common /* Go to the common ISR handler code. */
.endm
/* This macro creates a stub for an ISR which passes its own
 * error code.
 */
.macro ISR_ERRCODE, intno
.globl vector_isr\intno
vector_isr\intno:
cli /* Disable interrupts firstly. */
/* CPU has switched to the ISR stack using IST */
/* Save rdi, rsi */
pushq %rdi
pushq %rsi
movq $\intno, %rsi /* INT Number is saved to 2nd parameter of function call */
jmp isr_common /* Go to the common ISR handler code. */
.endm
/* This macro creates a stub for an IRQ - the first parameter is
* the IRQ number, the second is the ISR number it is remapped to.
*/
.macro IRQ, irqno, intno
.globl vector_irq\irqno
vector_irq\irqno:
cli /* Disable interrupts firstly. */
/* CPU has switched to the IRQ stack using IST */
pushq $0 /* Push a dummy error code. */
/* Save rdi, rsi */
pushq %rdi
pushq %rsi
movq $\intno, %rsi /* INT Number is saved to 2nd parameter of function call */
jmp irq_common /* Go to the common IRQ handler code. */
.endm
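After any of these stubs runs, the top of the stack holds the two scratch registers, the (real or dummy) error code, and then the CPU-pushed interrupt frame. A hedged C view of what isr_common/irq_common receive (the struct is hypothetical; the first field is at the lowest address):

#include <stdint.h>

struct stub_frame
{
  uint64_t rsi;     /* original RSI; the register itself now holds intno */
  uint64_t rdi;     /* original RDI */
  uint64_t errcode; /* pushed by the CPU, or a dummy 0 from the stub */
  uint64_t rip;     /* CPU-pushed interrupt frame starts here */
  uint64_t cs;
  uint64_t rflags;
  uint64_t rsp;
  uint64_t ss;
};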
/****************************************************************************
* IDT Vectors
****************************************************************************/
/* The following will be the vector addresses programmed into the IDT */
ISR_NOERRCODE ISR0
.balign 16
ISR_NOERRCODE ISR1
.balign 16
ISR_NOERRCODE ISR2
.balign 16
ISR_NOERRCODE ISR3
.balign 16
ISR_NOERRCODE ISR4
.balign 16
ISR_NOERRCODE ISR5
.balign 16
ISR_NOERRCODE ISR6
.balign 16
ISR_NOERRCODE ISR7
.balign 16
ISR_ERRCODE ISR8
.balign 16
ISR_NOERRCODE ISR9
.balign 16
ISR_ERRCODE ISR10
.balign 16
ISR_ERRCODE ISR11
.balign 16
ISR_ERRCODE ISR12
.balign 16
ISR_ERRCODE ISR13
.balign 16
ISR_ERRCODE ISR14
.balign 16
ISR_NOERRCODE ISR15
.balign 16
ISR_NOERRCODE ISR16
.balign 16
ISR_NOERRCODE ISR17
.balign 16
ISR_NOERRCODE ISR18
.balign 16
ISR_NOERRCODE ISR19
.balign 16
ISR_NOERRCODE ISR20
.balign 16
ISR_NOERRCODE ISR21
.balign 16
ISR_NOERRCODE ISR22
.balign 16
ISR_NOERRCODE ISR23
.balign 16
ISR_NOERRCODE ISR24
.balign 16
ISR_NOERRCODE ISR25
.balign 16
ISR_NOERRCODE ISR26
.balign 16
ISR_NOERRCODE ISR27
.balign 16
ISR_NOERRCODE ISR28
.balign 16
ISR_NOERRCODE ISR29
.balign 16
ISR_NOERRCODE ISR30
.balign 16
ISR_NOERRCODE ISR31
.balign 16
IRQ 0, IRQ0
.balign 16
IRQ 1, IRQ1
.balign 16
IRQ 2, IRQ2
.balign 16
IRQ 3, IRQ3
.balign 16
IRQ 4, IRQ4
.balign 16
IRQ 5, IRQ5
.balign 16
IRQ 6, IRQ6
.balign 16
IRQ 7, IRQ7
.balign 16
IRQ 8, IRQ8
.balign 16
IRQ 9, IRQ9
.balign 16
IRQ 10, IRQ10
.balign 16
IRQ 11, IRQ11
.balign 16
IRQ 12, IRQ12
.balign 16
IRQ 13, IRQ13
.balign 16
IRQ 14, IRQ14
.balign 16
IRQ 15, IRQ15
.balign 16
/****************************************************************************
@@ -232,51 +220,50 @@ vector_irq\irqno:
****************************************************************************/
isr_common:
/* trace 'S' */
/* Already swapped to the interrupt stack */
/* Stack is automatically recovered by iretq using task state */
/* x86_64 doesn't have pusha, so we have to do things manually */
/* RDI and RSI are pushed above for handling the IRQ number */
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %rbp
pushq %rbx
pushq %rax
mov %ds, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the data segment descriptor */
mov %es, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the extra segment descriptor */
mov %gs, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the gs segment selector */
mov %fs, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the fs segment selector */
/* Align to a 16-byte boundary */
leaq -8(%rsp), %rsp
/* save xmm registers */
leaq -512(%rsp), %rsp
fxsaveq (%rsp)
/* The current value of the SP points to the beginning of the state save
* structure. Save that in RDI as the input parameter to isr_handler.
*/
mov %rsp, %rdi
call isr_handler
jmp .Lreturn
.size isr_common, . - isr_common
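Both common bodies hand RSP to the handler as its first argument and treat the returned pointer as the register area to resume, which is what the cmp %rax, %rsp at the common return point tests. A hedged sketch of that assumed contract:

#include <stdint.h>

/* Assumed prototypes: regs points at the saved state on the stack and
 * irq is the vector number placed in RSI by the stub. The handler
 * returns either regs (no switch) or another task's save area.
 */
extern uint64_t *isr_handler(uint64_t *regs, uint64_t irq);
extern uint64_t *irq_handler(uint64_t *regs, uint64_t irq);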
/****************************************************************************
* Name: irq_common
@@ -288,51 +275,50 @@ isr_common:
*
****************************************************************************/
.type irq_common, @function
irq_common:
/* trace 'R' */
/* Already swapped to the interrupt stack */
/* Stack is automatically recovered by iretq using task state */
/* x86_64 doesn't have pusha, so we have to do things manually */
/* RDI and RSI are pushed above for handling the IRQ number */
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %rbp
pushq %rbx
pushq %rax
mov %ds, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the data segment descriptor */
mov %es, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the extra segment descriptor */
mov %gs, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the gs segment selector */
mov %fs, %ax /* Lower 16-bits of rax. */
pushq %rax /* Save the fs segment selector */
/* Align to a 16-byte boundary */
leaq -8(%rsp), %rsp
/* save xmm registers */
leaq -512(%rsp), %rsp
fxsaveq (%rsp)
/* The current value of the SP points to the beginning of the state save
* structure. Save that in RDI as the input parameter to irq_handler.
*/
mov %rsp, %rdi
call irq_handler
/* The common return point for both isr_handler and irq_handler */
@@ -341,55 +327,55 @@ irq_common:
* return. Are we switching to a new context?
*/
cmp %rax, %rsp
je .Lnoswitch
/* A context switch will be performed. RAX holds the address of the new
 * register save structure.
 *
 * Jump to x86_64_fullcontextrestore(). We perform a call here, but that
 * function never returns. The address of the new register save block is
 * the argument to x86_64_fullcontextrestore().
 */
mov %rax, %rdi
call x86_64_fullcontextrestore
.Lnoswitch:
fxrstorq (%rsp)
leaq 512(%rsp), %rsp
leaq 8(%rsp), %rsp
popq %rax
mov %fs, %ax
popq %rax
mov %gs, %ax
popq %rax
mov %es, %ax
popq %rax
mov %ds, %ax
popq %rax
popq %rbx
popq %rbp
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
add $8, %rsp /* Cleans up the pushed error code */
iretq /* Pops 5 things at once: RIP, CS, RFLAGS, RSP and SS */
.size irq_common, . - irq_common
.end
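The switch decision at the common return point reduces to a pointer comparison; a hedged C sketch of that control flow (x86_64_fullcontextrestore() never returns):

#include <stdint.h>

extern void x86_64_fullcontextrestore(uint64_t *regs);

static void common_return(uint64_t *rsp, uint64_t *handler_result)
{
  if (handler_result != rsp)
    {
      /* New context: restore it instead of unwinding this stack. */
      x86_64_fullcontextrestore(handler_result);
    }

  /* Same context: fall through to .Lnoswitch and pop everything back. */
}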