arm64: simplify the vectors

Signed-off-by: ligd <liguiding1@xiaomi.com>
ligd 2024-10-01 12:00:32 +03:00 committed by Xiang Xiao
parent 7cbcf82bed
commit 669cd3aa2c
3 changed files with 122 additions and 351 deletions

@@ -305,8 +305,7 @@ void arm64_pginitialize(void);
 # define arm64_pginitialize()
 #endif /* CONFIG_LEGACY_PAGING */

-uint64_t * arm64_syscall_switch(uint64_t *regs);
-int arm64_syscall(uint64_t *regs);
+uint64_t *arm64_syscall(uint64_t *regs);

 /* Low level serial output **************************************************/

@@ -53,18 +53,18 @@ typedef uintptr_t (*syscall_t)(unsigned int, ...);
 ****************************************************************************/

 static void arm64_dump_syscall(const char *tag, uint64_t cmd,
-                               const struct regs_context * f_regs)
+                               const uint64_t *regs)
 {
-  svcinfo("SYSCALL %s: regs: %p cmd: %" PRId64 "\n", tag, f_regs, cmd);
+  svcinfo("SYSCALL %s: regs: %p cmd: %" PRId64 "\n", tag, regs, cmd);
   svcinfo("x0: 0x%-16lx x1: 0x%lx\n",
-          f_regs->regs[REG_X0], f_regs->regs[REG_X1]);
+          regs[REG_X0], regs[REG_X1]);
   svcinfo("x2: 0x%-16lx x3: 0x%lx\n",
-          f_regs->regs[REG_X2], f_regs->regs[REG_X3]);
+          regs[REG_X2], regs[REG_X3]);
   svcinfo("x4: 0x%-16lx x5: 0x%lx\n",
-          f_regs->regs[REG_X4], f_regs->regs[REG_X5]);
+          regs[REG_X4], regs[REG_X5]);
   svcinfo("x6: 0x%-16lx x7: 0x%lx\n",
-          f_regs->regs[REG_X6], f_regs->regs[REG_X7]);
+          regs[REG_X6], regs[REG_X7]);
 }

 #ifdef CONFIG_LIB_SYSCALL
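The pattern above recurs through the whole commit: the saved exception frame is handled as a flat `uint64_t` array indexed by the `REG_Xn` constants instead of being cast to `struct regs_context`. A minimal standalone sketch of that access pattern (the index values and frame size here are made up for illustration; NuttX defines the real ones in its arch headers):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative register indices; placeholders for NuttX's REG_Xn. */

enum
{
  REG_X0, REG_X1, REG_X2, REG_X3,
  XCPTCONTEXT_REGS = 34            /* made-up frame size */
};

/* With a flat array, the pointer handed over from assembly is usable
 * directly; no struct cast is needed.
 */

static void dump_frame(const uint64_t *regs)
{
  printf("x0: 0x%-16" PRIx64 " x1: 0x%" PRIx64 "\n",
         regs[REG_X0], regs[REG_X1]);
}

int main(void)
{
  uint64_t frame[XCPTCONTEXT_REGS] = { [REG_X0] = 1, [REG_X1] = 2 };

  dump_frame(frame);
  return 0;
}
```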
@@ -145,32 +145,32 @@ uintptr_t dispatch_syscall(unsigned int nbr, uintptr_t parm1,
 #endif

 /****************************************************************************
- * Name: arm64_syscall_switch
+ * Name: arm64_syscall
  *
  * Description:
  *   task switch syscall
  *
  ****************************************************************************/

-uint64_t *arm64_syscall_switch(uint64_t * regs)
+uint64_t *arm64_syscall(uint64_t *regs)
 {
+  uint64_t *ret_regs = regs;
   uint64_t cmd;
-  struct regs_context *f_regs;
-  uint64_t *ret_regs;
   struct tcb_s *tcb;
   int cpu;
 #ifdef CONFIG_BUILD_KERNEL
   uint64_t spsr;
 #endif

   /* Nested interrupts are not supported */

   DEBUGASSERT(regs);

-  f_regs = (struct regs_context *)regs;
-
   /* The SYSCALL command is in x0 on entry.  Parameters follow in x1..x7 */

-  cmd = f_regs->regs[REG_X0];
+  cmd = regs[REG_X0];

-  arm64_dump_syscall(__func__, cmd, f_regs);
+  arm64_dump_syscall(__func__, cmd, regs);

   switch (cmd)
     {
@@ -192,8 +192,8 @@ uint64_t *arm64_syscall_switch(uint64_t * regs)
            * set will determine the restored context.
            */

-          ret_regs = (uint64_t *)f_regs->regs[REG_X1];
-          f_regs->regs[REG_X1] = 0; /* set the saveregs = 0 */
+          ret_regs = (uint64_t *)regs[REG_X1];
+          regs[REG_X1] = 0; /* set the saveregs = 0 */

           DEBUGASSERT(ret_regs);
         }
@@ -217,91 +217,13 @@ uint64_t *arm64_syscall_switch(uint64_t * regs)
       case SYS_switch_context:
         {
-          DEBUGASSERT(f_regs->regs[REG_X1] != 0 &&
-                      f_regs->regs[REG_X2] != 0);
-          *(uint64_t **)f_regs->regs[REG_X1] = regs;
+          DEBUGASSERT(regs[REG_X1] != 0 && regs[REG_X2] != 0);
+          *(uint64_t **)regs[REG_X1] = regs;

-          ret_regs = (uint64_t *) f_regs->regs[REG_X2];
+          ret_regs = (uint64_t *)regs[REG_X2];
         }
         break;

-      default:
-        {
-          svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd);
-          ret_regs = 0;
-          return 0;
-        }
-        break;
-    }
-
-  if ((uint64_t *)f_regs != ret_regs)
-    {
-      cpu = this_cpu();
-      tcb = current_task(cpu);
-
-#ifdef CONFIG_ARCH_ADDRENV
-      /* Make sure that the address environment for the previously
-       * running task is closed down gracefully (data caches dump,
-       * MMU flushed) and set up the address environment for the new
-       * thread at the head of the ready-to-run list.
-       */
-
-      addrenv_switch(NULL);
-#endif
-
-      /* Update scheduler parameters */
-
-      nxsched_suspend_scheduler(g_running_tasks[cpu]);
-      nxsched_resume_scheduler(tcb);
-
-      /* Record the new "running" task.  g_running_tasks[] is only used by
-       * assertion logic for reporting crashes.
-       */
-
-      g_running_tasks[cpu] = tcb;
-
-      /* Restore the cpu lock */
-
-      restore_critical_section(tcb, cpu);
-    }
-
-  return ret_regs;
-}
-
-/****************************************************************************
- * Name: arm64_syscall
- *
- * Description:
- *   SVC interrupts will vector here with insn=the SVC instruction and
- *   xcp=the interrupt context
- *
- *   The handler may get the SVC number be de-referencing the return
- *   address saved in the xcp and decoding the SVC instruction
- *
- ****************************************************************************/
-
-int arm64_syscall(uint64_t *regs)
-{
-  uint64_t cmd;
-  struct regs_context *f_regs;
-#ifdef CONFIG_BUILD_KERNEL
-  uint64_t spsr;
-#endif
-
-  /* Nested interrupts are not supported */
-
-  DEBUGASSERT(regs);
-
-  f_regs = (struct regs_context *)regs;
-
-  /* The SYSCALL command is in x0 on entry.  Parameters follow in x1..x7 */
-
-  cmd = f_regs->regs[REG_X0];
-
-  arm64_dump_syscall(__func__, cmd, f_regs);
-
-  switch (cmd)
-    {
 #ifdef CONFIG_BUILD_KERNEL
       /* R0=SYS_signal_handler:  This a user signal handler callback
        *
@@ -404,10 +326,44 @@ int arm64_syscall(uint64_t *regs)
 #endif

       default:
-        DEBUGPANIC();
-        break;
+        {
+          svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd);
+          ret_regs = 0;
+          return 0;
+        }
+        break;
     }

-  return 0;
+  if ((uint64_t *)regs != ret_regs)
+    {
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+
+#ifdef CONFIG_ARCH_ADDRENV
+      /* Make sure that the address environment for the previously
+       * running task is closed down gracefully (data caches dump,
+       * MMU flushed) and set up the address environment for the new
+       * thread at the head of the ready-to-run list.
+       */
+
+      addrenv_switch(NULL);
+#endif
+
+      /* Update scheduler parameters */
+
+      nxsched_suspend_scheduler(g_running_tasks[cpu]);
+      nxsched_resume_scheduler(tcb);
+
+      /* Record the new "running" task.  g_running_tasks[] is only used by
+       * assertion logic for reporting crashes.
+       */
+
+      g_running_tasks[cpu] = tcb;
+
+      /* Restore the cpu lock */
+
+      restore_critical_section(tcb, cpu);
+    }
+
+  return ret_regs;
 }
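To see the contract the merged arm64_syscall() now implements, here is a reduced user-space model: the handler returns the frame that should be restored, and SYS_switch_context saves the current frame through the pointer passed in x1 before resuming the frame passed in x2. Everything in the sketch (indices, command number, helper name) is illustrative, not the NuttX definitions:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative indices and command number; the real ones live in the
 * NuttX arch headers (REG_Xn) and syscall headers (SYS_*).
 */

enum { REG_X0, REG_X1, REG_X2, NREGS = 8 };
#define SYS_switch_context 2

/* Same shape as the merged arm64_syscall(): the return value is the
 * frame to restore on exception exit.
 */

static uint64_t *handle_syscall(uint64_t *regs)
{
  uint64_t *ret_regs = regs;

  switch (regs[REG_X0])
    {
      case SYS_switch_context:
        *(uint64_t **)(uintptr_t)regs[REG_X1] = regs;     /* save current */
        ret_regs = (uint64_t *)(uintptr_t)regs[REG_X2];   /* resume next  */
        break;

      default:
        break;
    }

  return ret_regs;
}

int main(void)
{
  uint64_t cur[NREGS] = { 0 };
  uint64_t next[NREGS] = { 0 };
  uint64_t *saved = 0;

  cur[REG_X0] = SYS_switch_context;
  cur[REG_X1] = (uint64_t)(uintptr_t)&saved;
  cur[REG_X2] = (uint64_t)(uintptr_t)next;

  assert(handle_syscall(cur) == next);  /* switched to the new frame */
  assert(saved == cur);                 /* old frame was recorded    */
  puts("ok");
  return 0;
}
```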

@@ -99,49 +99,6 @@ SECTION_FUNC(text, up_saveusercontext)
     ret

-/****************************************************************************
- * Function: arm64_context_switch
- *
- * Description:
- *   Routine to handle context switch
- *
- *   arm64_context_switch( x0, x1)
- *     x0: restore thread stack context
- *     x1: save thread stack context
- *   note:
- *     x1 = 0, only restore x0
- *
- ****************************************************************************/
-
-GTEXT(arm64_context_switch)
-SECTION_FUNC(text, arm64_context_switch)
-    cmp    x1, #0x0
-    beq    restore_new
-
-    /* Save the current SP_ELx */
-
-    add    x4, sp, #8 * XCPTCONTEXT_REGS
-    str    x4, [x1, #8 * REG_SP_ELX]
-
-    /* Save the current task's SP_EL0 and exception depth */
-
-    mrs    x4, sp_el0
-    mrs    x5, tpidrro_el0
-    stp    x4, x5, [x1, #8 * REG_SP_EL0]
-
-restore_new:
-
-    /* Restore SP_EL0 and thread's exception depth */
-
-    ldp    x4, x5, [x0, #8 * REG_SP_EL0]
-    msr    tpidrro_el0, x5
-    msr    sp_el0, x4
-
-    /* Retrieve the new thread's SP_ELx */
-
-    ldr    x4, [x0, #8 * REG_SP_ELX]
-    sub    sp, x4, #8 * XCPTCONTEXT_REGS
-
-    /* Return to arm64_sync_exc() or arm64_irq_handler() */
-
-    ret
-
 /****************************************************************************
  * Function: arm64_jump_to_user
  *
@@ -197,7 +154,7 @@ SECTION_FUNC(text, arm64_sync_exc)

     /* if this is a svc call ?*/

-    bne    exc_handle
+    bne    arm64_fatal_handler

 #ifdef CONFIG_LIB_SYSCALL
     /* Handle user system calls separately */
@@ -228,111 +185,23 @@ SECTION_FUNC(text, arm64_sync_exc)
 reserved_syscall:
 #endif

     /* x0 = syscall_cmd
      * if ( x0 <= SYS_switch_context ) {
-     *     call context_switch
+     *     it's a context switch syscall, so context need to be done
      * }
      * #define SYS_save_context    (0)
      * #define SYS_restore_context (1)
      * #define SYS_switch_context  (2)
      */

-    ldr    x0, [sp, #8 * REG_X0]
-    cmp    x0, #SYS_save_context
-    beq    save_context
-    cmp    x0, #SYS_switch_context
-    beq    context_switch
-    cmp    x0, #SYS_restore_context
-    beq    context_switch
-
-    /* Normal syscall, thread context will not switch
-     *
-     * call the SVC handler with interrupts disabled.
-     * void arm64_syscall(uint64_t *regs)
-     * in:
-     *     regs = pointer to struct reg_context allocating
-     *     from stack, esf_reg has put on it
-     *     regs[REG_X0]: syscall cmd
-     *     regs[REG_X1] ~ regs[REG_X6]: syscall parameter
-     * out:
-     *     x0: return by arm64_syscall
-     */
-
     mov    x0, sp                  /* x0 = reg frame */

     /* Call arm64_syscall() on the user stack */

     bl     arm64_syscall           /* Call the handler */

-    /* Return from exception */
-
-    b      arm64_exit_exception
-
-context_switch:
-
-    /* Call arm64_syscall_switch() for context switch
-     *
-     * uint64_t * arm64_syscall_switch(uint64_t * regs)
-     * out:
-     *     x0: return by arm64_syscall_switch, restore task context
-     *     regs[REG_X1]: save task context, if x1 = 0, only restore x0
-     */
-
-    mov    x0, sp
-    bl     arm64_syscall_switch
-
-    /* get save task reg context pointer */
-
-    ldr    x1, [sp, #8 * REG_X1]
-    cmp    x1, #0x0
-    beq    do_switch
-    ldr    x1, [x1]
-
-do_switch:
-
-    /* Switch to IRQ stack and save current sp on it. */
-
-#ifdef CONFIG_SMP
-    /* Notes:
-     * Complete any pending TLB or cache maintenance on this CPU in case
-     * the thread migrates to a different CPU.
-     * This full barrier is also required by the membarrier system
-     * call.
-     */
-
-    dsb    ish
-
-    get_cpu_id x0
-    ldr    x1, =(g_cpu_int_stacktop)
-    lsl    x0, x0, #3
-    ldr    x1, [x1, x0]
-#else
-    ldr    x1, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK)
-#endif
-
-    bl     arm64_context_switch
-    b      arm64_exit_exception
-
-save_context:
-    arm64_exception_context_save x0 x1 sp
-    mov    x0, sp
-    bl     arm64_syscall_save_context
-    mov    sp, x1
-
-    /* Save the return value into the ESF */
-
-    bl     arm64_syscall           /* Call the handler */
-    str    x0, [sp, #8 * REG_X0]
-
-    /* Return from exception */
-
-    b      arm64_exit_exception
-
-exc_handle:
-    mov    x0, sp
-
-    /* void arm64_fatal_handler(struct regs_context * reg);
-     * x0 = Exception stack frame
-     */
-
-    bl     arm64_fatal_handler
-
-    /* Return here only in case of recoverable error */
-
-    b      arm64_exit_exception
+    mov    sp, x0
+    b      arm64_exit_exception

 /****************************************************************************
  * Name: arm64_irq_handler
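After this change, the sync-exception path follows a single convention: the C handler receives the saved frame in x0 and returns the frame that should be live on exit, so `mov sp, x0` is the entire context switch. A hypothetical C model of that control flow (all names here are invented for the sketch):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model: "restoring a context" is just adopting the frame
 * pointer that the handler returns.
 */

static uint64_t *g_sp;                    /* stands in for SP at entry */

static uint64_t *handler(uint64_t *regs, uint64_t *next)
{
  /* A handler may return its own argument (no switch) or some other
   * saved frame (context switch).
   */

  return next != NULL ? next : regs;
}

static void take_exception(uint64_t *next)
{
  /* mov x0, sp / bl handler / mov sp, x0 / b arm64_exit_exception */

  g_sp = handler(g_sp, next);
}

int main(void)
{
  uint64_t frame_a[8] = { 0 };
  uint64_t frame_b[8] = { 0 };

  g_sp = frame_a;
  take_exception(NULL);                   /* plain syscall: no switch */
  printf("no switch: %s\n", g_sp == frame_a ? "ok" : "bad");

  take_exception(frame_b);                /* switch: adopt frame_b */
  printf("switched:  %s\n", g_sp == frame_b ? "ok" : "bad");
  return 0;
}
```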
@@ -346,20 +215,16 @@ GTEXT(arm64_irq_handler)
 SECTION_FUNC(text, arm64_irq_handler)

     /* Switch to IRQ stack and save current sp on it. */

 #ifdef CONFIG_SMP
-    get_cpu_id x1
-    ldr    x0, =(g_cpu_int_stacktop)
-    lsl    x1, x1, #3
-    ldr    x0, [x0, x1]
+    get_cpu_id x0
+    ldr    x1, =(g_cpu_int_stacktop)
+    lsl    x0, x0, #3
+    ldr    x1, [x1, x0]
 #else
-    ldr    x0, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK)
+    ldr    x1, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK)
 #endif

-    mov    x1, sp
-    mov    sp, x0
-    str    x1, [sp, #-16]!
-    mov    x0, x1                  /* x0 = reg frame */
+    /* Save the task's stack and switch irq stack */
+
+    mov    x0, sp
+    mov    sp, x1

     /* Call arm64_decodeirq() on the interrupt stack
      * with interrupts disabled
@@ -367,128 +232,79 @@ SECTION_FUNC(text, arm64_irq_handler)
     bl     arm64_decodeirq

     /* Upon return from arm64_decodeirq, x0 holds the pointer to the
      * call reg context area, which can be use to restore context.
      * This may or may not be the same value that was passed to arm64_decodeirq:
      * It will differ if a context switch is required.
      */

-    ldr    x1, [sp], #16
-
-    /* retrieve the task's stack. */
-
-    mov    sp, x1
-
-    cmp    x0, x1
-    beq    irq_exit
-
-irq_context_switch:
-#ifdef CONFIG_SMP
-    /* Notes:
-     * Complete any pending TLB or cache maintenance on this CPU in case
-     * the thread migrates to a different CPU.
-     * This full barrier is also required by the membarrier system
-     * call.
-     */
-
-    dsb    ish
-#endif
-
-    /* Switch thread
-     * - x0: restore task reg context, return by arm64_decodeirq,
-     * - x1: save task reg context, save before call arm64_decodeirq
-     * call arm64_context_switch(x0) to switch
-     */
-
-    bl     arm64_context_switch
-
-irq_exit:
-    b      arm64_exit_exception
+    mov    sp, x0
+    b      arm64_exit_exception

+/* TODO: if the arm64_fatal_handler return success, maybe need context switch */
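In both the IRQ and FIQ entries, the per-CPU interrupt stack is found by indexing an array of stack tops with the CPU id; `lsl x0, x0, #3` is the scale-by-8 of a 64-bit array index. The same lookup in C, with invented names and sizes standing in for the port's real g_cpu_int_stacktop setup:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS     4
#define STACKSIZE 4096

/* Hypothetical equivalents of the per-CPU interrupt stacks and the
 * g_cpu_int_stacktop table filled in by the port at boot.
 */

static uint8_t   g_int_stack[NCPUS][STACKSIZE];
static uintptr_t g_cpu_int_stacktop[NCPUS];

static uintptr_t irq_stacktop(unsigned int cpu)
{
  /* get_cpu_id x0; lsl x0, x0, #3; ldr x1, [x1, x0] amounts to a
   * 64-bit array index: base + cpu * 8.
   */

  return g_cpu_int_stacktop[cpu];
}

int main(void)
{
  for (unsigned int i = 0; i < NCPUS; i++)
    {
      /* Stacks grow down, so the "top" is the end of the region. */

      g_cpu_int_stacktop[i] = (uintptr_t)&g_int_stack[i][STACKSIZE];
    }

  printf("cpu1 stack top: %#" PRIxPTR "\n", irq_stacktop(1));
  return 0;
}
```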
 /****************************************************************************
  * Name: arm64_serror_handler
  *
  * Description:
  *   SError exception handler
  *
  ****************************************************************************/

 GTEXT(arm64_serror_handler)
 SECTION_FUNC(text, arm64_serror_handler)
     mov    x0, sp
     bl     arm64_fatal_handler

     /* Return here only in case of recoverable error */

     b      arm64_exit_exception

 /****************************************************************************
  * Name: arm64_mode32_handler
  *
  * Description:
  *   Mode32 exception handler
  *
  ****************************************************************************/

 GTEXT(arm64_mode32_handler)
 SECTION_FUNC(text, arm64_mode32_handler)
     mov    x0, sp
     bl     arm64_fatal_handler

     /* Return here only in case of recoverable error */

     b      arm64_exit_exception

 /****************************************************************************
  * Name: arm64_fiq_handler
  *
  * Description:
  *   Interrupt exception handler
  *
  ****************************************************************************/

 GTEXT(arm64_fiq_handler)
 SECTION_FUNC(text, arm64_fiq_handler)
 #ifndef CONFIG_ARM64_DECODEFIQ
     mov    x0, sp
     bl     arm64_fatal_handler

     /* Return here only in case of recoverable error */

     b      arm64_exit_exception
 #else
     /* Switch to FIQ stack and save current sp on it. */

 #ifdef CONFIG_SMP
-    get_cpu_id x1
-    ldr    x0, =(g_cpu_int_fiq_stacktop)
-    lsl    x1, x1, #3
-    ldr    x0, [x0, x1]
+    get_cpu_id x0
+    ldr    x1, =(g_cpu_int_fiq_stacktop)
+    lsl    x0, x0, #3
+    ldr    x1, [x1, x0]
 #else
-    ldr    x0, =(g_interrupt_fiq_stack + CONFIG_ARCH_INTERRUPTSTACK)
+    ldr    x1, =(g_interrupt_fiq_stack + CONFIG_ARCH_INTERRUPTSTACK)
 #endif

-    mov    x1, sp
-    mov    sp, x0
-    str    x1, [sp, #-16]!
-    mov    x0, x1                  /* x0 = reg frame */
+    /* Save the task's stack and switch fiq stack */
+
+    mov    x0, sp
+    mov    sp, x1

-    /* Call arm64_decodefiq() on the interrupt stack
+    /* Call arm64_decodeirq() on the interrupt stack
      * with interrupts disabled
      */

-    bl     arm64_decodefiq
-
-    /* Upon return from arm64_decodefiq, x0 holds the pointer to the
-     * call reg context area, which can be use to restore context.
-     * This may or may not be the same value that was passed to arm64_decodefiq:
-     * It will differ if a context switch is required.
-     */
-
-    ldr    x1, [sp], #16
-
-    /* retrieve the task's stack. */
-
-    mov    sp, x1
-
-    cmp    x0, x1
-    beq    fiq_exit
-
-#ifdef CONFIG_SMP
-    /* Notes:
-     * Complete any pending TLB or cache maintenance on this CPU in case
-     * the thread migrates to a different CPU.
-     * This full barrier is also required by the membarrier system
-     * call.
-     */
-
-    dsb    ish
-#endif
-
-    /* Switch thread
-     * - x0: restore task reg context, return by arm64_decodefiq,
-     * - x1: save task reg context, save before call arm64_decodefiq
-     * call arm64_context_switch(x0) to switch
-     */
-
-    bl     arm64_context_switch
-
-fiq_exit:
-    b      arm64_exit_exception
+    bl     arm64_decodeirq
+
+    mov    sp, x0
+    b      arm64_exit_exception
 #endif
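Finally, the scheduler bookkeeping that used to live only in arm64_syscall_switch() now runs in the merged handler whenever the returned frame differs from the one that trapped. A stubbed-down model of that epilogue; the struct and the two nxsched calls are stand-ins here so the sketch is self-contained, not the real NuttX implementations:

```c
#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for NuttX scheduler state and calls. */

struct tcb_s { const char *name; };

static struct tcb_s g_task_a = { "A" };
static struct tcb_s g_task_b = { "B" };
static struct tcb_s *g_running_tasks[1] = { &g_task_a };

static void nxsched_suspend_scheduler(struct tcb_s *t) { (void)t; }
static void nxsched_resume_scheduler(struct tcb_s *t) { (void)t; }

/* Epilogue shape: only touch scheduler state when an actual switch
 * happened, then record the new running task (used by crash reporting).
 */

static void finish_switch(uint64_t *regs, uint64_t *ret_regs,
                          struct tcb_s *next, int cpu)
{
  if (regs != ret_regs)
    {
      nxsched_suspend_scheduler(g_running_tasks[cpu]);
      nxsched_resume_scheduler(next);
      g_running_tasks[cpu] = next;
    }
}

int main(void)
{
  uint64_t frame_a[4];
  uint64_t frame_b[4];

  finish_switch(frame_a, frame_b, &g_task_b, 0);
  printf("running: %s\n", g_running_tasks[0]->name);
  return 0;
}
```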