arm64_task/pthread_start: Convert the C / inline ASM code to assembly
The aforementioned functions can (and eventually will) fail if the C compiler decides to use the stack for the incoming entrypt/etc. parameters. Fix this by converting the jump-to-user part into pure assembly, ensuring the stack is NOT used for the parameters.
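For illustration, the hazard in the C + inline ASM version is that nothing stops the compiler from staging the incoming arguments on the stack before the asm block runs. A minimal sketch of such a spill, assuming a compiler that chooses to do so (hypothetical output, not code from this commit):

    stp    x1, x2, [sp, #-16]!    /* entrypt/arg parked on the stack   */
    ...                           /* other compiler-generated code     */
    ldp    x1, x2, [sp], #16      /* reloaded from the stack later on  */

Whether such a spill appears depends on the compiler version and optimization flags, so the C implementation was only correct by luck; a pure assembly routine keeps the parameters in registers for its entire lifetime.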
parent 09ab135d2f
commit 10b40abecc
@@ -270,6 +270,8 @@ EXTERN uint8_t g_idle_topstack[]; /* End+1 of heap */
 ****************************************************************************/
 
 void arm64_new_task(struct tcb_s *tak_new);
+void arm64_jump_to_user(uint64_t entry, uint64_t x0, uint64_t x1,
+                        uint64_t *regs) noreturn_function;
 
 /* Low level initialization provided by chip logic */
 
@@ -68,12 +68,6 @@
 void up_pthread_start(pthread_trampoline_t startup,
                       pthread_startroutine_t entrypt, pthread_addr_t arg)
 {
-  uint64_t *regs = this_task()->xcp.initregs;
-
-  /* This must be performed atomically, the C-section ends upon user entry */
-
-  enter_critical_section();
-
   /* Set up to enter the user-space pthread start-up function in
    * unprivileged mode. We need:
    *
@@ -83,24 +77,8 @@ void up_pthread_start(pthread_trampoline_t startup,
    * SPSR = user mode
    */
 
-  regs[REG_ELR] = (uint64_t)startup;
-  regs[REG_X0] = (uint64_t)entrypt;
-  regs[REG_X1] = (uint64_t)arg;
-  regs[REG_SPSR] = (regs[REG_SPSR] & ~SPSR_MODE_MASK) | SPSR_MODE_EL0T;
-
-  /* Fully unwind the kernel stack and drop to user space */
-
-  asm
-  (
-    "mov x0, %0\n" /* Get context registers */
-    "mov sp, x0\n" /* Stack pointer = context */
-    "b arm64_exit_exception\n"
-    :
-    : "r" (regs)
-    : "x0", "memory"
-  );
-
-  PANIC();
+  arm64_jump_to_user((uint64_t)startup, (uint64_t)entrypt, (uint64_t)arg,
+                     this_task()->xcp.initregs);
 }
 
 #endif /* !CONFIG_BUILD_FLAT && __KERNEL__ && !CONFIG_DISABLE_PTHREAD */
@@ -65,12 +65,6 @@
 
 void up_task_start(main_t taskentry, int argc, char *argv[])
 {
-  uint64_t *regs = this_task()->xcp.initregs;
-
-  /* This must be performed atomically, the C-section ends upon user entry */
-
-  enter_critical_section();
-
   /* Set up to return to the user-space _start function in
    * unprivileged mode. We need:
    *
@@ -80,24 +74,8 @@ void up_task_start(main_t taskentry, int argc, char *argv[])
    * SPSR = user mode
    */
 
-  regs[REG_ELR] = (uint64_t)taskentry;
-  regs[REG_X0] = (uint64_t)argc;
-  regs[REG_X1] = (uint64_t)argv;
-  regs[REG_SPSR] = (regs[REG_SPSR] & ~SPSR_MODE_MASK) | SPSR_MODE_EL0T;
-
-  /* Fully unwind the kernel stack and drop to user space */
-
-  asm
-  (
-    "mov x0, %0\n" /* Get context registers */
-    "mov sp, x0\n" /* Stack pointer = context */
-    "b arm64_exit_exception\n"
-    :
-    : "r" (regs)
-    : "x0", "memory"
-  );
-
-  PANIC();
+  arm64_jump_to_user((uint64_t)taskentry, (uint64_t)argc, (uint64_t)argv,
+                     this_task()->xcp.initregs);
 }
 
 #endif /* !CONFIG_BUILD_FLAT */
@@ -156,6 +156,36 @@ restore_new:
 
     ret
 
+/****************************************************************************
+ * Function: arm64_jump_to_user
+ *
+ * Description:
+ *   Routine to jump to user space, called when a user process is started and
+ *   the kernel is ready to give control to the user task in user space.
+ *
+ *   arm64_jump_to_user(entry, x0, x1, regs)
+ *     entry: process entry point
+ *     x0:    parameter 0 for process
+ *     x1:    parameter 1 for process
+ *     regs:  integer register save area to use
+ *
+ ****************************************************************************/
+
+#ifndef CONFIG_BUILD_FLAT
+GTEXT(arm64_jump_to_user)
+SECTION_FUNC(text, arm64_jump_to_user)
+    msr    daifset, #IRQ_DAIF_MASK
+    mov    sp, x3
+    str    x0, [sp, #8 * REG_ELR]
+    str    x1, [sp, #8 * REG_X0]
+    str    x2, [sp, #8 * REG_X1]
+    mrs    x0, spsr_el1
+    and    x0, x0, #~SPSR_MODE_MASK
+    #orr   x0, x0, #SPSR_MODE_EL0T # EL0T=0x00, out of range for orr
+    str    x0, [sp, #8 * REG_SPSR]
+    b      arm64_exit_exception
+#endif
+
 /****************************************************************************
  * Function: arm64_sync_exc
  *
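A note on the design of arm64_jump_to_user above: it works because the standard AArch64 procedure call convention (AAPCS64) delivers the first four integer arguments in x0-x3, so the routine never needs the stack at all before it replaces SP with the register context area. The mapping the stores rely on is:

    /* arm64_jump_to_user(entry, x0, x1, regs)                               */
    /*   x0 = entry -> REG_ELR  (user entry point)                           */
    /*   x1 = x0    -> REG_X0   (first parameter for the user task)          */
    /*   x2 = x1    -> REG_X1   (second parameter for the user task)         */
    /*   x3 = regs  -> new SP   (context restored by arm64_exit_exception)   */

Since nothing is pushed before the mov sp, x3, the parameters can no longer be lost to a compiler-chosen stack slot, which is exactly the failure mode the commit message describes.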