arch/armv7-a: unify switch context from software interrupt

Signed-off-by: chao.an <anchao@xiaomi.com>
This commit is contained in:
chao.an 2022-02-16 13:01:22 +08:00 committed by Masayuki Ishikawa
parent 29c55cdda6
commit 61cd9dfca1
7 changed files with 134 additions and 186 deletions

View File

@ -125,18 +125,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
arm_restorestate(rtcb->xcp.regs);
}
/* Copy the user C context into the TCB at the (old) head of the
* ready-to-run task list. If arm_saveusercontext returns a non-zero
* value, then this is really the previously running task restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!arm_saveusercontext(rtcb->xcp.regs))
else
{
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
*/
rtcb = this_task();
struct tcb_s *nexttcb = this_task();
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
@ -145,15 +138,23 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
group_addrenv(nexttcb);
#endif
/* Reset scheduler parameters */
nxsched_resume_scheduler(rtcb);
nxsched_resume_scheduler(nexttcb);
/* Then switch contexts */
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
arm_fullcontextrestore(rtcb->xcp.regs);
arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* arm_switchcontext forces a context switch to the task at the
* head of the ready-to-run list. It does not 'return' in the
* normal sense. When it does return, it is because the blocked
* task is again ready to run and has execution priority.
*/
}
}
}

View File

@ -63,44 +63,6 @@
.type arm_fullcontextrestore, function
arm_fullcontextrestore:
/* On entry, a1 (r0) holds address of the register save area. All other
* registers are available for use.
*/
#ifdef CONFIG_ARCH_FPU
/* First, restore the floating point registers. Lets do this before we
* restore the ARM registers so that we have plenty of registers to
* work with.
*/
add r1, r0, #(4*REG_S0) /* r1=Address of FP register storage */
/* Load all floating point registers. Registers are loaded in numeric order,
* s0, s1, ... in increasing address order.
*/
#ifdef CONFIG_ARM_HAVE_FPU_D32
vldmia.64 r1!, {d0-d15} /* Restore the full FP context */
vldmia.64 r1!, {d16-d31}
#else
vldmia r1!, {s0-s31} /* Restore the full FP context */
#endif
/* Load the floating point control and status register. At the end of the
* vldmia, r1 will point to the FPSCR storage location.
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPSCR */
#endif
#ifdef CONFIG_BUILD_KERNEL
/* For the kernel build, we need to be able to transition gracefully
* between kernel- and user-mode tasks. Here we do that with a system
* call; the system call will execute in kernel mode but can return
* to either user or kernel mode.
*/
/* Perform the System call with R0=1 and R1=regs */
mov r1, r0 /* R1: regs */
@ -110,46 +72,5 @@ arm_fullcontextrestore:
/* This call should not return */
bx lr /* Unnecessary ... will not return */
#else
/* For a flat build, we can do all of this here... Just think of this as
* a longjmp() all on steroids.
*/
/* Recover all registers except for r0, r1, r2, R15, and CPSR */
add r1, r0, #(4*REG_R3) /* Offset to REG_R3 storage */
ldmia r1, {r3-r14} /* Recover registers */
ldr r2, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
/* Create a stack frame to hold some registers */
sub sp, sp, #(4*4) /* Frame for four registers */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
str r1, [sp] /* Save it at the top of the stack */
ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
str r1, [sp, #4] /* Save it in the stack */
ldr r1, [r0, #(4*REG_R2)] /* Fetch the stored r2 value */
str r1, [sp, #8] /* Save it in the stack */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
tst r2, #PSR_T_BIT
orrne r1, r1, #1
str r1, [sp, #12] /* Save it at the bottom of the frame */
/* Now we can restore the CPSR. We wait until we are completely
* finished with the context save data to do this. Restoring the CPSR
* may re-enable interrupts, and we could be in a context
* where the save structure is only protected by interrupts being
* disabled.
*/
msr spsr_cxsf, r2 /* Set the SPSR */
/* Now recover r0-r2, pc and cpsr, destroying the stack frame */
ldmia sp!, {r0-r2, pc}^
#endif
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end

View File

@ -92,19 +92,11 @@ void up_release_pending(void)
arm_restorestate(rtcb->xcp.regs);
}
/* Copy the exception context into the TCB of the task that
* was currently active. If arm_saveusercontext returns a non-zero
* value, then this is really the previously running task
* restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!arm_saveusercontext(rtcb->xcp.regs))
else
{
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
*/
rtcb = this_task();
struct tcb_s *nexttcb = this_task();
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
@ -113,15 +105,23 @@ void up_release_pending(void)
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
group_addrenv(nexttcb);
#endif
/* Update scheduler parameters */
nxsched_resume_scheduler(rtcb);
nxsched_resume_scheduler(nexttcb);
/* Then switch contexts */
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
arm_fullcontextrestore(rtcb->xcp.regs);
arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* arm_switchcontext forces a context switch to the task at the
* head of the ready-to-run list. It does not 'return' in the
* normal sense. When it does return, it is because the blocked
* task is again ready to run and has execution priority.
*/
}
}
}

View File

@ -147,19 +147,11 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
arm_restorestate(rtcb->xcp.regs);
}
/* Copy the exception context into the TCB at the (old) head of
* the ready-to-run task list. If arm_saveusercontext returns a
* non-zero value, then this is really the previously running task
* restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!arm_saveusercontext(rtcb->xcp.regs))
else
{
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
*/
rtcb = this_task();
struct tcb_s *nexttcb = this_task();
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
@ -168,15 +160,24 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
group_addrenv(nexttcb);
#endif
/* Update scheduler parameters */
nxsched_resume_scheduler(rtcb);
nxsched_resume_scheduler(nexttcb);
/* Then switch contexts */
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
arm_fullcontextrestore(rtcb->xcp.regs);
arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* arm_switchcontext forces a context switch to the task at the
* head of the ready-to-run list. It does not 'return' in the
* normal sense. When it does return, it is because the
* blocked task is again ready to run and has execution
* priority.
*/
}
}
}

View File

@ -123,7 +123,6 @@ static void dispatch_syscall(void)
*
****************************************************************************/
#ifdef CONFIG_LIB_SYSCALL
uint32_t *arm_syscall(uint32_t *regs)
{
uint32_t cmd;
@ -170,6 +169,7 @@ uint32_t *arm_syscall(uint32_t *regs)
* unprivileged thread mode.
*/
#ifdef CONFIG_LIB_SYSCALL
case SYS_syscall_return:
{
FAR struct tcb_s *rtcb = nxsched_self();
@ -218,6 +218,7 @@ uint32_t *arm_syscall(uint32_t *regs)
(void)nxsig_unmask_pendingsignal();
}
break;
#endif
/* R0=SYS_restore_context: Restore task context
*
@ -230,7 +231,6 @@ uint32_t *arm_syscall(uint32_t *regs)
* R1 = restoreregs
*/
#ifdef CONFIG_BUILD_KERNEL
case SYS_restore_context:
{
/* Replace 'regs' with the pointer to the register set in
@ -238,11 +238,40 @@ uint32_t *arm_syscall(uint32_t *regs)
* set will determine the restored context.
*/
arm_restorefpu((uint32_t *)regs[REG_R1]);
regs = (uint32_t *)regs[REG_R1];
DEBUGASSERT(regs);
}
break;
/* R0=SYS_switch_context: This a switch context command:
*
* void arm_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
*
* At this point, the following values are saved in context:
*
* R0 = SYS_switch_context
* R1 = saveregs
* R2 = restoreregs
*
* In this case, we do both: We save the context registers to the save
* register area referenced by the saved contents of R1 and then set
* regs to the save register area referenced by the saved
* contents of R2.
*/
case SYS_switch_context:
{
DEBUGASSERT(regs[REG_R1] != 0 && regs[REG_R2] != 0);
#if defined(CONFIG_ARCH_FPU)
arm_copyarmstate((uint32_t *)regs[REG_R1], regs);
arm_restorefpu((uint32_t *)regs[REG_R2]);
#else
memcpy((uint32_t *)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
#endif
regs = (uint32_t *)regs[REG_R2];
}
break;
/* R0=SYS_task_start: This a user task start
*
@ -457,9 +486,6 @@ uint32_t *arm_syscall(uint32_t *regs)
/* Indicate that we are in a syscall handler. */
rtcb->flags |= TCB_FLAG_SYSCALL;
#else
svcerr("ERROR: Bad SYS call: %d\n", regs[REG_R0]);
#endif
#ifdef CONFIG_ARCH_KERNEL_STACK
/* If this is the first SYSCALL and if there is an allocated
@ -477,6 +503,9 @@ uint32_t *arm_syscall(uint32_t *regs)
/* Save the new SYSCALL nesting level */
rtcb->xcp.nsyscalls = index + 1;
#else
svcerr("ERROR: Bad SYS call: 0x%" PRIx32 "\n", regs[REG_R0]);
#endif
}
break;
}
@ -501,14 +530,3 @@ uint32_t *arm_syscall(uint32_t *regs)
return regs;
}
#else
uint32_t *arm_syscall(uint32_t *regs)
{
_alert("SYSCALL from 0x%x\n", regs[REG_PC]);
CURRENT_REGS = regs;
PANIC();
}
#endif

View File

@ -109,20 +109,11 @@ void up_unblock_task(struct tcb_s *tcb)
arm_restorestate(rtcb->xcp.regs);
}
/* We are not in an interrupt handler. Copy the user C context
* into the TCB of the task that was previously active. If
* arm_saveusercontext returns a non-zero value, then this is really
* the previously running task restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!arm_saveusercontext(rtcb->xcp.regs))
else
{
/* Restore the exception context of the new task that is ready to
* run (probably tcb). This is the new rtcb at the head of the
* ready-to-run task list.
*/
rtcb = this_task();
struct tcb_s *nexttcb = this_task();
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
@ -131,15 +122,23 @@ void up_unblock_task(struct tcb_s *tcb)
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
group_addrenv(nexttcb);
#endif
/* Update scheduler parameters */
nxsched_resume_scheduler(rtcb);
nxsched_resume_scheduler(nexttcb);
/* Then switch contexts */
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
arm_fullcontextrestore(rtcb->xcp.regs);
arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* arm_switchcontext forces a context switch to the task at the
* head of the ready-to-run list. It does not 'return' in the
* normal sense. When it does return, it is because the blocked
* task is again ready to run and has execution priority.
*/
}
}
}

View File

@ -29,8 +29,6 @@
#include <syscall.h>
#ifdef CONFIG_LIB_SYSCALL
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
@ -42,78 +40,88 @@
* then four more syscall values must be reserved.
*/
#ifdef CONFIG_BUILD_KERNEL
# ifndef CONFIG_SYS_RESERVED
# error "CONFIG_SYS_RESERVED must be defined to have the value 6"
# elif CONFIG_SYS_RESERVED != 6
# error "CONFIG_SYS_RESERVED must have the value 6"
# endif
#else
# ifndef CONFIG_SYS_RESERVED
# error "CONFIG_SYS_RESERVED must be defined to have the value 1"
# elif CONFIG_SYS_RESERVED != 1
# error "CONFIG_SYS_RESERVED must have the value 1"
#ifdef CONFIG_LIB_SYSCALL
# ifdef CONFIG_BUILD_KERNEL
# ifndef CONFIG_SYS_RESERVED
# error "CONFIG_SYS_RESERVED must be defined to have the value 7"
# elif CONFIG_SYS_RESERVED != 7
# error "CONFIG_SYS_RESERVED must have the value 7"
# endif
# else
# ifndef CONFIG_SYS_RESERVED
# error "CONFIG_SYS_RESERVED must be defined to have the value 4"
# elif CONFIG_SYS_RESERVED != 4
# error "CONFIG_SYS_RESERVED must have the value 4"
# endif
# endif
#endif
/* Cortex-A system calls ****************************************************/
/* SYS call 0:
*
* void arm_syscall_return(void);
*/
#define SYS_syscall_return (0)
#ifndef CONFIG_BUILD_FLAT
#ifdef CONFIG_BUILD_KERNEL
/* SYS call 1:
*
* void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
*/
#define SYS_restore_context (1)
#define SYS_restore_context (0)
/* SYS call 1:
*
* void arm_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
*/
#define SYS_switch_context (1)
#ifdef CONFIG_LIB_SYSCALL
/* SYS call 2:
*
* void arm_syscall_return(void);
*/
#define SYS_syscall_return (2)
#ifndef CONFIG_BUILD_FLAT
#ifdef CONFIG_BUILD_KERNEL
/* SYS call 3:
*
* void up_task_start(main_t taskentry, int argc, FAR char *argv[])
* noreturn_function;
*/
#define SYS_task_start (2)
#define SYS_task_start (3)
/* SYS call 4:
/* SYS call 5:
*
* void signal_handler(_sa_sigaction_t sighand,
* int signo, FAR siginfo_t *info,
* FAR void *ucontext);
*/
#define SYS_signal_handler (4)
#define SYS_signal_handler (5)
/* SYS call 5:
/* SYS call 6:
*
* void signal_handler_return(void);
*/
#define SYS_signal_handler_return (5)
#define SYS_signal_handler_return (6)
#endif /* !CONFIG_BUILD_FLAT */
/* SYS call 3:
/* SYS call 4:
*
* void up_pthread_start(pthread_startroutine_t startup,
* pthread_startroutine_t entrypt, pthread_addr_t arg)
* noreturn_function
*/
#define SYS_pthread_start (3)
#define SYS_pthread_start (4)
#endif /* CONFIG_BUILD_KERNEL */
#endif /* CONFIG_LIB_SYSCALL */
/****************************************************************************
* Inline Functions
****************************************************************************/
#endif /* CONFIG_LIB_SYSCALL */
#endif /* __ARCH_ARM_SRC_ARMV7_A_SVCALL_H */