xtensa: use swint to switch context

Reason for using a software interrupt (swint) as the syscall interrupt:
The Xtensa `syscall` instruction raises the SYSCALL exception, but that
exception is handled at the same interrupt level as level-one
interrupts.
The NuttX swint can enter `enter_critical_section` and still generate
the interrupt.
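
A minimal sketch of the mechanism, mirroring the sys_call0 helper added
in the diff below (swint_call0 is an illustrative name only, and
XCHAL_SWINT_CALL is the software-interrupt bit mask expected to come
from the new xtensa_swi.h header):

    /* Assumes xtensa_swi.h is included for XCHAL_SWINT_CALL. */

    static inline uintptr_t swint_call0(unsigned int nbr)
    {
      /* Pass the call number in a2, then latch the software interrupt
       * by writing its bit mask to the INTSET special register.  The
       * swint handler dispatches the call; the result comes back in a2.
       */

      register long reg0 __asm__("a2") = (long)nbr;

      __asm__ __volatile__
      (
        "movi a3, %1\n"      /* Load the software-interrupt bit mask */
        "wsr  a3, intset\n"  /* Pend the software interrupt */
        "isync\n"            /* Wait for the write to take effect */
        : "=r"(reg0)
        : "i"(XCHAL_SWINT_CALL), "r"(reg0)
        : "a3", "memory"
      );

      return reg0;
    }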

Signed-off-by: zhuyanlin <zhuyanlin1@xiaomi.com>
zhuyanlin 2022-01-11 15:08:42 +08:00 committed by Xiang Xiao
parent f5d180bbdf
commit fbc1da98b7
12 changed files with 365 additions and 213 deletions

View File

@@ -30,20 +30,30 @@
****************************************************************************/
#include <nuttx/config.h>
#include <arch/irq.h>
#ifndef __ASSEMBLY__
# include <debug.h>
# include <stdint.h>
#endif
#ifdef CONFIG_LIB_SYSCALL
# include <syscall.h>
#endif
#include "xtensa_swi.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Configuration ************************************************************/
#define SYS_syscall 0x00
/* The SYS_signal_handler_return is executed here... its value is not always
* available in this context and so is assumed to be 7.
*/
#ifndef SYS_signal_handler_return
# define SYS_signal_handler_return (7)
#elif SYS_signal_handler_return != 7
# error "SYS_signal_handler_return was assumed to be 7"
#endif
/* This logic uses three system calls {0,1,2} for context switching and one
* for the syscall return. So a minimum of four syscall values must be
@@ -92,6 +102,7 @@
#ifndef CONFIG_BUILD_FLAT
#ifdef CONFIG_LIB_SYSCALL
/* SYS call 3:
*
* void xtensa_syscall_return(void);
@@ -100,6 +111,7 @@
#define SYS_syscall_return (3)
#ifdef CONFIG_BUILD_PROTECTED
/* SYS call 4:
*
* void up_task_start(main_t taskentry, int argc, char *argv[])
@@ -107,6 +119,7 @@
*/
#define SYS_task_start (4)
/* SYS call 6:
*
* void signal_handler(_sa_sigaction_t sighand, int signo,
@@ -144,10 +157,23 @@
#endif /* CONFIG_LIB_SYSCALL */
/****************************************************************************
* Public Function Prototypes
* Public Types
****************************************************************************/
/****************************************************************************
* Inline functions
****************************************************************************/
#ifndef __ASSEMBLY__
/****************************************************************************
* Public Data
****************************************************************************/
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
@@ -156,10 +182,219 @@ extern "C"
#define EXTERN extern
#endif
/****************************************************************************
* Name: sys_call0
*
* Description:
* System call SYS_ argument and no additional parameters.
*
****************************************************************************/
static inline uintptr_t sys_call0(unsigned int nbr)
{
register long reg0 __asm__("a2") = (long)(nbr);
__asm__ __volatile__
(
"movi a3, %1\n"
"wsr a3, intset\n"
"isync\n"
: "=r"(reg0)
: "i"(XCHAL_SWINT_CALL), "r"(reg0)
: "a3", "memory"
);
return reg0;
}
/****************************************************************************
* Name: sys_call1
*
* Description:
* System call SYS_ argument and one additional parameter.
*
****************************************************************************/
static inline uintptr_t sys_call1(unsigned int nbr, uintptr_t parm1)
{
register long reg0 __asm__("a2") = (long)(nbr);
register long reg1 __asm__("a3") = (long)(parm1);
__asm__ __volatile__
(
"movi a4, %1\n"
"wsr a4, intset\n"
"isync\n"
: "=r"(reg0)
: "i"(XCHAL_SWINT_CALL), "r"(reg0), "r"(reg1)
: "a4", "memory"
);
return reg0;
}
/****************************************************************************
* Name: sys_call2
*
* Description:
* System call SYS_ argument and two additional parameters.
*
****************************************************************************/
static inline uintptr_t sys_call2(unsigned int nbr, uintptr_t parm1,
uintptr_t parm2)
{
register long reg0 __asm__("a2") = (long)(nbr);
register long reg2 __asm__("a4") = (long)(parm2);
register long reg1 __asm__("a3") = (long)(parm1);
__asm__ __volatile__
(
"movi a5, %1\n"
"wsr a5, intset\n"
"isync\n"
: "=r"(reg0)
: "i"(XCHAL_SWINT_CALL), "r"(reg0), "r"(reg1), "r"(reg2)
: "a5", "memory"
);
return reg0;
}
/****************************************************************************
* Name: sys_call3
*
* Description:
* System call SYS_ argument and three additional parameters.
*
****************************************************************************/
static inline uintptr_t sys_call3(unsigned int nbr, uintptr_t parm1,
uintptr_t parm2, uintptr_t parm3)
{
register long reg0 __asm__("a2") = (long)(nbr);
register long reg3 __asm__("a5") = (long)(parm3);
register long reg2 __asm__("a4") = (long)(parm2);
register long reg1 __asm__("a3") = (long)(parm1);
__asm__ __volatile__
(
"movi a6, %1\n"
"wsr a6, intset\n"
"isync\n"
: "=r"(reg0)
: "i"(XCHAL_SWINT_CALL), "r"(reg0), "r"(reg1), "r"(reg2),
"r"(reg3)
: "a6", "memory"
);
return reg0;
}
/****************************************************************************
* Name: sys_call4
*
* Description:
* System call SYS_ argument and four additional parameters.
*
****************************************************************************/
static inline uintptr_t sys_call4(unsigned int nbr, uintptr_t parm1,
uintptr_t parm2, uintptr_t parm3,
uintptr_t parm4)
{
register long reg0 __asm__("a2") = (long)(nbr);
register long reg4 __asm__("a6") = (long)(parm4);
register long reg3 __asm__("a5") = (long)(parm3);
register long reg2 __asm__("a4") = (long)(parm2);
register long reg1 __asm__("a3") = (long)(parm1);
__asm__ __volatile__
(
"movi a7, %1\n"
"wsr a7, intset\n"
"isync\n"
: "=r"(reg0)
: "i"(XCHAL_SWINT_CALL), "r"(reg0), "r"(reg1), "r"(reg2),
"r"(reg3), "r"(reg4)
: "a7", "memory"
);
return reg0;
}
/****************************************************************************
* Name: sys_call5
*
* Description:
* System call SYS_ argument and five additional parameters.
*
****************************************************************************/
static inline uintptr_t sys_call5(unsigned int nbr, uintptr_t parm1,
uintptr_t parm2, uintptr_t parm3,
uintptr_t parm4, uintptr_t parm5)
{
register long reg0 __asm__("a2") = (long)(nbr);
register long reg5 __asm__("a7") = (long)(parm5);
register long reg4 __asm__("a6") = (long)(parm4);
register long reg3 __asm__("a5") = (long)(parm3);
register long reg2 __asm__("a4") = (long)(parm2);
register long reg1 __asm__("a3") = (long)(parm1);
__asm__ __volatile__
(
"movi a8, %1\n"
"wsr a8, intset\n"
"isync\n"
: "=r"(reg0)
: "i"(XCHAL_SWINT_CALL), "r"(reg0), "r"(reg1), "r"(reg2),
"r"(reg3), "r"(reg4), "r"(reg5)
: "a8", "memory"
);
return reg0;
}
/****************************************************************************
* Name: sys_call6
*
* Description:
* System call SYS_ argument and six additional parameters.
*
****************************************************************************/
static inline uintptr_t sys_call6(unsigned int nbr, uintptr_t parm1,
uintptr_t parm2, uintptr_t parm3,
uintptr_t parm4, uintptr_t parm5,
uintptr_t parm6)
{
register long reg0 __asm__("a2") = (long)(nbr);
register long reg6 __asm__("a8") = (long)(parm6);
register long reg5 __asm__("a7") = (long)(parm5);
register long reg4 __asm__("a6") = (long)(parm4);
register long reg3 __asm__("a5") = (long)(parm3);
register long reg2 __asm__("a4") = (long)(parm2);
register long reg1 __asm__("a3") = (long)(parm1);
__asm__ __volatile__
(
"movi a9, %1\n"
"wsr a9, intset\n"
"isync\n"
: "=r"(reg0)
: "i"(XCHAL_SWINT_CALL), "r"(reg0), "r"(reg1), "r"(reg2),
"r"(reg3), "r"(reg4), "r"(reg5), "r"(reg6)
: "a9", "memory"
);
return reg0;
}
#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_XTENSA_INCLUDE_SYSCALL_H */
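
All of the sys_callN helpers above share one convention: the call number
goes in a2, up to six arguments go in a3..a8, the software interrupt
selected by XCHAL_SWINT_CALL is pended through INTSET, and the result is
read back from a2.  As an illustration (hypothetical C, not part of the
commit), the new entry sequence of xtensa_context_restore shown further
below, which loads SYS_restore_context into a2 and the register-save
area pointer into a3 before writing INTSET, is equivalent to:

    /* Hypothetical C rendering of the new xtensa_context_restore entry;
     * the real implementation remains in assembly (xtensa_context.S).
     */

    static inline void restore_context_via_swint(uint32_t *regs)
    {
      sys_call1(SYS_restore_context, (uintptr_t)regs);
    }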

View File

@@ -265,6 +265,7 @@ void xtensa_pause_handler(void);
int xtensa_context_save(uint32_t *regs);
void xtensa_context_restore(uint32_t *regs) noreturn_function;
void xtensa_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
#if XCHAL_CP_NUM > 0
void xtensa_coproc_savestate(struct xtensa_cpstate_s *cpstate);

View File

@@ -126,49 +126,27 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
xtensa_restorestate(rtcb->xcp.regs);
}
/* Copy the user C context into the TCB at the (old) head of the
* ready-to-run Task list. if up_saveusercontext returns a non-zero
* value, then this is really the previously running task restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!xtensa_context_save(rtcb->xcp.regs))
else
{
#if XCHAL_CP_NUM > 0
/* Save the co-processor state in the suspended thread's co-
* processor save area.
struct tcb_s *nexttcb = this_task();
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
xtensa_coproc_savestate(&rtcb->xcp.cpstate);
#endif
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
*/
rtcb = this_task();
#if XCHAL_CP_NUM > 0
/* Set up the co-processor state for the newly started thread. */
xtensa_coproc_restorestate(&rtcb->xcp.cpstate);
#endif
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
* running task is closed down gracefully (data caches dump,
* MMU flushed) and set up the address environment for the new
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
#endif
/* Reset scheduler parameters */
nxsched_resume_scheduler(rtcb);
nxsched_resume_scheduler(nexttcb);
/* Then switch contexts */
xtensa_context_restore(rtcb->xcp.regs);
xtensa_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* xtensa_switchcontext forces a context switch to the task at the
* head of the ready-to-run list. It does not 'return' in the
* normal sense. When it does return, it is because the blocked
* task is again ready to run and has execution priority.
*/
}
}
}

View File

@@ -66,6 +66,9 @@
#include <arch/xtensa/xtensa_abi.h>
#include <arch/xtensa/xtensa_specregs.h>
#include "syscall.h"
#include "xtensa_swi.h"
/****************************************************************************
* Public Functions
****************************************************************************/
@@ -541,55 +544,12 @@ _xtensa_context_restore:
xtensa_context_restore:
ENTRY(16)
#ifndef __XTENSA_CALL0_ABI__
/* Force a spill of the live registers of the thread that has been
* suspended.
*
* _xtensa_window_spill return state:
* a2, a3: clobbered
* a4,a5,a8,a9,a12,a13: preserved
* a6,a7,a10,a11,a14,a15 clobbered if they were part of window(s)
* to be spilled, otherwise they are the same as on entry
* loop registers: Preserved
* SAR: clobbered
*
* We need to preserve only a2 for _xtensa_context_restore
*/
mov a3, a2
movi a2, SYS_restore_context
movi a4, XCHAL_SWINT_CALL
wsr a4, intset
isync
mov a4, a2 /* Save a2 in a preserved register */
rsr a5, PS /* Save PS in preserved register */
movi a3, ~(PS_WOE_MASK | PS_INTLEVEL_MASK)
and a2, a5, a3 /* Clear WOE, INTLEVEL */
addi a2, a2, XCHAL_EXCM_LEVEL /* Set INTLEVEL = XCHAL_EXCM_LEVEL */
wsr a2, PS /* Apply to PS */
rsync
call0 _xtensa_window_spill
wsr a5, PS /* Restore PS */
rsync
mov a2, a4 /* Recover a2 */
#endif
/* Restore the processor state for the newly started thread */
call0 _xtensa_context_restore /* Restore full register state */
/* Restore PC, PS, A0, and A2. */
l32i a0, a2, (4 * REG_PS) /* Restore PS */
wsr a0, PS
l32i a0, a2, (4 * REG_PC) /* Set up for RFE */
wsr a0, EPC_1
l32i a0, a2, (4 * REG_A0) /* Restore a0 */
l32i a2, a2, (4 * REG_A2) /* Restore A2 */
/* Return from exception. RFE returns from either the UserExceptionVector
* or the KernelExceptionVector. RFE sets PS.EXCM back to 0, and then
* jumps to the address in EPC[1]. PS.UM and PS.WOE are left unchanged.
*/
rfe /* And return from "exception" */
RET(16)
.size xtensa_context_restore, . - xtensa_context_restore

View File

@@ -87,17 +87,15 @@ void up_initial_state(struct tcb_s *tcb)
xcp->regs[REG_A1] = (uint32_t)tcb->stack_base_ptr + /* Physical top of stack frame */
tcb->adj_stack_size;
/* Set initial PS to int level 0, EXCM disabled ('rfe' will enable), user
* mode.
*/
/* Set initial PS to int level 0, user mode. */
#ifdef __XTENSA_CALL0_ABI__
xcp->regs[REG_PS] = PS_UM | PS_EXCM;
xcp->regs[REG_PS] = PS_UM;
#else
/* For windowed ABI set WOE and CALLINC (pretend task was 'call4'd). */
xcp->regs[REG_PS] = PS_UM | PS_EXCM | PS_WOE | PS_CALLINC(1);
xcp->regs[REG_PS] = PS_UM | PS_WOE | PS_CALLINC(1);
#endif
#if XCHAL_CP_NUM > 0

View File

@@ -97,49 +97,27 @@ void up_release_pending(void)
xtensa_restorestate(rtcb->xcp.regs);
}
/* Copy the exception context into the TCB of the task that
* was currently active. if up_saveusercontext returns a non-zero
* value, then this is really the previously running task
* restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!xtensa_context_save(rtcb->xcp.regs))
else
{
#if XCHAL_CP_NUM > 0
/* Save the co-processor state in the suspended thread's co-
* processor save area.
struct tcb_s *nexttcb = this_task();
/* Reset scheduler parameters */
nxsched_resume_scheduler(nexttcb);
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
xtensa_coproc_savestate(&rtcb->xcp.cpstate);
#endif
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
xtensa_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* xtensa_switchcontext forces a context switch to the task at the
* head of the ready-to-run list. It does not 'return' in the
* normal sense. When it does return, it is because the blocked
* task is again ready to run and has execution priority.
*/
rtcb = this_task();
#if XCHAL_CP_NUM > 0
/* Set up the co-processor state for the newly started thread. */
xtensa_coproc_restorestate(&rtcb->xcp.cpstate);
#endif
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
* running task is closed down gracefully (data caches dump,
* MMU flushed) and set up the address environment for the new
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
#endif
/* Update scheduler parameters */
nxsched_resume_scheduler(rtcb);
/* Then switch contexts */
xtensa_context_restore(rtcb->xcp.regs);
}
}
}

View File

@@ -149,51 +149,28 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
xtensa_restorestate(rtcb->xcp.regs);
}
/* Copy the exception context into the TCB at the (old) head of the
* ready-to-run Task list. if up_saveusercontext returns a non-zero
* value, then this is really the previously running task
* restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!xtensa_context_save(rtcb->xcp.regs))
else
{
#if XCHAL_CP_NUM > 0
/* Save the co-processor state in the suspended thread's co-
* processor save area.
struct tcb_s *nexttcb = this_task();
/* Reset scheduler parameters */
nxsched_resume_scheduler(nexttcb);
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
xtensa_coproc_savestate(&rtcb->xcp.cpstate);
#endif
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
xtensa_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* xtensa_switchcontext forces a context switch to the task at
* the head of the ready-to-run list. It does not 'return' in
* the normal sense. When it does return, it is because the
* blocked task is again ready to run and has execution
* priority.
*/
rtcb = this_task();
#if XCHAL_CP_NUM > 0
/* Set up the co-processor state for the newly started
* thread.
*/
xtensa_coproc_restorestate(&rtcb->xcp.cpstate);
#endif
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
* running task is closed down gracefully (data caches dump,
* MMU flushed) and set up the address environment for the new
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
#endif
/* Update scheduler parameters */
nxsched_resume_scheduler(rtcb);
/* Then switch contexts */
xtensa_context_restore(rtcb->xcp.regs);
}
}
}

View File

@@ -0,0 +1,47 @@
/****************************************************************************
* arch/xtensa/src/common/xtensa_switchcontext.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "syscall.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: xtensa_switchcontext
*
* Description:
* Save the current thread context and restore the specified context.
*
* Returned Value:
* None
*
****************************************************************************/
void xtensa_switchcontext(uint32_t *saveregs, uint32_t *restoreregs)
{
sys_call2(SYS_switch_context, (uintptr_t)saveregs, (uintptr_t)restoreregs);
}
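
For reference, the scheduler paths changed elsewhere in this commit
(up_block_task, up_release_pending, up_reprioritize_rtr and
up_unblock_task) all call it with the same pattern:

    struct tcb_s *nexttcb = this_task();

    /* Reset scheduler parameters, then switch to the task at the head
     * of the ready-to-run list.  xtensa_switchcontext() does not return
     * until this task is given execution priority again.
     */

    nxsched_resume_scheduler(nexttcb);
    xtensa_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);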

View File

@@ -111,51 +111,27 @@ void up_unblock_task(struct tcb_s *tcb)
xtensa_restorestate(rtcb->xcp.regs);
}
/* We are not in an interrupt handler. Copy the user C context
* into the TCB of the task that was previously active. if
* up_saveusercontext returns a non-zero value, then this is really the
* previously running task restarting!
*/
/* No, then we will need to perform the user context switch */
else if (!xtensa_context_save(rtcb->xcp.regs))
else
{
#if XCHAL_CP_NUM > 0
/* Save the co-processor state in the suspended thread's co-
* processor save area.
struct tcb_s *nexttcb = this_task();
/* Reset scheduler parameters */
nxsched_resume_scheduler(nexttcb);
/* Switch context to the context of the task at the head of the
* ready to run list.
*/
xtensa_coproc_savestate(&rtcb->xcp.cpstate);
#endif
xtensa_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* Restore the exception context of the new task that is ready to
* run (probably tcb). This is the new rtcb at the head of the
* ready-to-run task list.
/* xtensa_switchcontext forces a context switch to the task at the
* head of the ready-to-run list. It does not 'return' in the
* normal sense. When it does return, it is because the blocked
* task is again ready to run and has execution priority.
*/
rtcb = this_task();
#if XCHAL_CP_NUM > 0
/* Set up the co-processor state for the newly started thread. */
xtensa_coproc_restorestate(&rtcb->xcp.cpstate);
#endif
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
* running task is closed down gracefully (data caches dump,
* MMU flushed) and set up the address environment for the new
* thread at the head of the ready-to-run list.
*/
group_addrenv(rtcb);
#endif
/* Update scheduler parameters */
nxsched_resume_scheduler(rtcb);
/* Then switch contexts */
xtensa_context_restore(rtcb->xcp.regs);
}
}
}

View File

@@ -40,7 +40,7 @@ CMN_CSRCS += xtensa_puts.c xtensa_releasepending.c xtensa_releasestack.c
CMN_CSRCS += xtensa_reprioritizertr.c xtensa_schedsigaction.c
CMN_CSRCS += xtensa_sigdeliver.c xtensa_stackframe.c xtensa_udelay.c
CMN_CSRCS += xtensa_unblocktask.c xtensa_usestack.c xtensa_swint.c
CMN_CSRCS += esp32_systemreset.c esp32_resetcause.c
CMN_CSRCS += esp32_systemreset.c esp32_resetcause.c xtensa_switchcontext.c
# Configuration-dependent common XTENSA files

View File

@@ -40,6 +40,7 @@ CMN_CSRCS += xtensa_puts.c xtensa_releasepending.c xtensa_releasestack.c
CMN_CSRCS += xtensa_reprioritizertr.c xtensa_schedsigaction.c
CMN_CSRCS += xtensa_sigdeliver.c xtensa_stackframe.c xtensa_udelay.c
CMN_CSRCS += xtensa_unblocktask.c xtensa_usestack.c xtensa_swint.c
CMN_CSRCS += xtensa_switchcontext.c
# Configuration-dependent common XTENSA files

View File

@@ -40,6 +40,7 @@ CMN_CSRCS += xtensa_puts.c xtensa_releasepending.c xtensa_releasestack.c
CMN_CSRCS += xtensa_reprioritizertr.c xtensa_schedsigaction.c
CMN_CSRCS += xtensa_sigdeliver.c xtensa_stackframe.c xtensa_udelay.c
CMN_CSRCS += xtensa_unblocktask.c xtensa_usestack.c xtensa_swint.c
CMN_CSRCS += xtensa_switchcontext.c
# Configuration-dependent common XTENSA files