arm64: save FPU regs every time
Signed-off-by: ligd <liguiding1@xiaomi.com>
parent d782f6c1ac
commit 7cbcf82bed
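In short: the FPU state moves into the exception frame itself. struct regs_context gains a struct fpu_reg member, the exception entry macro calls arm64_fpu_save unconditionally and arm64_exit_exception calls arm64_fpu_restore, and the lazy FPU-access-trap machinery in arm64_fpu.c (arm64_fpu_trap, arm64_fpu_context_save/restore and the per-CPU fpu_owner bookkeeping) is removed, together with the per-task FPU frame setup in task creation and signal delivery.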
@@ -299,6 +299,9 @@ struct regs_context
   uint64_t spsr;
   uint64_t sp_el0;
   uint64_t exe_depth;
+#ifdef CONFIG_ARCH_FPU
+  struct fpu_reg fpu_regs;
+#endif
 };

 /****************************************************************************
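Adding fpu_regs to regs_context means every exception frame now carries the full FPU state. Below is a minimal sketch of the layout this implies, assuming the store offsets used by arm64_fpu_save later in this commit (32 Q registers of 16 bytes each, then 32-bit FPSR and FPCR words); the *_sketch names are stand-ins, not the NuttX definitions.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Inferred FPU save area: q0..q31 at 16-byte offsets, then fpsr/fpcr. */

struct fpu_reg_sketch
{
  uint64_t q[32][2];   /* 32 x 128-bit Q registers                    */
  uint32_t fpsr;       /* stored at offset 16 * 32 + 0 in the .S code */
  uint32_t fpcr;       /* stored at offset 16 * 32 + 4 in the .S code */
};

/* Check the offsets against the assembly further down. */

static_assert(offsetof(struct fpu_reg_sketch, fpsr) == 16 * 32 + 0, "fpsr");
static_assert(offsetof(struct fpu_reg_sketch, fpcr) == 16 * 32 + 4, "fpcr");

Each exception frame therefore grows by roughly sizeof(struct fpu_reg) (a bit over 512 bytes); that is the cost of saving the registers every time instead of on first use.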
@@ -67,9 +67,6 @@ void up_exit(int status)
   enter_critical_section();

   /* Destroy the task at the head of the ready to run list. */
-#ifdef CONFIG_ARCH_FPU
-  arm64_destory_fpu(tcb);
-#endif

   nxtask_exit();

@@ -77,11 +77,10 @@ struct arm64_fpu_procfs_file_s
  * Private Data
  ***************************************************************************/

-static struct fpu_reg g_idle_thread_fpu[CONFIG_SMP_NCPUS];
-static struct arm64_cpu_fpu_context g_cpu_fpu_ctx[CONFIG_SMP_NCPUS];
-
 #ifdef CONFIG_FS_PROCFS_REGISTER

+static struct arm64_cpu_fpu_context g_cpu_fpu_ctx[CONFIG_SMP_NCPUS];
+
 /* procfs methods */

 static int arm64_fpu_procfs_open(struct file *filep, const char *relpath,
@@ -262,141 +261,6 @@ static int arm64_fpu_procfs_stat(const char *relpath, struct stat *buf)
 }
 #endif

-/***************************************************************************
- * Public Functions
- ***************************************************************************/
-
-void arm64_init_fpu(struct tcb_s *tcb)
-{
-  if (tcb->pid < CONFIG_SMP_NCPUS)
-    {
-#ifdef CONFIG_SMP
-      int cpu = tcb->cpu;
-#else
-      int cpu = 0;
-#endif
-      memset(&g_cpu_fpu_ctx[cpu], 0,
-             sizeof(struct arm64_cpu_fpu_context));
-      g_cpu_fpu_ctx[cpu].idle_thread = tcb;
-
-      tcb->xcp.fpu_regs = (uint64_t *)&g_idle_thread_fpu[cpu];
-    }
-
-  memset(tcb->xcp.fpu_regs, 0, sizeof(struct fpu_reg));
-}
-
-void arm64_destory_fpu(struct tcb_s *tcb)
-{
-  struct tcb_s *owner;
-
-  /* save current fpu owner's context */
-
-  owner = g_cpu_fpu_ctx[this_cpu()].fpu_owner;
-
-  if (owner == tcb)
-    {
-      g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL;
-    }
-}
-
-/***************************************************************************
- * Name: arm64_fpu_enter_exception
- *
- * Description:
- *   called at every time get into a exception
- *
- ***************************************************************************/
-
-void arm64_fpu_enter_exception(void)
-{
-}
-
-void arm64_fpu_exit_exception(void)
-{
-}
-
-void arm64_fpu_trap(struct regs_context *regs)
-{
-  struct tcb_s *owner;
-
-  UNUSED(regs);
-
-  /* disable fpu trap access */
-
-  arm64_fpu_access_trap_disable();
-
-  /* save current fpu owner's context */
-
-  owner = g_cpu_fpu_ctx[this_cpu()].fpu_owner;
-
-  if (owner != NULL)
-    {
-      arm64_fpu_save((struct fpu_reg *)owner->xcp.fpu_regs);
-      ARM64_DSB();
-      g_cpu_fpu_ctx[this_cpu()].save_count++;
-      g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL;
-    }
-
-  if (arch_get_exception_depth() > 1)
-    {
-      /* if get_exception_depth > 1
-       * it means FPU access exception occurred in exception context
-       * switch FPU owner to idle thread
-       */
-
-      owner = g_cpu_fpu_ctx[this_cpu()].idle_thread;
-    }
-  else
-    {
-      owner = running_task();
-    }
-
-  /* restore our context */
-
-  arm64_fpu_restore((struct fpu_reg *)owner->xcp.fpu_regs);
-  g_cpu_fpu_ctx[this_cpu()].restore_count++;
-
-  /* become new owner */
-
-  g_cpu_fpu_ctx[this_cpu()].fpu_owner = owner;
-}
-
-void arm64_fpu_context_restore(void)
-{
-  struct tcb_s *new_tcb = running_task();
-
-  arm64_fpu_access_trap_disable();
-
-  /* FPU trap has happened at this task */
-
-  if (new_tcb == g_cpu_fpu_ctx[this_cpu()].fpu_owner)
-    {
-      arm64_fpu_access_trap_disable();
-    }
-  else
-    {
-      arm64_fpu_access_trap_enable();
-    }
-
-  g_cpu_fpu_ctx[this_cpu()].switch_count++;
-}
-
-#ifdef CONFIG_SMP
-void arm64_fpu_context_save(void)
-{
-  struct tcb_s *tcb = running_task();
-
-  if (tcb == g_cpu_fpu_ctx[this_cpu()].fpu_owner)
-    {
-      arm64_fpu_access_trap_disable();
-      arm64_fpu_save((struct fpu_reg *)tcb->xcp.fpu_regs);
-      ARM64_DSB();
-      g_cpu_fpu_ctx[this_cpu()].save_count++;
-      g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL;
-    }
-}
-#endif
-
 void arm64_fpu_enable(void)
 {
   irqstate_t flags = up_irq_save();
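The block removed above is the entire lazy-switching path: the FPU-access trap handler saved the previous owner's registers on a thread's first FPU instruction, restored the new owner's, and tracked ownership per CPU. A condensed sketch of the state it had to maintain, paraphrased from the removed code (struct tcb_s is only forward-declared here):

/* Per-CPU bookkeeping required by the removed lazy scheme. With the FPU
 * state saved and restored in every exception frame, none of this
 * ownership tracking is needed any more.
 */

struct tcb_s;                    /* NuttX task control block (opaque here) */

struct arm64_cpu_fpu_context_sketch
{
  struct tcb_s *fpu_owner;       /* thread whose state is live in the FPU  */
  struct tcb_s *idle_thread;     /* fallback owner for nested exceptions   */
  unsigned int  save_count;      /* statistics exposed through procfs      */
  unsigned int  restore_count;
  unsigned int  switch_count;
};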
@@ -66,10 +66,10 @@ SECTION_FUNC(text, arm64_fpu_save)
     stp    q28, q29, [x0, #(16 * FPU_REG_Q28)]
     stp    q30, q31, [x0, #(16 * FPU_REG_Q30)]

-    mrs    x1, fpsr
-    mrs    x2, fpcr
-    str    w1, [x0, #(16 * 32 + 0)]
-    str    w2, [x0, #(16 * 32 + 4)]
+    mrs    x10, fpsr
+    mrs    x11, fpcr
+    str    w10, [x0, #(16 * 32 + 0)]
+    str    w11, [x0, #(16 * 32 + 4)]

     ret

@@ -93,9 +93,9 @@ SECTION_FUNC(text, arm64_fpu_restore)
     ldp    q28, q29, [x0, #(16 * FPU_REG_Q28)]
     ldp    q30, q31, [x0, #(16 * FPU_REG_Q30)]

-    ldr    w1, [x0, #(16 * 32 + 0)]
-    ldr    w2, [x0, #(16 * 32 + 4)]
-    msr    fpsr, x1
-    msr    fpcr, x2
+    ldr    w10, [x0, #(16 * 32 + 0)]
+    ldr    w11, [x0, #(16 * 32 + 4)]
+    msr    fpsr, x10
+    msr    fpcr, x11

     ret
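The save/restore helpers keep the same layout, with FPSR and FPCR stored right after the 32 Q registers at offset 16 * 32; only the scratch registers change from x1/x2 to x10/x11, presumably to keep the low registers untouched for the new callers in the exception entry and exit paths.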
@@ -47,10 +47,6 @@
 #include "chip.h"
 #include "arm64_fatal.h"

-#ifdef CONFIG_ARCH_FPU
-#include "arm64_fpu.h"
-#endif
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -70,17 +66,6 @@ void arm64_new_task(struct tcb_s * tcb)
     }
 #endif

-#ifdef CONFIG_ARCH_FPU
-  struct fpu_reg *pfpuctx;
-  pfpuctx = STACK_PTR_TO_FRAME(struct fpu_reg, stack_ptr);
-  tcb->xcp.fpu_regs = (uint64_t *)pfpuctx;
-
-  /* set fpu context */
-
-  arm64_init_fpu(tcb);
-  stack_ptr = (uintptr_t)pfpuctx;
-#endif
-
   pinitctx = STACK_PTR_TO_FRAME(struct regs_context, stack_ptr);
   memset(pinitctx, 0, sizeof(struct regs_context));
   pinitctx->elr = (uint64_t)tcb->start;
@@ -150,11 +135,6 @@ void up_initial_state(struct tcb_s *tcb)
   tcb->stack_base_ptr = tcb->stack_alloc_ptr;
   tcb->adj_stack_size = CONFIG_IDLETHREAD_STACKSIZE;

-#ifdef CONFIG_ARCH_FPU
-  /* set fpu context */
-
-  arm64_init_fpu(tcb);
-#endif
   /* set initialize idle thread tcb and exception depth
    * core 0, idle0
    */
@@ -38,10 +38,6 @@
 #include "irq/irq.h"
 #include "arm64_fatal.h"

-#ifdef CONFIG_ARCH_FPU
-#include "arm64_fpu.h"
-#endif
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -58,16 +54,6 @@ void arm64_init_signal_process(struct tcb_s *tcb, struct regs_context *regs)
   struct regs_context *psigctx;
   char *stack_ptr = (char *)pctx->sp_elx - sizeof(struct regs_context);

-#ifdef CONFIG_ARCH_FPU
-  struct fpu_reg *pfpuctx;
-  pfpuctx = STACK_PTR_TO_FRAME(struct fpu_reg, stack_ptr);
-  tcb->xcp.fpu_regs = (uint64_t *)pfpuctx;
-
-  /* set fpu context */
-
-  arm64_init_fpu(tcb);
-  stack_ptr = (char *)pfpuctx;
-#endif
   psigctx = STACK_PTR_TO_FRAME(struct regs_context, stack_ptr);
   memset(psigctx, 0, sizeof(struct regs_context));
   psigctx->elr = (uint64_t)arm64_sigdeliver;
@@ -168,9 +154,6 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
    * have been delivered.
    */

-#ifdef CONFIG_ARCH_FPU
-  tcb->xcp.saved_fpu_regs = tcb->xcp.fpu_regs;
-#endif
   tcb->xcp.saved_reg = tcb->xcp.regs;

   /* create signal process context */
@@ -38,10 +38,6 @@
 #include "irq/irq.h"
 #include "arm64_fatal.h"

-#ifdef CONFIG_ARCH_FPU
-#include "arm64_fpu.h"
-#endif
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -157,11 +153,6 @@ retry:
   rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */
   rtcb->xcp.regs = rtcb->xcp.saved_reg;

-#ifdef CONFIG_ARCH_FPU
-  arm64_destory_fpu(rtcb);
-  rtcb->xcp.fpu_regs = rtcb->xcp.saved_fpu_regs;
-#endif
-
   /* Then restore the correct state for this thread of execution. */

 #ifdef CONFIG_SMP
@@ -46,7 +46,7 @@
  */

 .macro arm64_enter_exception xreg0, xreg1
-    sub    sp, sp, #8 * XCPTCONTEXT_GP_REGS
+    sub    sp, sp, #8 * XCPTCONTEXT_REGS

     stp    x0, x1, [sp, #8 * REG_X0]
     stp    x2, x3, [sp, #8 * REG_X2]
@@ -65,8 +65,8 @@
     stp    x28, x29, [sp, #8 * REG_X28]

     /* Save the current task's SP_ELx and x30 */
-    add    \xreg0, sp, #8 * XCPTCONTEXT_GP_REGS
-    stp    x30, \xreg0, [sp, #8 * REG_X30]
+    add    \xreg0, sp, #8 * XCPTCONTEXT_REGS
+    stp    x30, \xreg0, [sp, #8 * REG_X30]

     /* ELR and SPSR */
 #if CONFIG_ARCH_ARM64_EXCEPTION_LEVEL == 3
@@ -78,17 +78,20 @@
 #endif
     stp    \xreg0, \xreg1, [sp, #8 * REG_ELR]

-    /* increment exception depth */
+    /* Increment exception depth */

     mrs    \xreg0, tpidrro_el0
     mov    \xreg1, #1
     add    \xreg0, \xreg0, \xreg1
     msr    tpidrro_el0, \xreg0

-#ifdef CONFIG_ARCH_FPU
-    bl     arm64_fpu_enter_exception
-#endif
+    /* Save the FPU registers */

+#ifdef CONFIG_ARCH_FPU
+    add    x0, sp, #8 * XCPTCONTEXT_GP_REGS
+    bl     arm64_fpu_save
+    ldr    x0, [sp, #8 * REG_X0]
+#endif
 .endm

 /****************************************************************************
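With the frame laid out as general-purpose registers followed by the FPU block, the entry macro can compute the FPU area as sp + 8 * XCPTCONTEXT_GP_REGS, and it reloads x0 from the saved frame afterwards because x0 was used as the argument to arm64_fpu_save. A small illustration of that address arithmetic, with a stand-in constant (the real value comes from the NuttX headers):

#include <stdint.h>

#define GP_SLOTS_SKETCH 40U   /* stand-in for XCPTCONTEXT_GP_REGS, illustrative only */

/* Matches "add x0, sp, #8 * XCPTCONTEXT_GP_REGS" in the macro above:
 * the FPU block begins immediately after the general-purpose slots,
 * which is why the frame is now XCPTCONTEXT_REGS (GP + FPU) deep.
 */

static inline uint8_t *fpu_area_sketch(uint8_t *frame_base)
{
  return frame_base + 8U * GP_SLOTS_SKETCH;
}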
@@ -243,9 +246,8 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
 GTEXT(arm64_exit_exception)
 SECTION_FUNC(text, arm64_exit_exception)
 #ifdef CONFIG_ARCH_FPU
-    bl     arm64_fpu_exit_exception
-GTEXT(arm64_exit_exc_fpu_done)
-arm64_exit_exc_fpu_done:
+    add    x0, sp, #8 * XCPTCONTEXT_GP_REGS
+    bl     arm64_fpu_restore
 #endif

     /* restore spsr and elr at el1*/
@@ -283,6 +285,6 @@ arm64_exit_exc_fpu_done:
     ldp    x28, x29, [sp, #8 * REG_X28]
     ldp    x30, xzr, [sp, #8 * REG_X30]

-    add    sp, sp, #8 * XCPTCONTEXT_GP_REGS
+    add    sp, sp, #8 * XCPTCONTEXT_REGS

     eret
@@ -118,16 +118,8 @@ SECTION_FUNC(text, arm64_context_switch)
     cmp    x1, #0x0
     beq    restore_new

-#if defined(CONFIG_ARCH_FPU) && defined(CONFIG_SMP)
-    stp    x0, x1, [sp, #-16]!
-    stp    xzr, x30, [sp, #-16]!
-    bl     arm64_fpu_context_save
-    ldp    xzr, x30, [sp], #16
-    ldp    x0, x1, [sp], #16
-#endif
-
     /* Save the current SP_ELx */
-    add    x4, sp, #8 * XCPTCONTEXT_GP_REGS
+    add    x4, sp, #8 * XCPTCONTEXT_REGS
     str    x4, [x1, #8 * REG_SP_ELX]

     /* Save the current task's SP_EL0 and exception depth */
@@ -144,13 +136,7 @@ restore_new:

     /* retrieve new thread's SP_ELx */
     ldr    x4, [x0, #8 * REG_SP_ELX]
-    sub    sp, x4, #8 * XCPTCONTEXT_GP_REGS
-
-#ifdef CONFIG_ARCH_FPU
-    stp    xzr, x30, [sp, #-16]!
-    bl     arm64_fpu_context_restore
-    ldp    xzr, x30, [sp], #16
-#endif
+    sub    sp, x4, #8 * XCPTCONTEXT_REGS

     /* Return to arm64_sync_exc() or arm64_irq_handler() */

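In arm64_context_switch the explicit arm64_fpu_context_save/arm64_fpu_context_restore calls disappear: the FPU registers were already captured in the outgoing thread's frame at exception entry, so switching threads is just a matter of switching SP_ELx to the other frame, which is now a full XCPTCONTEXT_REGS frame rather than a GP-only one.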
@@ -205,19 +191,6 @@ SECTION_FUNC(text, arm64_sync_exc)
 #endif
     lsr    x10, x9, #26

-#ifdef CONFIG_ARCH_FPU
-    /* fpu trap */
-
-    cmp    x10, #0x07    /* Access to SIMD or floating-point */
-    bne    1f
-    mov    x0, sp        /* x0 = context */
-    bl     arm64_fpu_trap
-
-    /* when the fpu trap is handled */
-
-    b      arm64_exit_exc_fpu_done
-1:
-#endif
     /* 0x15 = SVC system call */

     cmp    x10, #0x15
@@ -332,13 +305,7 @@ do_switch:

     bl     arm64_context_switch

-#ifdef CONFIG_ARCH_FPU
-    /* when the fpu trap is handled */
-
-    b      arm64_exit_exc_fpu_done
-#else
     b      arm64_exit_exception
-#endif

 save_context:
     arm64_exception_context_save x0 x1 sp
@@ -433,11 +400,6 @@ irq_context_switch:
      * call arm64_context_switch(x0) to switch
      */
     bl     arm64_context_switch
-#ifdef CONFIG_ARCH_FPU
-    /* when the fpu trap is handled */
-
-    b      arm64_exit_exc_fpu_done
-#endif

 irq_exit:
     b      arm64_exit_exception
@@ -527,11 +489,6 @@ SECTION_FUNC(text, arm64_fiq_handler)
      * call arm64_context_switch(x0) to switch
      */
     bl     arm64_context_switch
-#ifdef CONFIG_ARCH_FPU
-    /* when the fpu trap is handled */
-
-    b      arm64_exit_exc_fpu_done
-#endif
 fiq_exit:
     b      arm64_exit_exception
 #endif