/****************************************************************************
* arch/xtensa/src/common/xtensa_context.S
*
* Adapted for use in NuttX by:
*
* Copyright (C) 2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Derives from logic originally provided by Cadence Design Systems Inc.
*
* Copyright (c) 2006-2015 Cadence Design Systems Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
****************************************************************************/
/* XTENSA CONTEXT SAVE AND RESTORE ROUTINES
*
* Low-level Call0 functions for handling generic context save and restore of
* registers not specifically addressed by the interrupt vectors and handlers.
* Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
* Except for the calls to RTOS functions, this code is generic to Xtensa.
*
* Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
* save regs (A12-A15), which is always the case if the handlers are coded in C.
* However A12, A13 are made available as scratch registers for interrupt dispatch
* code, so are presumed saved anyway, and are always restored even in Call0 ABI.
* Only A14, A15 are truly handled as callee-save regs.
*
* Because Xtensa is a configurable architecture, this port supports all
* user-generated configurations (except restrictions stated in the release
* notes). This is accomplished by conditional compilation using macros and
* functions defined in the Xtensa HAL (hardware abstraction layer) for your
* configuration.
* Only the processor state included in your configuration is saved and restored,
* including any processor state added by user configuration options or TIE.
*/
/****************************************************************************
* Included Files
****************************************************************************/
#include "xtensa_rtos.h"
#include "xtensa_context.h"
#ifdef CONFIG_XTENSA_USE_OVLY
# include <xtensa/overlay_os_asm.h>
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
.text
/****************************************************************************
* Name: _xtensa_context_save
*
* Description:
*
* NOTE: MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION!
*
* Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13,
* in the interrupt stack frame defined in xtensa_rtos.h. Its counterpart
* is _xtensa_context_restore (which also restores A12, A13).
*
* Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the
* frame. This function preserves A12 & A13 in order to provide the caller
* with 2 scratch regs that need not be saved over the call to this
* function. The choice of which 2 regs to provide is governed by
* xthal_window_spill_nw and xthal_save_extra_nw, to avoid moving data
* more than necessary. Caller can assign regs accordingly.
*
* Entry Conditions:
* - A0 = Return address in caller.
* - A1 = Stack pointer of interrupted thread or handler ("interruptee").
* - Original A12, A13 have already been saved in the interrupt stack
* frame.
* - Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at
* the point of interruption.
* - If windowed ABI, PS.EXCM = 1 (exceptions disabled).
*
* Exit conditions:
* - A0 = Return address in caller.
* - A1 = Stack pointer of interrupted thread or handler ("interruptee").
* - A12, A13 as at entry (preserved).
* - If windowed ABI, PS.EXCM = 1 (exceptions disabled).
*
****************************************************************************/
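/* A minimal usage sketch (hypothetical dispatch code, Call0 only; the REG_*
* offsets are assumed to come from xtensa_context.h, including REG_A0 which
* is not referenced in this file). The vectors/handlers that call this
* function do roughly the equivalent of:
*
*     addi  sp, sp, -(4 * REG_FRMSZ)   # Allocate an interrupt stack frame
*     s32i  a0, sp, (4 * REG_A0)       # Save the interruptee's A0
*     s32i  a12, sp, (4 * REG_A12)     # Save the two dispatch scratch regs
*     s32i  a13, sp, (4 * REG_A13)
*     ...                              # Also save PC, PS and the old SP
*     call0 _xtensa_context_save       # Save all remaining state
*/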
.global _xtensa_context_save
.type _xtensa_context_save, @function
.align 4
.literal_position
.align 4
_xtensa_context_save:
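/* Save address registers a2-a11 into the interrupt stack frame */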
s32i a2, sp, (4 * REG_A2)
s32i a3, sp, (4 * REG_A3)
s32i a4, sp, (4 * REG_A4)
s32i a5, sp, (4 * REG_A5)
s32i a6, sp, (4 * REG_A6)
s32i a7, sp, (4 * REG_A7)
s32i a8, sp, (4 * REG_A8)
s32i a9, sp, (4 * REG_A9)
s32i a10, sp, (4 * REG_A10)
s32i a11, sp, (4 * REG_A11)
/* Call0 ABI callee-saved regs a12-a15 do not need to be saved here.
* a12-a13 are the caller's responsibility, so the caller can use them as
* scratch. Thus only a14-a15 need to be saved here, and only for the
* windowed ABI (not Call0).
*/
#ifndef CONFIG_XTENSA_CALL0_ABI
s32i a14, sp, (4 * REG_A14)
s32i a15, sp, (4 * REG_A15)
#endif
rsr a3, SAR
s32i a3, sp, (4 * REG_SAR)
#ifdef CONFIG_XTENSA_HAVE_LOOPS
rsr a3, LBEG
s32i a3, sp, (4 * REG_LBEG)
rsr a3, LEND
s32i a3, sp, (4 * REG_LEND)
rsr a3, LCOUNT
s32i a3, sp, (4 * REG_LCOUNT)
#endif
#ifdef CONFIG_XTENSA_USE_SWPRI
/* Save virtual priority mask */
movi a3, _xtensa_vprimask
l32i a3, a3, 0
s32i a3, sp, (4 * REG_VPRI)
#endif
#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(CONFIG_XTENSA_CALL0_ABI)
mov a9, a0 /* Preserve ret addr */
#endif
#ifndef CONFIG_XTENSA_CALL0_ABI
/* To spill the register windows, we temporarily need the pre-interrupt
* stack pointer and a4-a15. a9, a12, a13 are saved temporarily (in the
* frame's temp slots) and their originals recovered afterwards. Interrupts
* must be disabled below CONFIG_XTENSA_EXCM_LEVEL, and window overflow and
* underflow exceptions must be disabled (assured by PS.EXCM == 1).
*/
s32i a12, sp, (4 * REG_TMP0) /* Temp. save stuff in stack frame */
s32i a13, sp, (4 * REG_TMP1)
s32i a9, sp, (4 * REG_TMP2)
/* Save the overlay state if we are supporting overlays. Since we just
* saved three registers, we can conveniently use them here. Note that
* as of now, overlays only work with the windowed calling ABI.
*/
#ifdef CONFIG_XTENSA_USE_OVLY
l32i a9, sp, (4 * REG_PC) /* Recover saved PC */
_xt_overlay_get_state a9, a12, a13
s32i a9, sp, (4 * REG_OVLY) /* Save overlay state */
#endif
l32i a12, sp, (4 * REG_A12) /* Recover original a9,12,13 */
l32i a13, sp, (4 * REG_A13)
l32i a9, sp, (4 * REG_A9)
addi sp, sp, (4 * REG_FRMSZ) /* Restore the interruptee's SP */
call0 xthal_window_spill_nw /* Preserves only a4,5,8,9,12,13 */
addi sp, sp, -(4 * REG_FRMSZ)
l32i a12, sp, (4 * REG_TMP0) /* Recover stuff from stack frame */
l32i a13, sp, (4 * REG_TMP1)
l32i a9, sp, (4 * REG_TMP2)
#endif
#if XCHAL_EXTRA_SA_SIZE > 0
/* NOTE: Normally the xthal_save_extra_nw macro only affects address
* registers a2-a5. It is theoretically possible for Xtensa processor
* designers to write TIE that causes more address registers to be
* affected, but it is generally unlikely. If that ever happens,
* more registers need to be saved/restored around this macro invocation.
* Here we assume a9,12,13 are preserved.
* Future Xtensa tools releases might limit the regs that can be affected.
*/
addi a2, sp, (4 * REG_EXTRA) /* Where to save it */
#if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* Align dynamically >16 bytes */
#endif
call0 xthal_save_extra_nw /* Destroys a0,2,3,4,5 */
#endif
#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(CONFIG_XTENSA_CALL0_ABI)
mov a0, a9 /* Retrieve ret addr */
#endif
ret
/****************************************************************************
* Name: _xtensa_context_restore
*
* Description:
*
* NOTE: MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION!
*
* Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in
* Call0 ABI, A14, A15 which are preserved by all interrupt handlers) from
* an interrupt stack frame defined in xtensa_rtos.h.
* Its counterpart is _xtensa_context_save (whose caller saved A12, A13).
*
* Caller is responsible to restore PC, PS, A0, A1 (SP).
*
* Entry Conditions:
* - A0 = Return address in caller.
* - A1 = Stack pointer of interrupted thread or handler ("interruptee").
*
* Exit conditions:
* - A0 = Return address in caller.
* - A1 = Stack pointer of interrupted thread or handler ("interruptee").
* - Other processor state except PC, PS, A0, A1 (SP), is as at the point
* of interruption.
*
****************************************************************************/
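/* A minimal usage sketch (hypothetical dispatch exit code, Call0 only; the
* REG_* offsets are assumed to come from xtensa_context.h):
*
*     call0 _xtensa_context_restore    # Restore the state saved above
*     l32i  a0, sp, (4 * REG_A0)       # Restore the interruptee's A0
*     ...                              # Restore PC, PS and the old SP, then
*                                      # return from the interrupt/exception
*/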
.global _xtensa_context_restore
.type _xtensa_context_restore,@function
.align 4
.literal_position
.align 4
_xtensa_context_restore:
#if XCHAL_EXTRA_SA_SIZE > 0
/* NOTE: Normally the xthal_restore_extra_nw macro only affects address
* registers a2-a5. It is theoretically possible for Xtensa processor
* designers to write TIE that causes more address registers to be
* affected, but it is generally unlikely. If that ever happens,
* more registers need to be saved/restored around this macro invocation.
* Here we only assume a13 is preserved.
* Future Xtensa tools releases might limit the regs that can be affected.
*/
mov a13, a0 /* Preserve ret addr */
addi a2, sp, (4 * REG_EXTRA) /* Where to find it */
#if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* Align dynamically >16 bytes */
#endif
call0 xthal_restore_extra_nw /* Destroys a0,2,3,4,5 */
mov a0, a13 /* Retrieve ret addr */
#endif
#ifdef CONFIG_XTENSA_HAVE_LOOPS
l32i a2, sp, (4 * REG_LBEG)
l32i a3, sp, (4 * REG_LEND)
wsr a2, LBEG
l32i a2, sp, (4 * REG_LCOUNT)
wsr a3, LEND
wsr a2, LCOUNT
#endif
#ifdef CONFIG_XTENSA_USE_OVLY
/* If we are using overlays, this is a good spot to check if we need
* to restore an overlay for the incoming task. Here we have a bunch
* of registers to spare. Note that this step is going to use a few
* bytes of storage below SP (SP-20 to SP-32) if an overlay is going
* to be restored.
*/
l32i a2, sp, (4 * REG_PC) /* Retrieve PC */
l32i a3, sp, (4 * REG_PS) /* Retrieve PS */
l32i a4, sp, (4 * REG_OVLY) /* Retrieve overlay state */
l32i a5, sp, (4 * REG_A1) /* Retrieve stack ptr */
_xt_overlay_check_map a2, a3, a4, a5, a6
s32i a2, sp, (4 * REG_PC) /* Save updated PC */
s32i a3, sp, (4 * REG_PS) /* Save updated PS */
#endif
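/* The code below assumes the following layout for the software-priority
* data (a hypothetical sketch, inferred only from the offsets used here:
* the INTENABLE shadow at offset 0 of _xtensa_intdata and the virtual
* priority mask at offset 4):
*
*     .data
*     _xtensa_intdata:
*     _xtensa_intenable:  .word 0      # Interrupts enabled by the RTOS
*     _xtensa_vprimask:   .word 0      # Current virtual priority mask
*/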
#ifdef CONFIG_XTENSA_USE_SWPRI
/* Restore virtual interrupt priority and interrupt enable */
movi a3, _xtensa_intdata
l32i a4, a3, 0 /* a4 = _xtensa_intenable */
l32i a5, sp, (4 * REG_VPRI) /* a5 = saved _xtensa_vprimask */
and a4, a4, a5
wsr a4, INTENABLE /* Update INTENABLE */
s32i a5, a3, 4 /* Restore _xtensa_vprimask */
#endif
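/* Restore SAR and address registers a2-a11 from the interrupt stack frame */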
l32i a3, sp, (4 * REG_SAR)
l32i a2, sp, (4 * REG_A2)
wsr a3, SAR
l32i a3, sp, (4 * REG_A3)
l32i a4, sp, (4 * REG_A4)
l32i a5, sp, (4 * REG_A5)
l32i a6, sp, (4 * REG_A6)
l32i a7, sp, (4 * REG_A7)
l32i a8, sp, (4 * REG_A8)
l32i a9, sp, (4 * REG_A9)
l32i a10, sp, (4 * REG_A10)
l32i a11, sp, (4 * REG_A11)
/* Call0 ABI callee-saved regs a12-a15 do not need to be restored here.
* However, a12-a13 were saved for scratch before XT_RTOS_INT_ENTER(), so
* they need to be restored anyway, despite being callee-saved in Call0.
*/
l32i a12, sp, (4 * REG_A12)
l32i a13, sp, (4 * REG_A13)
#ifndef CONFIG_XTENSA_CALL0_ABI
l32i a14, sp, (4 * REG_A14)
l32i a15, sp, (4 * REG_A15)
#endif
ret
/****************************************************************************
* Name: _xt_coproc_init
*
* Description:
* Initializes global co-processor management data, setting all co-
* processors to "unowned". Leaves CPENABLE as it found it (does NOT clear
* it).
*
* Called during initialization of the RTOS, before any threads run.
*
* This may be called from normal Xtensa single-threaded application code
* which might use co-processors. The Xtensa run-time initialization enables
* all co-processors. They must remain enabled here, else a co-processor
* exception might occur outside of a thread, which the exception handler
* doesn't expect.
*
* Entry Conditions:
* - Xtensa single-threaded run-time environment is in effect.
* No thread is yet running.
*
* Exit conditions:
* - None.
*
* Obeys ABI conventions per prototype:
* void _xt_coproc_init(void)
*
****************************************************************************/
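/* A minimal usage sketch (hypothetical start-up code; the function obeys
* normal ABI conventions, so in a Call0 ABI build this is simply):
*
*     call0 _xt_coproc_init            # Mark all co-processors as unowned
*
* In a windowed build it can be called like any other C function.
*/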
#if CONFIG_XTENSA_NCOPROCESSORS > 0
.global _xt_coproc_init
.type _xt_coproc_init,@function
.align 4
.literal_position
.align 4
_xt_coproc_init:
ENTRY0
/* Initialize thread co-processor ownerships to 0 (unowned). */
movi a2, _xt_coproc_owner_sa /* a2 = base of owner array */
addi a3, a2, (XCHAL_CP_MAX*portNUM_PROCESSORS) << 2 /* a3 = top+1 of owner array */
movi a4, 0 /* a4 = 0 (unowned) */
1: s32i a4, a2, 0
addi a2, a2, 4
bltu a2, a3, 1b
RET0
#endif
/****************************************************************************
* Name: _xt_coproc_release
*
* Description:
* Releases any and all co-processors owned by a given thread. The thread
* is identified by its co-processor state save area defined in
* xtensa_context.h.
*
* Must be called before a thread's co-proc save area is deleted to avoid
* memory corruption when the exception handler tries to save the state.
* May be called when a thread terminates or completes but does not delete
* the co-proc save area, to avoid the exception handler having to save
* the thread's co-proc state before another thread can use it
* (optimization).
*
* Entry Conditions:
* - A2 = Pointer to base of co-processor state save area.
*
* Exit conditions:
* - None.
*
* Obeys ABI conventions per prototype:
* void _xt_coproc_release(void * coproc_sa_base)
*
****************************************************************************/
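/* A minimal usage sketch (hypothetical thread-deletion path, Call0 ABI;
* <coproc_sa_base> stands in for the thread's co-processor save area
* address):
*
*     movi  a2, <coproc_sa_base>       # A2 = base of the CP save area
*     call0 _xt_coproc_release         # Release any CPs owned by the thread
*/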
#if CONFIG_XTENSA_NCOPROCESSORS > 0
.global _xt_coproc_release
.type _xt_coproc_release,@function
.align 4
.literal_position
.align 4
_xt_coproc_release:
ENTRY0 /* a2 = base of save area */
getcoreid a5
movi a3, XCHAL_CP_MAX << 2
mull a5, a5, a3
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
add a3, a3, a5
addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */
movi a5, 0 /* a5 = 0 (unowned) */
rsil a6, CONFIG_XTENSA_EXCM_LEVEL /* Lock interrupts */
1: l32i a7, a3, 0 /* a7 = owner at a3 */
bne a2, a7, 2f /* if (coproc_sa_base == owner) */
s32i a5, a3, 0 /* owner = unowned */
2: addi a3, a3, 1 << 2 /* a3 = next entry in owner array */
bltu a3, a4, 1b /* Repeat until end of array */
3: wsr a6, PS /* Restore interrupts */
RET0
#endif
/****************************************************************************
* Name: _xt_coproc_savecs
*
* Description:
* If there is a current thread and it has a coprocessor state save area,
* then save all callee-saved state into this area. This function is called
* from the solicited context switch handler. It calls a system-specific
* function to get the coprocessor save area base address.
*
* Entry conditions:
* - The thread being switched out is still the current thread.
* - CPENABLE state reflects which coprocessors are active.
* - Registers have been saved/spilled already.
*
* Exit conditions:
* - All necessary CP callee-saved state has been saved.
* - Registers a2-a7, a13-a15 have been trashed.
*
* Must be called from assembly code only, using CALL0.
*
****************************************************************************/
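/* A minimal usage sketch (hypothetical solicited context-switch path; per
* the entry conditions above, registers must already be saved/spilled):
*
*     call0 _xt_coproc_savecs          # Save callee-saved CP state, if any
*                                      # (clobbers a2-a7, a13-a15)
*/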
#if CONFIG_XTENSA_NCOPROCESSORS > 0
.extern _xt_coproc_sa_offset /* external reference */
.global _xt_coproc_savecs
.type _xt_coproc_savecs,@function
.align 4
.literal_position
.align 4
_xt_coproc_savecs:
/* At entry, CPENABLE should be showing which CPs are enabled. */
rsr a2, CPENABLE /* a2 = which CPs are enabled */
beqz a2, .Ldone /* quick exit if none */
mov a14, a0 /* save return address */
call0 XT_RTOS_CP_STATE /* get address of CP save area */
mov a0, a14 /* restore return address */
beqz a15, .Ldone /* if none then nothing to do */
s16i a2, a15, XT_CP_CS_ST /* save mask of CPs being stored */
movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
#if XCHAL_CP0_SA_SIZE
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP1_SA_SIZE
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP2_SA_SIZE
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP3_SA_SIZE
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP4_SA_SIZE
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP5_SA_SIZE
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP6_SA_SIZE
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP7_SA_SIZE
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
.Ldone:
ret
#endif
/****************************************************************************
* Name: _xt_coproc_restorecs
*
* Description:
* Restore any callee-saved coprocessor state for the incoming thread.
* This function is called from coprocessor exception handling, when
* giving ownership to a thread that solicited a context switch earlier.
* It calls a system-specific function to get the coprocessor save area
* base address.
*
* Entry conditions:
* - The incoming thread is set as the current thread.
* - CPENABLE is set up correctly for all required coprocessors.
* - a2 = mask of coprocessors to be restored.
*
* Exit conditions:
* - All necessary CP callee-saved state has been restored.
* - CPENABLE - unchanged.
* - Registers a2-a7, a13-a15 have been trashed.
*
* Must be called from assembly code only, using CALL0.
*
****************************************************************************/
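/* A minimal usage sketch (hypothetical co-processor exception handling
* path; A2 must already hold the mask of co-processors to restore):
*
*     movi  a2, <cp_mask>              # Mask of CPs to restore (hypothetical)
*     call0 _xt_coproc_restorecs       # Restore their callee-saved state
*/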
#if CONFIG_XTENSA_NCOPROCESSORS > 0
.global _xt_coproc_restorecs
.type _xt_coproc_restorecs,@function
.align 4
.literal_position
.align 4
_xt_coproc_restorecs:
mov a14, a0 /* Save return address */
call0 XT_RTOS_CP_STATE /* Get address of CP save area */
mov a0, a14 /* Restore return address */
beqz a15, .Ldone2 /* If none then nothing to do */
l16ui a3, a15, XT_CP_CS_ST /* a3 = which CPs have been saved */
xor a3, a3, a2 /* Clear the ones being restored */
s32i a3, a15, XT_CP_CS_ST /* Update saved CP mask */
movi a13, _xt_coproc_sa_offset /* Array of CP save offsets */
l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
#if XCHAL_CP0_SA_SIZE
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP1_SA_SIZE
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP2_SA_SIZE
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP3_SA_SIZE
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP4_SA_SIZE
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP5_SA_SIZE
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP6_SA_SIZE
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP7_SA_SIZE
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
.Ldone2:
ret
#endif