arch/xtensa: Few typos and style fixes.

Signed-off-by: Abdelatif Guettouche <abdelatif.guettouche@espressif.com>
This commit is contained in:
Abdelatif Guettouche 2020-09-12 12:50:49 +01:00 committed by hartmannathan
parent bc9d3cdd14
commit a128995eab
6 changed files with 58 additions and 59 deletions

View File

@ -31,7 +31,7 @@
*
****************************************************************************/
.file "xtensa_coproc.S"
.file "xtensa_coproc.S"
/****************************************************************************
* Included Files
@ -54,26 +54,26 @@
* Public Data
****************************************************************************/
.section .rodata, "a"
.section .rodata, "a"
/* Offset to CP n save area in thread's CP save area. */
/* Offset to CP n save area in thread's CP save area. */
.global _xtensa_coproc_saoffsets
.type _xtensa_coproc_saoffsets, @object
.align 16 /* Minimize crossing cache boundaries */
.global _xtensa_coproc_saoffsets
.type _xtensa_coproc_saoffsets, @object
.align 16 /* Minimize crossing cache boundaries */
_xtensa_coproc_saoffsets:
.word XTENSA_CP0_SA, XTENSA_CP1_SA, XTENSA_CP2_SA, XTENSA_CP3_SA
.word XTENSA_CP4_SA, XTENSA_CP5_SA, XTENSA_CP6_SA, XTENSA_CP7_SA
.word XTENSA_CP0_SA, XTENSA_CP1_SA, XTENSA_CP2_SA, XTENSA_CP3_SA
.word XTENSA_CP4_SA, XTENSA_CP5_SA, XTENSA_CP6_SA, XTENSA_CP7_SA
.size _xtensa_coproc_saoffsets, . - _xtensa_coproc_saoffsets
.size _xtensa_coproc_saoffsets, . - _xtensa_coproc_saoffsets
/****************************************************************************
* Public Functions
****************************************************************************/
.text
.text
/****************************************************************************
* Name: _xtensa_coproc_savestate
@ -102,12 +102,12 @@ _xtensa_coproc_saoffsets:
*
****************************************************************************/
.global _xtensa_coproc_savestate
.type _xtensa_coproc_savestate, @function
.global _xtensa_coproc_savestate
.type _xtensa_coproc_savestate, @function
.align 4
.literal_position
.align 4
.align 4
.literal_position
.align 4
_xtensa_coproc_savestate:
@ -225,9 +225,9 @@ xtensa_coproc_savestate:
#ifdef __XTENSA_CALL0_ABI__
/* Need to preserve a8-11. _xtensa_coproc_savestate modifies a2-a7,
* a13-a15. a12-a15 are callee saved registers so a13-a14 must be
* preserved.
*/
* a13-a15. a12-a15 are callee saved registers so a13-a14 must be
* preserved.
*/
ENTRY(16)
s32i a13, sp, LOCAL_OFFSET(1) /* Save clobbered registers */
@ -235,8 +235,8 @@ xtensa_coproc_savestate:
s32i a15, sp, LOCAL_OFFSET(3)
/* Call _xtensa_coproc_savestate() with A2=address of co-processor
* save area.
*/
* save area.
*/
call0 _xtensa_coproc_savestate
@ -249,15 +249,15 @@ xtensa_coproc_savestate:
#else
/* Need to preserve a8-15. _xtensa_coproc_savestate modifies a2-a7,
* a13-a15. So a13-a15 may need to be preserved.
*/
* a13-a15. So a13-a15 may need to be preserved.
*/
ENTRY(32 /*16*/) /* REVISIT: Why 32? */
s32i a0, sp, LOCAL_OFFSET(1) /* Save return address */
/* Call _xtensa_coproc_savestate() with A2=address of co-processor
* save area.
*/
* save area.
*/
call0 _xtensa_coproc_savestate
@ -424,9 +424,9 @@ xtensa_coproc_restorestate:
#ifdef __XTENSA_CALL0_ABI__
/* Need to preserve a8-11. _xtensa_coproc_restorestate modifies a2-a7,
* a13-a15. a12-a15 are callee saved registers so a13-a14 must be
* preserved.
*/
* a13-a15. a12-a15 are callee saved registers so a13-a14 must be
* preserved.
*/
ENTRY(16)
s32i a13, sp, LOCAL_OFFSET(1) /* Save clobbered values */
@ -434,8 +434,8 @@ xtensa_coproc_restorestate:
s32i a15, sp, LOCAL_OFFSET(3)
/* Call _xtensa_coproc_restorestate() with A2=address of co-processor
* save area. Registers a0, a2-a7, a13-a15 have been trashed.
*/
* save area. Registers a0, a2-a7, a13-a15 have been trashed.
*/
call0 _xtensa_coproc_restorestate
@ -448,25 +448,25 @@ xtensa_coproc_restorestate:
#else
/* Need to preserve a8-15. _xtensa_coproc_savestate modifies a2-a7,
* a13-a15. So a13-a15 may need to be preserved.
*/
* a13-a15. So a13-a15 may need to be preserved.
*/
ENTRY(32 /*16*/) /* REVISIT: Why 32? */
s32i a0, sp, LOCAL_OFFSET(1) /* Save return address */
/* Call _xtensa_coproc_restorestate() with A2=address of co-processor
* save area. Registers a0, a2-a7, a13-a15 have been trashed.
*/
* save area. Registers a0, a2-a7, a13-a15 have been trashed.
*/
call0 _xtensa_coproc_restorestate
call0 _xtensa_coproc_restorestate
/* Restore a0 and return */
l32i a0, sp, LOCAL_OFFSET(1) /* Recover return address */
RET(32 /*16*/) /* REVISIT: Why 32? */
l32i a0, sp, LOCAL_OFFSET(1) /* Recover return address */
RET(32 /*16*/) /* REVISIT: Why 32? */
#endif
.size xtensa_coproc_restorestate, . - xtensa_coproc_restorestate
.size xtensa_coproc_restorestate, . - xtensa_coproc_restorestate
#endif /* XCHAL_CP_NUM > 0 */

View File

@ -77,12 +77,12 @@ xtensa_enable_cpuint:
xsr a4, INTENABLE /* Disables all interrupts */
rsync
l32i a4, a2, 0 /* a4 = value of INTENABLE shadow */
l32i a4, a2, 0 /* a4 = value of INTENABLE shadow */
or a5, a4, a3 /* a5 = shadow | mask */
s32i a5, a2, 0 /* shadow |= mask */
s32i a5, a2, 0 /* shadow |= mask */
wsr a5, INTENABLE /* Set CPU INTENABLE to shadow */
mov a3, a4 /* Return previous shadow content */
mov a3, a4 /* Return previous shadow content */
RET(16)
.size xtensa_enable_cpuint, . - xtensa_enable_cpuint
@ -99,7 +99,7 @@ xtensa_enable_cpuint:
* writes that value to the hardware INTENABLE register. Can be called
* from interrupt handlers.
*
* NOTE: It is possible only to enable interrupts on the current CPU
* NOTE: It is possible only to disable interrupts on the current CPU
* because there is an INTENABLE register implemented in each CPU.
*
****************************************************************************/
@ -116,13 +116,13 @@ xtensa_disable_cpuint:
xsr a4, INTENABLE /* Disables all interrupts */
rsync
l32i a4, a2, 0 /* a4 = value of INTENABLE shadow */
l32i a4, a2, 0 /* a4 = value of INTENABLE shadow */
or a5, a4, a3 /* a5 = shadow | mask */
xor a5, a5, a3 /* a5 = shadow & ~mask */
s32i a5, a2, 0 /* shadow &= ~mask */
s32i a5, a2, 0 /* shadow &= ~mask */
wsr a5, INTENABLE /* Set CPU INTENABLE to shadow */
mov a3, a4 /* Return previous shadow content */
mov a3, a4 /* Return previous shadow content */
RET(16)
.size xtensa_disable_cpuint, . - xtensa_disable_cpuint

View File

@ -72,7 +72,7 @@ static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
* cpu - The index of the CPU to be queried
*
* Returned Value:
* true = a pause request is pending.
* true = a pause request is pending.
* false = no pause request is pending.
*
****************************************************************************/
@ -88,9 +88,9 @@ bool up_cpu_pausereq(int cpu)
* Description:
* Handle a pause request from another CPU. Normally, this logic is
executed from interrupt handling logic within the architecture-specific code.
* However, it is sometimes necessary necessary to perform the pending
* pause operation in other contexts where the interrupt cannot be taken
* in order to avoid deadlocks.
* However, it is sometimes necessary to perform the pending pause
* operation in other contexts where the interrupt cannot be taken in
* order to avoid deadlocks.
*
* This function performs the following operations:
*
@ -186,7 +186,7 @@ void xtensa_pause_handler(void)
int cpu = up_cpu_index();
/* Check for false alarms. Such false alarms could occur as a consequence of
* some deadlock breaking logic that might have already serviced the SG2
* some deadlock breaking logic that might have already serviced the
* interrupt by calling up_cpu_paused.
*/
@ -228,9 +228,9 @@ int up_cpu_pause(int cpu)
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
/* Take the both spinlocks. The g_cpu_wait spinlock will prevent the SGI2
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the interrupt
* handler from returning until up_cpu_resume() is called; g_cpu_paused
* is a handshake that will prefent this function from returning until
* is a handshake that will prevent this function from returning until
* the CPU is actually paused.
*/
@ -240,7 +240,7 @@ int up_cpu_pause(int cpu)
spin_lock(&g_cpu_wait[cpu]);
spin_lock(&g_cpu_paused[cpu]);
/* Execute SGI2 */
/* Execute the intercpu interrupt */
ret = xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);
if (ret < 0)
@ -276,8 +276,8 @@ int up_cpu_pause(int cpu)
* state of the task at the head of the g_assignedtasks[cpu] list, and
* resume normal tasking.
*
* This function is called after up_cpu_pause in order resume operation of
* the CPU after modifying its g_assignedtasks[cpu] list.
* This function is called after up_cpu_pause in order to resume operation
* of the CPU after modifying its g_assignedtasks[cpu] list.
*
* Input Parameters:
* cpu - The index of the CPU being re-started.
@ -297,7 +297,7 @@ int up_cpu_resume(int cpu)
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
/* Release the spinlock. Releasing the spinlock will cause the SGI2
/* Release the spinlock. Releasing the spinlock will cause the interrupt
* handler on 'cpu' to continue and return from interrupt to the newly
* established thread.
*/

View File

@ -87,7 +87,7 @@
1:
addi \aout, \ain, -1 /* aout = ain - 1 */
and \ain, \ain, \aout /* ain = ain & aout */
bnez \ain, 1b /* Repeat until ain == 0 */
bnez \ain, 1b /* Repeat until ain == 0 */
addi \aout, \aout, 1 /* Return aout + 1 */
.endm

View File

@ -109,9 +109,9 @@ void IRAM_ATTR __start(void)
}
#endif
/* Move the stack to a known location. Although we were give a stack
/* Move the stack to a known location. Although we were given a stack
* pointer at start-up, we don't know where that stack pointer is
* positioned respect to our memory map. The only safe option is to
* positioned with respect to our memory map. The only safe option is to
* switch to a well-known IDLE thread stack.
*/

View File

@ -33,7 +33,6 @@
#
############################################################################
# Supported toolchains
#
# Each toolchain definition should set: