arch/arm: unify switch context from software interrupt
Signed-off-by: chao.an <anchao@xiaomi.com>
commit 6cc0aaf5b9 (parent db3a40ac25)
@@ -60,6 +60,7 @@ CMN_CSRCS += arm_doirq.c arm_initialstate.c arm_mmu.c arm_prefetchabort.c
 CMN_CSRCS += arm_releasepending.c arm_reprioritizertr.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c arm_syscall.c
 CMN_CSRCS += arm_unblocktask.c arm_undefinedinsn.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 # Use common heap allocation for now (may need to be customized later)
@@ -60,6 +60,7 @@ CMN_CSRCS += arm_doirq.c arm_initialstate.c arm_mmu.c arm_prefetchabort.c
 CMN_CSRCS += arm_releasepending.c arm_reprioritizertr.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c arm_syscall.c
 CMN_CSRCS += arm_unblocktask.c arm_undefinedinsn.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 # Use common heap allocation for now (may need to be customized later)
@@ -125,18 +125,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
           arm_restorestate(rtcb->xcp.regs);
         }

-      /* Copy the user C context into the TCB at the (old) head of the
-       * ready-to-run Task list. if arm_saveusercontext returns a non-zero
-       * value, then this is really the previously running task restarting!
-       */
+      /* No, then we will need to perform the user context switch */

-      else if (!arm_saveusercontext(rtcb->xcp.regs))
+      else
         {
           /* Restore the exception context of the rtcb at the (new) head
            * of the ready-to-run task list.
            */

-          rtcb = this_task();
+          struct tcb_s *nexttcb = this_task();

 #ifdef CONFIG_ARCH_ADDRENV
           /* Make sure that the address environment for the previously
@@ -145,15 +138,23 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
            * thread at the head of the ready-to-run list.
            */

-          group_addrenv(rtcb);
+          group_addrenv(nexttcb);
 #endif
           /* Reset scheduler parameters */

-          nxsched_resume_scheduler(rtcb);
+          nxsched_resume_scheduler(nexttcb);

-          /* Then switch contexts */
+          /* Switch context to the context of the task at the head of the
+           * ready to run list.
+           */

-          arm_fullcontextrestore(rtcb->xcp.regs);
+          arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+          /* arm_switchcontext forces a context switch to the task at the
+           * head of the ready-to-run list.  It does not 'return' in the
+           * normal sense.  When it does return, it is because the blocked
+           * task is again ready to run and has execution priority.
+           */
         }
     }
 }
@@ -25,6 +25,9 @@
 #include <nuttx/config.h>
 #include <arch/irq.h>

+#include "svcall.h"
 #include "arm.h"

 /****************************************************************************
  * Pre-processor Definitions
  ****************************************************************************/
@@ -60,38 +63,14 @@
     .type   arm_fullcontextrestore, function
 arm_fullcontextrestore:

-    /* On entry, a1 (r0) holds address of the register save area.  All other
-     * registers are available for use.
-     */
+    /* Perform the System call with R0=1 and R1=regs */

-    /* Recover all registers except for r0, r1, R15, and CPSR */
+    mov     r1, r0                      /* R1: regs */
+    mov     r0, #SYS_restore_context    /* R0: restore context */
+    svc     #SYS_syscall                /* Force synchronous SVCall (or Hard Fault) */

-    add     r1, r0, #(4*REG_R2)         /* Offset to REG_R2 storage */
-    ldmia   r1, {r2-r14}                /* Recover registers */
-
-    /* Create a stack frame to hold the some registers */
-
-    sub     sp, sp, #(3*4)              /* Frame for three registers */
-    ldr     r1, [r0, #(4*REG_R0)]       /* Fetch the stored r0 value */
-    str     r1, [sp]                    /* Save it at the top of the stack */
-    ldr     r1, [r0, #(4*REG_R1)]       /* Fetch the stored r1 value */
-    str     r1, [sp, #4]                /* Save it in the stack */
-    ldr     r1, [r0, #(4*REG_PC)]       /* Fetch the stored pc value */
-    str     r1, [sp, #8]                /* Save it at the bottom of the frame */
-
-    /* Now we can restore the CPSR.  We wait until we are completely
-     * finished with the context save data to do this.  Restore the CPSR
-     * may re-enable and interrupts and we could be in a context
-     * where the save structure is only protected by interrupts being
-     * disabled.
-     */
-
-    ldr     r1, [r0, #(4*REG_CPSR)]     /* Fetch the stored CPSR value */
-    msr     spsr_cxsf, r1               /* Set the SPSR */
-
-    /* Now recover r0-r1, pc and cpsr, destroying the stack frame */
-
-    ldmia   sp!, {r0-r1, r15}^
+    /* This call should not return */

+    bx      lr                          /* Unnecessary ... will not return */
     .size   arm_fullcontextrestore, .-arm_fullcontextrestore
     .end
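The trimmed-down trampoline above reduces arm_fullcontextrestore to a single one-argument system call. For illustration only (the commit keeps the assembly implementation; the sys_call1() inline helper from arch/arm/include/syscall.h is assumed here and is not part of this diff), the equivalent behaviour expressed in C would be:

#include <stdint.h>
#include <syscall.h>   /* sys_call1(): issues the SVC trap (assumed helper) */

#include "svcall.h"    /* SYS_restore_context */

/* Illustrative C equivalent of the rewritten trampoline: trap into
 * arm_syscall() with R0=SYS_restore_context and R1=restoreregs.  The
 * handler replaces the register area to be restored on exception
 * return, so control never comes back to this call site.
 */

void arm_fullcontextrestore(uint32_t *restoreregs)
{
  sys_call1(SYS_restore_context, (uintptr_t)restoreregs);

  /* Should never get here */

  for (; ; );
}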
@@ -93,19 +93,11 @@ void up_release_pending(void)
           arm_restorestate(rtcb->xcp.regs);
         }

-      /* Copy the exception context into the TCB of the task that
-       * was currently active. if arm_saveusercontext returns a non-zero
-       * value, then this is really the previously running task
-       * restarting!
-       */
+      /* No, then we will need to perform the user context switch */

-      else if (!arm_saveusercontext(rtcb->xcp.regs))
+      else
         {
           /* Restore the exception context of the rtcb at the (new) head
            * of the ready-to-run task list.
            */

-          rtcb = this_task();
+          struct tcb_s *nexttcb = this_task();

 #ifdef CONFIG_ARCH_ADDRENV
           /* Make sure that the address environment for the previously
@@ -114,15 +106,23 @@ void up_release_pending(void)
            * thread at the head of the ready-to-run list.
            */

-          group_addrenv(rtcb);
+          group_addrenv(nexttcb);
 #endif
           /* Update scheduler parameters */

-          nxsched_resume_scheduler(rtcb);
+          nxsched_resume_scheduler(nexttcb);

-          /* Then switch contexts */
+          /* Switch context to the context of the task at the head of the
+           * ready to run list.
+           */

-          arm_fullcontextrestore(rtcb->xcp.regs);
+          arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+          /* arm_switchcontext forces a context switch to the task at the
+           * head of the ready-to-run list.  It does not 'return' in the
+           * normal sense.  When it does return, it is because the blocked
+           * task is again ready to run and has execution priority.
+           */
         }
     }
 }
@@ -147,19 +147,11 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
           arm_restorestate(rtcb->xcp.regs);
         }

-      /* Copy the exception context into the TCB at the (old) head of
-       * the ready-to-run Task list. if arm_saveusercontext returns a
-       * non-zero value, then this is really the previously running task
-       * restarting!
-       */
+      /* No, then we will need to perform the user context switch */

-      else if (!arm_saveusercontext(rtcb->xcp.regs))
+      else
         {
           /* Restore the exception context of the rtcb at the (new) head
            * of the ready-to-run task list.
            */

-          rtcb = this_task();
+          struct tcb_s *nexttcb = this_task();

 #ifdef CONFIG_ARCH_ADDRENV
           /* Make sure that the address environment for the previously
@@ -168,15 +160,24 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
            * thread at the head of the ready-to-run list.
            */

-          group_addrenv(rtcb);
+          group_addrenv(nexttcb);
 #endif
           /* Update scheduler parameters */

-          nxsched_resume_scheduler(rtcb);
+          nxsched_resume_scheduler(nexttcb);

-          /* Then switch contexts */
+          /* Switch context to the context of the task at the head of the
+           * ready to run list.
+           */

-          arm_fullcontextrestore(rtcb->xcp.regs);
+          arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+          /* arm_switchcontext forces a context switch to the task at the
+           * head of the ready-to-run list.  It does not 'return' in the
+           * normal sense.  When it does return, it is because the
+           * blocked task is again ready to run and has execution
+           * priority.
+           */
         }
     }
 }
@@ -32,6 +32,7 @@

 #include "arm_arch.h"
 #include "arm_internal.h"
+#include "svcall.h"

 /****************************************************************************
  * Public Functions
@@ -50,9 +51,77 @@
  *
  ****************************************************************************/

-void arm_syscall(uint32_t *regs)
+uint32_t *arm_syscall(uint32_t *regs)
 {
-  _alert("Syscall from 0x%" PRIx32 "\n", regs[REG_PC]);
-  CURRENT_REGS = regs;
-  PANIC();
+  uint32_t cmd;
+
+  DEBUGASSERT(regs);
+
+  cmd = regs[REG_R0];
+
+  switch (cmd)
+    {
+      /* R0=SYS_restore_context:  Restore task context
+       *
+       *   void arm_fullcontextrestore(uint32_t *restoreregs)
+       *     noreturn_function;
+       *
+       * At this point, the following values are saved in context:
+       *
+       *   R0 = SYS_restore_context
+       *   R1 = restoreregs
+       */
+
+      case SYS_restore_context:
+        {
+          /* Replace 'regs' with the pointer to the register set in
+           * regs[REG_R1].  On return from the system call, that register
+           * set will determine the restored context.
+           */
+
+          regs = (uint32_t *)regs[REG_R1];
+          DEBUGASSERT(regs);
+        }
+        break;
+
+      /* R0=SYS_switch_context:  This is a switch context command:
+       *
+       *   void arm_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
+       *
+       * At this point, the following values are saved in context:
+       *
+       *   R0 = SYS_switch_context
+       *   R1 = saveregs
+       *   R2 = restoreregs
+       *
+       * In this case, we do both: We save the context registers to the save
+       * register area referenced by the saved contents of R1 and then set
+       * regs to the save register area referenced by the saved
+       * contents of R2.
+       */
+
+      case SYS_switch_context:
+        {
+          DEBUGASSERT(regs[REG_R1] != 0 && regs[REG_R2] != 0);
+          memcpy((uint32_t *)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
+          regs = (uint32_t *)regs[REG_R2];
+        }
+        break;
+
+      default:
+        {
+          svcerr("ERROR: Bad SYS call: 0x%" PRIx32 "\n", regs[REG_R0]);
+          _alert("Syscall from 0x%" PRIx32 "\n", regs[REG_PC]);
+          CURRENT_REGS = regs;
+          PANIC();
+        }
+        break;
+    }
+
+  /* Return the last value of current_regs.  This supports context switches
+   * on return from the exception.  That capability is only used with the
+   * SYS_switch_context system call.
+   */
+
+  return regs;
 }
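The Make.defs changes in this commit add arm_switchcontext.c to every affected chip, but that file itself is not shown in this excerpt. Based on the SYS_switch_context protocol documented above (R0=command, R1=saveregs, R2=restoreregs), a plausible minimal implementation is a thin wrapper around the sys_call2() inline helper from arch/arm/include/syscall.h; this is a sketch under that assumption, not the committed source:

#include <stdint.h>
#include <syscall.h>   /* sys_call2(): issues the SVC trap (assumed helper) */

#include "svcall.h"    /* SYS_switch_context */

/* Save the running context into saveregs and resume the context in
 * restoreregs by trapping into arm_syscall() with SYS_switch_context.
 * The call returns only when the saved context is itself restored later.
 */

void arm_switchcontext(uint32_t *saveregs, uint32_t *restoreregs)
{
  sys_call2(SYS_switch_context, (uintptr_t)saveregs, (uintptr_t)restoreregs);
}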
@@ -109,20 +109,11 @@ void up_unblock_task(struct tcb_s *tcb)
           arm_restorestate(rtcb->xcp.regs);
         }

-      /* We are not in an interrupt handler.  Copy the user C context
-       * into the TCB of the task that was previously active. if
-       * arm_saveusercontext returns a non-zero value, then this is really
-       * the previously running task restarting!
-       */
+      /* No, then we will need to perform the user context switch */

-      else if (!arm_saveusercontext(rtcb->xcp.regs))
+      else
         {
           /* Restore the exception context of the new task that is ready to
            * run (probably tcb).  This is the new rtcb at the head of the
            * ready-to-run task list.
            */

-          rtcb = this_task();
+          struct tcb_s *nexttcb = this_task();

 #ifdef CONFIG_ARCH_ADDRENV
           /* Make sure that the address environment for the previously
@@ -131,15 +122,23 @@ void up_unblock_task(struct tcb_s *tcb)
            * thread at the head of the ready-to-run list.
            */

-          group_addrenv(rtcb);
+          group_addrenv(nexttcb);
 #endif
           /* Update scheduler parameters */

-          nxsched_resume_scheduler(rtcb);
+          nxsched_resume_scheduler(nexttcb);

-          /* Then switch contexts */
+          /* Switch context to the context of the task at the head of the
+           * ready to run list.
+           */

-          arm_fullcontextrestore(rtcb->xcp.regs);
+          arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+          /* arm_switchcontext forces a context switch to the task at the
+           * head of the ready-to-run list.  It does not 'return' in the
+           * normal sense.  When it does return, it is because the blocked
+           * task is again ready to run and has execution priority.
+           */
         }
     }
 }
arch/arm/src/arm/svcall.h (new file, 127 lines)
@@ -0,0 +1,127 @@
/****************************************************************************
 * arch/arm/src/arm/svcall.h
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

#ifndef __ARCH_ARM_SRC_ARM_SVCALL_H
#define __ARCH_ARM_SRC_ARM_SVCALL_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <syscall.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Configuration ************************************************************/

/* This logic uses one system call for the syscall return.  So a minimum of
 * one syscall value must be reserved.  If CONFIG_BUILD_KERNEL is defined,
 * then four more syscall values must be reserved.
 */

#ifdef CONFIG_LIB_SYSCALL
#  ifdef CONFIG_BUILD_KERNEL
#    ifndef CONFIG_SYS_RESERVED
#      error "CONFIG_SYS_RESERVED must be defined to have the value 7"
#    elif CONFIG_SYS_RESERVED != 7
#      error "CONFIG_SYS_RESERVED must have the value 7"
#    endif
#  else
#    ifndef CONFIG_SYS_RESERVED
#      error "CONFIG_SYS_RESERVED must be defined to have the value 4"
#    elif CONFIG_SYS_RESERVED != 4
#      error "CONFIG_SYS_RESERVED must have the value 4"
#    endif
#  endif
#endif

/* Cortex-A system calls ****************************************************/

/* SYS call 0:
 *
 * void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
 */

#define SYS_restore_context       (0)

/* SYS call 1:
 *
 * void arm_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
 */

#define SYS_switch_context        (1)

#ifdef CONFIG_LIB_SYSCALL
/* SYS call 2:
 *
 * void arm_syscall_return(void);
 */

#define SYS_syscall_return        (2)

#ifndef CONFIG_BUILD_FLAT
#ifdef CONFIG_BUILD_KERNEL
/* SYS call 3:
 *
 * void up_task_start(main_t taskentry, int argc, FAR char *argv[])
 *   noreturn_function;
 */

#define SYS_task_start            (3)

/* SYS call 5:
 *
 * void signal_handler(_sa_sigaction_t sighand,
 *                     int signo, FAR siginfo_t *info,
 *                     FAR void *ucontext);
 */

#define SYS_signal_handler        (5)

/* SYS call 6:
 *
 * void signal_handler_return(void);
 */

#define SYS_signal_handler_return (6)

#endif /* !CONFIG_BUILD_FLAT */

/* SYS call 4:
 *
 * void up_pthread_start(pthread_startroutine_t startup,
 *                       pthread_startroutine_t entrypt, pthread_addr_t arg)
 *        noreturn_function
 */

#define SYS_pthread_start         (4)

#endif /* CONFIG_BUILD_KERNEL */
#endif /* CONFIG_LIB_SYSCALL */

/****************************************************************************
 * Inline Functions
 ****************************************************************************/

#endif /* __ARCH_ARM_SRC_ARM_SVCALL_H */
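When CONFIG_LIB_SYSCALL is enabled, the reservation rule spelled out in this header maps directly onto the board configuration. As an illustration only (values taken from the checks above, shown for a kernel-mode build), a defconfig fragment would carry:

CONFIG_LIB_SYSCALL=y
CONFIG_BUILD_KERNEL=y
CONFIG_SYS_RESERVED=7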
@@ -29,6 +29,7 @@ CMN_CSRCS += arm_prefetchabort.c arm_releasepending.c arm_releasestack.c
 CMN_CSRCS += arm_reprioritizertr.c arm_schedulesigaction.c arm_sigdeliver.c
 CMN_CSRCS += arm_stackframe.c arm_syscall.c arm_unblocktask.c arm_undefinedinsn.c
 CMN_CSRCS += arm_usestack.c arm_vfork.c arm_puts.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y)
 CMN_CSRCS += arm_idle.c
@@ -390,7 +390,7 @@ void arm_dataabort(uint32_t *regs);
 /* Exception handlers */

 void arm_prefetchabort(uint32_t *regs);
-void arm_syscall(uint32_t *regs);
+uint32_t *arm_syscall(uint32_t *regs);
 void arm_undefinedinsn(uint32_t *regs);

 #endif /* CONFIG_ARCH_ARMV[6-8]M */
@@ -30,7 +30,7 @@ CMN_CSRCS += arm_prefetchabort.c arm_releasepending.c arm_releasestack.c
 CMN_CSRCS += arm_reprioritizertr.c arm_schedulesigaction.c
 CMN_CSRCS += arm_sigdeliver.c arm_stackframe.c arm_syscall.c arm_unblocktask.c
 CMN_CSRCS += arm_undefinedinsn.c arm_usestack.c arm_vfork.c arm_puts.c
-CMN_CSRCS += arm_tcbinfo.c
+CMN_CSRCS += arm_tcbinfo.c arm_switchcontext.c

 ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y)
 CMN_CSRCS += arm_idle.c
@@ -29,7 +29,7 @@ CMN_CSRCS += arm_prefetchabort.c arm_releasepending.c arm_releasestack.c
 CMN_CSRCS += arm_reprioritizertr.c arm_schedulesigaction.c
 CMN_CSRCS += arm_sigdeliver.c arm_stackframe.c arm_syscall.c arm_unblocktask.c
 CMN_CSRCS += arm_undefinedinsn.c arm_usestack.c arm_vfork.c arm_puts.c
-CMN_CSRCS += arm_tcbinfo.c
+CMN_CSRCS += arm_tcbinfo.c arm_switchcontext.c

 ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y)
 CMN_CSRCS += arm_idle.c
@@ -63,6 +63,7 @@ CMN_CSRCS += arm_doirq.c arm_gicv2.c arm_initialstate.c arm_mmu.c
 CMN_CSRCS += arm_prefetchabort.c arm_releasepending.c arm_reprioritizertr.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c arm_syscall.c
 CMN_CSRCS += arm_unblocktask.c arm_undefinedinsn.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 ifeq ($(CONFIG_ARM_SEMIHOSTING_HOSTFS),y)
 CMN_CSRCS += arm_hostfs.c
@@ -31,6 +31,7 @@ CMN_CSRCS += arm_releasestack.c arm_reprioritizertr.c arm_stackframe.c
 CMN_CSRCS += arm_syscall.c arm_unblocktask.c arm_undefinedinsn.c arm_usestack.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c
 CMN_CSRCS += arm_lowputs.c arm_vfork.c arm_puts.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y)
 CMN_CSRCS += arm_idle.c
@@ -30,6 +30,7 @@ CMN_CSRCS += arm_releasestack.c arm_reprioritizertr.c arm_stackframe.c
 CMN_CSRCS += arm_syscall.c arm_unblocktask.c arm_undefinedinsn.c
 CMN_CSRCS += arm_usestack.c arm_lowputs.c arm_vfork.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c arm_puts.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y)
 CMN_CSRCS += arm_idle.c
@@ -31,7 +31,7 @@ CMN_CSRCS += arm_prefetchabort.c arm_releasepending.c arm_releasestack.c
 CMN_CSRCS += arm_reprioritizertr.c arm_schedulesigaction.c
 CMN_CSRCS += arm_sigdeliver.c arm_stackframe.c arm_syscall.c arm_unblocktask.c
 CMN_CSRCS += arm_undefinedinsn.c arm_usestack.c arm_vfork.c arm_puts.c
-CMN_CSRCS += arm_tcbinfo.c
+CMN_CSRCS += arm_tcbinfo.c arm_switchcontext.c

 ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y)
 CMN_CSRCS += arm_idle.c
@@ -29,7 +29,7 @@ CMN_CSRCS += arm_interruptcontext.c arm_prefetchabort.c arm_releasepending.c
 CMN_CSRCS += arm_releasestack.c arm_reprioritizertr.c arm_schedulesigaction.c
 CMN_CSRCS += arm_sigdeliver.c arm_stackframe.c arm_syscall.c arm_unblocktask.c
 CMN_CSRCS += arm_undefinedinsn.c arm_usestack.c arm_vfork.c arm_etherstub.c
-CMN_CSRCS += arm_puts.c arm_tcbinfo.c
+CMN_CSRCS += arm_puts.c arm_tcbinfo.c arm_switchcontext.c

 CHIP_ASRCS = moxart_lowputc.S
@@ -62,6 +62,7 @@ CMN_CSRCS += arm_doirq.c arm_initialstate.c arm_mmu.c arm_prefetchabort.c
 CMN_CSRCS += arm_releasepending.c arm_reprioritizertr.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c arm_syscall.c
 CMN_CSRCS += arm_unblocktask.c arm_undefinedinsn.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 # Configuration dependent C files
@@ -31,6 +31,7 @@ CMN_CSRCS += arm_releasestack.c arm_reprioritizertr.c arm_stackframe.c
 CMN_CSRCS += arm_syscall.c arm_unblocktask.c arm_undefinedinsn.c arm_usestack.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c
 CMN_CSRCS += arm_lowputs.c arm_vfork.c arm_puts.c arm_tcbinfo.c
+CMN_CSRCS += arm_switchcontext.c

 ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y)
 CMN_CSRCS += arm_idle.c
@@ -51,6 +51,7 @@ CMN_CSRCS += arm_doirq.c arm_initialstate.c arm_prefetchabort.c
 CMN_CSRCS += arm_releasepending.c arm_reprioritizertr.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c arm_syscall.c
 CMN_CSRCS += arm_unblocktask.c arm_undefinedinsn.c
+CMN_CSRCS += arm_switchcontext.c

 # Configuration dependent C files