SMP: Add spin_trylock(). Use this in conditions where other CPUs need to be stopped but we cannot call enter_critical_section().

Gregory Nutt 2016-11-24 13:33:43 -06:00
parent 7568aaf213
commit 7f636f2280
5 changed files with 35 additions and 8 deletions
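The pattern added below in _up_assert() and _exit() is, roughly, the following. This is a sketch of the idea rather than a verbatim copy of the sources; it assumes the CONFIG_SMP option and the scheduler's global g_cpu_irqlock spinlock:

  /* Disable interrupts on this CPU.  They are never restored on this path. */

  (void)up_irq_save();

#ifdef CONFIG_SMP
  /* Make a single attempt to take the global IRQ spinlock so that the other
   * CPUs stall in enter_critical_section().  Do not spin waiting for the
   * lock: if another CPU already holds it, looping here could deadlock the
   * fatal-error path.  Unlike enter_critical_section(), this leaves no
   * critical section state that would have to be undone later.
   */

  (void)spin_trylock(&g_cpu_irqlock);
#endif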

View File

@@ -53,6 +53,7 @@
#include "up_arch.h"
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_internal.h"
/****************************************************************************
@@ -346,6 +347,10 @@ static void _up_assert(int errorcode)
if (CURRENT_REGS || this_task()->pid == 0)
{
(void)up_irq_save();
#ifdef CONFIG_SMP
(void)spin_trylock(&g_cpu_irqlock);
#endif
for (; ; )
{
#ifdef CONFIG_ARCH_LEDS

View File

@@ -117,6 +117,9 @@ void up_sigdeliver(void)
* on the local CPU. We do not want to call enter_critical_section()
* here, however, because we don't want this state to stick after the
* call to up_fullcontextrestore().
*
* I would prefer that all interrupts are disabled when
* up_fullcontextrestore() is called, but that may not be necessary.
*/
sinfo("Resuming\n");

View File

@@ -50,6 +50,7 @@
#include "task/task.h"
#include "sched/sched.h"
#include "group/group.h"
#include "irq/irq.h"
#include "up_internal.h"
/****************************************************************************
@@ -140,11 +141,14 @@ void _exit(int status)
{
struct tcb_s *tcb;
/* Disable interrupts. They will be restored when the next task is
 * started.
 */
(void)up_irq_save();
#ifdef CONFIG_SMP
(void)spin_trylock(&g_cpu_irqlock);
#endif
sinfo("TCB=%p exiting\n", this_task());
@@ -177,4 +181,3 @@ void _exit(int status)
up_fullcontextrestore(tcb->xcp.regs);
}

View File

@@ -555,11 +555,6 @@ Open Issues:
Update: Cache inconsistencies seem to be the root cause of all current SMP
issues.
5. Assertions. On a fatal assertion, other CPUs need to be stopped. The SCR,
however, only supports disabling CPUs 1 through 3. Perhaps if the assertion
occurs on CPUn, n > 0, then it should use an SGI to perform the assertion
on CPU0 always. From CPU0, CPUs 1-3 can be disabled.
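The removed item above is the issue that spin_trylock() is aimed at. The alternative it describes, actively stopping the other CPUs, could be sketched with the existing SMP pause interface; this is only an illustration and assumes that up_cpu_pause(), this_cpu(), and CONFIG_SMP_NCPUS are available in the configuration:

#ifdef CONFIG_SMP
  int me = this_cpu();
  int cpu;

  /* Ask each of the other CPUs to pause.  On the ARMv7-A GIC this request
   * is typically delivered as a software generated interrupt (SGI).
   */

  for (cpu = 0; cpu < CONFIG_SMP_NCPUS; cpu++)
    {
      if (cpu != me)
        {
          (void)up_cpu_pause(cpu);
        }
    }
#endif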
Configurations
==============

View File

@@ -179,6 +179,27 @@ void spin_initializer(FAR struct spinlock_s *lock);
void spin_lock(FAR volatile spinlock_t *lock);
/****************************************************************************
* Name: spin_trylock
*
* Description:
* Try once to lock the spinlock. Do not wait if the spinlock is already
* locked.
*
* Input Parameters:
* lock - A reference to the spinlock object to lock.
*
* Returned Value:
* SP_LOCKED - Failure, the spinlock was already locked
* SP_UNLOCKED - Success, the spinlock was successfully locked
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
#define spin_trylock(l) up_testset(l)
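A minimal usage sketch based on the return values documented above. The lock, the caller, and the two helper functions are hypothetical and not part of this commit; only spin_trylock(), spin_unlock(), and the SP_* values come from <nuttx/spinlock.h>:

extern void do_protected_work(void);   /* Hypothetical helper */
extern void do_fallback_work(void);    /* Hypothetical helper */

static spinlock_t g_mylock = SP_UNLOCKED;

void try_fast_path(void)
{
  if (spin_trylock(&g_mylock) == SP_UNLOCKED)
    {
      /* Success: the lock was free and is now held by this CPU. */

      do_protected_work();
      spin_unlock(&g_mylock);
    }
  else
    {
      /* SP_LOCKED: another CPU holds the lock.  Take a fallback path
       * instead of spinning.
       */

      do_fallback_work();
    }
}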
/****************************************************************************
* Name: spin_lockr
*