sched/cpu: replace up_cpu_index() with this_cpu()
In SMP mode, up_cpu_index() and this_cpu() are equivalent: both return the index of the physical core. In AMP mode, up_cpu_index() returns the index of the physical core, while this_cpu() always returns 0: | #ifdef CONFIG_SMP | # define this_cpu() up_cpu_index() | #elif defined(CONFIG_AMP) | # define this_cpu() (0) | #else | # define this_cpu() (0) | #endif Signed-off-by: chao an <anchao@lixiang.com>
This commit is contained in:
parent
4e62d0005a
commit
feb6ede434
@ -247,7 +247,7 @@ static void note_sysview_irqhandler(FAR struct note_driver_s *drv, int irq,
|
|||||||
|
|
||||||
if (enter)
|
if (enter)
|
||||||
{
|
{
|
||||||
driver->irq[up_cpu_index()] = irq;
|
driver->irq[this_cpu()] = irq;
|
||||||
|
|
||||||
SEGGER_SYSVIEW_OnTaskStopExec();
|
SEGGER_SYSVIEW_OnTaskStopExec();
|
||||||
SEGGER_SYSVIEW_RecordEnterISR();
|
SEGGER_SYSVIEW_RecordEnterISR();
|
||||||
@ -270,7 +270,7 @@ static void note_sysview_irqhandler(FAR struct note_driver_s *drv, int irq,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
driver->irq[up_cpu_index()] = 0;
|
driver->irq[this_cpu()] = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -337,7 +337,7 @@ static void note_sysview_syscall_leave(FAR struct note_driver_s *drv,
|
|||||||
|
|
||||||
unsigned int note_sysview_get_interrupt_id(void)
|
unsigned int note_sysview_get_interrupt_id(void)
|
||||||
{
|
{
|
||||||
return g_note_sysview_driver.irq[up_cpu_index()];
|
return g_note_sysview_driver.irq[this_cpu()];
|
||||||
}
|
}
|
||||||
|
|
||||||
/****************************************************************************
|
/****************************************************************************
|
||||||
|
@ -196,6 +196,16 @@
|
|||||||
|
|
||||||
#define get_current_mm() (get_group_mm(nxsched_self()->group))
|
#define get_current_mm() (get_group_mm(nxsched_self()->group))
|
||||||
|
|
||||||
|
/* These are macros to access the current CPU and the current task on a CPU.
|
||||||
|
* These macros are intended to support a future SMP implementation.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
# define this_cpu() up_cpu_index()
|
||||||
|
#else
|
||||||
|
# define this_cpu() (0)
|
||||||
|
#endif
|
||||||
|
|
||||||
/****************************************************************************
|
/****************************************************************************
|
||||||
* Public Type Definitions
|
* Public Type Definitions
|
||||||
****************************************************************************/
|
****************************************************************************/
|
||||||
|
@ -28,6 +28,7 @@
|
|||||||
#include <debug.h>
|
#include <debug.h>
|
||||||
|
|
||||||
#include <nuttx/arch.h>
|
#include <nuttx/arch.h>
|
||||||
|
#include <nuttx/sched.h>
|
||||||
#include <nuttx/mm/mm.h>
|
#include <nuttx/mm/mm.h>
|
||||||
|
|
||||||
#include "mm_heap/mm.h"
|
#include "mm_heap/mm.h"
|
||||||
@ -47,11 +48,11 @@ static void add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
|
|||||||
|
|
||||||
flags = up_irq_save();
|
flags = up_irq_save();
|
||||||
|
|
||||||
tmp->flink = heap->mm_delaylist[up_cpu_index()];
|
tmp->flink = heap->mm_delaylist[this_cpu()];
|
||||||
heap->mm_delaylist[up_cpu_index()] = tmp;
|
heap->mm_delaylist[this_cpu()] = tmp;
|
||||||
|
|
||||||
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
||||||
heap->mm_delaycount[up_cpu_index()]++;
|
heap->mm_delaycount[this_cpu()]++;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
up_irq_restore(flags);
|
up_irq_restore(flags);
|
||||||
|
@ -64,21 +64,21 @@ static bool free_delaylist(FAR struct mm_heap_s *heap, bool force)
|
|||||||
|
|
||||||
flags = up_irq_save();
|
flags = up_irq_save();
|
||||||
|
|
||||||
tmp = heap->mm_delaylist[up_cpu_index()];
|
tmp = heap->mm_delaylist[this_cpu()];
|
||||||
|
|
||||||
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
||||||
if (tmp == NULL ||
|
if (tmp == NULL ||
|
||||||
(!force &&
|
(!force &&
|
||||||
heap->mm_delaycount[up_cpu_index()] < CONFIG_MM_FREE_DELAYCOUNT_MAX))
|
heap->mm_delaycount[this_cpu()] < CONFIG_MM_FREE_DELAYCOUNT_MAX))
|
||||||
{
|
{
|
||||||
up_irq_restore(flags);
|
up_irq_restore(flags);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
heap->mm_delaycount[up_cpu_index()] = 0;
|
heap->mm_delaycount[this_cpu()] = 0;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
heap->mm_delaylist[up_cpu_index()] = NULL;
|
heap->mm_delaylist[this_cpu()] = NULL;
|
||||||
|
|
||||||
up_irq_restore(flags);
|
up_irq_restore(flags);
|
||||||
|
|
||||||
|
@ -189,11 +189,11 @@ static void add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
|
|||||||
|
|
||||||
flags = up_irq_save();
|
flags = up_irq_save();
|
||||||
|
|
||||||
tmp->flink = heap->mm_delaylist[up_cpu_index()];
|
tmp->flink = heap->mm_delaylist[this_cpu()];
|
||||||
heap->mm_delaylist[up_cpu_index()] = tmp;
|
heap->mm_delaylist[this_cpu()] = tmp;
|
||||||
|
|
||||||
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
||||||
heap->mm_delaycount[up_cpu_index()]++;
|
heap->mm_delaycount[this_cpu()]++;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
up_irq_restore(flags);
|
up_irq_restore(flags);
|
||||||
@ -215,21 +215,21 @@ static bool free_delaylist(FAR struct mm_heap_s *heap, bool force)
|
|||||||
|
|
||||||
flags = up_irq_save();
|
flags = up_irq_save();
|
||||||
|
|
||||||
tmp = heap->mm_delaylist[up_cpu_index()];
|
tmp = heap->mm_delaylist[this_cpu()];
|
||||||
|
|
||||||
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
|
||||||
if (tmp == NULL ||
|
if (tmp == NULL ||
|
||||||
(!force &&
|
(!force &&
|
||||||
heap->mm_delaycount[up_cpu_index()] < CONFIG_MM_FREE_DELAYCOUNT_MAX))
|
heap->mm_delaycount[this_cpu()] < CONFIG_MM_FREE_DELAYCOUNT_MAX))
|
||||||
{
|
{
|
||||||
up_irq_restore(flags);
|
up_irq_restore(flags);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
heap->mm_delaycount[up_cpu_index()] = 0;
|
heap->mm_delaycount[this_cpu()] = 0;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
heap->mm_delaylist[up_cpu_index()] = NULL;
|
heap->mm_delaylist[this_cpu()] = NULL;
|
||||||
|
|
||||||
up_irq_restore(flags);
|
up_irq_restore(flags);
|
||||||
|
|
||||||
|
@ -100,7 +100,7 @@ irqstate_t spin_lock_irqsave(spinlock_t *lock)
|
|||||||
|
|
||||||
if (NULL == lock)
|
if (NULL == lock)
|
||||||
{
|
{
|
||||||
int me = this_cpu();
|
int me = up_cpu_index();
|
||||||
if (0 == g_irq_spin_count[me])
|
if (0 == g_irq_spin_count[me])
|
||||||
{
|
{
|
||||||
spin_lock(&g_irq_spin);
|
spin_lock(&g_irq_spin);
|
||||||
@ -128,7 +128,7 @@ irqstate_t spin_lock_irqsave_wo_note(spinlock_t *lock)
|
|||||||
|
|
||||||
if (NULL == lock)
|
if (NULL == lock)
|
||||||
{
|
{
|
||||||
int me = this_cpu();
|
int me = up_cpu_index();
|
||||||
if (0 == g_irq_spin_count[me])
|
if (0 == g_irq_spin_count[me])
|
||||||
{
|
{
|
||||||
spin_lock_wo_note(&g_irq_spin);
|
spin_lock_wo_note(&g_irq_spin);
|
||||||
@ -178,7 +178,7 @@ void spin_unlock_irqrestore(spinlock_t *lock, irqstate_t flags)
|
|||||||
{
|
{
|
||||||
if (NULL == lock)
|
if (NULL == lock)
|
||||||
{
|
{
|
||||||
int me = this_cpu();
|
int me = up_cpu_index();
|
||||||
DEBUGASSERT(0 < g_irq_spin_count[me]);
|
DEBUGASSERT(0 < g_irq_spin_count[me]);
|
||||||
g_irq_spin_count[me]--;
|
g_irq_spin_count[me]--;
|
||||||
|
|
||||||
@ -203,7 +203,7 @@ void spin_unlock_irqrestore_wo_note(spinlock_t *lock, irqstate_t flags)
|
|||||||
{
|
{
|
||||||
if (NULL == lock)
|
if (NULL == lock)
|
||||||
{
|
{
|
||||||
int me = this_cpu();
|
int me = up_cpu_index();
|
||||||
DEBUGASSERT(0 < g_irq_spin_count[me]);
|
DEBUGASSERT(0 < g_irq_spin_count[me]);
|
||||||
g_irq_spin_count[me]--;
|
g_irq_spin_count[me]--;
|
||||||
|
|
||||||
@ -351,7 +351,7 @@ irqstate_t write_lock_irqsave(rwlock_t *lock)
|
|||||||
|
|
||||||
if (NULL == lock)
|
if (NULL == lock)
|
||||||
{
|
{
|
||||||
int me = this_cpu();
|
int me = up_cpu_index();
|
||||||
if (0 == g_irq_rwspin_count[me])
|
if (0 == g_irq_rwspin_count[me])
|
||||||
{
|
{
|
||||||
write_lock(&g_irq_rwspin);
|
write_lock(&g_irq_rwspin);
|
||||||
@ -401,7 +401,7 @@ void write_unlock_irqrestore(rwlock_t *lock, irqstate_t flags)
|
|||||||
{
|
{
|
||||||
if (NULL == lock)
|
if (NULL == lock)
|
||||||
{
|
{
|
||||||
int me = this_cpu();
|
int me = up_cpu_index();
|
||||||
DEBUGASSERT(0 < g_irq_rwspin_count[me]);
|
DEBUGASSERT(0 < g_irq_rwspin_count[me]);
|
||||||
g_irq_rwspin_count[me]--;
|
g_irq_rwspin_count[me]--;
|
||||||
|
|
||||||
|
@ -581,7 +581,7 @@ void _assert(FAR const char *filename, int linenum,
|
|||||||
msg ? msg : "",
|
msg ? msg : "",
|
||||||
filename ? filename : "", linenum,
|
filename ? filename : "", linenum,
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
up_cpu_index(),
|
this_cpu(),
|
||||||
#endif
|
#endif
|
||||||
#if CONFIG_TASK_NAME_SIZE > 0
|
#if CONFIG_TASK_NAME_SIZE > 0
|
||||||
rtcb->name,
|
rtcb->name,
|
||||||
|
@ -65,11 +65,9 @@
|
|||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
# define current_task(cpu) ((FAR struct tcb_s *)list_assignedtasks(cpu)->head)
|
# define current_task(cpu) ((FAR struct tcb_s *)list_assignedtasks(cpu)->head)
|
||||||
# define this_cpu() up_cpu_index()
|
|
||||||
#else
|
#else
|
||||||
# define current_task(cpu) ((FAR struct tcb_s *)list_readytorun()->head)
|
# define current_task(cpu) ((FAR struct tcb_s *)list_readytorun()->head)
|
||||||
# define this_cpu() (0)
|
# define this_task() (current_task(up_cpu_index()))
|
||||||
# define this_task() (current_task(this_cpu()))
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define is_idle_task(t) ((t)->pid < CONFIG_SMP_NCPUS)
|
#define is_idle_task(t) ((t)->pid < CONFIG_SMP_NCPUS)
|
||||||
@ -79,7 +77,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#define running_task() \
|
#define running_task() \
|
||||||
(up_interrupt_context() ? g_running_tasks[this_cpu()] : this_task())
|
(up_interrupt_context() ? g_running_tasks[up_cpu_index()] : this_task())
|
||||||
|
|
||||||
/* List attribute flags */
|
/* List attribute flags */
|
||||||
|
|
||||||
|
@ -61,5 +61,5 @@
|
|||||||
|
|
||||||
int sched_getcpu(void)
|
int sched_getcpu(void)
|
||||||
{
|
{
|
||||||
return up_cpu_index(); /* Does not fail */
|
return this_cpu(); /* Does not fail */
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user