SMP: Fix timer related issues: Round robin and sporadic scheduling were only being performed for tasks running on the CPU that processes the system timer interrupt. Similarly, CPU load measurements were only being processed for tasks running on the CPU that receives the sampling interrupt.

This commit is contained in:
Gregory Nutt 2017-01-23 12:14:01 -06:00
parent 6f5a9eb451
commit 6a4880f415
3 changed files with 207 additions and 29 deletions

View File

@ -65,16 +65,87 @@
# define CPULOAD_TICKSPERSEC CLOCKS_PER_SEC
#endif
/* When g_cpuload_total exceeds the following time constant, the load and
* the counts will be scaled back by two. In the CONFIG_SMP case,
* g_cpuload_total will be incremented multiple times per tick.
*/
#ifdef CONFIG_SMP
# define CPULOAD_TIMECONSTANT \
(CONFIG_SMP_NCPUS * \
CONFIG_SCHED_CPULOAD_TIMECONSTANT * \
CPULOAD_TICKSPERSEC)
#else
# define CPULOAD_TIMECONSTANT \
(CONFIG_SCHED_CPULOAD_TIMECONSTANT * \
CPULOAD_TICKSPERSEC)
#endif
/****************************************************************************
* Private Data
****************************************************************************/
/* This is the total number of clock tick counts. Essentially the
* 'denominator' for all CPU load calculations.
*
* For a single CPU, this value is incremented once per sample interval. So,
* for example, if nothing is running but the IDLE thread, that IDLE thread
* will get 100% of the load.
*
* But for the case of multiple CPUs (with CONFIG_SMP=y), this value is
* incremented for each CPU on each sample interval. So, as an example, if
* there are four CPUs and nothing is running but the IDLE threads, then
* each would have a load of 25% of the total.
*/
volatile uint32_t g_cpuload_total;
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: sched_cpu_process_cpuload
*
* Description:
* Collect data that can be used for CPU load measurements.
*
* Inputs:
* cpu - The CPU that we are performing the load operations on.
*
* Return Value:
* None
*
* Assumptions/Limitations:
* This function is called from a timer interrupt handler with all
* interrupts disabled.
*
****************************************************************************/
static inline void sched_cpu_process_cpuload(int cpu)
{
FAR struct tcb_s *rtcb = current_task(cpu);
int hash_index;
/* Increment the count on the currently executing thread
*
* NOTE also that CPU load measurement data is retained in the g_pidhash
* table vs. in the TCB which would seem to be the more logic place. It
* is place in the hash table, instead, to facilitate CPU load adjustments
* on all threads during timer interrupt handling. sched_foreach() could
* do this too, but this would require a little more overhead.
*/
hash_index = PIDHASH(rtcb->pid);
g_pidhash[hash_index].ticks++;
/* Increment tick count. NOTE that the count is increment once for each
* CPU on each sample interval.
*/
g_cpuload_total++;
}
/****************************************************************************
* Public Functions
****************************************************************************/
@ -99,27 +170,31 @@ volatile uint32_t g_cpuload_total;
void weak_function sched_process_cpuload(void)
{
FAR struct tcb_s *rtcb = this_task();
int hash_index;
int i;
/* Increment the count on the currently executing thread
*
* NOTE also that CPU load measurement data is retained in the g_pidhash
* table vs. in the TCB which would seem to be the more logic place. It
* is place in the hash table, instead, to facilitate CPU load adjustments
* on all threads during timer interrupt handling. sched_foreach() could
* do this too, but this would require a little more overhead.
#ifdef CONFIG_SMP
irqstate_t flags;
/* Perform scheduler operations on all CPUs. */
flags = enter_critical_section();
for (i = 0; i < CONFIG_SMP_NCPUS; i++)
{
sched_cpu_process_cpuload(i);
}
#else
/* Perform scheduler operations on the single CPU. */
sched_cpu_process_cpuload(0);
#endif
/* If the accumulated tick value exceed a time constant, then shift the
* accumulators and recalculate the total.
*/
hash_index = PIDHASH(rtcb->pid);
g_pidhash[hash_index].ticks++;
/* Increment tick count. If the accumulated tick value exceed a time
* constant, then shift the accumulators.
*/
if (++g_cpuload_total > (CONFIG_SCHED_CPULOAD_TIMECONSTANT * CPULOAD_TICKSPERSEC))
if (g_cpuload_total > CPULOAD_TIMECONSTANT)
{
uint32_t total = 0;
@ -137,6 +212,10 @@ void weak_function sched_process_cpuload(void)
g_cpuload_total = total;
}
#ifdef CONFIG_SMP
leave_critical_section(flags);
#endif
}
/****************************************************************************

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/sched/sched_processtimer.c
*
* Copyright (C) 2007, 2009, 2014-2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2007, 2009, 2014-2017 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -55,14 +55,14 @@
****************************************************************************/
/****************************************************************************
* Name: sched_process_scheduler
* Name: sched_cpu_scheduler
*
* Description:
* Check for operations specific to scheduling policy of the currently
* active task.
* active task on one CPU.
*
* Input Parameters:
* None
* cpu - The CPU that we are performing the scheduler operations on.
*
* Returned Value:
* None
@ -70,9 +70,9 @@
****************************************************************************/
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static inline void sched_process_scheduler(void)
static inline void sched_cpu_scheduler(int cpu)
{
FAR struct tcb_s *rtcb = this_task();
FAR struct tcb_s *rtcb = current_task(cpu);
#if CONFIG_RR_INTERVAL > 0
/* Check if the currently executing task uses round robin scheduling. */
@ -100,6 +100,46 @@ static inline void sched_process_scheduler(void)
}
#endif
}
#endif
/****************************************************************************
* Name: sched_process_scheduler
*
* Description:
* Check for operations specific to scheduling policy of the currently
* active task on all configured CPUs.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static inline void sched_process_scheduler(void)
{
#ifdef CONFIG_SMP
  irqstate_t flags;
  int cpu;

  /* Run the per-CPU scheduler logic for every configured CPU, holding
   * the critical section so no CPU's current task changes under us.
   */

  flags = enter_critical_section();

  for (cpu = 0; cpu < CONFIG_SMP_NCPUS; cpu++)
    {
      sched_cpu_scheduler(cpu);
    }

  leave_critical_section(flags);
#else
  /* Only one CPU is configured; run the scheduler logic for it directly */

  sched_cpu_scheduler(0);
#endif
}
#else
# define sched_process_scheduler()
#endif

View File

@ -142,13 +142,14 @@ static struct timespec g_stop_time;
****************************************************************************/
/****************************************************************************
* Name: sched_process_scheduler
* Name: sched_cpu_scheduler
*
* Description:
* Check for operations specific to scheduling policy of the currently
* active task.
* active task on a single CPU.
*
* Inputs:
* cpu - The CPU that we are performing the scheduler operations on.
* ticks - The number of ticks that have elapsed on the interval timer.
* noswitches - True: Can't do context switches now.
*
@ -166,10 +167,10 @@ static struct timespec g_stop_time;
****************************************************************************/
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static inline uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches)
static inline uint32_t sched_cpu_scheduler(int cpu, uint32_t ticks, bool noswitches)
{
FAR struct tcb_s *rtcb = this_task();
FAR struct tcb_s *ntcb = this_task();
FAR struct tcb_s *rtcb = current_task(cpu);
FAR struct tcb_s *ntcb = current_task(cpu);
uint32_t ret = 0;
#if CONFIG_RR_INTERVAL > 0
@ -216,7 +217,7 @@ static inline uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches)
* the new task at the head of the ready to run list.
*/
ntcb = this_task();
ntcb = current_task(cpu);
/* Check if the new task at the head of the ready-to-run has changed. */
@ -240,6 +241,63 @@ static inline uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches)
return ret;
}
#endif
/****************************************************************************
* Name: sched_process_scheduler
*
* Description:
* Check for operations specific to scheduling policy of the currently
* active task on each configured CPU.
*
* Inputs:
* ticks - The number of ticks that have elapsed on the interval timer.
* noswitches - True: Can't do context switches now.
*
* Return Value:
* The number of ticks remaining until the next time slice expires.
* Zero is returned if there is no time slicing (i.e., the task at the
* head of the ready-to-run list does not support round robin
* scheduling).
*
* The value one may be returned under certain circumstances that
* probably can't happen. The value one is the minimal timer setup and
* it means that a context switch is needed now, but cannot be performed
* because noswitches == true.
****************************************************************************/
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static inline uint32_t sched_process_scheduler(uint32_t ticks,
                                               bool noswitches)
{
#ifdef CONFIG_SMP
  uint32_t next = UINT32_MAX;
  uint32_t slice;
  irqstate_t flags;
  int cpu;

  /* Visit every configured CPU under the critical section and keep the
   * shortest non-zero time slice reported by any of them.
   */

  flags = enter_critical_section();

  for (cpu = 0; cpu < CONFIG_SMP_NCPUS; cpu++)
    {
      slice = sched_cpu_scheduler(cpu, ticks, noswitches);
      if (slice > 0 && slice < next)
        {
          next = slice;
        }
    }

  leave_critical_section(flags);

  /* UINT32_MAX means no CPU reported an active time slice; return zero
   * to indicate that no time slicing is in effect.
   */

  return next == UINT32_MAX ? 0 : next;
#else
  /* Only one CPU is configured; delegate to its scheduler directly */

  return sched_cpu_scheduler(0, ticks, noswitches);
#endif
}
#else
# define sched_process_scheduler(t,n) (0)
#endif
@ -656,4 +714,5 @@ void sched_timer_reassess(void)
nexttime = sched_timer_cancel();
sched_timer_start(nexttime);
}
#endif /* CONFIG_SCHED_TICKLESS */