A few fixes for the per-process load average calculation

Gregory Nutt 2014-02-23 12:50:51 -06:00
parent cf4a362ab5
commit 3b2a3991dd
5 changed files with 76 additions and 59 deletions

View File

@@ -573,7 +573,7 @@ static ssize_t proc_loadavg(FAR struct proc_file_s *procfile,
{
uint32_t tmp;
tmp = 1000 - (1000 * cpuload.active) / cpuload.total;
tmp = (1000 * cpuload.active) / cpuload.total;
intpart = tmp / 10;
fracpart = tmp - 10 * intpart;
}
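
For context: as rendered above, the first tmp assignment is the removed line and the second is its replacement. The old expression reported the idle share (1000 minus the active share in tenths of a percent), so the reported load came out inverted. A minimal standalone sketch of the corrected arithmetic, using invented sample counts (375 active ticks out of 3000 total):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t active = 375;   /* hypothetical active ticks for one task */
  uint32_t total  = 3000;  /* hypothetical total (like g_cpuload_total) */
  uint32_t tmp;
  uint32_t intpart;
  uint32_t fracpart;

  /* Scale to tenths of a percent, as proc_loadavg() does */

  tmp      = (1000 * active) / total;  /* 375000 / 3000 = 125 */
  intpart  = tmp / 10;                 /* 12 */
  fracpart = tmp - 10 * intpart;       /* 5 */

  printf("%lu.%lu%%\n", (unsigned long)intpart, (unsigned long)fracpart);
  return 0;
}

This prints "12.5%"; with the removed expression the same inputs would yield "87.5%", the idle share rather than the load.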

View File

@@ -255,8 +255,10 @@ int sched_reprioritize(FAR struct tcb_s *tcb, int sched_priority);
#else
# define sched_reprioritize(tcb,sched_priority) sched_setpriority(tcb,sched_priority)
#endif
#ifdef CONFIG_SCHED_CPULOAD
void weak_function sched_process_cpuload(void);
#endif
bool sched_verifytcb(FAR struct tcb_s *tcb);
int sched_releasetcb(FAR struct tcb_s *tcb, uint8_t ttype);
#endif /* __SCHED_OS_INTERNAL_H */
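
Because the declaration carries weak_function, the scheduler's default implementation can be displaced by a strong definition anywhere else in the link. A hypothetical override sketch, not part of this commit, with a placeholder body:

#include <nuttx/config.h>

#ifdef CONFIG_SCHED_CPULOAD
/* A non-weak definition anywhere in the link supersedes the weak
 * default, so a platform can substitute its own sampling policy.
 */

void sched_process_cpuload(void)
{
  /* Board-specific CPU load sampling would go here */
}
#endif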

View File

@@ -65,6 +65,12 @@
* Private Variables
************************************************************************/
/* This is the total number of clock tick counts. Essentially the
* 'denominator' for all CPU load calculations.
*/
volatile uint32_t g_cpuload_total;
/************************************************************************
* Private Functions
************************************************************************/
@@ -73,6 +79,57 @@
* Public Functions
************************************************************************/
/************************************************************************
* Name: sched_process_cpuload
*
* Description:
* Collect data that can be used for CPU load measurements.
*
* Inputs:
* None
*
* Return Value:
* None
*
************************************************************************/
void weak_function sched_process_cpuload(void)
{
FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
int hash_index;
int i;
/* Increment the count on the currently executing thread
 *
 * NOTE also that CPU load measurement data is retained in the g_pidhash
 * table vs. in the TCB which would seem to be the more logical place.  It
 * is placed in the hash table, instead, to facilitate CPU load adjustments
 * on all threads during timer interrupt handling.  sched_foreach() could
 * do this too, but would require a little more overhead.
 */
hash_index = PIDHASH(rtcb->pid);
g_pidhash[hash_index].ticks++;
/* Increment the total tick count.  If the accumulated tick value exceeds a
 * time constant, then shift the accumulators.
 */
if (++g_cpuload_total > (CONFIG_SCHED_CPULOAD_TIMECONSTANT * CLOCKS_PER_SEC))
{
/* Divide the tick count for every task by two */
for (i = 0; i < CONFIG_MAX_TASKS; i++)
{
g_pidhash[i].ticks >>= 1;
}
/* Divide the total tick count by two */
g_cpuload_total >>= 1;
}
}
/****************************************************************************
* Function: clock_cpuload
*
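
The halving step in sched_process_cpuload() above is what keeps the counters bounded: once the total crosses CONFIG_SCHED_CPULOAD_TIMECONSTANT seconds' worth of ticks, every accumulator is divided by two, so older activity decays geometrically while the active/total ratios are preserved. A self-contained sketch of the same scheme, with invented names and a synthetic workload in which task 1 receives every fourth tick:

#include <stdint.h>
#include <stdio.h>

#define TIMECONSTANT_TICKS 10000  /* stands in for TIMECONSTANT * CLOCKS_PER_SEC */
#define NTASKS 4

static uint32_t g_ticks[NTASKS];  /* per-task counters (like g_pidhash[].ticks) */
static uint32_t g_total;          /* shared denominator (like g_cpuload_total) */

static void count_tick(int running)
{
  int i;

  g_ticks[running]++;
  if (++g_total > TIMECONSTANT_TICKS)
    {
      for (i = 0; i < NTASKS; i++)
        {
          g_ticks[i] >>= 1;  /* halve every per-task count */
        }

      g_total >>= 1;         /* and the total, preserving the ratios */
    }
}

int main(void)
{
  int t;
  uint32_t permille;

  for (t = 0; t < 100000; t++)
    {
      count_tick((t % 4) == 0 ? 1 : 0);  /* task 1 gets 25% of the ticks */
    }

  permille = (1000 * g_ticks[1]) / g_total;
  printf("task 1 load: %lu.%lu%%\n",
         (unsigned long)(permille / 10), (unsigned long)(permille % 10));
  return 0;
}

Compiled and run, this reports task 1 at roughly 25.0%, matching its share of the synthetic tick stream.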

View File

@@ -66,14 +66,6 @@
* Public Variables
************************************************************************/
#ifdef CONFIG_SCHED_CPULOAD
/* This is the total number of clock tick counts. Essentially the
* 'denominator' for all CPU load calculations.
*/
volatile uint32_t g_cpuload_total;
#endif
/************************************************************************
* Private Variables
************************************************************************/
@@ -158,54 +150,6 @@ static inline void sched_process_timeslice(void)
# define sched_process_timeslice()
#endif
/************************************************************************
* Name: sched_process_cpuload
*
* Description:
* Collect data that can be used for CPU load measurements.
*
* Inputs:
* None
*
* Return Value:
* None
*
************************************************************************/
#ifdef CONFIG_SCHED_CPULOAD
static inline void sched_process_cpuload(void)
{
FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
int hash_index;
int i;
/* Increment the count on the currently executing thread */
hash_index = PIDHASH(rtcb->pid);
g_pidhash[hash_index].ticks++;
/* Increment the total tick count.  If the accumulated tick value exceeds a
 * time constant, then shift the accumulators.
 */
if (++g_cpuload_total > (CONFIG_SCHED_CPULOAD_TIMECONSTANT * CLOCKS_PER_SEC))
{
/* Divide the tick count for every task by two */
for (i = 0; i < CONFIG_MAX_TASKS; i++)
{
g_pidhash[i].ticks >>= 1;
}
/* Divide the total tick count by two */
g_cpuload_total >>= 1;
}
}
#else
# define sched_process_cpuload()
#endif
/************************************************************************
* Public Functions
************************************************************************/
@@ -253,7 +197,14 @@ void sched_process_timer(void)
* can occur)
*/
sched_process_cpuload();
#ifdef CONFIG_SCHED_CPULOAD
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
if (sched_process_cpuload != NULL)
#endif
{
sched_process_cpuload();
}
#endif
/* Process watchdogs (if in the link) */
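
The NULL comparison above works because an undefined weak symbol resolves to address zero rather than failing the link; calling through it unguarded would branch to NULL in configurations that omit the definition. A stripped-down illustration of the pattern with a hypothetical hook name (shown with the GCC weak attribute, which NuttX wraps as weak_function):

#include <stdio.h>

/* Weak reference: if no definition is linked in, the address is NULL */

extern void optional_hook(void) __attribute__((weak));

int main(void)
{
  if (optional_hook != NULL)
    {
      optional_hook();  /* safe: a definition exists in the link */
    }
  else
    {
      printf("optional_hook not linked in; skipped\n");
    }

  return 0;
}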

View File

@@ -70,7 +70,14 @@ static void sched_releasepid(pid_t pid)
g_pidhash[hash_ndx].tcb = NULL;
g_pidhash[hash_ndx].pid = INVALID_PROCESS_ID;
#ifdef CONFIG_SCHED_CPULOAD
/* Subtract the CPU load count held by this thread from the total for
 * all threads.  Then we can reset the count on this defunct thread
 * to zero.
 */
g_cpuload_total -= g_pidhash[hash_ndx].ticks;
g_pidhash[hash_ndx].ticks = 0;
#endif
}
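
Without this adjustment, ticks charged to exited threads would linger in g_cpuload_total indefinitely and bias every surviving thread's reported load downward. A toy calculation with invented numbers showing why the denominator must shrink:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t total    = 1000;  /* hypothetical g_cpuload_total */
  uint32_t defunct  = 250;   /* ticks held by the exiting thread */
  uint32_t survivor = 300;   /* ticks held by a surviving thread */

  printf("before release: %lu%%\n", (unsigned long)(100 * survivor / total));

  total -= defunct;          /* the sched_releasepid() adjustment */
  printf("after release:  %lu%%\n", (unsigned long)(100 * survivor / total));
  return 0;
}

The survivor's share rises from 30% to 40% once the defunct thread's 250 ticks leave the denominator, correctly reflecting its portion of the time still being accounted.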