sem_holder: Fixes improper restoration of base_priority

in the case of CONFIG_SEM_PREALLOCHOLDERS=0

   The call to sem_restorebaseprio_task can context switch inside the
   sem_foreachholder(sem, sem_restoreholderprioB, stcb); call, prior
   to releasing the holder. The running task is then left as a holder,
   as is the started task, leaving both slots filled and thus failing
   to perform the boost or restoration on the correct tcb.
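
   For context, with CONFIG_SEM_PREALLOCHOLDERS=0 there is no free pool
   of holder structures; the holder records live in slots embedded in
   the semaphore itself. A rough sketch of the relevant structure
   (simplified here, not the verbatim NuttX headers) is:

   /* Simplified sketch, not the verbatim headers: with
    * CONFIG_SEM_PREALLOCHOLDERS=0 the semaphore itself carries the
    * holder slot(s), so if a context switch occurs before the running
    * task's slot is released, both it and the started task stay
    * recorded as holders and the bookkeeping targets the wrong tcb.
    */

   struct semholder_s
   {
   #if CONFIG_SEM_PREALLOCHOLDERS > 0
     FAR struct semholder_s *flink; /* List link when pool-allocated */
   #endif
     FAR void *htcb;                /* Holder TCB (NULL marks a free slot) */
     int16_t counts;                /* Counts held on the semaphore */
   };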

   This PR fixes this by releasing the running task's slot prior to
   the reprioritization that can lead to the context switch. To
   facilitate this, the interface to sem_restoreholderprio needed to
   take the tcb from the holder prior to the holder being freed. In
   the failure case, where sched_verifytcb fails, this adds the
   overhead of looking up the holder.

   There is also the additional thunking on the foreach (the new
   sem_restoreholderprioall wrapper below) to get from the holder to
   holder->htcb.

   An alternate approach could be to leave the interface the same and
   allocate a holder on the stack of sem_restoreholderprioB, copy the
   sem's holder to it, free it as is done in this PR, and then pass
   that address to sem_restoreholderprio as the holder. It could then
   get the holder's tcb, but we would keep the same sem_findholder
   lookup in the sched_verifytcb failure case.
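
   A rough sketch of that alternative (hypothetical; not what this PR
   does), reusing the helpers this PR introduces:

   static int sem_restoreholderprioB(FAR struct semholder_s *pholder,
                                     FAR sem_t *sem, FAR void *arg)
   {
     FAR struct tcb_s *rtcb = this_task();

     if (pholder->htcb == rtcb)
       {
         /* Hypothetical alternative: copy the holder to the stack so
          * sem_restoreholderprio could keep its original
          * (holder-taking) signature, then free the real slot before
          * the reprioritization that may context switch.
          */

         struct semholder_s copy = *pholder;

         sem_findandfreeholder(sem, rtcb);
         (void)sem_restoreholderprio(&copy, sem, arg);
         return 1;
       }

     return 0;
   }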
David Sidrane 2017-03-15 14:02:55 -10:00
parent f9c22461c4
commit 3cc2a4f7c9


@@ -235,6 +235,24 @@ static inline void sem_freeholder(sem_t *sem, FAR struct semholder_s *pholder)
 #endif
 }
 
+/****************************************************************************
+ * Name: sem_findandfreeholder
+ ****************************************************************************/
+
+static inline void sem_findandfreeholder(sem_t *sem, FAR struct tcb_s *htcb)
+{
+  FAR struct semholder_s *pholder = sem_findholder(sem, htcb);
+
+  /* When no more counts are held, remove the holder from the list. The
+   * count was decremented in sem_releaseholder.
+   */
+
+  if (pholder != NULL && pholder->counts <= 0)
+    {
+      sem_freeholder(sem, pholder);
+    }
+}
+
 /****************************************************************************
  * Name: sem_foreachholder
  ****************************************************************************/
@@ -460,10 +478,10 @@ static int sem_dumpholder(FAR struct semholder_s *pholder, FAR sem_t *sem,
  * Name: sem_restoreholderprio
  ****************************************************************************/
 
-static int sem_restoreholderprio(FAR struct semholder_s *pholder,
+static int sem_restoreholderprio(FAR struct tcb_s *htcb,
                                  FAR sem_t *sem, FAR void *arg)
 {
-  FAR struct tcb_s *htcb = (FAR struct tcb_s *)pholder->htcb;
+  FAR struct semholder_s *pholder = 0;
 #if CONFIG_SEM_NNESTPRIO > 0
   FAR struct tcb_s *stcb = (FAR struct tcb_s *)arg;
   int rpriority;
@@ -481,7 +499,11 @@ static int sem_restoreholderprio(FAR struct semholder_s *pholder,
     {
       serr("ERROR: TCB 0x%08x is a stale handle, counts lost\n", htcb);
       DEBUGASSERT(!sched_verifytcb(htcb));
-      sem_freeholder(sem, pholder);
+      pholder = sem_findholder(sem, htcb);
+      if (pholder != NULL)
+        {
+          sem_freeholder(sem, pholder);
+        }
     }
 
   /* Was the priority of the holder thread boosted? If so, then drop its
@@ -600,6 +622,20 @@ static int sem_restoreholderprio(FAR struct semholder_s *pholder,
   return 0;
 }
 
+/****************************************************************************
+ * Name: sem_restoreholderprioall
+ *
+ * Description:
+ *   Reprioritize all holders
+ *
+ ****************************************************************************/
+
+static int sem_restoreholderprioall(FAR struct semholder_s *pholder,
+                                    FAR sem_t *sem, FAR void *arg)
+{
+  return sem_restoreholderprio(pholder->htcb, sem, arg);
+}
+
 /****************************************************************************
  * Name: sem_restoreholderprioA
 *
@@ -614,7 +650,7 @@ static int sem_restoreholderprioA(FAR struct semholder_s *pholder,
   FAR struct tcb_s *rtcb = this_task();
 
   if (pholder->htcb != rtcb)
     {
-      return sem_restoreholderprio(pholder, sem, arg);
+      return sem_restoreholderprio(pholder->htcb, sem, arg);
     }
 
   return 0;
@@ -632,9 +668,18 @@ static int sem_restoreholderprioB(FAR struct semholder_s *pholder,
                                   FAR sem_t *sem, FAR void *arg)
 {
   FAR struct tcb_s *rtcb = this_task();
 
   if (pholder->htcb == rtcb)
     {
-      (void)sem_restoreholderprio(pholder, sem, arg);
+      /* The running task has given up a count on the semaphore.
+       * Release the holder, if all counts have been given up, before
+       * the reprioritization, which can cause a context switch.
+       */
+
+      sem_findandfreeholder(sem, rtcb);
+      (void)sem_restoreholderprio(rtcb, sem, arg);
       return 1;
     }
@@ -687,7 +732,7 @@ static inline void sem_restorebaseprio_irq(FAR struct tcb_s *stcb,
     {
       /* Drop the priority of all holder threads */
 
-      (void)sem_foreachholder(sem, sem_restoreholderprio, stcb);
+      (void)sem_foreachholder(sem, sem_restoreholderprioall, stcb);
     }
 
   /* If there are no tasks waiting for available counts, then all holders
@@ -781,18 +826,8 @@ static inline void sem_restorebaseprio_task(FAR struct tcb_s *stcb,
    * counts, then we need to remove it from the list of holders.
    */
 
-  pholder = sem_findholder(sem, rtcb);
-  if (pholder != NULL)
-    {
-      /* When no more counts are held, remove the holder from the list. The
-       * count was decremented in sem_releaseholder.
-       */
-
-      if (pholder->counts <= 0)
-        {
-          sem_freeholder(sem, pholder);
-        }
-    }
+  sem_findandfreeholder(sem, rtcb);
 }
 
 /****************************************************************************
@@ -1097,7 +1132,7 @@ void sem_canceled(FAR struct tcb_s *stcb, FAR sem_t *sem)
   /* Adjust the priority of every holder as necessary */
 
-  (void)sem_foreachholder(sem, sem_restoreholderprio, stcb);
+  (void)sem_foreachholder(sem, sem_restoreholderprioall, stcb);
 }
 #endif