diff --git a/ChangeLog b/ChangeLog index 921b7014bd..7deb9fa946 100644 --- a/ChangeLog +++ b/ChangeLog @@ -3918,7 +3918,7 @@ the scenario: (1) sched_lock() is called increments the lockcount on the current TCB (i.e., the one at the head of the ready to run list), (2) sched_mergepending is called which may change the task - at the head of the readytorun list, then (2) sched_lock() is called + at the head of the readytorun list, then (3) sched_unlock() is called which decrements the lockcount on the wrong TCB. The failure case that I saw was that pre-emption got disabled in the IDLE thread, locking up the whole system. diff --git a/sched/task_deletecurrent.c b/sched/task_deletecurrent.c index 7ecfb26cc4..e1e06acf67 100644 --- a/sched/task_deletecurrent.c +++ b/sched/task_deletecurrent.c @@ -115,9 +115,12 @@ int task_deletecurrent(void) * does not correspond to the thread that is running. Disabling pre- * emption on this TCB and marking the new ready-to-run task as not * running (see, for example, get_errno_ptr()). + * + * We disable pre-emption here by directly incrementing the lockcount + * (vs. calling sched_lock()). */ - sched_lock(); + rtcb->lockcount++; rtcb->task_state = TSTATE_TASK_READYTORUN; /* Move the TCB to the specified blocked task list and delete it */ @@ -143,7 +146,6 @@ int task_deletecurrent(void) * the lockcount on rctb. */ - DEBUGASSERT(rtcb->lockcount > 0); rtcb->lockcount--; return OK; }