This patch prevents heap corruption in the case below.

  TASK A                              TASK B
                                      malloc()
                                        mm_takesemaphore()
                                          heap holder is set to TASK B
                  <--- preempted
  ...
  task_exit()
    The current task is set to TASK B
    Try to release the TCB and stack memory
      free()
        mm_takesemaphore() - successfully obtains the semaphore because
                             the current task and the heap holder are
                             the same
        Free memory .... heap corruption.

This change forces all de-allocations via sched_kfree() and sched_ufree()
to be delayed.  Eliminating the immediate de-allocation prevents the above
problem with the re-entrant semaphore because the deallocation always
occurs on the worker thread, never on TASK B.  There could be consequences
in the timing of memory availability.  We will see.
parent c24fdb3ada
commit 91aa26774b
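The race in the commit message comes down to the heap semaphore's re-entrancy test: the take operation lets a caller straight through whenever the recorded holder matches whatever the scheduler currently reports as the running task. The stand-alone sketch below is not NuttX code; fake_take(), g_current_task and the task/lock structures are invented purely to illustrate how an exit path that runs while the current-task pointer already names TASK B slips past that ownership check even though TASK B is still in the middle of its own malloc().

/* Stand-alone illustration (NOT the NuttX implementation).  fake_take(),
 * g_current_task and the structures below are invented for this sketch.
 */

#include <stdio.h>

struct task
{
  const char *name;
};

static struct task task_a = { "TASK A" };
static struct task task_b = { "TASK B" };

/* Whatever the scheduler bookkeeping currently calls the running task. */

static struct task *g_current_task;

struct fake_heap_lock
{
  int          count;    /* Recursion depth */
  struct task *holder;   /* Task recorded as owning the heap */
};

static struct fake_heap_lock g_heap_lock;

/* Re-entrant take: if the recorded holder matches the current task, bump
 * the count and return immediately instead of blocking.
 */

static void fake_take(struct fake_heap_lock *lock)
{
  if (lock->count > 0 && lock->holder == g_current_task)
    {
      lock->count++;                 /* Re-entry: no waiting */
    }
  else
    {
      /* A real implementation would block here until the lock is free. */

      lock->count  = 1;
      lock->holder = g_current_task;
    }
}

int main(void)
{
  /* TASK B enters malloc() and takes the heap lock, then is preempted. */

  g_current_task = &task_b;
  fake_take(&g_heap_lock);
  printf("%s holds the heap lock, count=%d\n",
         g_heap_lock.holder->name, g_heap_lock.count);

  /* Exit processing later runs free() while the scheduler's current-task
   * pointer has already been switched to TASK B, even though TASK B's
   * malloc() is only half finished.
   */

  printf("%s runs task_exit(), but the current task is already %s\n",
         task_a.name, g_current_task->name);

  fake_take(&g_heap_lock);           /* Ownership test passes silently */
  printf("free() path also got the lock, count=%d -> the heap can be\n"
         "modified underneath the interrupted malloc()\n",
         g_heap_lock.count);
  return 0;
}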
@@ -85,37 +85,27 @@ void sched_ufree(FAR void *address)
    * must have exclusive access to the memory manager to do this.
    */
 
-  if (up_interrupt_context() || kumm_trysemaphore() != 0)
-    {
-      irqstate_t flags;
+  irqstate_t flags;
 
-      /* Yes.. Make sure that this is not a attempt to free kernel memory
-       * using the user deallocator.
-       */
+  /* Yes.. Make sure that this is not a attempt to free kernel memory
+   * using the user deallocator.
+   */
 
-      flags = enter_critical_section();
+  flags = enter_critical_section();
 #if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
      defined(CONFIG_MM_KERNEL_HEAP)
-      DEBUGASSERT(!kmm_heapmember(address));
+  DEBUGASSERT(!kmm_heapmember(address));
 #endif
 
-      /* Delay the deallocation until a more appropriate time. */
+  /* Delay the deallocation until a more appropriate time. */
 
-      sq_addlast((FAR sq_entry_t *)address,
-                 (FAR sq_queue_t *)&g_delayed_kufree);
+  sq_addlast((FAR sq_entry_t *)address,
+             (FAR sq_queue_t *)&g_delayed_kufree);
 
-      /* Signal the worker thread that is has some clean up to do */
+  /* Signal the worker thread that is has some clean up to do */
 
-      sched_signal_free();
-      leave_critical_section(flags);
-    }
-  else
-    {
-      /* No.. just deallocate the memory now. */
-
-      kumm_free(address);
-      kumm_givesemaphore();
-    }
+  sched_signal_free();
+  leave_critical_section(flags);
 #endif
 }
 
@@ -129,32 +119,22 @@ void sched_kfree(FAR void *address)
    * must have exclusive access to the memory manager to do this.
    */
 
-  if (up_interrupt_context() || kmm_trysemaphore() != 0)
-    {
-      /* Yes.. Make sure that this is not a attempt to free user memory
-       * using the kernel deallocator.
-       */
+  /* Yes.. Make sure that this is not a attempt to free user memory
+   * using the kernel deallocator.
+   */
 
-      flags = enter_critical_section();
-      DEBUGASSERT(kmm_heapmember(address));
+  flags = enter_critical_section();
+  DEBUGASSERT(kmm_heapmember(address));
 
-      /* Delay the deallocation until a more appropriate time. */
+  /* Delay the deallocation until a more appropriate time. */
 
-      sq_addlast((FAR sq_entry_t *)address,
-                 (FAR sq_queue_t *)&g_delayed_kfree);
+  sq_addlast((FAR sq_entry_t *)address,
+             (FAR sq_queue_t *)&g_delayed_kfree);
 
-      /* Signal the worker thread that is has some clean up to do */
+  /* Signal the worker thread that is has some clean up to do */
 
-      sched_signal_free();
-      leave_critical_section(flags);
-    }
-  else
-    {
-      /* No.. just deallocate the memory now. */
-
-      kmm_free(address);
-      kmm_givesemaphore();
-    }
+  sched_signal_free();
+  leave_critical_section(flags);
 }
 #endif
 
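Both hunks now rely entirely on the delayed-free path: the block being freed is threaded onto g_delayed_kufree or g_delayed_kfree with sq_addlast(), and sched_signal_free() wakes the worker that performs the real free later. The sketch below is a rough user-space model of that pattern, not the NuttX implementation: a pthread mutex and condition variable stand in for enter_critical_section() and sched_signal_free(), and names such as deferred_free() and garbage_worker() are invented.

/* Rough user-space model of the "defer the free to a worker" pattern the
 * patch switches to unconditionally.  This is NOT the NuttX code: a
 * pthread mutex/condvar stands in for enter_critical_section() and
 * sched_signal_free(), and deferred_free()/garbage_worker() are invented
 * names.  In NuttX the queued entries live on g_delayed_kufree or
 * g_delayed_kfree and the work queue performs the garbage collection.
 */

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Like sq_addlast() on the freed block itself, the block being freed is
 * reused as the list node, so no extra allocation is needed.
 */

struct delayed_node
{
  struct delayed_node *next;
};

static struct delayed_node *g_delayed_head;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_wake = PTHREAD_COND_INITIALIZER;

/* Deferred free: never calls free() in the caller's context. */

static void deferred_free(void *address)
{
  struct delayed_node *node = (struct delayed_node *)address;

  pthread_mutex_lock(&g_lock);
  node->next     = g_delayed_head;
  g_delayed_head = node;
  pthread_mutex_unlock(&g_lock);

  pthread_cond_signal(&g_wake);      /* "Worker, you have cleanup to do" */
}

/* Worker thread: drains the queue and performs the real free(). */

static void *garbage_worker(void *arg)
{
  (void)arg;

  for (;;)
    {
      pthread_mutex_lock(&g_lock);
      while (g_delayed_head == NULL)
        {
          pthread_cond_wait(&g_wake, &g_lock);
        }

      struct delayed_node *list = g_delayed_head;
      g_delayed_head = NULL;
      pthread_mutex_unlock(&g_lock);

      while (list != NULL)
        {
          struct delayed_node *next = list->next;
          printf("worker freeing %p\n", (void *)list);
          free(list);
          list = next;
        }
    }

  return NULL;
}

int main(void)
{
  pthread_t worker;

  pthread_create(&worker, NULL, garbage_worker, NULL);

  /* Any task (or, in the real code, an interrupt handler) can queue
   * blocks; each must be at least sizeof(struct delayed_node) bytes.
   */

  deferred_free(malloc(32));
  deferred_free(malloc(64));

  sleep(1);                          /* Give the worker a chance to run */
  return 0;
}

Because the real free() now always happens in the worker's context, it can never run as the task whose identity the heap semaphore's re-entrancy check keys on; the cost, as the commit message notes, is that freed memory becomes available slightly later.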