Revert "This patch prevent heap corruption as in below case."

This solution to the problem noted by EunBong Song results in major memory fragmentation and out-of-memory conditions on the PX4 platform.  On that platform, the low-priority work queue runs at a very low priority and essentially never runs while the system is busy.  As a result, the system is slowly starved of memory until failures and bad behavior begin to occur.

An additional patch is coming later that will resolve the original problem in a different way, one that does not cause memory starvation.

This reverts commit 91aa26774b.
Gregory Nutt 2018-12-20 10:11:36 -06:00
parent 9c5d0003aa
commit 9d1c845dcc


@@ -85,27 +85,37 @@ void sched_ufree(FAR void *address)
    * must have exclusive access to the memory manager to do this.
    */
 
-  irqstate_t flags;
+  if (up_interrupt_context() || kumm_trysemaphore() != 0)
+    {
+      irqstate_t flags;
 
-  /* Yes.. Make sure that this is not a attempt to free kernel memory
-   * using the user deallocator.
-   */
+      /* Yes.. Make sure that this is not a attempt to free kernel memory
+       * using the user deallocator.
+       */
 
-  flags = enter_critical_section();
+      flags = enter_critical_section();
 #if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
      defined(CONFIG_MM_KERNEL_HEAP)
-  DEBUGASSERT(!kmm_heapmember(address));
+      DEBUGASSERT(!kmm_heapmember(address));
 #endif
 
-  /* Delay the deallocation until a more appropriate time. */
+      /* Delay the deallocation until a more appropriate time. */
 
-  sq_addlast((FAR sq_entry_t *)address,
-             (FAR sq_queue_t *)&g_delayed_kufree);
+      sq_addlast((FAR sq_entry_t *)address,
+                 (FAR sq_queue_t *)&g_delayed_kufree);
 
-  /* Signal the worker thread that is has some clean up to do */
+      /* Signal the worker thread that is has some clean up to do */
 
-  sched_signal_free();
-  leave_critical_section(flags);
+      sched_signal_free();
+      leave_critical_section(flags);
+    }
+  else
+    {
+      /* No.. just deallocate the memory now. */
+
+      kumm_free(address);
+      kumm_givesemaphore();
+    }
 #endif
 }
@@ -119,22 +129,32 @@ void sched_kfree(FAR void *address)
    * must have exclusive access to the memory manager to do this.
    */
 
-  /* Yes.. Make sure that this is not a attempt to free user memory
-   * using the kernel deallocator.
-   */
+  if (up_interrupt_context() || kmm_trysemaphore() != 0)
+    {
+      /* Yes.. Make sure that this is not a attempt to free user memory
+       * using the kernel deallocator.
+       */
 
-  flags = enter_critical_section();
-  DEBUGASSERT(kmm_heapmember(address));
+      flags = enter_critical_section();
+      DEBUGASSERT(kmm_heapmember(address));
 
-  /* Delay the deallocation until a more appropriate time. */
+      /* Delay the deallocation until a more appropriate time. */
 
-  sq_addlast((FAR sq_entry_t *)address,
-             (FAR sq_queue_t *)&g_delayed_kfree);
+      sq_addlast((FAR sq_entry_t *)address,
+                 (FAR sq_queue_t *)&g_delayed_kfree);
 
-  /* Signal the worker thread that is has some clean up to do */
+      /* Signal the worker thread that is has some clean up to do */
 
-  sched_signal_free();
-  leave_critical_section(flags);
+      sched_signal_free();
+      leave_critical_section(flags);
+    }
+  else
+    {
+      /* No.. just deallocate the memory now. */
+
+      kmm_free(address);
+      kmm_givesemaphore();
+    }
 }
 #endif
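
For context, the pattern this revert restores is: try to take the heap semaphore and free the block immediately when that succeeds, and fall back to the delayed-free queue only when the allocator is unavailable (interrupt context, or the semaphore is already held).  Below is a minimal, standalone sketch of that idea using a pthread mutex in place of the NuttX heap semaphore.  It is not NuttX code; the names heap_lock, deferred_head, and try_free are hypothetical and exist only for this illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Deferred blocks are threaded into a singly linked list through the
 * freed memory itself, similar in spirit to the g_delayed_* queues.
 */

struct delayed_s
{
  struct delayed_s *next;
};

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;
static struct delayed_s *deferred_head;

static void try_free(void *mem)
{
  if (pthread_mutex_trylock(&heap_lock) == 0)
    {
      /* Allocator available: release the memory right away.  This is the
       * path the revert restores; without it, every free is deferred.
       */

      free(mem);
      pthread_mutex_unlock(&heap_lock);
    }
  else
    {
      /* Allocator busy: queue the block for a later garbage-collection
       * pass (single-threaded here, so no locking around the list).
       */

      struct delayed_s *node = (struct delayed_s *)mem;
      node->next = deferred_head;
      deferred_head = node;
    }
}

int main(void)
{
  size_t deferred = 0;

  for (int i = 0; i < 1000; i++)
    {
      try_free(malloc(32));
    }

  for (struct delayed_s *p = deferred_head; p != NULL; p = p->next)
    {
      deferred++;
    }

  /* With the immediate-free path, almost nothing accumulates even if the
   * low-priority collector never runs; unconditional deferral would leave
   * all 1000 blocks queued until that collector is finally scheduled.
   */

  printf("blocks still deferred: %zu\n", deferred);
  return 0;
}

The design point is simply that deferral is a fallback for contexts where the allocator cannot be entered, not the default path; the real NuttX functions additionally guard the queue with a critical section and assert heap membership, as the diff above shows.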