The logic for delayed memory garbage collection of user-space memory will not work as implemented in the kernel build. That is because the correct context is in place to free the user memory when sched_kufree is called, but will not be in place later when the memory garbage collection runs. If this feature is really needed, then some more substantial redesign is required.

This commit is contained in:
Gregory Nutt 2015-09-07 14:40:18 -06:00
parent 65abe20cdb
commit d78d3ef4ab
4 changed files with 54 additions and 7 deletions

View File

@ -152,13 +152,23 @@ volatile dq_queue_t g_inactivetasks;
* while it is within an interrupt handler.
*/
volatile sq_queue_t g_delayed_kufree;
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
defined(CONFIG_MM_KERNEL_HEAP)
volatile sq_queue_t g_delayed_kfree;
#endif
#ifndef CONFIG_BUILD_KERNEL
/* REVISIT: It is not safe to defer user memory de-allocation in the
 * kernel mode build. Why? Because the correct user context will not be
 * in place when these deferred de-allocations are performed. In order to
 * make this work, we would need to do something like: (1) move
 * g_delayed_kufree into the group structure, then (2) traverse the groups
 * to collect garbage on a group-by-group basis.
 */
volatile sq_queue_t g_delayed_kufree;
#endif
/* This is the value of the last process ID assigned to a task */
volatile pid_t g_lastpid;
@ -271,11 +281,13 @@ void os_start(void)
dq_init(&g_waitingforfill);
#endif
dq_init(&g_inactivetasks);
sq_init(&g_delayed_kufree);
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
defined(CONFIG_MM_KERNEL_HEAP)
sq_init(&g_delayed_kfree);
#endif
#ifndef CONFIG_BUILD_KERNEL
sq_init(&g_delayed_kufree);
#endif
/* Initialize the logic that determines unique process IDs. */

View File

@ -172,13 +172,23 @@ extern volatile dq_queue_t g_inactivetasks;
* while it is within an interrupt handler.
*/
extern volatile sq_queue_t g_delayed_kufree;
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
defined(CONFIG_MM_KERNEL_HEAP)
extern volatile sq_queue_t g_delayed_kfree;
#endif
#ifndef CONFIG_BUILD_KERNEL
/* REVISIT: It is not safe to defer user memory de-allocation in the
 * kernel mode build. Why? Because the correct user context will not be
 * in place when these deferred de-allocations are performed. In order to
 * make this work, we would need to do something like: (1) move
 * g_delayed_kufree into the group structure, then (2) traverse the groups
 * to collect garbage on a group-by-group basis.
 */
extern volatile sq_queue_t g_delayed_kufree;
#endif
/* This is the value of the last process ID assigned to a task */
extern volatile pid_t g_lastpid;

View File

@ -85,8 +85,6 @@
void sched_ufree(FAR void *address)
{
irqstate_t flags;
/* Check if this is an attempt to deallocate memory from an exception
* handler. If this function is called from the IDLE task, then we
* must have exclusive access to the memory manager to do this.
@ -94,6 +92,21 @@ void sched_ufree(FAR void *address)
if (up_interrupt_context() || kumm_trysemaphore() != 0)
{
#ifdef CONFIG_BUILD_KERNEL
/* REVISIT: It is not safe to defer user memory de-allocation in the
 * kernel mode build. Why? Because the correct user context is in place
 * now but will not be in place when the deferred de-allocation is
 * performed. In order to make this work, we would need to do
 * something like: (1) move g_delayed_kufree into the group
 * structure, then (2) traverse the groups to collect garbage on a
 * group-by-group basis.
 */
PANIC();
#else
irqstate_t flags;
/* Yes.. Make sure that this is not an attempt to free kernel memory
 * using the user deallocator.
 */
@ -114,6 +127,7 @@ void sched_ufree(FAR void *address)
work_signal(LPWORK);
#endif
irqrestore(flags);
#endif
}
else
{

View File

@ -78,6 +78,16 @@
static inline void sched_kucleanup(void)
{
#ifdef CONFIG_BUILD_KERNEL
/* REVISIT: It is not safe to defer user memory de-allocation in the
 * kernel mode build. Why? Because the correct user context will not be
 * in place when these deferred de-allocations are performed. In order
 * to make this work, we would need to do something like: (1) move
 * g_delayed_kufree into the group structure, then (2) traverse the
 * groups to collect garbage on a group-by-group basis.
 */
#else
irqstate_t flags;
FAR void *address;
@ -106,6 +116,7 @@ static inline void sched_kucleanup(void)
kumm_free(address);
}
}
#endif
}
/****************************************************************************