mempool: make memdump SMP-safe, since syslog may switch context

Remove alist and instead traverse blocks directly by
physical address.

At the same time, use a `magic` guard word to mark
whether each block is allocated or free, and to check
for out-of-bounds writes.

Signed-off-by: anjiahao <anjiahao@xiaomi.com>
Author: anjiahao
Date: 2024-06-05 22:21:29 +08:00
Committed by: Xiang Xiao
Parent: 659f3b5ff6
Commit: 73f506b65f
2 changed files with 178 additions and 95 deletions
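
As background, here is a minimal sketch of the guard-word idea, not the commit's code (the MY_* names and my_footer_s are hypothetical): a magic value stored next to each block records whether the block is free or allocated, so a mismatched value exposes a double free, a stray allocation, or an out-of-bounds write that clobbered the word. The diff below applies this to struct mempool_backtrace_s, changing the mempool header first and then the implementation file.

#include <assert.h>
#include <stdint.h>

#define MY_MAGIC_FREE  0xAAAAAAAAu   /* Block sits on a free queue */
#define MY_MAGIC_ALLOC 0x55555555u   /* Block is handed out */

/* Hypothetical footer placed just past the user payload: an overflow
 * of the payload corrupts `magic` first, which the asserts catch. */

struct my_footer_s
{
  uint32_t magic;
};

static void my_mark_alloc(struct my_footer_s *f)
{
  assert(f->magic == MY_MAGIC_FREE);   /* Catches reuse or corruption */
  f->magic = MY_MAGIC_ALLOC;
}

static void my_mark_free(struct my_footer_s *f)
{
  assert(f->magic == MY_MAGIC_ALLOC);  /* Catches a double free */
  f->magic = MY_MAGIC_FREE;
}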


@@ -110,13 +110,9 @@ struct mempool_s
sq_queue_t queue; /* The free block queue in normal mempool */
sq_queue_t iqueue; /* The free block queue in interrupt mempool */
sq_queue_t equeue; /* The expand block queue for normal mempool */
#if CONFIG_MM_BACKTRACE >= 0
struct list_node alist; /* The used block list in mempool */
#else
size_t nalloc; /* The number of used block in mempool */
#endif
spinlock_t lock; /* The protect lock to mempool */
sem_t waitsem; /* The semaphore of waiter get free block */
size_t nalloc; /* The number of used block in mempool */
spinlock_t lock; /* The protect lock to mempool */
sem_t waitsem; /* The semaphore of waiter get free block */
#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMPOOL)
struct mempool_procfs_entry_s procfs; /* The entry of procfs */
#endif
@@ -125,7 +121,9 @@ struct mempool_s
#if CONFIG_MM_BACKTRACE >= 0
struct mempool_backtrace_s
{
struct list_node node;
unsigned int magic; /* The guard word: marks whether the block is
* allocated or free, and helps detect
* out-of-bounds writes.
*/
pid_t pid;
unsigned long seqno; /* The sequence of memory malloc */
# if CONFIG_MM_BACKTRACE > 0
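
This hunk relies on each block being laid out as payload followed by metadata, so the metadata of block i in a region sits at base + i * stride + payload. A rough sketch of that addressing (my_block_meta() is a hypothetical helper; it assumes stride = MEMPOOL_REALBLOCKSIZE(pool), i.e. payload plus metadata, and payload = pool->blocksize, as used in the implementation file below):

/* Sketch only: mirrors the address arithmetic used by
 * mempool_add_queue() and mempool_foreach() below. For block i in a
 * region starting at `base`:
 *   base + i * stride             -> start of the payload
 *   base + i * stride + payload   -> the trailing metadata
 */

static FAR struct mempool_backtrace_s *
my_block_meta(FAR char *base, size_t i, size_t stride, size_t payload)
{
  return (FAR struct mempool_backtrace_s *)(base + i * stride + payload);
}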


@@ -34,9 +34,26 @@
#include "kasan/kasan.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#undef ALIGN_UP
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
#if CONFIG_MM_BACKTRACE >= 0
#define MEMPOOL_MAGIC_FREE 0xAAAAAAAA
#define MEMPOOL_MAGIC_ALLOC 0x55555555
/****************************************************************************
* Private Types
****************************************************************************/
typedef void (*mempool_callback_t)(FAR struct mempool_s *pool,
FAR struct mempool_backtrace_s *buf,
FAR const void *input, FAR void *output);
#endif
/****************************************************************************
* Private Functions
****************************************************************************/
@@ -64,12 +81,20 @@ mempool_remove_queue(FAR struct mempool_s *pool, FAR sq_queue_t *queue)
return ret;
}
static inline void mempool_add_queue(FAR sq_queue_t *queue,
static inline void mempool_add_queue(FAR struct mempool_s *pool,
FAR sq_queue_t *queue,
FAR char *base, size_t nblks,
size_t blocksize)
{
while (nblks-- > 0)
{
#if CONFIG_MM_BACKTRACE >= 0
FAR struct mempool_backtrace_s *buf =
(FAR struct mempool_backtrace_s *)
(base + nblks * blocksize + pool->blocksize);
buf->magic = MEMPOOL_MAGIC_FREE;
#endif
sq_addlast((FAR sq_entry_t *)(base + blocksize * nblks), queue);
}
}
@@ -78,7 +103,8 @@ static inline void mempool_add_queue(FAR sq_queue_t *queue,
static inline void mempool_add_backtrace(FAR struct mempool_s *pool,
FAR struct mempool_backtrace_s *buf)
{
list_add_head(&pool->alist, &buf->node);
DEBUGASSERT(buf->magic == MEMPOOL_MAGIC_FREE);
buf->magic = MEMPOOL_MAGIC_ALLOC;
buf->pid = _SCHED_GETTID();
buf->seqno = g_mm_seqno++;
# if CONFIG_MM_BACKTRACE > 0
@@ -98,6 +124,120 @@ static inline void mempool_add_backtrace(FAR struct mempool_s *pool,
}
# endif
}
static void mempool_foreach(FAR struct mempool_s *pool,
mempool_callback_t callback,
FAR const void *input, FAR void *output)
{
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
FAR struct mempool_backtrace_s *buf;
FAR sq_entry_t *entry;
FAR char *base;
size_t nblks;
if (pool->ibase != NULL)
{
nblks = pool->interruptsize / blocksize;
while (nblks--)
{
buf = (FAR struct mempool_backtrace_s *)
(pool->ibase + nblks * blocksize + pool->blocksize);
callback(pool, buf, input, output);
}
}
sq_for_every(&pool->equeue, entry)
{
nblks = (pool->expandsize - sizeof(sq_entry_t)) / blocksize;
base = (FAR char *)entry - (nblks * blocksize);
while (nblks--)
{
buf = (FAR struct mempool_backtrace_s *)
(base + nblks * blocksize + pool->blocksize);
callback(pool, buf, input, output);
}
}
}
static void mempool_info_task_callback(FAR struct mempool_s *pool,
FAR struct mempool_backtrace_s *buf,
FAR const void *input,
FAR void *output)
{
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
FAR const struct malltask *task = input;
FAR struct mallinfo_task *info = output;
if (buf->magic == MEMPOOL_MAGIC_FREE)
{
return;
}
if ((MM_DUMP_ASSIGN(task->pid, buf->pid) ||
MM_DUMP_ALLOC(task->pid, buf->pid) ||
MM_DUMP_LEAK(task->pid, buf->pid)) &&
buf->seqno >= task->seqmin && buf->seqno <= task->seqmax)
{
info->aordblks++;
info->uordblks += blocksize;
}
}
static void mempool_memdump_callback(FAR struct mempool_s *pool,
FAR struct mempool_backtrace_s *buf,
FAR const void *input, FAR void *output)
{
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
FAR const struct mm_memdump_s *dump = input;
if (buf->magic == MEMPOOL_MAGIC_FREE)
{
return;
}
if ((MM_DUMP_ASSIGN(dump->pid, buf->pid) ||
MM_DUMP_ALLOC(dump->pid, buf->pid) ||
MM_DUMP_LEAK(dump->pid, buf->pid)) &&
buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
{
char tmp[CONFIG_MM_BACKTRACE * MM_PTR_FMT_WIDTH + 1] = "";
# if CONFIG_MM_BACKTRACE > 0
FAR const char *format = " %0*p";
int i;
for (i = 0; i < CONFIG_MM_BACKTRACE &&
buf->backtrace[i]; i++)
{
snprintf(tmp + i * MM_PTR_FMT_WIDTH,
sizeof(tmp) - i * MM_PTR_FMT_WIDTH,
format, MM_PTR_FMT_WIDTH - 1,
buf->backtrace[i]);
}
# endif
syslog(LOG_INFO, "%6d%12zu%12lu%*p%s\n",
buf->pid, blocksize, buf->seqno,
MM_PTR_FMT_WIDTH,
((FAR char *)buf - blocksize), tmp);
}
}
static void
mempool_memdump_free_callback(FAR struct mempool_s *pool,
FAR struct mempool_backtrace_s *buf,
FAR const void *input, FAR void *output)
{
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
if (buf->magic == MEMPOOL_MAGIC_FREE)
{
syslog(LOG_INFO, "%12zu%*p\n",
blocksize, MM_PTR_FMT_WIDTH, ((FAR char *)buf - blocksize));
}
}
#endif
/****************************************************************************
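
In short, mempool_foreach() above replaces the old alist walk: every block slot is visited by address arithmetic alone, so the traversal stays valid even if syslog() inside a callback blocks and another CPU allocates or frees blocks meanwhile; the guard word then tells each callback whether a slot is free or allocated. A condensed sketch of the pattern, with hypothetical names and a single region:

typedef void (*my_visit_t)(FAR struct mempool_backtrace_s *buf,
                           FAR void *arg);

/* Visit the metadata of every slot in one region. No list pointers
 * are followed, so a concurrent alloc/free cannot break the walk; at
 * worst a slot's magic/pid/seqno fields are read slightly stale. */

static void my_walk(FAR char *base, size_t nblks, size_t stride,
                    size_t payload, my_visit_t visit, FAR void *arg)
{
  while (nblks-- > 0)
    {
      visit((FAR struct mempool_backtrace_s *)
            (base + nblks * stride + payload), arg);
    }
}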
@@ -128,13 +268,7 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
sq_init(&pool->queue);
sq_init(&pool->iqueue);
sq_init(&pool->equeue);
#if CONFIG_MM_BACKTRACE >= 0
list_initialize(&pool->alist);
#else
pool->nalloc = 0;
#endif
if (pool->interruptsize >= blocksize)
{
size_t ninterrupt = pool->interruptsize / blocksize;
@@ -146,7 +280,8 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
return -ENOMEM;
}
mempool_add_queue(&pool->iqueue, pool->ibase, ninterrupt, blocksize);
mempool_add_queue(pool, &pool->iqueue,
pool->ibase, ninterrupt, blocksize);
kasan_poison(pool->ibase, size);
}
else
@@ -171,7 +306,8 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
return -ENOMEM;
}
mempool_add_queue(&pool->queue, base, ninitial, blocksize);
mempool_add_queue(pool, &pool->queue,
base, ninitial, blocksize);
sq_addlast((FAR sq_entry_t *)(base + ninitial * blocksize),
&pool->equeue);
kasan_poison(base, size);
@@ -225,7 +361,8 @@ retry:
blk = mempool_remove_queue(pool, &pool->iqueue);
if (blk == NULL)
{
goto out_with_lock;
spin_unlock_irqrestore(&pool->lock, flags);
return blk;
}
}
else
@@ -247,7 +384,8 @@ retry:
kasan_poison(base, size);
flags = spin_lock_irqsave(&pool->lock);
mempool_add_queue(&pool->queue, base, nexpand, blocksize);
mempool_add_queue(pool, &pool->queue,
base, nexpand, blocksize);
sq_addlast((FAR sq_entry_t *)(base + nexpand * blocksize),
&pool->equeue);
blk = mempool_remove_queue(pool, &pool->queue);
@@ -264,6 +402,9 @@ retry:
}
}
pool->nalloc++;
spin_unlock_irqrestore(&pool->lock, flags);
kasan_unpoison(blk, pool->blocksize);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(blk, MM_ALLOC_MAGIC, pool->blocksize);
#endif
@@ -271,12 +412,8 @@ retry:
#if CONFIG_MM_BACKTRACE >= 0
mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
((FAR char *)blk + pool->blocksize));
#else
pool->nalloc++;
#endif
kasan_unpoison(blk, pool->blocksize);
out_with_lock:
spin_unlock_irqrestore(&pool->lock, flags);
return blk;
}
@@ -299,14 +436,15 @@ void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
FAR struct mempool_backtrace_s *buf =
(FAR struct mempool_backtrace_s *)((FAR char *)blk + pool->blocksize);
/* Check double free */
/* Check for double free or out-of-bounds write */
DEBUGASSERT(buf->magic == MEMPOOL_MAGIC_ALLOC);
buf->magic = MEMPOOL_MAGIC_FREE;
DEBUGASSERT(list_in_list(&buf->node));
list_delete(&buf->node);
#else
pool->nalloc--;
#endif
pool->nalloc--;
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(blk, MM_FREE_MAGIC, pool->blocksize);
#endif
@@ -366,11 +504,7 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
flags = spin_lock_irqsave(&pool->lock);
info->ordblks = sq_count(&pool->queue);
info->iordblks = sq_count(&pool->iqueue);
#if CONFIG_MM_BACKTRACE >= 0
info->aordblks = list_length(&pool->alist);
#else
info->aordblks = pool->nalloc;
#endif
info->arena = sq_count(&pool->equeue) * sizeof(sq_entry_t) +
(info->aordblks + info->ordblks + info->iordblks) * blocksize;
spin_unlock_irqrestore(&pool->lock, flags);
@@ -412,29 +546,15 @@ mempool_info_task(FAR struct mempool_s *pool,
info.aordblks += count;
info.uordblks += count * blocksize;
}
#if CONFIG_MM_BACKTRACE < 0
else if (task->pid == PID_MM_ALLOC)
{
info.aordblks += pool->nalloc;
info.uordblks += pool->nalloc * blocksize;
}
#else
#if CONFIG_MM_BACKTRACE >= 0
else
{
FAR struct mempool_backtrace_s *buf;
list_for_every_entry(&pool->alist, buf,
struct mempool_backtrace_s, node)
{
if ((MM_DUMP_ASSIGN(task->pid, buf->pid) ||
MM_DUMP_ALLOC(task->pid, buf->pid) ||
MM_DUMP_LEAK(task->pid, buf->pid)) &&
buf->seqno >= task->seqmin && buf->seqno <= task->seqmax)
{
info.aordblks++;
info.uordblks += blocksize;
}
}
mempool_foreach(pool, mempool_info_task_callback, task, &info);
}
#endif
@@ -464,53 +584,22 @@ mempool_info_task(FAR struct mempool_s *pool,
void mempool_memdump(FAR struct mempool_s *pool,
FAR const struct mm_memdump_s *dump)
{
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
#if CONFIG_MM_BACKTRACE >= 0
if (dump->pid == PID_MM_FREE)
{
FAR sq_entry_t *entry;
sq_for_every(&pool->queue, entry)
{
syslog(LOG_INFO, "%12zu%*p\n",
blocksize, BACKTRACE_PTR_FMT_WIDTH, (FAR char *)entry);
}
sq_for_every(&pool->iqueue, entry)
{
syslog(LOG_INFO, "%12zu%*p\n",
blocksize, BACKTRACE_PTR_FMT_WIDTH, (FAR char *)entry);
}
mempool_foreach(pool, mempool_memdump_free_callback, NULL, NULL);
}
#if CONFIG_MM_BACKTRACE >= 0
else
{
FAR struct mempool_backtrace_s *buf;
list_for_every_entry(&pool->alist, buf,
struct mempool_backtrace_s, node)
{
if ((MM_DUMP_ASSIGN(dump->pid, buf->pid) ||
MM_DUMP_ALLOC(dump->pid, buf->pid) ||
MM_DUMP_LEAK(dump->pid, buf->pid)) &&
buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
{
# if CONFIG_MM_BACKTRACE > 0
char tmp[BACKTRACE_BUFFER_SIZE(CONFIG_MM_BACKTRACE)];
backtrace_format(tmp, sizeof(tmp), buf->backtrace,
CONFIG_MM_BACKTRACE);
# else
char *tmp = "";
# endif
syslog(LOG_INFO, "%6d%12zu%12lu%*p %s\n",
buf->pid, blocksize, buf->seqno,
BACKTRACE_PTR_FMT_WIDTH, ((FAR char *)buf - blocksize),
tmp);
}
}
mempool_foreach(pool, mempool_memdump_callback, dump, NULL);
}
#else
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
/* Avoid race condition */
syslog(LOG_INFO, "%12zu%*p skip block dump\n",
blocksize, MM_PTR_FMT_WIDTH, pool);
#endif
}
@@ -530,11 +619,7 @@ int mempool_deinit(FAR struct mempool_s *pool)
FAR sq_entry_t *blk;
size_t count = 0;
#if CONFIG_MM_BACKTRACE >= 0
if (!list_is_empty(&pool->alist))
#else
if (pool->nalloc != 0)
#endif
{
return -EBUSY;
}
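
For reference, both dump callbacks introduced above apply the same filter; a sketch of it as a standalone predicate (my_match() is hypothetical, while the MM_DUMP_* macros and the seqno window are the ones used in the diff):

#include <stdbool.h>

static bool my_match(pid_t want, FAR struct mempool_backtrace_s *buf,
                     unsigned long seqmin, unsigned long seqmax)
{
  /* Report a block when it matches the pid filter (assigned to the
   * task, any allocation, or leaked) and was allocated inside the
   * requested sequence-number window. */

  return (MM_DUMP_ASSIGN(want, buf->pid) ||
          MM_DUMP_ALLOC(want, buf->pid) ||
          MM_DUMP_LEAK(want, buf->pid)) &&
         buf->seqno >= seqmin && buf->seqno <= seqmax;
}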