riscv/addrenv: Do not free physical memory for SHM area

The SHM area is just mapped memory; the physical backing memory is not owned
by the process, so the process must not free it.

On ARM this is already handled because the regions are destroyed one by one,
whereas this implementation walks the page directory instead.
Ville Juven 2023-01-18 13:58:40 +02:00 committed by Xiang Xiao
parent c19b5eeb8d
commit 0922121bc0
3 changed files with 69 additions and 2 deletions
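
The mechanism in brief: when an address environment is torn down, the last
statically allocated page table is walked entry by entry and the backing
pages are returned to the page allocator, except for entries whose virtual
address falls inside the SHM window, since those physical pages are owned by
the shared-memory subsystem rather than the exiting task. The sketch below is
illustrative only; vaddr_in_shm(), pte_to_paddr(), free_page() and
destroy_level() are hypothetical stand-ins for the helpers used in the actual
patch (vaddr_is_shm(), mmu_pte_to_paddr(), mm_pgfree()).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical SHM window; in the patch this is the range
 * CONFIG_ARCH_SHM_VBASE .. ARCH_SHM_VEND.
 */

static const uintptr_t shm_vbase = 0x40000000u;
static const uintptr_t shm_vend  = 0x40100000u;

static bool vaddr_in_shm(uintptr_t vaddr)
{
  return vaddr >= shm_vbase && vaddr < shm_vend;
}

/* Walk one page table and free the backing pages, skipping mappings that
 * live in the SHM window (mapped here, but not owned by this process).
 * pte_to_paddr() and free_page() stand in for the MMU and page-allocator
 * helpers.
 */

static void destroy_level(const uintptr_t *pgtable, size_t nentries,
                          uintptr_t vbase, size_t pgsize,
                          uintptr_t (*pte_to_paddr)(uintptr_t pte),
                          void (*free_page)(uintptr_t paddr))
{
  uintptr_t vaddr = vbase;
  size_t i;

  for (i = 0; i < nentries; i++, vaddr += pgsize)
    {
      if (vaddr_in_shm(vaddr))
        {
          continue;                    /* Not ours to free */
        }

      uintptr_t paddr = pte_to_paddr(pgtable[i]);

      if (paddr != 0)
        {
          free_page(paddr);            /* Page owned by this address env */
        }
    }
}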

arch/risc-v/src/common/riscv_addrenv.c

@@ -63,6 +63,7 @@
 #include <nuttx/addrenv.h>
 #include <nuttx/arch.h>
+#include <nuttx/compiler.h>
 #include <nuttx/irq.h>
 #include <nuttx/pgalloc.h>
@@ -87,7 +88,11 @@
 /* Base address for address environment */
-#define ADDRENV_VBASE (CONFIG_ARCH_DATA_VBASE)
+#if CONFIG_ARCH_TEXT_VBASE != 0
+#  define ADDRENV_VBASE (CONFIG_ARCH_TEXT_VBASE)
+#else
+#  define ADDRENV_VBASE (CONFIG_ARCH_DATA_VBASE)
+#endif
 /* Make sure the address environment virtual address boundary is valid */
@@ -312,6 +317,30 @@ static int create_region(group_addrenv_t *addrenv, uintptr_t vaddr,
   return npages;
 }
+/****************************************************************************
+ * Name: vaddr_is_shm
+ *
+ * Description:
+ *   Check if a vaddr is part of the SHM area
+ *
+ * Input Parameters:
+ *   vaddr - Virtual address to check
+ *
+ * Returned Value:
+ *   true if it is; false if not
+ *
+ ****************************************************************************/
+
+static inline bool vaddr_is_shm(uintptr_t vaddr)
+{
+#if defined(CONFIG_ARCH_SHM_VBASE) && defined(ARCH_SHM_VEND)
+  return vaddr >= CONFIG_ARCH_SHM_VBASE && vaddr < ARCH_SHM_VEND;
+#else
+  UNUSED(vaddr);
+  return false;
+#endif
+}
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -490,6 +519,8 @@ int up_addrenv_destroy(group_addrenv_t *addrenv)
   uintptr_t *ptprev;
   uintptr_t *ptlast;
   uintptr_t paddr;
+  uintptr_t vaddr;
+  size_t pgsize;
   int i;
   int j;
@@ -500,13 +531,25 @@ int up_addrenv_destroy(group_addrenv_t *addrenv)
   __ISB();
   __DMB();
+  /* Things start from the beginning of the user virtual memory */
+
+  vaddr = ADDRENV_VBASE;
+  pgsize = mmu_get_region_size(ARCH_SPGTS);
+
   /* First destroy the allocated memory and the final level page table */
   ptprev = (uintptr_t *)riscv_pgvaddr(addrenv->spgtables[ARCH_SPGTS - 1]);
   if (ptprev)
     {
-      for (i = 0; i < ENTRIES_PER_PGT; i++)
+      for (i = 0; i < ENTRIES_PER_PGT; i++, vaddr += pgsize)
         {
+          if (vaddr_is_shm(vaddr))
+            {
+              /* Do not free memory from SHM area */
+
+              continue;
+            }
+
           ptlast = (uintptr_t *)riscv_pgvaddr(mmu_pte_to_paddr(ptprev[i]));
           if (ptlast)
             {

arch/risc-v/src/common/riscv_mmu.c

@@ -145,3 +145,10 @@ void mmu_ln_map_region(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t paddr,
       vaddr += page_size;
     }
 }
+
+size_t mmu_get_region_size(uint32_t ptlevel)
+{
+  DEBUGASSERT(ptlevel > 0 && ptlevel <= RV_MMU_PT_LEVELS);
+
+  return g_pgt_sizes[ptlevel - 1];
+}

arch/risc-v/src/common/riscv_mmu.h

@@ -405,4 +405,21 @@ void mmu_ln_restore(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t vaddr,
 void mmu_ln_map_region(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t paddr,
                        uintptr_t vaddr, size_t size, uint32_t mmuflags);
+/****************************************************************************
+ * Name: mmu_get_region_size
+ *
+ * Description:
+ *   Get (giga/mega) page size for level n.
+ *
+ * Input Parameters:
+ *   ptlevel - The translation table level, amount of levels is
+ *             MMU implementation specific
+ *
+ * Returned Value:
+ *   Region size for one page at level n.
+ *
+ ****************************************************************************/
+
+size_t mmu_get_region_size(uint32_t ptlevel);
+
 #endif /* ___ARCH_RISC_V_SRC_COMMON_RISCV_MMU_H_ */
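
A usage note on the new helper: assuming a standard Sv39 configuration, the
region sizes returned for levels 1..3 would be 1 GiB, 2 MiB and 4 KiB
respectively, so the teardown walk above advances by one 2 MiB region per
entry of the last static page table. A minimal sketch, reusing ADDRENV_VBASE,
ARCH_SPGTS and ENTRIES_PER_PGT from the patched sources:

  size_t pgsize   = mmu_get_region_size(ARCH_SPGTS); /* e.g. 2 MiB on Sv39 */
  uintptr_t vaddr = ADDRENV_VBASE;
  int i;

  for (i = 0; i < ENTRIES_PER_PGT; i++, vaddr += pgsize)
    {
      /* Entry i of the level-ARCH_SPGTS table covers [vaddr, vaddr + pgsize);
       * this is the range up_addrenv_destroy() tests against the SHM window.
       */
    }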