arm64_addrenv: Fix the number of page table levels

The VMSAv8-64 translation system has 4 page table levels in total, numbered
0-3. The address environment code assumes only 3 levels, numbered 1-3, but
this is wrong; the number of levels _utilized_ depends on the configured
VA size, CONFIG_ARM64_VA_BITS. With <= 39 bits, 3 levels are enough, while
for a larger VA range the 4th translation table level is brought into use
dynamically by shifting the base translation table level.

From arm64_mmu.c, where va_bits is the number of VA bits used in address
translations:
(va_bits <= 21)       - base level 3
(22 <= va_bits <= 30) - base level 2
(31 <= va_bits <= 39) - base level 1
(40 <= va_bits <= 48) - base level 0

The base level is what is configured as the page directory root. This also
affects the performance of address translations: with a smaller VA range,
translations are faster because the page table walk is shorter, e.g. a
39-bit VA needs three lookups per walk where a 48-bit VA needs four. The
base level selection is sketched below.
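
For illustration, a minimal sketch of how the base level falls out of the
VA width (this helper, base_pgt_level, is hypothetical and not the literal
arm64_mmu.c code; it assumes a 4KB granule, where each level resolves 9 VA
bits on top of the 12-bit page offset):

  /* Hypothetical helper mirroring the table above. */

  static inline unsigned int base_pgt_level(unsigned int va_bits)
  {
    if (va_bits <= 21)
      {
        return 3;
      }
    else if (va_bits <= 30)
      {
        return 2;
      }
    else if (va_bits <= 39)
      {
        return 1;
      }

    return 0; /* 40..48 bits: the full 4-level walk */
  }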
Author: Ville Juven, 2024-09-11 15:09:56 +03:00
Committed by: Alan C. Assis
Parent: 72acec7275
Commit: a559f3495a
7 changed files with 26 additions and 17 deletions


@@ -45,9 +45,9 @@
 # error Only pages sizes of 4096 are currently supported (CONFIG_ARCH_ADDRENV)
 #endif
-/* All implementations have 3 levels of page tables */
-#define ARCH_PGT_MAX_LEVELS (3)
+/* All implementations have 4 levels of page tables */
+#define ARCH_PGT_MAX_LEVELS (4)
 #define ARCH_SPGTS (ARCH_PGT_MAX_LEVELS - 1)
 #endif /* CONFIG_ARCH_ADDRENV */


@@ -71,7 +71,7 @@ static int modify_region(uintptr_t vstart, uintptr_t vend, uintptr_t setmask)
   for (vaddr = vstart; vaddr < vend; vaddr += MM_PGSIZE)
     {
       for (ptlevel = 1, lnvaddr = l1vaddr;
-           ptlevel < MMU_PGT_LEVELS;
+           ptlevel < MMU_PGT_LEVEL_MAX;
            ptlevel++)
         {
           paddr = mmu_pte_to_paddr(mmu_ln_getentry(ptlevel, lnvaddr, vaddr));
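
Why the bound is renamed rather than left alone: MMU_PGT_LEVELS is bumped
to 4U later in this commit, so keeping it as the loop limit would walk one
level too far. A sketch of the intended invariant (simplified from the
loop above, assuming a base level of 1, i.e. CONFIG_ARM64_VA_BITS <= 39):

  /* The walk descends only the intermediate levels; it stops before
   * MMU_PGT_LEVEL_MAX (3), leaving lnvaddr addressing the leaf table.
   * Using the level count (now 4) as the bound would dereference a
   * level-3 entry as if it were another table pointer.
   */

  for (ptlevel = 1, lnvaddr = l1vaddr;
       ptlevel < MMU_PGT_LEVEL_MAX;
       ptlevel++)
    {
      paddr   = mmu_pte_to_paddr(mmu_ln_getentry(ptlevel, lnvaddr, vaddr));
      lnvaddr = arm64_pgvaddr(paddr);
    }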


@@ -90,7 +90,7 @@ uintptr_t up_addrenv_find_page(arch_addrenv_t *addrenv, uintptr_t vaddr)
   /* Make table walk to find the page */
-  for (ptlevel = 1, lnvaddr = pgdir; ptlevel < MMU_PGT_LEVELS; ptlevel++)
+  for (ptlevel = 1, lnvaddr = pgdir; ptlevel < MMU_PGT_LEVEL_MAX; ptlevel++)
     {
       paddr = mmu_pte_to_paddr(mmu_ln_getentry(ptlevel, lnvaddr, vaddr));
       lnvaddr = arm64_pgvaddr(paddr);


@@ -134,7 +134,7 @@ int arm64_map_pages(arch_addrenv_t *addrenv, uintptr_t *pages,
   uintptr_t ptlevel;
   uintptr_t paddr;
-  ptlevel = MMU_PGT_LEVELS;
+  ptlevel = MMU_PGT_LEVEL_MAX;
   /* Add the references to pages[] into the caller's address environment */


@@ -82,7 +82,7 @@
 #define XLAT_TABLE_SIZE (1U << XLAT_TABLE_SIZE_SHIFT)
 #define XLAT_TABLE_ENTRY_SIZE_SHIFT 3U /* Each table entry is 8 bytes */
-#define XLAT_TABLE_LEVEL_MAX MMU_PGT_LEVELS
+#define XLAT_TABLE_LEVEL_MAX MMU_PGT_LEVEL_MAX
 #define XLAT_TABLE_ENTRIES_SHIFT \
   (XLAT_TABLE_SIZE_SHIFT - XLAT_TABLE_ENTRY_SIZE_SHIFT)
@@ -213,6 +213,7 @@ static const struct arm_mmu_config g_mmu_nxrt_config =
 static const size_t g_pgt_sizes[] =
 {
+  MMU_L0_PAGE_SIZE,
   MMU_L1_PAGE_SIZE,
   MMU_L2_PAGE_SIZE,
   MMU_L3_PAGE_SIZE
@@ -716,7 +717,8 @@ void mmu_ln_setentry(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t paddr,
   uintptr_t *lntable = (uintptr_t *)lnvaddr;
   uint32_t index;
-  DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX);
+  DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL &&
+              ptlevel <= XLAT_TABLE_LEVEL_MAX);
   /* Calculate index for lntable */
@@ -742,7 +744,8 @@ uintptr_t mmu_ln_getentry(uint32_t ptlevel, uintptr_t lnvaddr,
   uintptr_t *lntable = (uintptr_t *)lnvaddr;
   uint32_t index;
-  DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX);
+  DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL &&
+              ptlevel <= XLAT_TABLE_LEVEL_MAX);
   index = XLAT_TABLE_VA_IDX(vaddr, ptlevel);
@@ -760,7 +763,8 @@ void mmu_ln_restore(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t vaddr,
   uintptr_t *lntable = (uintptr_t *)lnvaddr;
   uint32_t index;
-  DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX);
+  DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL &&
+              ptlevel <= XLAT_TABLE_LEVEL_MAX);
   index = XLAT_TABLE_VA_IDX(vaddr, ptlevel);
@@ -778,7 +782,8 @@ void mmu_ln_restore(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t vaddr,
 size_t mmu_get_region_size(uint32_t ptlevel)
 {
-  DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX);
+  DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL &&
+              ptlevel <= XLAT_TABLE_LEVEL_MAX);
-  return g_pgt_sizes[ptlevel - 1];
+  return g_pgt_sizes[ptlevel];
 }
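
With MMU_L0_PAGE_SIZE prepended to g_pgt_sizes above, the array index now
equals the level number, which is why the '- 1' disappears from the return
statement. A hypothetical sanity check (not part of the commit) of the
resulting mapping:

  #include <assert.h>

  static void check_pgt_sizes(void)
  {
    assert(mmu_get_region_size(0) == MMU_L0_PAGE_SIZE); /* 512G */
    assert(mmu_get_region_size(1) == MMU_L1_PAGE_SIZE); /* 1G   */
    assert(mmu_get_region_size(2) == MMU_L2_PAGE_SIZE); /* 2M   */
    assert(mmu_get_region_size(3) == MMU_L3_PAGE_SIZE); /* 4K   */
  }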


@@ -239,13 +239,15 @@
 /* Amount of page table levels */
-#define MMU_PGT_LEVELS (3U)
+#define MMU_PGT_LEVELS (4U)
+#define MMU_PGT_LEVEL_MAX (3U) /* Levels go from 0-3 */
 /* Page sizes per page table level */
-#define MMU_L1_PAGE_SIZE (0x40000000) /* 1G */
-#define MMU_L2_PAGE_SIZE (0x200000)   /* 2M */
-#define MMU_L3_PAGE_SIZE (0x1000)     /* 4K */
+#define MMU_L0_PAGE_SIZE (0x8000000000) /* 512G */
+#define MMU_L1_PAGE_SIZE (0x40000000)   /* 1G */
+#define MMU_L2_PAGE_SIZE (0x200000)     /* 2M */
+#define MMU_L3_PAGE_SIZE (0x1000)       /* 4K */
 /* Flags for user page tables */
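
The four sizes are not arbitrary: with a 4KB granule and 512 entries per
table, each level maps 512 times more address space than the next. A
sketch of the relation (PGSIZE_AT_LEVEL is illustrative, not part of the
header):

  /* Every level covers 9 more VA bits than the one below it:
   *   level 3: 0x1000                       (4K)
   *   level 2: 0x1000 << 9  == 0x200000     (2M)
   *   level 1: 0x1000 << 18 == 0x40000000   (1G)
   *   level 0: 0x1000 << 27 == 0x8000000000 (512G)
   */

  #define PGSIZE_AT_LEVEL(n) (0x1000ULL << (9 * (3 - (n))))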


@@ -93,6 +93,7 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
   struct tcb_s *tcb = this_task();
   struct arch_addrenv_s *addrenv;
   uintptr_t ptlast;
+  uintptr_t ptlevel;
   uintptr_t paddr;
   uintptr_t vaddr;
@@ -114,7 +115,8 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
   /* Start mapping from the old heap break address */
-  vaddr = brkaddr;
+  vaddr   = brkaddr;
+  ptlevel = MMU_PGT_LEVEL_MAX;
   /* Sanity checks */
@@ -145,7 +147,7 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
   /* Then add the reference */
-  mmu_ln_setentry(MMU_PGT_LEVELS, ptlast, paddr, vaddr, MMU_UDATA_FLAGS);
+  mmu_ln_setentry(ptlevel, ptlast, paddr, vaddr, MMU_UDATA_FLAGS);
   vaddr += MM_PGSIZE;
 }
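
The new ptlevel local also records a distinction the old code hid: the
leaf mapping level is 3, which the old MMU_PGT_LEVELS (3U) matched only by
coincidence. A minimal illustration, assuming the definitions introduced
above:

  /* Level count vs. highest level index after this commit:
   *   MMU_PGT_LEVELS    == 4U -> how many table levels exist
   *   MMU_PGT_LEVEL_MAX == 3U -> index of the last (leaf) level
   * Passing the count to mmu_ln_setentry() would now name a level
   * that does not exist.
   */

  _Static_assert(MMU_PGT_LEVEL_MAX == MMU_PGT_LEVELS - 1,
                 "the leaf level is the last of the four levels");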