arch: armv7-a: Remove the code for CONFIG_ARCH_PGPOOL_MAPPING=n
Summary:
- Currently, CONFIG_ARCH_PGPOOL_MAPPING=y is necessary for CONFIG_BUILD_KERNEL=y.
- This commit removes the code for CONFIG_ARCH_PGPOOL_MAPPING=n.

Impact:
- None

Testing:
- Tested with sabre-6quad:netknsh_smp

Signed-off-by: Masayuki Ishikawa <Masayuki.Ishikawa@jp.sony.com>
parent 53466c6d46
commit ae378872c2
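For orientation before the diff: the two strategies involved are sketched below. This is illustrative only and not part of the patch; the helper names pgpool_vaddr, scratch_map and scratch_unmap are invented for this note, while the calls and configuration symbols mirror arm_pgvaddr() and the arm_tmpmap()/arm_tmprestore() helpers removed from pgalloc.h further down.

#include <stdint.h>

/* Indicative include: the MMU helpers and memory-map symbols below come
 * from the same armv7-a "mmu.h"/memory-map headers used by the files in
 * this patch.
 */

#include "mmu.h"

/* With CONFIG_ARCH_PGPOOL_MAPPING=y the page pool is permanently mapped,
 * so converting a page-table physical address to a virtual address is
 * pure arithmetic (mirrors arm_pgvaddr()).
 */

static inline uintptr_t pgpool_vaddr(uintptr_t paddr)
{
  return paddr - CONFIG_ARCH_PGPOOL_PBASE + CONFIG_ARCH_PGPOOL_VBASE;
}

/* The removed CONFIG_ARCH_PGPOOL_MAPPING=n fallback instead created a
 * temporary mapping in the scratch section and had to restore the saved
 * L1 entry afterwards (mirrors the deleted arm_tmpmap()/arm_tmprestore()).
 */

static inline uintptr_t scratch_map(uintptr_t paddr, uint32_t *l1save)
{
  *l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
  return ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK);
}

static inline void scratch_unmap(uint32_t l1save)
{
  mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
}

Because CONFIG_BUILD_KERNEL=y already requires CONFIG_ARCH_PGPOOL_MAPPING=y, only the arithmetic conversion is ever reachable; that is why every #ifndef/#else CONFIG_ARCH_PGPOOL_MAPPING branch below can be dropped and pgalloc.h now rejects the =n configuration with an #error.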
@@ -149,26 +149,15 @@ static int up_addrenv_initdata(uintptr_t l2table)
   irqstate_t flags;
   uint32_t *virtptr;
   uintptr_t paddr;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif

   DEBUGASSERT(l2table);
   flags = enter_critical_section();

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
   /* Get the virtual address corresponding to the physical page table
    * address
    */

   virtptr = (uint32_t *)arm_pgvaddr(l2table);
-#else
-  /* Temporarily map the page into the virtual address space */
-
-  l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-  mmu_l1_setentry(l2table & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
-  virtptr = (uint32_t *)(ARCH_SCRATCH_VBASE | (l2table & SECTION_MASK));
-#endif

   /* Invalidate D-Cache so that we read from the physical memory */

@@ -180,16 +169,9 @@ static int up_addrenv_initdata(uintptr_t l2table)
   paddr = (uintptr_t)(*virtptr) & PTE_SMALL_PADDR_MASK;
   DEBUGASSERT(paddr);

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
   /* Get the virtual address corresponding to the physical page address */

   virtptr = (uint32_t *)arm_pgvaddr(paddr);
-#else
-  /* Temporarily map the page into the virtual address space */
-
-  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
-  virtptr = (uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
-#endif

   /* Finally, after of all of that, we can initialize the tiny region at
    * the beginning of .bss/.data by setting it to zero.
@@ -204,11 +186,6 @@ static int up_addrenv_initdata(uintptr_t l2table)
   up_flush_dcache((uintptr_t)virtptr,
                   (uintptr_t)virtptr + ARCH_DATA_RESERVE_SIZE);

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  /* Restore the scratch section L1 page table entry */
-
-  mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
   leave_critical_section(flags);
   return OK;
 }

@@ -71,9 +71,6 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
   uint32_t *l2table;
   irqstate_t flags;
   uintptr_t paddr;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif
   unsigned int nmapped;
   unsigned int shmndx;

@@ -120,21 +117,11 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
   flags = enter_critical_section();
   group->tg_addrenv.shm[shmndx] = (uintptr_t *)paddr;

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
   /* Get the virtual address corresponding to the physical page
    * address.
    */

   l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-  /* Temporarily map the page into the virtual address space */
-
-  l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
-                  MMU_MEMFLAGS);
-  l2table = (uint32_t *)
-    (ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
-#endif

   /* Initialize the page table */

@@ -149,21 +136,11 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
      paddr = (uintptr_t)l1entry & ~SECTION_MASK;
      flags = enter_critical_section();

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page
       * address.
       */

      l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-      /* Temporarily map the page into the virtual address space */
-
-      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
-                      MMU_MEMFLAGS);
-      l2table = (uint32_t *)
-        (ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
-#endif
    }

  /* Map the virtual address to this physical address */

@@ -186,11 +163,6 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
                      (uintptr_t)l2table +
                      ENTRIES_PER_L2TABLE * sizeof(uint32_t));

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-      /* Restore the scratch section L1 page table entry */
-
-      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
      leave_critical_section(flags);
    }

@@ -222,9 +194,6 @@ int up_shmdt(uintptr_t vaddr, unsigned int npages)
   uint32_t *l2table;
   irqstate_t flags;
   uintptr_t paddr;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif
   unsigned int nunmapped;
   unsigned int shmndx;

@@ -260,21 +229,11 @@ int up_shmdt(uintptr_t vaddr, unsigned int npages)
      paddr = (uintptr_t)l1entry & ~SECTION_MASK;
      flags = enter_critical_section();

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page
       * address.
       */

      l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-      /* Temporarily map the page into the virtual address space */
-
-      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
-                      MMU_MEMFLAGS);
-      l2table = (uint32_t *)
-        (ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
-#endif

      /* Unmap this virtual page address.
       *
@@ -304,11 +263,6 @@ int up_shmdt(uintptr_t vaddr, unsigned int npages)
                      (uintptr_t)l2table +
                      ENTRIES_PER_L2TABLE * sizeof(uint32_t));

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-      /* Restore the scratch section L1 page table entry */
-
-      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
      leave_critical_section(flags);
    }

@@ -62,9 +62,6 @@ int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,
   irqstate_t flags;
   uintptr_t paddr;
   uint32_t *l2table;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif
   size_t nmapped;
   unsigned int npages;
   unsigned int i;
@@ -110,19 +107,9 @@ int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,

      flags = enter_critical_section();

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page address */

      l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-      /* Temporarily map the page into the virtual address space */
-
-      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
-                      MMU_MEMFLAGS);
-      l2table = (uint32_t *)(ARCH_SCRATCH_VBASE |
-                             (paddr & SECTION_MASK));
-#endif

      /* Initialize the page table */

@@ -138,9 +125,6 @@ int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,
      binfo("a new page (paddr=%x)\n", paddr);
      if (!paddr)
        {
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
          leave_critical_section(flags);
          return -ENOMEM;
        }
@@ -160,11 +144,6 @@ int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,
                      (uintptr_t)l2table +
                      ENTRIES_PER_L2TABLE * sizeof(uint32_t));

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-      /* Restore the scratch section L1 page table entry */
-
-      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
      leave_critical_section(flags);
    }

@@ -185,9 +164,6 @@ void arm_addrenv_destroy_region(uintptr_t **list, unsigned int listlen,
   irqstate_t flags;
   uintptr_t paddr;
   uint32_t *l2table;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif
   int i;
   int j;

@@ -206,21 +182,11 @@ void arm_addrenv_destroy_region(uintptr_t **list, unsigned int listlen,
    {
      flags = enter_critical_section();

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page
       * address
       */

      l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-      /* Temporarily map the page into the virtual address space */
-
-      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
-                      MMU_MEMFLAGS);
-      l2table = (uint32_t *)(ARCH_SCRATCH_VBASE |
-                             (paddr & SECTION_MASK));
-#endif

      /* Return the allocated pages to the page allocator unless we were
       * asked to keep the page data. We keep the page data only for
@@ -242,11 +208,6 @@ void arm_addrenv_destroy_region(uintptr_t **list, unsigned int listlen,
            }
        }

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-      /* Restore the scratch section L1 page table entry */
-
-      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
      leave_critical_section(flags);

      /* And free the L2 page table itself */

@@ -56,9 +56,6 @@ static uintptr_t alloc_pgtable(void)
   irqstate_t flags;
   uintptr_t paddr;
   uint32_t *l2table;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif

   /* Allocate one physical page for the L2 page table */

@@ -70,19 +67,9 @@ static uintptr_t alloc_pgtable(void)

      flags = enter_critical_section();

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page address */

      l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-      /* Temporarily map the page into the virtual address space */
-
-      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
-                      MMU_MEMFLAGS);
-      l2table = (uint32_t *)(ARCH_SCRATCH_VBASE |
-                             (paddr & SECTION_MASK));
-#endif

      /* Initialize the page table */

@@ -95,11 +82,6 @@ static uintptr_t alloc_pgtable(void)
      up_flush_dcache((uintptr_t)l2table,
                      (uintptr_t)l2table + MM_PGSIZE);

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-      /* Restore the scratch section page table entry */
-
-      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
      leave_critical_section(flags);
    }

@@ -205,9 +187,6 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
   uint32_t *l2table;
   irqstate_t flags;
   uintptr_t paddr;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif
   unsigned int index;

   binfo("tcb->pid=%d tcb->group=%p\n", tcb->pid, tcb->group);
@@ -247,21 +226,9 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)

   flags = enter_critical_section();

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
   /* Get the virtual address corresponding to the physical page address */

   l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-  /* Temporarily map the level 2 page table into the "scratch" virtual
-   * address space
-   */
-
-  l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
-                  MMU_MEMFLAGS);
-  l2table = (uint32_t *)(ARCH_SCRATCH_VBASE |
-                         (paddr & SECTION_MASK));
-#endif

   /* Back up L2 entry with physical memory */

@@ -269,9 +236,6 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
      binfo("a new page (paddr=%x)\n", paddr);
      if (paddr == 0)
        {
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
          leave_critical_section(flags);
          return 0;
        }
@@ -296,11 +260,6 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
   up_flush_dcache((uintptr_t)&l2table[index],
                   (uintptr_t)&l2table[index] + sizeof(uint32_t));

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  /* Restore the scratch L1 page table entry */
-
-  mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
   leave_critical_section(flags);
 }

@@ -57,9 +57,6 @@ uintptr_t arm_physpgaddr(uintptr_t vaddr)
   uint32_t *l2table;
   uintptr_t paddr;
   uint32_t l1entry;
-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-  uint32_t l1save;
-#endif
   int index;

   /* Check if this address is within the range of one of the virtualized user
@@ -81,19 +78,10 @@ uintptr_t arm_physpgaddr(uintptr_t vaddr)

      paddr = ((uintptr_t)l1entry & PMD_PTE_PADDR_MASK);

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address of the base of level 2 page table */

      l2table = (uint32_t *)arm_pgvaddr(paddr);
-#else
-      /* Temporarily map the page into the virtual address space */
-
-      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-      mmu_l1_setentry(paddr & ~SECTION_MASK,
-                      ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
-      l2table = (uint32_t *)(ARCH_SCRATCH_VBASE |
-                             (paddr & SECTION_MASK));
-#endif
+
      if (l2table)
        {
          /* Invalidate D-Cache line containing this virtual address so
@@ -112,12 +100,6 @@ uintptr_t arm_physpgaddr(uintptr_t vaddr)

          paddr = ((uintptr_t)l2table[index] & PTE_SMALL_PADDR_MASK);

-#ifndef CONFIG_ARCH_PGPOOL_MAPPING
-          /* Restore the scratch section L1 page table entry */
-
-          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-#endif
-
          /* Add the correct offset and return the physical address
           * corresponding to the virtual address.
           */

@@ -26,7 +26,7 @@

 #include "pgalloc.h"

-#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_PGPOOL_MAPPING)
+#ifdef CONFIG_MM_PGALLOC

 /****************************************************************************
  * Public Functions
@@ -58,4 +58,4 @@ uintptr_t arm_virtpgaddr(uintptr_t paddr)
   return 0;
 }

-#endif /* CONFIG_MM_PGALLOC && CONFIG_ARCH_PGPOOL_MAPPING */
+#endif /* CONFIG_MM_PGALLOC */

@@ -41,6 +41,10 @@
  * Pre-processor Definitions
  ****************************************************************************/

+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
+#  error "ARMv7-A needs CONFIG_ARCH_PGPOOL_MAPPING"
+#endif
+
 /****************************************************************************
  * Public Data
  ****************************************************************************/
@@ -49,39 +53,6 @@
  * Inline Functions
  ****************************************************************************/

-/****************************************************************************
- * Name: arm_pgmap
- *
- * Description:
- *   Map one page to a temporary, scratch virtual memory address
- *
- ****************************************************************************/
-
-#if !defined(CONFIG_ARCH_PGPOOL_MAPPING) && defined(CONFIG_ARCH_USE_MMU)
-static inline uintptr_t arm_tmpmap(uintptr_t paddr, uint32_t *l1save)
-{
-  *l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
-  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
-  return ((uintptr_t)ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
-}
-#endif
-
-/****************************************************************************
- * Name: arm_pgrestore
- *
- * Description:
- *   Restore any previous L1 page table mapping that was in place when
- *   arm_tmpmap() was called
- *
- ****************************************************************************/
-
-#if !defined(CONFIG_ARCH_PGPOOL_MAPPING) && defined(CONFIG_ARCH_USE_MMU)
-static inline void arm_tmprestore(uint32_t l1save)
-{
-  mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
-}
-#endif
-
 /****************************************************************************
  * Name: arm_pgvaddr
  *
@@ -92,7 +63,6 @@ static inline void arm_tmprestore(uint32_t l1save)
  *
  ****************************************************************************/

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
 static inline uintptr_t arm_pgvaddr(uintptr_t paddr)
 {
   DEBUGASSERT(paddr >= CONFIG_ARCH_PGPOOL_PBASE &&
@@ -100,7 +70,6 @@ static inline uintptr_t arm_pgvaddr(uintptr_t paddr)

   return paddr - CONFIG_ARCH_PGPOOL_PBASE + CONFIG_ARCH_PGPOOL_VBASE;
 }
-#endif

 /****************************************************************************
  * Name: arm_uservaddr
@@ -228,9 +197,7 @@ uintptr_t arm_physpgaddr(uintptr_t vaddr);
  *
  ****************************************************************************/

-#ifdef CONFIG_ARCH_PGPOOL_MAPPING
 uintptr_t arm_virtpgaddr(uintptr_t paddr);
-#endif

 #endif /* CONFIG_MM_PGALLOC */
 #endif /* __ARCH_ARM_SRC_ARMV7_A_PGALLOC_H */
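After this change, the callers touched above all reduce to the same access pattern. The sketch below is illustrative only and not part of the patch; touch_l2_table is a hypothetical name and the include list is indicative, while the individual calls mirror those in the files above.

#include <stdint.h>

#include <nuttx/irq.h>      /* irqstate_t, enter/leave_critical_section() */
#include <nuttx/cache.h>    /* up_flush_dcache() */

#include "mmu.h"            /* ENTRIES_PER_L2TABLE */
#include "pgalloc.h"        /* arm_pgvaddr() */

static void touch_l2_table(uintptr_t paddr)
{
  irqstate_t flags;
  uint32_t *l2table;

  /* The critical section still serializes access to the address
   * environment, but no scratch L1 entry has to be saved and restored.
   */

  flags = enter_critical_section();
  l2table = (uint32_t *)arm_pgvaddr(paddr);

  /* ... read or initialize the L2 entries here ... */

  up_flush_dcache((uintptr_t)l2table,
                  (uintptr_t)l2table +
                  ENTRIES_PER_L2TABLE * sizeof(uint32_t));
  leave_critical_section(flags);
}

The only difference from the pre-patch CONFIG_ARCH_PGPOOL_MAPPING=y path is that the #ifdef wrapper is gone; the =n path, which additionally mapped the page at ARCH_SCRATCH_VBASE and restored the saved L1 entry, no longer exists.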