/****************************************************************************
 * arch/arm/src/armv7-a/arm_addrenv_utils.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <string.h>
#include <assert.h>
#include <errno.h>
#include <debug.h>

#include <nuttx/pgalloc.h>
#include <nuttx/irq.h>
#include <nuttx/cache.h>

#include "mmu.h"
#include "pgalloc.h"
#include "addrenv.h"

#ifdef CONFIG_ARCH_ADDRENV

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: arm_addrenv_create_region
 *
 * Description:
 *   Create one memory region.
 *
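 * Input Parameters:
 *   list       - Receives the physical address of each allocated L2 page
 *                table
 *   listlen    - Number of entries available in 'list'
 *   vaddr      - Virtual base address of the region to be backed
 *   regionsize - Size of the region in bytes
 *   mmuflags   - MMU flags to use for each page mapping
 *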
 * Returned Value:
 *   On success, the number of pages allocated is returned.  Otherwise, a
 *   negated errno value is returned.
 *
 ****************************************************************************/

int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,
                              uintptr_t vaddr, size_t regionsize,
                              uint32_t mmuflags)
{
  irqstate_t flags;
  uintptr_t paddr;
  uint32_t *l2table;
  size_t nmapped;
  unsigned int npages;
  unsigned int i;
  unsigned int j;

  binfo("listlen=%d vaddr=%08lx regionsize=%ld, mmuflags=%08x\n",
        listlen, (unsigned long)vaddr, (unsigned long)regionsize,
        (unsigned int)mmuflags);

  /* Verify that we are configured with enough virtual address space to
   * support this memory region.
   *
   *   npages pages corresponds to (npages << MM_PGSHIFT) bytes
   *   listlen sections corresponds to (listlen << 20) bytes
   */

  npages = MM_NPAGES(regionsize);
  if (npages > (listlen << (20 - MM_PGSHIFT)))
    {
      berr("ERROR: npages=%u listlen=%u\n", npages, listlen);
      return -E2BIG;
    }

  /* Back the allocation up with physical pages and set up the level 2
   * mapping (which of course does nothing until the L2 page table is
   * hooked into the L1 page table).
   */

  nmapped = 0;
  for (i = 0; i < npages; i += ENTRIES_PER_L2TABLE)
    {
      /* Allocate one physical page for the L2 page table */

      paddr = mm_pgalloc(1);
      binfo("a new l2 page table (paddr=%x)\n", paddr);
      if (!paddr)
        {
          return -ENOMEM;
        }

      DEBUGASSERT(MM_ISALIGNED(paddr));
      list[i] = (uintptr_t *)paddr;

      flags = enter_critical_section();

      /* Get the virtual address corresponding to the physical page
       * address
       */

      l2table = (uint32_t *)arm_pgvaddr(paddr);

      /* Initialize the page table */

      memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));

      /* Back up L2 entries with physical memory */

      for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < regionsize; j++)
        {
          /* Allocate one physical page for region data */

          paddr = mm_pgalloc(1);
          binfo("a new page (paddr=%x)\n", paddr);
          if (!paddr)
            {
              leave_critical_section(flags);
              return -ENOMEM;
            }

          /* Map the virtual address to this physical address */

          set_l2_entry(l2table, paddr, vaddr, mmuflags);
          nmapped += MM_PGSIZE;
          vaddr   += MM_PGSIZE;
        }

      /* Make sure that the initialized L2 table is flushed to physical
       * memory.
       */
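
      /* Note: the hardware table walk fetches page table entries from RAM
       * and may not look in the data cache, so the freshly written table
       * has to be cleaned from the cache before it can be used.
       */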

      up_flush_dcache((uintptr_t)l2table,
                      (uintptr_t)l2table +
                      ENTRIES_PER_L2TABLE * sizeof(uint32_t));

      leave_critical_section(flags);
    }

  return npages;
}

/****************************************************************************
 * Name: arm_addrenv_destroy_region
 *
 * Description:
 *   Destroy one memory region.
 *
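 * Input Parameters:
 *   list    - The list of L2 page table physical addresses populated by
 *             arm_addrenv_create_region()
 *   listlen - The number of entries in 'list'
 *   vaddr   - The virtual base address of the region
 *   keep    - If true, free only the L2 page tables and keep the mapped
 *             data pages (used for shared memory that may still be mapped
 *             by other processes)
 *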
 ****************************************************************************/

void arm_addrenv_destroy_region(uintptr_t **list, unsigned int listlen,
                                uintptr_t vaddr, bool keep)
{
  irqstate_t flags;
  uintptr_t paddr;
  uint32_t *l2table;
  int i;
  int j;

  binfo("listlen=%d vaddr=%08lx\n", listlen, (unsigned long)vaddr);

  for (i = 0; i < listlen; vaddr += SECTION_SIZE, i++)
    {
      /* Has this page table been allocated? */

      paddr = (uintptr_t)list[i];
      if (paddr != 0)
        {
          flags = enter_critical_section();

          /* Get the virtual address corresponding to the physical page
           * address
           */

          l2table = (uint32_t *)arm_pgvaddr(paddr);

          /* Return the allocated pages to the page allocator unless we were
           * asked to keep the page data.  We keep the page data only for
           * the case of shared memory: in that case we need to tear down
           * the mapping and the page table entries, but the raw page data
           * may still be mapped by other user processes.
           */

          if (!keep)
            {
              for (j = 0; j < ENTRIES_PER_L2TABLE; j++)
                {
                  paddr = *l2table++;
                  if (paddr != 0)
                    {
                      paddr &= PTE_SMALL_PADDR_MASK;
                      mm_pgfree(paddr, 1);
                    }
                }
            }

          leave_critical_section(flags);

          /* And free the L2 page table itself */

          mm_pgfree((uintptr_t)list[i], 1);
        }
    }
}

#endif /* CONFIG_ARCH_ADDRENV */