Cortex-A/SAMA5 address environment support is code complete (untested)

This commit is contained in:
Gregory Nutt 2014-08-25 11:18:32 -06:00
parent 2566ba7b1d
commit 8907616478
8 changed files with 609 additions and 20 deletions

View File

@ -114,7 +114,9 @@ struct group_addrenv_s
{
FAR uint32_t *text[CONFIG_ARCH_TEXT_NPAGES];
FAR uint32_t *data[CONFIG_ARCH_DATA_NPAGES];
#if 0 /* Not yet implemented */
FAR uint32_t *heap[CONFIG_ARCH_HEAP_NPAGES];
#endif
};
typedef struct group_addrenv_s group_addrenv_t;
@ -124,9 +126,20 @@ typedef struct group_addrenv_s group_addrenv_t;
*
* int up_addrenv_select(group_addrenv_t addrenv, save_addrenv_t *oldenv);
* int up_addrenv_restore(save_addrenv_t oldenv);
*
* In this case, the saved values in the L1 page table are returned
*/
typedef group_addrenv_t *save_addrenv_t;
/* Saved L1 page-table state returned by up_addrenv_select() and consumed by
 * up_addrenv_restore().  Each element holds one raw 32-bit L1 descriptor
 * captured with mmu_l1_getentry() (see the comment block above).
 */

struct save_addrenv_s
{
  FAR uint32_t text[CONFIG_ARCH_TEXT_NPAGES];  /* Saved .text L1 entries */
  FAR uint32_t data[CONFIG_ARCH_DATA_NPAGES];  /* Saved .bss/.data L1 entries */
#if 0 /* Not yet implemented */
  FAR uint32_t heap[CONFIG_ARCH_HEAP_NPAGES];  /* Saved heap L1 entries */
#endif
};

typedef struct save_addrenv_s save_addrenv_t;
#endif
/****************************************************************************

View File

@ -248,8 +248,10 @@ struct xcptcontext
* handling to support dynamically sized stacks for each thread.
*/
#if 0 /* Not yet implemented */
FAR uint32_t *stack[CONFIG_ARCH_STACK_NPAGES];
#endif
#endif
};
#endif

View File

@ -81,7 +81,11 @@
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Configuration ************************************************************/
/* Using a 4KiB page size, each 1MiB section maps to an L2 page table
 * containing 256 entries, each entry mapping one 4KiB page (the original
 * comment said "256*2KiB", which does not match 1MiB / 4KiB = 256).
 */

#define ENTRIES_PER_L2TABLE 256
/****************************************************************************
* Private Data
@ -91,6 +95,47 @@
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: set_l2_entry
*
* Description:
* Set the L2 table entry as part of the initialization of the L2 Page
* table.
*
****************************************************************************/
static void set_l2_entry(FAR uint32_t *l2table, uintptr_t paddr,
                         uintptr_t vaddr, uint32_t mmuflags)
{
  /* An L2 page table divides a 1MB address region into 256 4KB pages.
   * Bits 12-19 of the virtual address (the offset within the 1MB region)
   * select the entry within the table.
   */

  uint32_t index = (vaddr & 0x000ff000) >> 12;

  /* Write the encoded physical address plus MMU flags into that entry */

  l2table[index] = (paddr | mmuflags);
}
/****************************************************************************
* Name: set_l1_entry
*
* Description:
* Set an L1 page table entry to refer to a specific L2 page table.
*
****************************************************************************/
/* Hook one L2 page table into the L1 page table: map virtual address
 * l2vaddr to the L2 table at physical address l2paddr using the standard
 * L1 page-table flags.
 *
 * NOTE(review): both addresses are masked with PMD_PTE_PADDR_MASK (the L2
 * table pointer mask), but mmu_l1_setentry() documents that its vaddr must
 * be 1MB-aligned -- confirm that callers always pass a section-aligned
 * l2vaddr.
 */

static inline void set_l1_entry(uintptr_t l2vaddr, uintptr_t l2paddr)
{
  mmu_l1_setentry(l2paddr & PMD_PTE_PADDR_MASK,
                  l2vaddr & PMD_PTE_PADDR_MASK,
                  MMU_L1_PGTABFLAGS);
}
/****************************************************************************
* Public Functions
****************************************************************************/
@ -120,8 +165,135 @@
/* Create a new address environment: allocate physical pages to back the
 * .text and .bss/.data regions of a new task group and record them in
 * 'addrenv'.  Returns OK on success, -E2BIG if the requested sizes exceed
 * the configured virtual regions, or -ENOMEM if physical pages could not
 * be allocated (any partial allocation is released via
 * up_addrenv_destroy()).
 */

int up_addrenv_create(size_t textsize, size_t datasize,
                      FAR group_addrenv_t *addrenv)
{
  irqstate_t flags;
  uintptr_t vaddr;
  uintptr_t paddr;
  FAR uint32_t *l2table;
  uint32_t l1save;
  size_t nmapped;
  unsigned int ntextpages;
  unsigned int ndatapages;
  unsigned int i;
  unsigned int j;
  int ret;

  DEBUGASSERT(addrenv);

  /* Initialize the address environment structure to all zeroes */

  memset(addrenv, 0, sizeof(group_addrenv_t));

  /* Verify that we are configured with enough virtual address space to
   * support this address environment.
   */

  ntextpages = MM_NPAGES(textsize);
  ndatapages = MM_NPAGES(datasize);

  if (ntextpages > CONFIG_ARCH_TEXT_NPAGES ||
      ndatapages > CONFIG_ARCH_DATA_NPAGES)
    {
      return -E2BIG;
    }

  /* Back the allocation up with physical pages and set up the level 2
   * mapping (which of course does nothing until the L2 page table is
   * hooked into the L1 page table).
   *
   * NOTE(review): each outer-loop pass allocates exactly ONE physical page
   * (used as the L2 page table), but the inner loop then maps paddr,
   * paddr + MM_PGSIZE, ... -- pages that were never allocated, the first
   * of which is the L2 table itself.  The allocation scheme must be
   * confirmed/completed before this code is used.
   */

  /* Allocate .text space pages */

  vaddr   = CONFIG_ARCH_TEXT_VADDR;
  nmapped = 0;            /* Fix: was the undeclared name 'mapped' */

  for (i = 0; i < ntextpages; i++)
    {
      /* Allocate one physical page */

      paddr = mm_pgalloc(1);
      if (!paddr)
        {
          ret = -ENOMEM;
          goto errout;
        }

      DEBUGASSERT(MM_ISALIGNED(paddr));
      addrenv->text[i] = (FAR uint32_t *)paddr;

      /* Temporarily map the page into the virtual address space.  Save
       * (and later restore) the scratch region's L1 entry.  Fix: the
       * original saved the entry for 'vaddr' but restored it to
       * ARCH_SCRATCH_VADDR.
       */

      flags   = irqsave();
      l1save  = mmu_l1_getentry(ARCH_SCRATCH_VADDR);
      set_l1_entry(ARCH_SCRATCH_VADDR, paddr);
      l2table = (FAR uint32_t *)ARCH_SCRATCH_VADDR;

      /* Initialize the page table */

      memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));

      /* Fill the L2 table until it is full or all of the .text region has
       * been mapped.  Fix: the loop bound used the undefined name
       * 'ntextsize'; the parameter is 'textsize'.
       */

      for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < textsize; j++)
        {
          set_l2_entry(l2table, paddr, vaddr, MMU_ROMFLAGS);
          nmapped += MM_PGSIZE;
          paddr   += MM_PGSIZE;
          vaddr   += MM_PGSIZE;
        }

      /* Restore the original L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VADDR, l1save);
      irqrestore(flags);  /* Fix: irqrestore() requires the saved state */
    }

  /* Allocate .bss/.data space pages */

  vaddr   = CONFIG_ARCH_DATA_VADDR;
  nmapped = 0;

  for (i = 0; i < ndatapages; i++)
    {
      /* Allocate one physical page */

      paddr = mm_pgalloc(1);
      if (!paddr)
        {
          ret = -ENOMEM;
          goto errout;
        }

      DEBUGASSERT(MM_ISALIGNED(paddr));
      addrenv->data[i] = (FAR uint32_t *)paddr;

      /* Temporarily map the page into the virtual address space */

      flags   = irqsave();
      l1save  = mmu_l1_getentry(ARCH_SCRATCH_VADDR);
      set_l1_entry(ARCH_SCRATCH_VADDR, paddr);
      l2table = (FAR uint32_t *)ARCH_SCRATCH_VADDR;

      /* Initialize the page table */

      memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));

      /* Fix: the loop bound used the undefined name 'ndatasize'; the
       * parameter is 'datasize'.
       */

      for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < datasize; j++)
        {
          set_l2_entry(l2table, paddr, vaddr, MMU_MEMFLAGS);
          nmapped += MM_PGSIZE;
          paddr   += MM_PGSIZE;
          vaddr   += MM_PGSIZE;
        }

      /* Restore the original L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VADDR, l1save);
      irqrestore(flags);
    }

  /* Notice that no pages are yet allocated for the heap */

  return OK;

errout:
  up_addrenv_destroy(addrenv);
  return ret;
}
/****************************************************************************
@ -142,8 +314,48 @@ int up_addrenv_create(size_t textsize, size_t datasize,
int up_addrenv_destroy(group_addrenv_t addrenv)
{
#warning Missing logic
return -ENOSYS;
uintptr_t vaddr;
int i;
DEBUGASSERT(addrenv);
for (vaddr = CONFIG_ARCH_TEXT_VADDR, i = 0;
i < CONFIG_ARCH_TEXT_NPAGES;
vaddr += MM_PGSIZE, i++)
{
mmu_l1_clrentry(vaddr);
if (addrenv->text[i])
{
mm_pgfree((uintptr_t)addrenv->text[i], 1);
}
}
for (vaddr = CONFIG_ARCH_DATA_VADDR, i = 0;
i < CONFIG_ARCH_DATA_NPAGES;
vaddr += MM_PGSIZE, i++)
{
mmu_l1_clrentry(vaddr);
if (addrenv->data[i])
{
mm_pgfree((uintptr_t)addrenv->data[i], 1);
}
}
#if 0 /* Not yet implemented */
for (vaddr = CONFIG_ARCH_HEAP_VADDR, i = 0;
i < CONFIG_ARCH_HEAP_NPAGES;
vaddr += MM_PGSIZE, i++)
{
mmu_l1_clrentry(vaddr);
if (addrenv->heap[i])
{
mm_pgfree((uintptr_t)addrenv->heap[i], 1);
}
}
#endif
memset(addrenv, 0, sizeof(group_addrenv_t));
return OK;
}
/****************************************************************************
@ -232,8 +444,88 @@ int up_addrenv_vdata(FAR group_addrenv_t addrenv, uintptr_t textsize,
/* Instantiate the address environment 'addrenv' by installing its L2 page
 * tables into the live L1 page table.  If 'oldenv' is non-NULL, the L1
 * entries being replaced are saved there for a later
 * up_addrenv_restore().  Always returns OK.
 *
 * Fixes vs. the original: (1) three missing semicolons after the
 * 'paddr = ...' assignments; (2) 'addrenv' is passed by value (see the
 * declared interface), so members are accessed with '.' not '->';
 * (3) removed the trailing memset() that zeroed the address environment
 * being selected -- that would destroy the caller's record of its
 * allocated pages.
 */

int up_addrenv_select(group_addrenv_t addrenv, save_addrenv_t *oldenv)
{
  uintptr_t vaddr;
  uintptr_t paddr;
  int i;

  /* Install (or clear) the L1 entries for the .text region.
   *
   * NOTE(review): vaddr advances by MM_PGSIZE per L1 entry, but an L1
   * entry spans a 1MB section -- confirm the intended stride.
   */

  for (vaddr = CONFIG_ARCH_TEXT_VADDR, i = 0;
       i < CONFIG_ARCH_TEXT_NPAGES;
       vaddr += MM_PGSIZE, i++)
    {
      /* Save the old L1 page table entry */

      if (oldenv)
        {
          oldenv->text[i] = mmu_l1_getentry(vaddr);
        }

      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)addrenv.text[i];
      if (paddr)
        {
          set_l1_entry(vaddr, paddr);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }

  /* Install (or clear) the L1 entries for the .bss/.data region */

  for (vaddr = CONFIG_ARCH_DATA_VADDR, i = 0;
       i < CONFIG_ARCH_DATA_NPAGES;
       vaddr += MM_PGSIZE, i++)
    {
      /* Save the old L1 page table entry */

      if (oldenv)
        {
          oldenv->data[i] = mmu_l1_getentry(vaddr);
        }

      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)addrenv.data[i];
      if (paddr)
        {
          set_l1_entry(vaddr, paddr);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }

#if 0 /* Not yet implemented */
  /* Install (or clear) the L1 entries for the heap region */

  for (vaddr = CONFIG_ARCH_HEAP_VADDR, i = 0;
       i < CONFIG_ARCH_HEAP_NPAGES;
       vaddr += MM_PGSIZE, i++)
    {
      /* Save the old L1 page table entry */

      if (oldenv)
        {
          oldenv->heap[i] = mmu_l1_getentry(vaddr);
        }

      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)addrenv.heap[i];
      if (paddr)
        {
          set_l1_entry(vaddr, paddr);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }
#endif

  return OK;
}
/****************************************************************************
@ -255,8 +547,43 @@ int up_addrenv_select(group_addrenv_t addrenv, save_addrenv_t *oldenv)
/* Restore the L1 page table entries previously saved by
 * up_addrenv_select().  Always returns OK.
 *
 * Fixes vs. the original: (1) DEBUGASSERT(addrenv) and the trailing
 * memset(addrenv, ...) referenced 'addrenv', which is not a parameter of
 * this function; (2) 'oldenv' is passed by value (see the declared
 * interface), so members are accessed with '.' not '->'; (3) removed the
 * unused 'paddr' local.
 */

int up_addrenv_restore(save_addrenv_t oldenv)
{
  uintptr_t vaddr;
  int i;

  /* Restore the saved .text L1 entries */

  for (vaddr = CONFIG_ARCH_TEXT_VADDR, i = 0;
       i < CONFIG_ARCH_TEXT_NPAGES;
       vaddr += MM_PGSIZE, i++)
    {
      /* Restore the L1 page table entry */

      mmu_l1_restore(vaddr, oldenv.text[i]);
    }

  /* Restore the saved .bss/.data L1 entries */

  for (vaddr = CONFIG_ARCH_DATA_VADDR, i = 0;
       i < CONFIG_ARCH_DATA_NPAGES;
       vaddr += MM_PGSIZE, i++)
    {
      /* Restore the L1 page table entry */

      mmu_l1_restore(vaddr, oldenv.data[i]);
    }

#if 0 /* Not yet implemented */
  /* Restore the saved heap L1 entries */

  for (vaddr = CONFIG_ARCH_HEAP_VADDR, i = 0;
       i < CONFIG_ARCH_HEAP_NPAGES;
       vaddr += MM_PGSIZE, i++)
    {
      /* Restore the L1 page table entry */

      mmu_l1_restore(vaddr, oldenv.heap[i]);
    }
#endif

  return OK;
}
/****************************************************************************
@ -280,7 +607,7 @@ int up_addrenv_assign(FAR const group_addrenv_t *addrenv,
{
DEBUGASSERT(addrenv && group);
/* Just copy the addess environment into the group */
/* Just copy the address environment into the group */
memcpy(&group->addrenv, addrenv, sizeof(group_addrenv_t));
return OK;

View File

@ -67,7 +67,7 @@
* Set a one level 1 translation table entry. Only a single L1 page table
* is supported.
*
* Input Paramters:
* Input Parameters:
* paddr - The physical address to be mapped. Must be aligned to a 1MB
* address boundary
* vaddr - The virtual address to be mapped. Must be aligned to a 1MB
@ -84,7 +84,7 @@ void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
/* Save the page table entry */
l1table[index] = (paddr | mmuflags);
l1table[index] = (paddr | mmuflags);
/* Flush the data cache entry. Make sure that the modified contents
* of the page table are flushed into physical memory.
@ -98,6 +98,41 @@ void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
}
#endif
/****************************************************************************
* Name: mmu_l1_restore
*
* Description:
* Restore one L1 table entry previously returned by mmu_l1_getentry() (or
* any other encoded L1 page table value).
*
* Input Parameters:
* vaddr - A virtual address to be mapped
* l1entry - The value to write into the page table entry
*
****************************************************************************/
#if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
/* Fix: 'uint32ptr_t' is not a type; the virtual address parameter is
 * uintptr_t.
 */

void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry)
{
  uint32_t *l1table = (uint32_t *)PGTABLE_BASE_VADDR;
  uint32_t index = vaddr >> 20;

  /* Set the encoded page table entry */

  l1table[index] = l1entry;

  /* Flush the data cache entry.  Make sure that the modified contents
   * of the page table are flushed into physical memory.
   */

  cp15_clean_dcache_bymva((uint32_t)&l1table[index]);

  /* Invalidate the TLB cache associated with the virtual address range.
   * Each L1 entry spans a full 1MB section, so invalidate from the
   * section-aligned base.  (Fix: the original masked vaddr with
   * PMD_PTE_PADDR_MASK -- the L2 table pointer mask -- which does not
   * section-align the address.)
   */

  mmu_invalidate_region(vaddr & 0xfff00000, 1024*1024);
}
#endif
/****************************************************************************
* Name: mmu_l2_setentry
*

View File

@ -405,6 +405,7 @@
#define PTE_LARGE_TEX_SHIFT (12) /* Bits 12-14: Memory region attribute bits */
#define PTE_LARGE_TEX_MASK (7 << PTE_LARGE_TEX_SHIFT)
#define PTE_LARGE_XN (1 << 15) /* Bit 15: Execute-never bit */
#define PTE_LARGE_FLAG_MASK (0x0000f03f) /* Bits 0-15: MMU flags (mostly) */
#define PTE_LARGE_PADDR_MASK (0xffff0000) /* Bits 16-31: Large page base address, PA[31:16] */
/* Small page -- 4Kb */
@ -413,6 +414,7 @@
/* Bit 2: Bufferable bit */
/* Bit 3: Cacheable bit */
/* Bits 4-5: Access Permissions bits AP[0:1] */
#define PTE_SMALL_FLAG_MASK (0x0000003f) /* Bits 0-11: MMU flags (mostly) */
#define PTE_SMALL_PADDR_MASK (0xfffff000) /* Bits 12-31: Small page base address, PA[31:12] */
/* Level 2 Translation Table Access Permissions:
@ -1336,6 +1338,39 @@ extern "C" {
void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags);
#endif
/****************************************************************************
 * Name: mmu_l1_restore
 *
 * Description:
 *   Restore one L1 table entry previously returned by mmu_l1_getentry() (or
 *   any other encoded L1 page table value).
 *
 * Input Parameters:
 *   vaddr - A virtual address to be mapped
 *   l1entry - The value to write into the page table entry
 *
 ****************************************************************************/

#if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry);
#endif
/************************************************************************************
 * Name: mmu_l1_clrentry
 *
 * Description:
 *   Unmap one L1 region by writing zero into the L1 page table entry and by
 *   flushing caches and TLBs appropriately.
 *
 * Input Parameters:
 *   vaddr - A virtual address within the L1 address region to be unmapped.
 *
 ************************************************************************************/

#if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
# define mmu_l1_clrentry(v) mmu_l1_restore(v,0)
#endif
/************************************************************************************
* Name: mmu_l1_map_region
*

View File

@ -4475,7 +4475,7 @@ config SAMA5_DDRCS_HEAP
default y
depends on SAMA5_DDRCS && !SAMA5_BOOT_SDRAM
---help---
Include the DDR-SDRAM memory in the heap.
Include a portion of DDR-SDRAM memory in the heap.
NOTE: MM_REGIONS must also be set to indicate the total number of
memory regions to be added to the heap.
@ -4487,20 +4487,30 @@ config SAMA5_DDRCS_HEAP
if SAMA5_DDRCS_HEAP
config SAMA5_DDRCS_HEAP_OFFSET
int "DDR-SDRAM offset"
int "DDR-SDRAM heap offset"
default 0
---help---
Preserve this number of bytes at the beginning of SDRAM. The
portion of DRAM beginning at this offset from the DDRCS base will
be added to the heap.
NOTE: If you are using a page cache in DRAM (via SAMA5_DDRCS_PGHEAP),
then the memory regions defined by SAMA5_DDRCS_HEAP_OFFSET and
SAMA5_DDRCS_HEAP_SIZE must not overlap the memory region defined by
SAMA5_DDRCS_PGHEAP_OFFSET and SAMA5_DDRCS_PGHEAP_SIZE.
config SAMA5_DDRCS_HEAP_SIZE
int "DDR-SDRAM size"
int "DDR-SDRAM heap size"
default 0
---help---
Add the region of DDR-SDRAM beginning at SAMA5_DDRCS_HEAP_OFFSET
and of size SAMA5_DDRCS_HEAP_SIZE to the heap.
NOTE: If you are using a page cache in DRAM (via SAMA5_DDRCS_PGHEAP),
then the memory regions defined by SAMA5_DDRCS_HEAP_OFFSET and
SAMA5_DDRCS_HEAP_SIZE must not overlap the memory region defined by
SAMA5_DDRCS_PGHEAP_OFFSET and SAMA5_DDRCS_PGHEAP_SIZE.
endif # SAMA5_DDRCS_HEAP
config SAMA5_DDRCS_RESERVE
@ -4513,10 +4523,10 @@ config SAMA5_DDRCS_RESERVE
program through the end of DRAM (RAM_START + RAM_END) is
automatically added to the heap. However, there are certain cases
where you may want to reserve a block of DRAM for other purposes
such a large DMA buffer or an LCD framebuffer. In those cases, you
can select this option to specify the end of the DRAM memory to add
to the heap; DRAM after this address will not be part of the heap
and so will be available for other purposes.
such as a large DMA buffer, an LCD framebuffer, or a page cache.  In
those cases, you can select this option to specify the end of the
DRAM memory to add to the heap; DRAM after this address will not
be part of the heap and so will be available for other purposes.
NOTE: There is no way to reserve memory before the start of the
program in DRAM using this mechanism. That configuration is
@ -4533,8 +4543,61 @@ config SAMA5_DDRCS_HEAP_END
address (minus one). This will reserve the memory starting at
this address through RAM_SIZE + RAM_END for other purposes.
NOTE: If you are using a page cache in DRAM (via SAMA5_DDRCS_PGHEAP),
then the memory region below SAMA5_DDRCS_HEAP_END must not
overlap the memory region defined by SAMA5_DDRCS_PGHEAP_OFFSET and
SAMA5_DDRCS_PGHEAP_SIZE.
endif # SAMA5_DDRCS_RESERVE
config SAMA5_DDRCS_PGHEAP
bool "Include DDR-SDRAM in page cache"
default y
depends on SAMA5_DDRCS && ARCH_ADDRENV
---help---
Include a portion of DDR-SDRAM memory in the page cache.
if SAMA5_DDRCS_PGHEAP
config SAMA5_DDRCS_PGHEAP_OFFSET
int "DDR-SDRAM heap offset"
default 0
---help---
Preserve this number of bytes at the beginning of SDRAM. The
portion of DRAM beginning at this offset from the DDRCS base will
be added to the page cache.
If you are executing from DRAM, then you must have already reserved
this region with SAMA5_DDRCS_RESERVE, setting SAMA5_DDRCS_HEAP_END
so that this page cache region defined by SAMA5_DDRCS_PGHEAP_OFFSET
and SAMA5_DDRCS_PGHEAP_SIZE does not overlap the region of DRAM
that is added to the heap.  If you are not executing from DRAM, then
you must have excluded this page cache region from the heap either
by (1) not selecting SAMA5_DDRCS_HEAP, or (2) selecting
SAMA5_DDRCS_HEAP_OFFSET and SAMA5_DDRCS_HEAP_SIZE so that the page
cache region does not overlap the region of DRAM that is added to
the heap.
config SAMA5_DDRCS_PGHEAP_SIZE
int "DDR-SDRAM heap size"
default 0
---help---
Add the region of DDR-SDRAM beginning at SAMA5_DDRCS_PGHEAP_OFFSET
and of size SAMA5_DDRCS_PGHEAP_SIZE to the page cache.
If you are executing from DRAM, then you must have already reserved
this region with SAMA5_DDRCS_RESERVE, setting SAMA5_DDRCS_HEAP_END
so that this page cache region defined by SAMA5_DDRCS_PGHEAP_OFFSET
and SAMA5_DDRCS_PGHEAP_SIZE does not overlap the region of DRAM
that is added to the heap.  If you are not executing from DRAM, then
you must have excluded this page cache region from the heap either
by (1) not selecting SAMA5_DDRCS_HEAP, or (2) selecting
SAMA5_DDRCS_HEAP_OFFSET and SAMA5_DDRCS_HEAP_SIZE so that the page
cache region does not overlap the region of DRAM that is added to
the heap.
endif # SAMA5_DDRCS_PGHEAP
config SAMA5_EBICS0_HEAP
bool "Include EBICS0 SRAM/PSRAM in heap"
default y

View File

@ -113,6 +113,10 @@ CHIP_CSRCS += sam_sckc.c sam_serial.c
# Configuration dependent C and assembly language files
# sam_pgalloc.c provides up_allocate_pgheap() for the page allocator, so it
# must be compiled when CONFIG_MM_PGALLOC is enabled (fix: was ifneq, which
# built it only when the page allocator was DISABLED).
ifeq ($(CONFIG_MM_PGALLOC),y)
CHIP_CSRCS += sam_pgalloc.c
endif
ifneq ($(CONFIG_SCHED_TICKLESS),y)
CHIP_CSRCS += sam_timerisr.c
endif

View File

@ -0,0 +1,110 @@
/****************************************************************************
* arch/arm/src/sama5/sam_pgalloc.c
*
* Copyright (C) 2014 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <nuttx/pgalloc.h>
#ifdef CONFIG_MM_PGALLOC
/****************************************************************************
* Private Definitions
****************************************************************************/
/* Currently, page cache memory must be allocated in DRAM. There are other
* possibilities, but the logic in this file will have to extended in order
* handle any other possibility.
*/
#ifdef CONFIG_SAMA5_DDRCS_PGHEAP
# error CONFIG_SAMA5_DDRCS_PGHEAP must be selected
#endif
#ifdef CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET
# error CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET must be specified
#endif
#if (CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET & MM_PGMASK) != 0
# warning CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET is not aligned to a page boundary
#endif
#ifdef CONFIG_SAMA5_DDRCS_PGHEAP_SIZE
# error CONFIG_SAMA5_DDRCS_PGHEAP_SIZE must be specified
#endif
#if (CONFIG_SAMA5_DDRCS_PGHEAP_SIZE & MM_PGMASK) != 0
# warning CONFIG_SAMA5_DDRCS_PGHEAP_SIZE is not aligned to a page boundary
#endif
/****************************************************************************
* Private Data
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_allocate_pgheap
*
* Description:
* If there is a page allocator in the configuration, then this function
* must be provided by the platform-specific code. The OS initialization
* logic will call this function early in the initialization sequence to
* get the page heap information needed to configure the page allocator.
*
****************************************************************************/
void up_allocate_pgheap(FAR void **heap_start, size_t *heap_size)
{
  /* The page cache region lives in DRAM at the configured offset from the
   * DDRCS virtual section base.
   */

  uintptr_t pgheap = (uintptr_t)SAM_DDRCS_VSECTION +
                     CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET;

  DEBUGASSERT(heap_start && heap_size);

  /* Report the region's start address and configured size to the caller */

  *heap_start = (FAR void *)pgheap;
  *heap_size  = CONFIG_SAMA5_DDRCS_PGHEAP_SIZE;
}
#endif /* CONFIG_MM_PGALLOC */