Fix issues of virtual vs physical sizes of regions

git-svn-id: svn://svn.code.sf.net/p/nuttx/code/trunk@2875 42af7a65-404d-4744-a932-0658087f49c3
patacongo 2010-08-21 21:53:41 +00:00
parent 3f4da59045
commit 11dc28963d
7 changed files with 194 additions and 61 deletions

View File

@@ -73,6 +73,10 @@
# define PTE_NPAGES PTE_TINY_NPAGES
/* Mask to get the page table physical address from an L1 entry */
# define PG_L1_PADDRMASK PMD_FINE_TEX_MASK
/* L2 Page table address */
# define PG_L2_BASE_PADDR PGTABLE_FINE_BASE_PADDR
@@ -93,6 +97,10 @@
# define PTE_NPAGES PTE_SMALL_NPAGES
/* Mask to get the page table physical address from an L1 entry */
# define PG_L1_PADDRMASK PMD_COARSE_TEX_MASK
/* L2 Page table address */
# define PG_L2_BASE_PADDR PGTABLE_COARSE_BASE_PADDR
@@ -127,7 +135,7 @@
#define PG_L2_PAGED_PADDR (PG_L2_BASE_PADDR + PG_L2_LOCKED_SIZE)
#define PG_L2_PAGED_VADDR (PG_L2_BASE_VADDR + PG_L2_LOCKED_SIZE)
#define PG_L2_PAGED_SIZE (4*CONFIG_PAGING_NPAGED)
#define PG_L2_PAGED_SIZE (4*CONFIG_PAGING_NPPAGED)
/* This describes the overall text region */
@@ -202,23 +210,25 @@
/* This is the total number of pages used in the text/data mapping: */
#define PG_TOTAL_NPAGES (PG_TEXT_NPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)
#if PG_TOTAL_NPAGES >PG_RAM_PAGES
#define PG_TOTAL_NPPAGES (PG_TEXT_NPPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)
#define PG_TOTAL_NVPAGES (PG_TEXT_NVPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)
#if PG_TOTAL_NPPAGES > PG_RAM_PAGES
# error "Total pages required exceeds RAM size"
#endif
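
To make the physical vs. virtual distinction behind this change concrete, here is a minimal sketch with hypothetical page counts (all values invented for illustration; the real numbers come from the board's CONFIG_PAGING_* settings and the memory map). Only the physical total has to fit in RAM; the virtual span of the paged text region may be much larger:

/* Hypothetical configuration, for illustration only */

#define PG_TEXT_NPPAGES     8   /* Physical pages backing the paged text region   */
#define PG_TEXT_NVPAGES    32   /* Virtual pages spanned by the paged text region */
#define PG_DATA_PAGES       4   /* Data pages (always physically backed)          */
#define PG_PGTABLE_NPAGES   4   /* Pages consumed by the L1/L2 page tables        */
#define PG_RAM_PAGES       64   /* Total pages of physical RAM                    */

#define PG_TOTAL_NPPAGES   (PG_TEXT_NPPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)  /* 16 */
#define PG_TOTAL_NVPAGES   (PG_TEXT_NVPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)  /* 40 */

#if PG_TOTAL_NPPAGES > PG_RAM_PAGES            /* Only the physical total is checked */
#  error "Total pages required exceeds RAM size"
#endif
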
/* For page management purposes, the following summarize the "heap" of
* free pages, operations on free pages and the L2 page table.
*
* PG_POOL_L2NDX(va) - Converts a virtual address in the paged SRAM
* region into an index into the paged region of
* the L2 page table.
* PG_POOL_L2OFFSET(va) - Converts a virtual address in the paged SRAM
* region into a byte offset into the paged
* region of the L2 page table.
* PG_POOL_L2VADDR(va) - Converts a virtual address in the paged SRAM
* region into the virtual address of the
* corresponding PTE entry.
* PG_POOL_VA2L1OFFSET(va) - Given a virtual address, return the L1 table
* offset (in bytes).
* PG_POOL_VA2L1VADDR(va) - Given a virtual address, return the virtual
* address of the L1 table entry
* PG_POOL_L12PPTABLE(L1) - Given the value of an L1 table entry return
* the physical address of the start of the L2
* page table
* PG_POOL_L12VPTABLE(L1) - Given the value of an L1 table entry return
* the virtual address of the start of the L2
* page table.
*
* PG_POOL_L1VBASE - The virtual address of the start of the L1
* page table range corresponding to the first
@@ -239,22 +249,12 @@
* text region (the address at the beginning of
* the page).
* PG_POOL_MAXL2NDX - This is the maximum value+1 of such an index.
* PG_POOL_NDX2L2VADDR(ndx) - Converts an index to the corresponding address
* in the L2 page table entry.
* PG_POOL_VA2L2VADDR(va) - Converts a virtual address within the paged
* text region to the corresponding address in
* the L2 page table entry.
*
* PG_POOL_PGPADDR(ndx) - Converts an index into the corresponding
*
* PG_POOL_PGPADDR(ndx) - Converts a page index into the corresponding
* (physical) address of the backing page memory.
* PG_POOL_PGVADDR(ndx) - Converts an index into the corresponding
* PG_POOL_PGVADDR(ndx) - Converts a page index into the corresponding
* (virtual) address of the backing page memory.
*
* PG_POOL_VIRT2PHYS(va) - Convert a virtual address within the paged
* text region into a physical address.
* PG_POOL_PHYS2VIRT(va) - Convert a physical address within the paged
* text region into a virtual address.
*
* These are used as follows: If a miss occurs at some virtual address, va,
* a new page index, ndx, is allocated. PG_POOL_PGPADDR(ndx) converts the index
* into the physical address of the page memory; PG_POOL_L2VADDR(va) converts
@@ -262,25 +262,21 @@
* written.
*/
#define PG_POOL_L2NDX(va) (((va) - PG_PAGED_VBASE) >> PAGESHIFT)
#define PG_POOL_L2OFFSET(va) (PG_POOL_L2NDX(va) << 2)
#define PG_POOL_L2VADDR(va) (PG_L2_PAGED_VADDR + PG_POOL_L2OFFSET(va))
#define PG_POOL_VA2L1OFFSET(va) (((va) >> 20) << 2)
#define PG_POOL_VA2L1VADDR(va) (PGTABLE_BASE_VADDR + PG_POOL_VA2L1OFFSET(va))
#define PG_POOL_L12PPTABLE(L1) ((L1) & PG_L1_PADDRMASK)
#define PG_POOL_L12VPTABLE(L1) (PG_POOL_L12PPTABLE(L1) - PGTABLE_BASE_PADDR + PGTABLE_BASE_VADDR)
#define PG_POOL_L1VBASE (PGTABLE_BASE_VADDR + ((PG_PAGED_VBASE >> 20) << 2))
#define PG_POOL_L1VEND (PG_POOL_L1VBASE + (CONFIG_PAGING_NPAGED << 2))
#define PG_POOL_L1VEND (PG_POOL_L1VBASE + (CONFIG_PAGING_NVPAGED << 2))
#define PG_POOL_VA2L2NDX(va) (((va) - PG_PAGED_VBASE) >> PAGESHIFT)
#define PG_POOL_NDX2VA(ndx) (((ndx) << PAGESHIFT) + PG_PAGED_VBASE)
#define PG_POOL_MAXL2NDX PG_POOL_VA2L2NDX(PG_PAGED_VEND)
#define PG_POOL_NDX2L2VADDR(ndx) (PG_L2_PAGED_VADDR + ((ndx) << 2))
#define PG_POOL_VA2L2VADDR(va) PG_POOL_NDX2L2VADDR(PG_POOL_VA2L2NDX(va))
#define PG_POOL_PGPADDR(ndx) (PG_PAGED_PBASE + ((ndx) << PAGESHIFT))
#define PG_POOL_PGVADDR(ndx) (PG_PAGED_VBASE + ((ndx) << PAGESHIFT))
#define PG_POOL_VIRT2PHYS(va) ((va) + (PG_PAGED_PBASE - PG_PAGED_VBASE))
#define PG_POOL_PHYS2VIRT(pa) ((pa) + (PG_PAGED_VBASE - PG_PAGED_PBASE))
#endif /* CONFIG_PAGING */
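
As a sanity check on the arithmetic, the following is a small self-contained sketch of what these pool macros compute for one faulting address. PAGESHIFT, PG_PAGED_VBASE and PG_PAGED_PBASE are given hypothetical values here; in the real header they come from the memory configuration:

#include <stdint.h>
#include <stdio.h>

#define PAGESHIFT             12            /* Assumed 4KB pages */
#define PG_PAGED_VBASE        0x00800000u   /* Hypothetical virtual base of the paged text */
#define PG_PAGED_PBASE        0x20100000u   /* Hypothetical physical base of the page pool */

#define PG_POOL_VA2L2NDX(va)  (((va) - PG_PAGED_VBASE) >> PAGESHIFT)
#define PG_POOL_NDX2VA(ndx)   (((ndx) << PAGESHIFT) + PG_PAGED_VBASE)
#define PG_POOL_PGPADDR(ndx)  (PG_PAGED_PBASE + ((ndx) << PAGESHIFT))

int main(void)
{
  uint32_t vaddr = 0x00803004;              /* Fault in the 4th page of the region */
  uint32_t l2ndx = PG_POOL_VA2L2NDX(vaddr); /* -> 3: PTE index in the paged L2 region */
  uint32_t pgva  = PG_POOL_NDX2VA(l2ndx);   /* -> 0x00803000: page-aligned virtual address */

  uint32_t pgndx = 0;                       /* Hypothetical page handed out by the pool */
  uint32_t paddr = PG_POOL_PGPADDR(pgndx);  /* -> 0x20100000: physical page that backs it */

  printf("l2ndx=%lu pgva=0x%08lx paddr=0x%08lx\n",
         (unsigned long)l2ndx, (unsigned long)pgva, (unsigned long)paddr);
  return 0;
}
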
/****************************************************************************

View File

@@ -60,9 +60,9 @@
* Private Types
****************************************************************************/
#if CONFIG_PAGING_NPAGED < 256
#if CONFIG_PAGING_NPPAGED < 256
typedef uint8_t pgndx_t;
#elif CONFIG_PAGING_NPAGED < 65536
#elif CONFIG_PAGING_NPPAGED < 65536
typedef uint16_t pgndx_t;
#else
typedef uint32_t pgndx_t;
@@ -98,7 +98,7 @@ static pgndx_t g_pgndx;
* another index to the mapped virtual page.
*/
static L1ndx_t g_ptemap[CONFIG_PAGING_NPAGED];
static L1ndx_t g_ptemap[CONFIG_PAGING_NPPAGED];
/* The contents of g_ptemap[] are not valid until g_pgndx has wrapped at
* least one time.
@@ -162,7 +162,6 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
uintptr_t paddr;
uint32_t *pte;
unsigned int pgndx;
unsigned int l2ndx;
/* Since interrupts are disabled, we don't need to do anything special. */
@@ -173,13 +172,6 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
vaddr = tcb->xcp.far;
DEBUGASSERT(vaddr >= PG_PAGED_VBASE && vaddr < PG_PAGED_VEND);
/* Verify that this virtual address was previously unmapped */
#if CONFIG_DEBUG
pte = (uint32_t*)PG_POOL_L2VADDR(vaddr);
DEBUGASSERT(*pte == 0);
#endif
/* Allocate page memory to back up the mapping. Start by getting the
* index of the next page that we are going to allocate.
*/
@@ -191,10 +183,6 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
g_pgwrap = true;
}
/* Then convert the index to a (physical) page address. */
paddr = PG_POOL_PGPADDR(pgndx);
/* Was this physical page previously mapped? If so, then we need to un-map
* it.
*/
@@ -205,27 +193,35 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
* mapping -- then zero it!
*/
l2ndx = g_ptemap[pgndx];
pte = (uint32_t*)PG_POOL_NDX2L2VADDR(l2ndx);
uintptr_t oldvaddr = PG_POOL_NDX2VA(g_ptemap[pgndx]);
pte = up_va2pte(oldvaddr);
*pte = 0;
/* Invalidate the TLB corresponding to the virtual address */
/* Invalidate the instruction TLB corresponding to the virtual address */
tlb_inst_invalidate_single(PG_POOL_NDX2VA(l2ndx))
tlb_inst_invalidate_single(oldvaddr);
}
/* Then convert the index to a (physical) page address. */
paddr = PG_POOL_PGPADDR(pgndx);
/* Now set up the new mapping. Get a pointer to the L2 entry
* corresponding to the new mapping. Then set it to map to the newly
* allocated page address.
*/
pte = (uint32_t*)PG_POOL_VA2L2VADDR(va)
pte = up_va2pte(vaddr);
*pte = (paddr | MMU_L2_TEXTFLAGS);
/* Finally, return the virtual address of allocated page */
/* And save the new L1 index */
g_ptemap[pgndx] = PG_POOL_VA2L2NDX(vaddr);
*vpage = (void*)PG_POOL_PHYS2VIRT(paddr);
return OK;
/* Finally, return the virtual address of the allocated page */
*vpage = (void*)(vaddr & ~PAGEMASK);
return OK;
}
#endif /* CONFIG_PAGING */
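
The up_allocpage() changes above implement a simple round-robin replacement policy: once every physical page in the pool has been handed out, the next allocation evicts whatever virtual page the chosen physical page currently backs. A condensed, self-contained sketch of that bookkeeping (not part of this commit; the MMU and TLB details are reduced to an array of fake PTEs, and all sizes are hypothetical) looks roughly like this:

#include <stdbool.h>
#include <stdint.h>

#define NPPAGES   8                  /* Hypothetical number of physical backing pages  */
#define NVPAGES   32                 /* Hypothetical number of pageable virtual pages  */
#define PBASE     0x20100000u        /* Hypothetical physical base of the page pool    */
#define PAGESIZE  4096u

static uint32_t g_l2[NVPAGES];       /* Stand-in for the paged region of the L2 table  */
static uint16_t g_ptemap[NPPAGES];   /* Physical page index -> L2 index it now backs   */
static unsigned g_pgndx;             /* Next physical page to hand out (round robin)   */
static bool     g_pgwrap;            /* True once every physical page has been used    */

/* Allocate a backing page for the faulting L2 index, evicting the oldest
 * mapping once the pool has wrapped.  Returns the physical page address.
 */

static uint32_t alloc_backing_page(unsigned l2ndx)
{
  unsigned pgndx = g_pgndx++;
  if (g_pgndx >= NPPAGES)
    {
      g_pgndx  = 0;
      g_pgwrap = true;
    }

  if (g_pgwrap)
    {
      /* This physical page already backs another virtual page: unmap it.
       * (The real code also invalidates the instruction TLB entry here.)
       */

      g_l2[g_ptemap[pgndx]] = 0;
    }

  /* Point the faulting page's PTE at the physical page and remember it */

  uint32_t paddr  = PBASE + pgndx * PAGESIZE;
  g_l2[l2ndx]     = paddr;           /* The real code also ORs in MMU_L2_TEXTFLAGS */
  g_ptemap[pgndx] = (uint16_t)l2ndx;
  return paddr;
}
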

View File

@@ -41,11 +41,14 @@
#include <nuttx/config.h>
#include <stdint.h>
#include <debug.h>
#include <nuttx/sched.h>
#include <nuttx/page.h>
#include "up_internal.h"
#ifdef CONFIG_PAGING
/****************************************************************************
@@ -96,8 +99,25 @@
bool up_checkmapping(FAR _TCB *tcb)
{
# warning "Not implemented"
return false;
uintptr_t vaddr;
uint32_t *pte;
/* Since interrupts are disabled, we don't need to do anything special. */
DEBUGASSERT(tcb);
/* Get the virtual address that caused the fault */
vaddr = tcb->xcp.far;
DEBUGASSERT(vaddr >= PG_PAGED_VBASE && vaddr < PG_PAGED_VEND);
/* Get the PTE associated with this virtual address */
pte = up_va2pte(vaddr);
/* Return true if this virtual address is mapped. */
return (*pte != 0);
}
#endif /* CONFIG_PAGING */
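
For context, a hedged sketch of how a data-abort path might combine the two routines. The real dispatch is handled by the common NuttX paging logic; handle_text_fault() below is purely illustrative and not part of this commit:

#include <nuttx/config.h>
#include <nuttx/sched.h>
#include <nuttx/page.h>

#include "up_internal.h"

#ifdef CONFIG_PAGING
/* Illustrative only: resolve a fault in the paged text region */

static int handle_text_fault(FAR _TCB *tcb)
{
  FAR void *vpage;

  /* If the page was mapped in the meantime, there is nothing left to do */

  if (up_checkmapping(tcb))
    {
      return OK;
    }

  /* Otherwise map a backing page; the caller would then fill it from the
   * backing store before restarting the faulting instruction.
   */

  return up_allocpage(tcb, &vpage);
}
#endif /* CONFIG_PAGING */
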

View File

@@ -352,7 +352,7 @@ __start:
.Ltxtspan:
.long PG_L2_TEXT_PADDR /* Physical address of L2 table */
.long PG_TEXT_VBASE /* Virtual address of text base */
.long PG_TEXT_NPAGES /* Total mapped text pages */
.long PG_TEXT_NVPAGES /* Total virtual text pages to be mapped */
.long MMU_L1_TEXTFLAGS /* L1 MMU flags to use */
.Ltxtmap:

120
arch/arm/src/arm/up_va2pte.c Executable file
View File

@@ -0,0 +1,120 @@
/****************************************************************************
* arch/arm/src/arm/up_va2pte.c
* Utility to map a virtual address to an L2 page table entry.
*
* Copyright (C) 2010 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <spudmonkey@racsa.co.cr>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <debug.h>
#include <nuttx/sched.h>
#include <nuttx/page.h>
#include "pg_macros.h"
#include "up_internal.h"
#ifdef CONFIG_PAGING
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Private Data
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_va2pte()
*
* Description:
* Convert a virtual address within the paged text region into a pointer to
* the corresponding page table entry.
*
* Input Parameters:
* vaddr - The virtual address within the paged text region.
*
* Returned Value:
* A pointer to the corresponding page table entry.
*
* Assumptions:
* - This function is called from the normal tasking context (but with
* interrupts disabled). The implementation must take whatever actions
* are necessary to assure that the operation is safe within this
* context.
*
****************************************************************************/
uint32_t *up_va2pte(uintptr_t vaddr)
{
uint32_t L1;
uint32_t *L2;
unsigned int ndx;
/* The virtual address is expected to lie in the paged text region */
DEBUGASSERT(vaddr >= PG_PAGED_VBASE && vaddr < PG_PAGED_VEND);
/* Get the L1 table entry associated with this virtual address */
L1 = *(uint32_t*)PG_POOL_VA2L1VADDR(vaddr);
/* Get the address of the L2 page table from the L1 entry */
L2 = (uint32_t*)PG_POOL_L12VPTABLE(L1);
/* Get the index into the L2 page table. Each L1 entry maps
* 256 x 4Kb or 1024 x 1Kb pages.
*/
ndx = (vaddr & 0x000fffff) >> PAGESHIFT;
/* Return a pointer to the PTE for this virtual address. */
return &L2[ndx];
}
#endif /* CONFIG_PAGING */
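
To illustrate the two-level lookup that up_va2pte() performs, here is a tiny standalone example of the index arithmetic, using a hypothetical faulting address (the real L1/L2 table bases come from pg_macros.h and are omitted):

#include <stdint.h>
#include <stdio.h>

#define PAGESHIFT 12                        /* Assumed 4KB small pages */

int main(void)
{
  uint32_t vaddr = 0x00834abc;              /* Hypothetical address in the paged text region */

  /* Each L1 entry covers 1MB and is 4 bytes wide, so the byte offset of
   * the L1 entry is (vaddr >> 20) << 2.
   */

  uint32_t l1off = (vaddr >> 20) << 2;      /* -> 0x20 (the 9th L1 entry) */

  /* Within that 1MB section, the PTE index is the page number held in the
   * low 20 address bits.
   */

  uint32_t l2ndx = (vaddr & 0x000fffff) >> PAGESHIFT;  /* -> 0x34 (page 52) */

  printf("L1 offset = 0x%02lx, L2 index = %lu\n",
         (unsigned long)l1off, (unsigned long)l2ndx);
  return 0;
}
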

View File

@@ -187,6 +187,7 @@ extern int up_hardfault(int irq, FAR void *context);
extern void up_doirq(int irq, uint32_t *regs);
#ifdef CONFIG_PAGING
extern void up_pginitialize(void);
extern uint32_t *up_va2pte(uintptr_t vaddr);
extern void up_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr);
#else /* CONFIG_PAGING */
# define up_pginitialize()

View File

@@ -47,7 +47,7 @@ CMN_CSRCS = up_assert.c up_blocktask.c up_copystate.c up_createstack.c \
up_undefinedinsn.c up_usestack.c
ifeq ($(CONFIG_PAGING),y)
CMN_CSRCS += up_pginitialize.c up_checkmapping.c up_allocpage.c
CMN_CSRCS += up_pginitialize.c up_checkmapping.c up_allocpage.c up_va2pte.c
endif
CGU_ASRCS =