Add logic necessary to handle remapping of shared memory on context switches

This commit is contained in:
Gregory Nutt 2014-09-23 13:19:30 -06:00
parent c3c185131c
commit edbaed19f9
6 changed files with 86 additions and 14 deletions

View File

@ -48,6 +48,7 @@
#ifndef __ASSEMBLY__
# include <stdint.h>
# include <nuttx/pgalloc.h>
# include <nuttx/addrenv.h>
#endif
/****************************************************************************
@ -90,7 +91,7 @@ do { \
); \
} while (0)
#endif
#endif /* CONFIG_PIC */
#ifdef CONFIG_ARCH_ADDRENV
#if CONFIG_MM_PGSIZE != 4096
@ -109,10 +110,14 @@ do { \
# define ARCH_DATA_NSECTS ARCH_PG2SECT(CONFIG_ARCH_DATA_NPAGES)
# define ARCH_HEAP_NSECTS ARCH_PG2SECT(CONFIG_ARCH_HEAP_NPAGES)
# ifdef CONFIG_MM_SHM
# define ARCH_SHM_NSECTS ARCH_PG2SECT(ARCH_SHM_MAXPAGES)
# endif
# ifdef CONFIG_ARCH_STACK_DYNAMIC
# define ARCH_STACK_NSECTS ARCH_PG2SECT(CONFIG_ARCH_STACK_NPAGES)
# endif
#endif
#endif /* CONFIG_ARCH_ADDRENV */
/****************************************************************************
* Inline functions
@ -141,6 +146,9 @@ struct group_addrenv_s
FAR uintptr_t *data[ARCH_DATA_NSECTS];
#ifdef CONFIG_BUILD_KERNEL
FAR uintptr_t *heap[ARCH_HEAP_NSECTS];
#ifdef CONFIG_MM_SHM
FAR uintptr_t *shm[ARCH_SHM_NSECTS];
#endif
/* Initial heap allocation (in bytes). This exists only provide an
* indirect path for passing the size of the initial heap to the heap
@ -167,7 +175,12 @@ struct save_addrenv_s
{
FAR uint32_t text[ARCH_TEXT_NSECTS];
FAR uint32_t data[ARCH_DATA_NSECTS];
#ifdef CONFIG_BUILD_KERNEL
FAR uint32_t heap[ARCH_HEAP_NSECTS];
#ifdef CONFIG_MM_SHM
FAR uint32_t shm[ARCH_SHM_NSECTS];
#endif
#endif
};
typedef struct save_addrenv_s save_addrenv_t;

View File

@ -125,7 +125,7 @@ int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen,
****************************************************************************/
void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen,
uintptr_t vaddr);
uintptr_t vaddr, bool keep);
#undef EXTERN
#ifdef __cplusplus

View File

@ -363,18 +363,26 @@ int up_addrenv_destroy(FAR group_addrenv_t *addrenv)
/* Destroy the .text region */
arm_addrenv_destroy_region(addrenv->text, ARCH_TEXT_NSECTS,
CONFIG_ARCH_TEXT_VBASE);
CONFIG_ARCH_TEXT_VBASE, false);
/* Destroy the .bss/.data region */
arm_addrenv_destroy_region(addrenv->data, ARCH_DATA_NSECTS,
CONFIG_ARCH_DATA_VBASE);
CONFIG_ARCH_DATA_VBASE, false);
#ifdef CONFIG_BUILD_KERNEL
/* Destroy the heap region */
arm_addrenv_destroy_region(addrenv->heap, ARCH_HEAP_NSECTS,
CONFIG_ARCH_HEAP_VBASE);
CONFIG_ARCH_HEAP_VBASE, false);
#ifdef CONFIG_MM_SHM
  /* Destroy the shared memory region.  The physical pages are NOT freed
   * (keep == true):  the underlying pages may still be mapped by other
   * user processes that attached the same shared-memory regions, so only
   * this group's L1 mappings and L2 page tables are torn down here.
   *
   * Bug fix: the list argument must be the SHM page-table list
   * (addrenv->shm), not the heap list (addrenv->heap).
   */

  arm_addrenv_destroy_region(addrenv->shm, ARCH_SHM_NSECTS,
                             CONFIG_ARCH_SHM_VBASE, true);
#endif
#endif
memset(addrenv, 0, sizeof(group_addrenv_t));
@ -578,6 +586,33 @@ int up_addrenv_select(FAR const group_addrenv_t *addrenv,
mmu_l1_clrentry(vaddr);
}
}
#ifdef CONFIG_MM_SHM
for (vaddr = CONFIG_ARCH_SHM_VBASE, i = 0;
i < ARCH_SHM_NSECTS;
vaddr += SECTION_SIZE, i++)
{
/* Save the old L1 page table entry */
if (oldenv)
{
oldenv->shm[i] = mmu_l1_getentry(vaddr);
}
/* Set (or clear) the new page table entry */
paddr = (uintptr_t)addrenv->shm[i];
if (paddr)
{
mmu_l1_setentry(paddr, vaddr, MMU_L1_PGTABFLAGS);
}
else
{
mmu_l1_clrentry(vaddr);
}
}
#endif
#endif
return OK;
@ -635,6 +670,18 @@ int up_addrenv_restore(FAR const save_addrenv_t *oldenv)
mmu_l1_restore(vaddr, oldenv->heap[i]);
}
#ifdef CONFIG_MM_SHM
for (vaddr = CONFIG_ARCH_SHM_VBASE, i = 0;
i < ARCH_SHM_NSECTS;
vaddr += SECTION_SIZE, i++)
{
/* Restore the L1 page table entry */
mmu_l1_restore(vaddr, oldenv->shm[i]);
}
#endif
#endif
return OK;

View File

@ -204,7 +204,7 @@ int up_addrenv_ustackfree(FAR struct tcb_s *tcb)
/* Destroy the stack region */
arm_addrenv_destroy_region(tcb->xcp.ustack, ARCH_STACK_NSECTS,
CONFIG_ARCH_STACK_VBASE);
CONFIG_ARCH_STACK_VBASE, false);
memset(tcb->xcp.ustack, 0, ARCH_STACK_NSECTS * sizeof(uintptr_t *));
return OK;

View File

@ -228,7 +228,7 @@ int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen,
****************************************************************************/
void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen,
uintptr_t vaddr)
uintptr_t vaddr, bool keep)
{
irqstate_t flags;
uintptr_t paddr;
@ -266,15 +266,23 @@ void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen,
l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif
/* Return the allocated pages to the page allocator */
/* Return the allocated pages to the page allocator unless we were
 * asked to keep the page data.  We keep the page data only in the
 * case of shared memory.  In that case, we need to tear down the
 * mapping and page table entries, but the raw page data may still
 * be mapped by other user processes.
 */
for (j = 0; j < ENTRIES_PER_L2TABLE; j++)
if (!keep)
{
paddr = *l2table++;
if (paddr != 0)
for (j = 0; j < ENTRIES_PER_L2TABLE; j++)
{
paddr &= PTE_SMALL_PADDR_MASK;
mm_pgfree(paddr, 1);
paddr = *l2table++;
if (paddr != 0)
{
paddr &= PTE_SMALL_PADDR_MASK;
mm_pgfree(paddr, 1);
}
}
}

View File

@ -197,6 +197,7 @@
# define CONFIG_ARCH_SHM_NPAGES 1
# endif
# define ARCH_SHM_MAXPAGES (CONFIG_ARCH_SHM_NPAGES * CONFIG_ARCH_SHM_MAXREGIONS)
# define ARCH_SHM_REGIONSIZE (CONFIG_ARCH_SHM_NPAGES * CONFIG_MM_PGSIZE)
# define ARCH_SHM_SIZE (CONFIG_ARCH_SHM_MAXREGIONS * ARCH_SHM_REGIONSIZE)
# define ARCH_SHM_VEND (CONFIG_ARCH_SHM_VBASE + ARCH_SHM_SIZE)
@ -235,6 +236,8 @@
* Public Types
****************************************************************************/
#ifndef __ASSEMBLY__
/* Reserved .bss/.data region. In the kernel build (CONFIG_BUILD_KERNEL),
* the region at the beginning of the .bss/.data region is reserved for use
* by the OS. This reserved region contains support for:
@ -353,5 +356,6 @@ struct addrenv_reserve_s
/* Prototyped in include/nuttx/arch.h as part of the OS/platform interface */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_ARCH_ADDRENV */
#endif /* __INCLUDE_NUTTX_ADDRENV_H */