From edbaed19f97c4c387428d7f73f7787d8c8153374 Mon Sep 17 00:00:00 2001 From: Gregory Nutt Date: Tue, 23 Sep 2014 13:19:30 -0600 Subject: [PATCH] Add logic necessary to handle remapping of shared memory on context switches --- arch/arm/include/arch.h | 17 +++++++- arch/arm/src/armv7-a/addrenv.h | 2 +- arch/arm/src/armv7-a/arm_addrenv.c | 53 +++++++++++++++++++++-- arch/arm/src/armv7-a/arm_addrenv_ustack.c | 2 +- arch/arm/src/armv7-a/arm_addrenv_utils.c | 22 +++++++--- include/nuttx/addrenv.h | 4 ++ 6 files changed, 86 insertions(+), 14 deletions(-) diff --git a/arch/arm/include/arch.h b/arch/arm/include/arch.h index 5b962f3fc0..fee37f167b 100644 --- a/arch/arm/include/arch.h +++ b/arch/arm/include/arch.h @@ -48,6 +48,7 @@ #ifndef __ASSEMBLY__ # include # include +# include #endif /**************************************************************************** @@ -90,7 +91,7 @@ do { \ ); \ } while (0) -#endif +#endif /* CONFIG_PIC */ #ifdef CONFIG_ARCH_ADDRENV #if CONFIG_MM_PGSIZE != 4096 @@ -109,10 +110,14 @@ do { \ # define ARCH_DATA_NSECTS ARCH_PG2SECT(CONFIG_ARCH_DATA_NPAGES) # define ARCH_HEAP_NSECTS ARCH_PG2SECT(CONFIG_ARCH_HEAP_NPAGES) +# ifdef CONFIG_MM_SHM +# define ARCH_SHM_NSECTS ARCH_PG2SECT(ARCH_SHM_MAXPAGES) +# endif + # ifdef CONFIG_ARCH_STACK_DYNAMIC # define ARCH_STACK_NSECTS ARCH_PG2SECT(CONFIG_ARCH_STACK_NPAGES) # endif -#endif +#endif /* CONFIG_ARCH_ADDRENV */ /**************************************************************************** * Inline functions @@ -141,6 +146,9 @@ struct group_addrenv_s FAR uintptr_t *data[ARCH_DATA_NSECTS]; #ifdef CONFIG_BUILD_KERNEL FAR uintptr_t *heap[ARCH_HEAP_NSECTS]; +#ifdef CONFIG_MM_SHM + FAR uintptr_t *shm[ARCH_SHM_NSECTS]; +#endif /* Initial heap allocation (in bytes). 
This exists only to provide an * indirect path for passing the size of the initial heap to the heap @@ -167,7 +175,12 @@ struct save_addrenv_s { FAR uint32_t text[ARCH_TEXT_NSECTS]; FAR uint32_t data[ARCH_DATA_NSECTS]; +#ifdef CONFIG_BUILD_KERNEL FAR uint32_t heap[ARCH_HEAP_NSECTS]; +#ifdef CONFIG_MM_SHM + FAR uint32_t shm[ARCH_SHM_NSECTS]; +#endif +#endif }; typedef struct save_addrenv_s save_addrenv_t; diff --git a/arch/arm/src/armv7-a/addrenv.h b/arch/arm/src/armv7-a/addrenv.h index 60cfb342a4..36d2cc7fe9 100644 --- a/arch/arm/src/armv7-a/addrenv.h +++ b/arch/arm/src/armv7-a/addrenv.h @@ -125,7 +125,7 @@ int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen, ****************************************************************************/ void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen, - uintptr_t vaddr); + uintptr_t vaddr, bool keep); #undef EXTERN #ifdef __cplusplus diff --git a/arch/arm/src/armv7-a/arm_addrenv.c b/arch/arm/src/armv7-a/arm_addrenv.c index 65a3308bea..3b0617dcfa 100644 --- a/arch/arm/src/armv7-a/arm_addrenv.c +++ b/arch/arm/src/armv7-a/arm_addrenv.c @@ -363,18 +363,26 @@ int up_addrenv_destroy(FAR group_addrenv_t *addrenv) /* Destroy the .text region */ arm_addrenv_destroy_region(addrenv->text, ARCH_TEXT_NSECTS, - CONFIG_ARCH_TEXT_VBASE); + CONFIG_ARCH_TEXT_VBASE, false); /* Destroy the .bss/.data region */ arm_addrenv_destroy_region(addrenv->data, ARCH_DATA_NSECTS, - CONFIG_ARCH_DATA_VBASE); + CONFIG_ARCH_DATA_VBASE, false); #ifdef CONFIG_BUILD_KERNEL /* Destroy the heap region */ arm_addrenv_destroy_region(addrenv->heap, ARCH_HEAP_NSECTS, - CONFIG_ARCH_HEAP_VBASE); + CONFIG_ARCH_HEAP_VBASE, false); +#ifdef CONFIG_MM_SHM + /* Destroy the shared memory region (without freeing the physical page * data). 
+ */ + + arm_addrenv_destroy_region(addrenv->shm, ARCH_SHM_NSECTS, + CONFIG_ARCH_SHM_VBASE, true); +#endif #endif memset(addrenv, 0, sizeof(group_addrenv_t)); @@ -578,6 +586,33 @@ int up_addrenv_select(FAR const group_addrenv_t *addrenv, mmu_l1_clrentry(vaddr); } } + +#ifdef CONFIG_MM_SHM + for (vaddr = CONFIG_ARCH_SHM_VBASE, i = 0; + i < ARCH_SHM_NSECTS; + vaddr += SECTION_SIZE, i++) + { + /* Save the old L1 page table entry */ + + if (oldenv) + { + oldenv->shm[i] = mmu_l1_getentry(vaddr); + } + + /* Set (or clear) the new page table entry */ + + paddr = (uintptr_t)addrenv->shm[i]; + if (paddr) + { + mmu_l1_setentry(paddr, vaddr, MMU_L1_PGTABFLAGS); + } + else + { + mmu_l1_clrentry(vaddr); + } + } + +#endif #endif return OK; @@ -635,6 +670,18 @@ int up_addrenv_restore(FAR const save_addrenv_t *oldenv) mmu_l1_restore(vaddr, oldenv->heap[i]); } + +#ifdef CONFIG_MM_SHM + for (vaddr = CONFIG_ARCH_SHM_VBASE, i = 0; + i < ARCH_SHM_NSECTS; + vaddr += SECTION_SIZE, i++) + { + /* Restore the L1 page table entry */ + + mmu_l1_restore(vaddr, oldenv->shm[i]); + } + +#endif #endif return OK; diff --git a/arch/arm/src/armv7-a/arm_addrenv_ustack.c b/arch/arm/src/armv7-a/arm_addrenv_ustack.c index 02bc18ceaa..97da35992f 100644 --- a/arch/arm/src/armv7-a/arm_addrenv_ustack.c +++ b/arch/arm/src/armv7-a/arm_addrenv_ustack.c @@ -204,7 +204,7 @@ int up_addrenv_ustackfree(FAR struct tcb_s *tcb) /* Destroy the stack region */ arm_addrenv_destroy_region(tcb->xcp.ustack, ARCH_STACK_NSECTS, - CONFIG_ARCH_STACK_VBASE); + CONFIG_ARCH_STACK_VBASE, false); memset(tcb->xcp.ustack, 0, ARCH_STACK_NSECTS * sizeof(uintptr_t *)); return OK; diff --git a/arch/arm/src/armv7-a/arm_addrenv_utils.c b/arch/arm/src/armv7-a/arm_addrenv_utils.c index fbcdaa70c3..7f0d415f2c 100644 --- a/arch/arm/src/armv7-a/arm_addrenv_utils.c +++ b/arch/arm/src/armv7-a/arm_addrenv_utils.c @@ -228,7 +228,7 @@ int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen, 
****************************************************************************/ void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen, - uintptr_t vaddr) + uintptr_t vaddr, bool keep) { irqstate_t flags; uintptr_t paddr; @@ -266,15 +266,23 @@ void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen, l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK)); #endif - /* Return the allocated pages to the page allocator */ + /* Return the allocated pages to the page allocator unless we were + * asked to keep the page data. We keep the page data only for + * the case of shared memory. In that case, we need to tear down + * the mapping and page table entries, but keep the raw page data, + * which may still be mapped by other user processes. */ - for (j = 0; j < ENTRIES_PER_L2TABLE; j++) + if (!keep) { - paddr = *l2table++; - if (paddr != 0) + for (j = 0; j < ENTRIES_PER_L2TABLE; j++) { - paddr &= PTE_SMALL_PADDR_MASK; - mm_pgfree(paddr, 1); + paddr = *l2table++; + if (paddr != 0) + { + paddr &= PTE_SMALL_PADDR_MASK; + mm_pgfree(paddr, 1); + } } } diff --git a/include/nuttx/addrenv.h b/include/nuttx/addrenv.h index 586afbf7db..4aaa2d550e 100644 --- a/include/nuttx/addrenv.h +++ b/include/nuttx/addrenv.h @@ -197,6 +197,7 @@ # define CONFIG_ARCH_SHM_NPAGES 1 # endif +# define ARCH_SHM_MAXPAGES (CONFIG_ARCH_SHM_NPAGES * CONFIG_ARCH_SHM_MAXREGIONS) # define ARCH_SHM_REGIONSIZE (CONFIG_ARCH_SHM_NPAGES * CONFIG_MM_PGSIZE) # define ARCH_SHM_SIZE (CONFIG_ARCH_SHM_MAXREGIONS * ARCH_SHM_REGIONSIZE) # define ARCH_SHM_VEND (CONFIG_ARCH_SHM_VBASE + ARCH_SHM_SIZE) @@ -235,6 +236,8 @@ * Public Types ****************************************************************************/ +#ifndef __ASSEMBLY__ + /* Reserved .bss/.data region. In the kernel build (CONFIG_BUILD_KERNEL), * the region at the beginning of the .bss/.data region is reserved for use * by the OS. 
This reserved region contains support for: @@ -353,5 +356,6 @@ struct addrenv_reserve_s /* Prototyped in include/nuttx/arch.h as part of the OS/platform interface */ +#endif /* __ASSEMBLY__ */ #endif /* CONFIG_ARCH_ADDRENV */ #endif /* __INCLUDE_NUTTX_ADDRENV_H */