From f2195a16b218b8c48a8843488d3676dfc9b0dd69 Mon Sep 17 00:00:00 2001 From: Gregory Nutt Date: Thu, 1 Aug 2013 07:41:00 -0600 Subject: [PATCH] ARMv7-A: Separate CONFIG_PAGING start-up logic into a different startup file. Too much conditional compilation. --- ChangeLog | 6 + arch/arm/src/armv7-a/arm_head.S | 152 +------ arch/arm/src/armv7-a/arm_pghead.S | 719 ++++++++++++++++++++++++++++++ arch/arm/src/sama5/Make.defs | 25 +- 4 files changed, 756 insertions(+), 146 deletions(-) create mode 100644 arch/arm/src/armv7-a/arm_pghead.S diff --git a/ChangeLog b/ChangeLog index 3c39bec52f..6ec0b18198 100644 --- a/ChangeLog +++ b/ChangeLog @@ -5243,3 +5243,9 @@ (2013-7-31). 6.30 2013-xx-xx Gregory Nutt + + * arch/arm/src/sama5/arm_pghead.S: Separate the logic that is + enabled by CONFIG_PAGING out of arm_head.S. That was just + too much conditional compilation to be supportable + (2013-8-1). + diff --git a/arch/arm/src/armv7-a/arm_head.S b/arch/arm/src/armv7-a/arm_head.S index 5eebbdb48a..ec3a33d563 100644 --- a/arch/arm/src/armv7-a/arm_head.S +++ b/arch/arm/src/armv7-a/arm_head.S @@ -39,11 +39,6 @@ #include -#ifdef CONFIG_PAGING -# include -# include "pg_macros.h" -#endif - #include #include "arm.h" @@ -52,6 +47,8 @@ #include "mmu.h" #include "chip.h" + .file "arm_head.S" + /********************************************************************************** * Configuration **********************************************************************************/ @@ -139,25 +136,6 @@ #endif -/* For each page table offset, the following provide (1) the physical address of - * the start of the page table and (2) the number of page table entries in the - * first page table. - * - * Coarse: PG_L1_PADDRMASK=0xfffffc00 - * NPAGE1=(256 -((a) & 0x000003ff) >> 2) NPAGE1=1-256 - * Fine: PG_L1_PADDRMASK=0xfffff000 - * NPAGE1=(1024 -((a) & 0x00000fff) >> 2) NPAGE1=1-1024 - */ - -#ifdef CONFIG_PAGING -# define PG_L2_TEXT_PBASE (PG_L2_TEXT_PADDR & PG_L1_PADDRMASK) -# define PG_L2_TEXT_NPAGE1 (PTE_NPAGES - ((PG_L2_TEXT_PADDR & ~PG_L1_PADDRMASK) >> 2)) -# define PG_L2_PGTABLE_PBASE (PG_L2_PGTABLE_PADDR & PG_L1_PADDRMASK) -# define PG_L2_PGTABLE_NPAGE1 (PTE_NPAGES - ((PG_L2_PGTABLE_PADDR & ~PG_L1_PADDRMASK) >> 2)) -# define PG_L2_DATA_PBASE (PG_L2_DATA_PADDR & PG_L1_PADDRMASK) -# define PG_L2_DATA_NPAGE1 (PTE_NPAGES - ((PG_L2_DATA_PADDR & ~PG_L1_PADDRMASK) >> 2)) -#endif - /**************************************************************************** * Definitions ****************************************************************************/ @@ -256,47 +234,6 @@ __start: str r3, [r4, r0, lsr #18] /* identity mapping */ #endif -#ifdef CONFIG_PAGING - - /* Map the read-only .text region in place. This must be done - * before the MMU is enabled and the virtual addressing takes - * effect. First populate the L1 table for the locked and paged - * text regions. - * - * We could probably make the the pg_l1span and pg_l2map macros into - * call-able subroutines, but we would have to be carefully during - * this phase while we are operating in a physical address space. - * - * NOTE: That the value of r5 (L1 table base address) must be - * preserved through the following. - */ - - adr r0, .Ltxtspan - ldmia r0, {r0, r1, r2, r3, r5} - pg_l1span r0, r1, r2, r3, r5, r6 - - /* Then populate the L2 table for the locked text region only. */ - - adr r0, .Ltxtmap - ldmia r0, {r0, r1, r2, r3} - pg_l2map r0, r1, r2, r3, r5 - - /* Make sure that the page table is itself mapped and and read/write-able. 
- * First, populate the L1 table: - */ - - adr r0, .Lptabspan - ldmia r0, {r0, r1, r2, r3, r5} - pg_l1span r0, r1, r2, r3, r5, r6 - - /* Then populate the L2 table. */ - - adr r0, .Lptabmap - ldmia r0, {r0, r1, r2, r3} - pg_l2map r0, r1, r2, r3, r5 - -#else /* CONFIG_PAGING */ - /* Create a virtual single section mapping for the first MB of the .text * address space. Now, we have the first 1MB mapping to both physical and * virtual addresses. The rest of the .text mapping will be completed in @@ -341,7 +278,6 @@ __start: str r3, [r4, r0, lsr #18] /* Save the L1 entry using vaddress as an index */ #endif /* CONFIG_BOOT_RUNFROMFLASH */ -#endif /* CONFIG_PAGING */ #endif /* CONFIG_ARCH_ROMPGTABLE */ /* The following logic will set up the ARMv7-A for normal operation. @@ -554,7 +490,6 @@ __start: #endif .size .LCtextflags, . -.LCtextflags -#if !defined(CONFIG_PAGING) #ifdef CONFIG_BOOT_RUNFROMFLASH /* The physical base address of the primary RAM region */ @@ -576,10 +511,9 @@ __start: .LCramflags: .long MMU_MEMFLAGS /* MMU flags for RAM section */ .size .LCramflags, . -.LCramflags -#endif -#endif -#endif +#endif /* CONFIG_BOOT_RUNFROMFLASH */ +#endif /* CONFIG_ARCH_ROMPGTABLE */ /* The physical base address of the page table */ @@ -595,45 +529,9 @@ __start: .LCvpgtable: .long PGTABLE_BASE_VADDR /* Virtual start of page table */ .size .LCvpgtable, . -.LCvpgtable -#endif -#ifdef CONFIG_PAGING +#endif /* CONFIG_ARCH_ROMPGTABLE */ - .type .Ltxtspan, %object -.Ltxtspan: - .long PG_L1_TEXT_PADDR /* Physical address in the L1 table */ - .long PG_L2_TEXT_PBASE /* Physical address of the start of the L2 page table */ - .long PG_TEXT_NVPAGES /* Total (virtual) text pages to be mapped */ - .long PG_L2_TEXT_NPAGE1 /* The number of text pages in the first page table */ - .long MMU_L1_TEXTFLAGS /* L1 MMU flags to use */ - .size .Ltxtspan, . -.Ltxtspan - - .type .Ltxtmap, %object -.Ltxtmap: - .long PG_L2_LOCKED_PADDR /* Physical address in the L2 table */ - .long PG_LOCKED_PBASE /* Physical address of locked base memory */ - .long CONFIG_PAGING_NLOCKED /* Number of pages in the locked region */ - .long MMU_L2_TEXTFLAGS /* L2 MMU flags to use */ - .size .Ltxtmap, . -.Ltxtmap - - .type .Lptabspan, %object -.Lptabspan: - .long PG_L1_PGTABLE_PADDR /* Physical address in the L1 table */ - .long PG_L2_PGTABLE_PBASE /* Physical address of the start of the L2 page table */ - .long PG_PGTABLE_NPAGES /* Total mapped page table pages */ - .long PG_L2_PGTABLE_NPAGE1 /* The number of text pages in the first page table */ - .long MMU_L1_PGTABFLAGS /* L1 MMU flags to use */ - .size .Lptabspan, . -.Lptabspan - - .type .Lptabmap, %object -.Lptabmap: - .long PG_L2_PGTABLE_PADDR /* Physical address in the L2 table */ - .long PGTABLE_BASE_PADDR /* Physical address of the page table memory */ - .long PG_PGTABLE_NPAGES /* Total mapped page table pages */ - .long MMU_L2_PGTABFLAGS /* L2 MMU flags to use */ - .size .Lptabmap, . 
-.Lptabmap - -#endif /* CONFIG_PAGING */ .size __start, .-__start /**************************************************************************** @@ -662,20 +560,6 @@ __start: str r3, [r4, r3, lsr #18] /* identity mapping */ #endif -#if defined(CONFIG_PAGING) - /* Populate the L1 table for the data region */ - - adr r0, .Ldataspan - ldmia r0, {r0, r1, r2, r3, r4} - pg_l1span r0, r1, r2, r3, r4, r5 - - /* Populate the L2 table for the data region */ - - adr r0, .Ldatamap - ldmia r0, {r0, r1, r2, r3} - pg_l2map r0, r1, r2, r3, r4 - -#else /* Get the following value (if we did not already do so above): * * R4 = Virtual address of the page table @@ -743,7 +627,6 @@ __start: .endr #endif /* CONFIG_BOOT_RUNFROMFLASH */ -#endif /* CONFIG_PAGING */ #endif /* CONFIG_ARCH_ROMPGTABLE */ /* Zero BSS and set up the stack pointer */ @@ -769,7 +652,7 @@ __start: * address region. */ -#if defined(CONFIG_BOOT_RUNFROMFLASH) || defined(CONFIG_PAGING) +#if defined(CONFIG_BOOT_RUNFROMFLASH) adr r3, .Ldatainit ldmia r3, {r0, r1, r2} @@ -805,28 +688,7 @@ __start: .long _ebss+CONFIG_IDLETHREAD_STACKSIZE-4 .size .Linitparms, . -.Linitparms -#ifdef CONFIG_PAGING - - .type .Ldataspan, %object -.Ldataspan: - .long PG_L1_DATA_VADDR /* Virtual address in the L1 table */ - .long PG_L2_DATA_PBASE /* Physical address of the start of the L2 page table */ - .long PG_DATA_NPAGES /* Number of pages in the data region */ - .long PG_L2_DATA_NPAGE1 /* The number of text pages in the first page table */ - .long MMU_L1_DATAFLAGS /* L1 MMU flags to use */ - .size .Ldataspan, . -.Ldataspan - - .type .Ldatamap, %object -.Ldatamap: - .long PG_L2_DATA_VADDR /* Virtual address in the L2 table */ - .long PG_DATA_PBASE /* Physical address of data memory */ - .long PG_DATA_NPAGES /* Number of pages in the data region */ - .long MMU_L2_DATAFLAGS /* L2 MMU flags to use */ - .size .Ldatamap, . -.Ldatamap - -#endif /* CONFIG_PAGING */ - -#if defined(CONFIG_BOOT_RUNFROMFLASH) || defined(CONFIG_PAGING) +#if defined(CONFIG_BOOT_RUNFROMFLASH) .type .Ldatainit, %object .Ldatainit: .long _eronly /* Where .data defaults are stored in FLASH */ diff --git a/arch/arm/src/armv7-a/arm_pghead.S b/arch/arm/src/armv7-a/arm_pghead.S new file mode 100644 index 0000000000..931217e3f9 --- /dev/null +++ b/arch/arm/src/armv7-a/arm_pghead.S @@ -0,0 +1,719 @@ +/**************************************************************************** + * arch/arm/src/armv7-a/arm_pghead.S + * + * Copyright (C) 2013 Gregory Nutt. All rights reserved. + * Author: Gregory Nutt + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name NuttX nor the names of its contributors may be + * used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <nuttx/page.h>
+
+#include <arch/board/board.h>
+
+#include "arm.h"
+#include "cp15.h"
+#include "sctlr.h"
+#include "mmu.h"
+#include "pg_macros.h"
+
+#include "chip.h"
+
+ .file "arm_pghead.S"
+
+/**********************************************************************************
+ * Configuration
+ **********************************************************************************/
+/* Assume these are not needed */
+
+#undef ALIGNMENT_TRAP
+#undef CPU_CACHE_ROUND_ROBIN
+#undef CPU_DCACHE_DISABLE
+#undef CPU_ICACHE_DISABLE
+
+/* The page table cannot be in ROM if we are going to do paging! */
+
+#ifdef CONFIG_ARCH_ROMPGTABLE
+# error CONFIG_PAGING and CONFIG_ARCH_ROMPGTABLE are incompatible options
+#endif
+
+/* There are three operational memory configurations:
+ *
+ * 1. We execute in place in FLASH (CONFIG_BOOT_RUNFROMFLASH=y). In this case
+ * the boot logic must:
+ *
+ * - Configure SDRAM,
+ * - Initialize the .data section in RAM, and
+ * - Clear .bss section
+ */
+
+#ifdef CONFIG_BOOT_RUNFROMFLASH
+# define DO_SDRAM_INIT 1
+
+ /* Check for the identity mapping: For this configuration, this would be
+ * the case where the virtual beginning of FLASH is the same as the physical
+ * beginning of FLASH.
+ */
+
+# if !defined(CONFIG_FLASH_START) || !defined(CONFIG_FLASH_VSTART)
+# error "CONFIG_FLASH_START or CONFIG_FLASH_VSTART is not defined"
+# endif
+
+# if CONFIG_FLASH_START == CONFIG_FLASH_VSTART
+# define CONFIG_IDENTITY_TEXTMAP 1
+# endif
+
+/* 2. We boot in FLASH but copy ourselves to DRAM for better performance.
+ * (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=y). In this case
+ * the boot logic must:
+ *
+ * - Configure SDRAM,
+ * - Copy ourselves to DRAM (after mapping it), and
+ * - Clear .bss section
+ *
+ * In this case, we assume that the logic within this file executes from FLASH.
+ */
+
+#elif defined(CONFIG_BOOT_COPYTORAM)
+# error "configuration not implemented"
+# define DO_SDRAM_INIT 1
+
+ /* Check for the identity mapping: For this configuration, this would be
+ * the case where the virtual beginning of FLASH is the same as the physical
+ * beginning of FLASH.
+ */
+
+# if !defined(CONFIG_FLASH_START) || !defined(CONFIG_FLASH_VSTART)
+# error "CONFIG_FLASH_START or CONFIG_FLASH_VSTART is not defined"
+# endif
+
+# if CONFIG_FLASH_START == CONFIG_FLASH_VSTART
+# define CONFIG_IDENTITY_TEXTMAP 1
+# endif
+
+/* 3. There is a bootloader that copies us to DRAM (but probably not to the beginning)
+ * (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=n). In this case SDRAM
+ * was initialized by the boot loader, and this boot logic must:
+ *
+ * - Clear .bss section
+ */
+
+#else
+
+ /* Check for the identity mapping: For this configuration, this would be
+ * the case where the virtual beginning of RAM is the same as the physical
+ * beginning of RAM.
+ */
+
+# if !defined(CONFIG_RAM_START) || !defined(CONFIG_RAM_VSTART)
+# error "CONFIG_RAM_START or CONFIG_RAM_VSTART is not defined"
+# endif
+
+# if CONFIG_RAM_START == CONFIG_RAM_VSTART
+# define CONFIG_IDENTITY_TEXTMAP 1
+# endif
+
+#endif
+
+/* For each page table offset, the following provide (1) the physical address of
+ * the start of the page table and (2) the number of page table entries in the
+ * first page table.
+ *
+ * Coarse: PG_L1_PADDRMASK=0xfffffc00
+ * NPAGE1=(256 -((a) & 0x000003ff) >> 2) NPAGE1=1-256
+ * Fine: PG_L1_PADDRMASK=0xfffff000
+ * NPAGE1=(1024 -((a) & 0x00000fff) >> 2) NPAGE1=1-1024
+ */
+
+#define PG_L2_TEXT_PBASE (PG_L2_TEXT_PADDR & PG_L1_PADDRMASK)
+#define PG_L2_TEXT_NPAGE1 (PTE_NPAGES - ((PG_L2_TEXT_PADDR & ~PG_L1_PADDRMASK) >> 2))
+#define PG_L2_PGTABLE_PBASE (PG_L2_PGTABLE_PADDR & PG_L1_PADDRMASK)
+#define PG_L2_PGTABLE_NPAGE1 (PTE_NPAGES - ((PG_L2_PGTABLE_PADDR & ~PG_L1_PADDRMASK) >> 2))
+#define PG_L2_DATA_PBASE (PG_L2_DATA_PADDR & PG_L1_PADDRMASK)
+#define PG_L2_DATA_NPAGE1 (PTE_NPAGES - ((PG_L2_DATA_PADDR & ~PG_L1_PADDRMASK) >> 2))
+
+/****************************************************************************
+ * Definitions
+ ****************************************************************************/
+
+/* RX_NSECTIONS determines the number of 1Mb sections to map for the
+ * Read/eXecute address region. This is based on NUTTX_TEXT_SIZE.
+ */
+
+#define RX_NSECTIONS ((NUTTX_TEXT_SIZE+0x000fffff) >> 20)
+#define WR_NSECTIONS ((NUTTX_RAM_SIZE+0x000fffff) >> 20)
+
+/****************************************************************************
+ * Assembly Macros
+ ****************************************************************************/
+
+/* The ARMv7-A L1 page table can be placed at the beginning or at the end of
+ * the RAM space. This decision is based on the placement of the vector
+ * area: If the vectors are placed in low memory at address 0x0000 0000, then
+ * the page table is placed in high memory; if the vectors are placed in
+ * high memory at address 0xfff0 0000, then the page table is located at
+ * the beginning of RAM.
+ *
+ * For the special case where (1) the program executes out of RAM, and (2)
+ * the page table is located at the beginning of RAM (i.e., the high vector case),
+ * the following macro can easily find the physical address of the
+ * section that includes the first part of the text region: Since the page
+ * table is closely related to the NuttX base address in this case, we can
+ * convert the page table base address to the base address of the section
+ * containing both.
+ */
+
+/* This macro will modify r0, r1, r2 and r14 */
+
+#ifdef CONFIG_DEBUG
+ .macro showprogress, code
+ mov r0, #\code
+ bl up_lowputc
+ .endm
+#else
+ .macro showprogress, code
+ .endm
+#endif
+
+/****************************************************************************
+ * Name: __start
+ ****************************************************************************/
+
+ .text
+ .global __start
+ .type __start, #function
+
+__start:
+ /* Make sure that we are in SVC mode with all IRQs disabled */
+
+ mov r0, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
+ msr cpsr_c, r0
+
+ /* Initialize DRAM using a macro provided by board-specific logic.
+ *
+ * This must be done in two cases:
+ * 1. CONFIG_BOOT_RUNFROMFLASH. The system is running from FLASH
+ * 2. CONFIG_BOOT_COPYTORAM. The system booted from FLASH but
+ * will copy itself to SDRAM.
+ */
+
+#ifdef DO_SDRAM_INIT
+ config_sdram
+#endif
+
+ /* Clear the 16K level 1 page table */
+
+ ldr r4, .LCppgtable /* r4=phys. page table */
+ mov r0, r4
+ mov r1, #0
+ add r2, r0, #PGTABLE_SIZE
+.Lpgtableclear:
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ teq r0, r2
+ bne .Lpgtableclear
+
+ /* Create identity mapping for first MB of the .text section to support
+ * this startup logic executing out of the physical address space. This
+ * identity mapping will be removed by .Lvstart (see below). Of course,
+ * we would only do this if the physical-virtual mapping is not already
+ * the identity mapping.
+ */
+
+#ifndef CONFIG_IDENTITY_TEXTMAP
+ ldr r0, .LCptextbase /* r0=phys. base address of .text section */
+ ldr r1, .LCtextflags /* R1=.text section MMU flags */
+ orr r3, r1, r0 /* r3=flags + base */
+ str r3, [r4, r0, lsr #18] /* identity mapping */
+#endif
+
+ /* Map the read-only .text region in place. This must be done
+ * before the MMU is enabled and the virtual addressing takes
+ * effect. First populate the L1 table for the locked and paged
+ * text regions.
+ *
+ * We could probably make the pg_l1span and pg_l2map macros into
+ * call-able subroutines, but we would have to be careful during
+ * this phase while we are operating in a physical address space.
+ *
+ * NOTE: The value of r5 (L1 table base address) must be
+ * preserved through the following.
+ */
+
+ adr r0, .Ltxtspan
+ ldmia r0, {r0, r1, r2, r3, r5}
+ pg_l1span r0, r1, r2, r3, r5, r6
+
+ /* Then populate the L2 table for the locked text region only. */
+
+ adr r0, .Ltxtmap
+ ldmia r0, {r0, r1, r2, r3}
+ pg_l2map r0, r1, r2, r3, r5
+
+ /* Make sure that the page table is itself mapped and read/write-able.
+ * First, populate the L1 table:
+ */
+
+ adr r0, .Lptabspan
+ ldmia r0, {r0, r1, r2, r3, r5}
+ pg_l1span r0, r1, r2, r3, r5, r6
+
+ /* Then populate the L2 table. */
+
+ adr r0, .Lptabmap
+ ldmia r0, {r0, r1, r2, r3}
+ pg_l2map r0, r1, r2, r3, r5
+
+ /* The following logic will set up the ARMv7-A for normal operation.
+ *
+ * Here we expect to have:
+ * r4 = Address of the base of the L1 table
+ */
+
+ /* Invalidate caches and TLBs.
+ *
+ * NOTE: "The ARMv7 Virtual Memory System Architecture (VMSA) does not
+ * support a CP15 operation to invalidate the entire data cache. ...
+ * In normal usage the only time the entire data cache has to be
+ * invalidated is on reset."
+ *
+ * The instruction cache is virtually indexed and physically tagged but
+ * the data cache is physically indexed and physically tagged. So it
+ * should not be an issue if the system comes up with a dirty Dcache;
+ * the ICache, however, must be invalidated.
+ */
+
+ mov r0, #0
+ mcr CP15_ICIALLUIS(r0) /* Invalidate entire instruction cache Inner Shareable */
+ mcr CP15_TLBIALLIS(r0) /* Invalidate entire Unified TLB Inner Shareable */
+
+ /* Load the page table address.
+ *
+ * NOTES:
+ * - Here we assume that the page table address is aligned to at
+ * least a 16KB boundary (bits 0-13 are zero). No masking is provided
+ * to protect against an unaligned page table address.
+ * - The Cortex-A5 has two page table address registers, TTBR0 and 1.
+ * Only TTBR0 is used in this implementation but both are initialized.
+ * + * Here we expect to have: + * r0 = Zero + * r4 = Address of the base of the L1 table + */ + + mcr CP15_TTBR0(r4) + mcr CP15_TTBR1(r4) + + /* Set the TTB control register (TTBCR) to indicate that we are using + * TTBR0. r0 still holds the value of zero. + * + * N : 0=Selects TTBR0 and 16KB page table size indexed by VA[31:20] + * PD0 : 0=Perform translation table walks using TTBR0 + * PD1 : 0=Perform translation table walks using TTBR1 (but it is disabled) + * EAE : 0=Use 32-bit translation system + */ + + mcr CP15_TTBCR(r0) + + /* Enable the MMU and caches + * lr = Resume at .Lvstart with the MMU enabled + */ + + ldr lr, .LCvstart /* Abs. virtual address */ + + /* Configure the domain access register (see mmu.h). Only domain 0 is + * supported and it uses the permissions in the TLB. + */ + + mov r0, #DACR_CLIENT(0) + mcr CP15_DACR(r0) /* Set domain access register */ + + /* Configure the system control register (see sctrl.h) */ + + mrc CP15_SCTLR(r0) /* Get control register */ + + /* Clear bits to reset values. This is only necessary in situations like, for + * example, we get here via a bootloader and the control register is in some + * unknown state. + * + * SCTLR_A Bit 1: Strict alignment disabled (reset value) + * SCTLR_C Bit 2: DCache disabled (reset value) + * + * SCTLR_SW Bit 10: SWP/SWPB not enabled (reset value) + * SCTLR_I Bit 12: ICache disabled (reset value) + * SCTLR_V Bit 13: Assume low vectors (reset value) + * SCTLR_RR Bit 14: The Cortex-A5 processor only supports a fixed random + * replacement strategy. + * SCTLR_HA Bit 17: Not supported by A5 + * + * SCTLR_EE Bit 25: Little endian (reset value). + * SCTLR_TRE Bit 28: No memory region remapping (reset value) + * SCTLR_AFE Bit 29: Full, legacy access permissions behavior (reset value). + * SCTLR_TE Bit 30: All exceptions handled in ARM state (reset value). + */ + + bic r0, r0, #(SCTLR_A | SCTLR_C) + bic r0, r0, #(SCTLR_SW | SCTLR_I | SCTLR_V | SCTLR_RR | SCTLR_HA) + bic r0, r0, #(SCTLR_EE | SCTLR_TRE | SCTLR_AFE | SCTLR_TE) + + /* Set bits to enable the MMU + * + * SCTLR_M Bit 0: Enable the MMU + * SCTLR_Z Bit 11: Program flow prediction control always enabled on A5 + */ + + orr r0, r0, #(SCTLR_M /* | SCTLR_Z */) + + /* Position vectors to 0xffff0000 if so configured. + * + * SCTLR_V Bit 13: High vectors + */ + +#ifndef CONFIG_ARCH_LOWVECTORS + orr r0, r0, #(SCTLR_V) +#endif + + /* Round Robin cache replacement + * + * SCTLR_RR Bit 14: The Cortex-A5 processor only supports a fixed random + * replacement strategy. + */ + +#ifndef CPU_CACHE_ROUND_ROBIN +#endif + + /* Dcache enable + * + * SCTLR_C Bit 2: DCache enable + */ + +#ifndef CPU_DCACHE_DISABLE + orr r0, r0, #(SCTLR_C) +#endif + + /* Icache enable + * + * SCTLR_I Bit 12: ICache enable + */ + +#ifndef CPU_ICACHE_DISABLE + orr r0, r0, #(SCTLR_I) +#endif + + /* Alignment abort enable + * + * SCTLR_A Bit 1: Strict alignment enabled + */ + +#ifdef ALIGNMENT_TRAP + orr r0, r0, #(SCTLR_A) +#endif + + /* AP[0:2] Permissions model + * + * SCTLR_AFE Bit 29: Full, legacy access permissions behavior (reset value). + * + * When AFE=1, the page table AP[0] is used as an access flag and AP[2:1] + * control. When AFE=0, AP[2:0] control access permissions. + */ + +#ifdef CONFIG_AFE_ENABLE + orr r0, r0, #(SCTLR_AFE) +#endif + + /* Then write the configured control register */ + + mcr CP15_SCTLR(r0) /* Write control reg */ + + /* Read the Main ID register. 
This will be available in R1 after + * MMU trampoline (not currently used) + */ + + mrc CP15_MIDR(r1) /* Read main id reg */ + mov r1, r1 /* Null-avoiding nop */ + mov r1, r1 /* Null-avoiding nop */ + + /* And "jump" to .Lvstart in the newly mapped virtual address space */ + + mov pc, lr + +/**************************************************************************** + * PC_Relative Data + ****************************************************************************/ + + /* The virtual start address of the second phase boot logic */ + + .type .LCvstart, %object +.LCvstart: + .long .Lvstart + .size .LCvstart, . -.LCvstart + + /* The aligned, physical base address of the .text section */ + + .type .LCptextbase, %object +.LCptextbase: + .long NUTTX_TEXT_PADDR & 0xfff00000 + .size .LCptextbase, . -.LCptextbase + + /* The aligned, virtual base address of the .text section */ + + .type .LCvtextbase, %object +.LCvtextbase: + .long NUTTX_TEXT_VADDR & 0xfff00000 + .size .LCvtextbase, . -.LCvtextbase + + /* The MMU flags used with the .text mapping */ + + .type .LCtextflags, %object +.LCtextflags: +#ifdef CONFIG_BOOT_RUNFROMFLASH + .long MMU_ROMFLAGS /* MMU flags text section in FLASH/ROM */ +#else + .long MMU_MEMFLAGS /* MMU flags for text section in RAM */ +#endif + .size .LCtextflags, . -.LCtextflags + + /* The physical base address of the page table */ + + .type .LCppgtable, %object +.LCppgtable: + .long PGTABLE_BASE_PADDR /* Physical start of page table */ + .size .LCppgtable, . -.LCppgtable + + /* The virtual base address of the page table */ + + .type .LCvpgtable, %object +.LCvpgtable: + .long PGTABLE_BASE_VADDR /* Virtual start of page table */ + .size .LCvpgtable, . -.LCvpgtable + + .type .Ltxtspan, %object +.Ltxtspan: + .long PG_L1_TEXT_PADDR /* Physical address in the L1 table */ + .long PG_L2_TEXT_PBASE /* Physical address of the start of the L2 page table */ + .long PG_TEXT_NVPAGES /* Total (virtual) text pages to be mapped */ + .long PG_L2_TEXT_NPAGE1 /* The number of text pages in the first page table */ + .long MMU_L1_TEXTFLAGS /* L1 MMU flags to use */ + .size .Ltxtspan, . -.Ltxtspan + + .type .Ltxtmap, %object +.Ltxtmap: + .long PG_L2_LOCKED_PADDR /* Physical address in the L2 table */ + .long PG_LOCKED_PBASE /* Physical address of locked base memory */ + .long CONFIG_PAGING_NLOCKED /* Number of pages in the locked region */ + .long MMU_L2_TEXTFLAGS /* L2 MMU flags to use */ + .size .Ltxtmap, . -.Ltxtmap + + .type .Lptabspan, %object +.Lptabspan: + .long PG_L1_PGTABLE_PADDR /* Physical address in the L1 table */ + .long PG_L2_PGTABLE_PBASE /* Physical address of the start of the L2 page table */ + .long PG_PGTABLE_NPAGES /* Total mapped page table pages */ + .long PG_L2_PGTABLE_NPAGE1 /* The number of text pages in the first page table */ + .long MMU_L1_PGTABFLAGS /* L1 MMU flags to use */ + .size .Lptabspan, . -.Lptabspan + + .type .Lptabmap, %object +.Lptabmap: + .long PG_L2_PGTABLE_PADDR /* Physical address in the L2 table */ + .long PGTABLE_BASE_PADDR /* Physical address of the page table memory */ + .long PG_PGTABLE_NPAGES /* Total mapped page table pages */ + .long MMU_L2_PGTABFLAGS /* L2 MMU flags to use */ + .size .Lptabmap, . -.Lptabmap + + .size __start, .-__start + +/**************************************************************************** + * Name: .Lvstart + ***************************************************************************/ + +/* The following is executed after the MMU has been enabled. This uses + * absolute addresses; this is not position independent. 
+ */
+ .align 5
+ .local .Lvstart
+ .type .Lvstart, %function
+
+.Lvstart:
+
+ /* Remove the temporary mapping (if one was made). The following assumes
+ * that the total RAM size is > 1Mb and extends that initial mapping to
+ * cover additional RAM sections.
+ */
+
+#ifndef CONFIG_IDENTITY_TEXTMAP
+ ldr r4, .LCvpgtable /* r4=virtual page table base address */
+ ldr r3, .LCvtextbase /* r3=virtual base address of .text section */
+ mov r0, #0 /* flags + base = 0 */
+ str r3, [r4, r3, lsr #18] /* identity mapping */
+#endif
+
+ /* Populate the L1 table for the data region */
+
+ adr r0, .Ldataspan
+ ldmia r0, {r0, r1, r2, r3, r4}
+ pg_l1span r0, r1, r2, r3, r4, r5
+
+ /* Populate the L2 table for the data region */
+
+ adr r0, .Ldatamap
+ ldmia r0, {r0, r1, r2, r3}
+ pg_l2map r0, r1, r2, r3, r4
+
+#ifdef CONFIG_BOOT_RUNFROMFLASH
+
+ /* Get R3 = Value of RAM L1 page table entry */
+
+ ldr r3, .LCprambase /* r3=Aligned Nuttx RAM address (physical) */
+ ldr r1, .LCramflags /* R1=.bss/.data section MMU flags */
+ add r3, r3, r1 /* r3=flags + base */
+
+ /* Now setup the page tables for our normal mapped RAM region.
+ * We round NUTTX_RAM_VADDR down to the nearest megabyte boundary.
+ */
+
+ add r0, r4, #(NUTTX_RAM_VADDR & 0xfff00000) >> 18
+ str r3, [r0], #4
+
+ /* Now map the remaining WR_NSECTIONS-1 sections of the RAM memory
+ * region.
+ */
+
+ .rept WR_NSECTIONS-1
+ add r3, r3, #SECTION_SIZE
+ str r3, [r0], #4
+ .endr
+
+#endif /* CONFIG_BOOT_RUNFROMFLASH */
+
+ /* Zero BSS and set up the stack pointer */
+
+ adr r0, .Linitparms
+ ldmia r0, {r0, r1, sp}
+
+ /* Clear the frame pointer and .bss */
+
+ mov fp, #0
+
+.Lbssinit:
+ cmp r0, r1 /* Clear up to _bss_end_ */
+ strcc fp, [r0],#4
+ bcc .Lbssinit
+
+ /* If the .data section is in a separate, uninitialized address space,
+ * then we will also need to copy the initial values of the .data
+ * section from the .text region into that .data region. This would
+ * be the case if we are executing from FLASH and the .data section
+ * lies in a different physical address region OR if we support
+ * on-demand paging and the .data section lies in a different virtual
+ * address region.
+ */
+
+ adr r3, .Ldatainit
+ ldmia r3, {r0, r1, r2}
+
+1: ldmia r0!, {r3 - r10}
+ stmia r1!, {r3 - r10}
+ cmp r1, r2
+ blt 1b
+
+ /* Perform early C-level, platform-specific initialization */
+
+ bl up_boot
+
+ /* Finally branch to the OS entry point */
+
+ mov lr, #0
+ b os_start
+
+ /* Text-section constants:
+ *
+ * _sbss is the start of the BSS region (see ld.script)
+ * _ebss is the end of the BSS region (see ld.script)
+ *
+ * The idle task stack starts at the end of BSS and is of size
+ * CONFIG_IDLETHREAD_STACKSIZE. The heap continues from there until the
+ * end of memory. See g_idle_topstack below.
+ */
+
+ .type .Linitparms, %object
+.Linitparms:
+ .long _sbss
+ .long _ebss
+ .long _ebss+CONFIG_IDLETHREAD_STACKSIZE-4
+ .size .Linitparms, . -.Linitparms
+
+ .type .Ldataspan, %object
+.Ldataspan:
+ .long PG_L1_DATA_VADDR /* Virtual address in the L1 table */
+ .long PG_L2_DATA_PBASE /* Physical address of the start of the L2 page table */
+ .long PG_DATA_NPAGES /* Number of pages in the data region */
+ .long PG_L2_DATA_NPAGE1 /* The number of data pages in the first page table */
+ .long MMU_L1_DATAFLAGS /* L1 MMU flags to use */
+ .size .Ldataspan, . -.Ldataspan
+
+ .type .Ldatamap, %object
+.Ldatamap:
+ .long PG_L2_DATA_VADDR /* Virtual address in the L2 table */
+ .long PG_DATA_PBASE /* Physical address of data memory */
+ .long PG_DATA_NPAGES /* Number of pages in the data region */
+ .long MMU_L2_DATAFLAGS /* L2 MMU flags to use */
+ .size .Ldatamap, . -.Ldatamap
+
+ .type .Ldatainit, %object
+.Ldatainit:
+ .long _eronly /* Where .data defaults are stored in FLASH */
+ .long _sdata /* Where .data needs to reside in SDRAM */
+ .long _edata
+ .size .Ldatainit, . -.Ldatainit
+
+ .size .Lvstart, .-.Lvstart
+
+ /* Data section variables */
+
+ /* This global variable is unsigned long g_idle_topstack and is
+ * exported from here only because of its coupling to .Linitparms
+ * above.
+ */
+
+ .section .rodata, "a"
+ .align 4
+ .globl g_idle_topstack
+ .type g_idle_topstack, object
+g_idle_topstack:
+ .long _ebss+CONFIG_IDLETHREAD_STACKSIZE
+ .size g_idle_topstack, .-g_idle_topstack
+ .end
diff --git a/arch/arm/src/sama5/Make.defs b/arch/arm/src/sama5/Make.defs
index 101e3f80cb..c3a65ef47a 100644
--- a/arch/arm/src/sama5/Make.defs
+++ b/arch/arm/src/sama5/Make.defs
@@ -1,5 +1,5 @@
 ############################################################################
-# arch/arm/samad5/Make.defs
+# arch/arm/sama5/Make.defs
 #
 # Copyright (C) 2013 Gregory Nutt. All rights reserved.
 # Author: Gregory Nutt 
@@ -33,14 +33,29 @@
 #
 ############################################################################
 
+# The vector table is the "head" object, i.e., the one that must be forced into
+# the link in order to draw in all of the other components
+
 HEAD_ASRC = arm_vectortab.S
 
+# Force the start-up logic to be at the beginning of the .text to simplify
+# debug.
+
+ifeq ($(CONFIG_PAGING),y)
+CMN_ASRCS = arm_pghead.S
+else
 CMN_ASRCS = arm_head.S
+endif
+
+# Common assembly language files
+
 CMN_ASRCS += arm_vectors.S arm_fpuconfig.S arm_fullcontextrestore.S
 CMN_ASRCS += arm_saveusercontext.S arm_vectoraddrexcptn.S arm_vfork.S
 CMN_ASRCS += cp15_coherent_dcache.S cp15_invalidate_dcache.S
 CMN_ASRCS += cp15_clean_dcache.S cp15_flush_dcache.S cp15_invalidate_dcache_all.S
 
+# Common C source files
+
 CMN_CSRCS = up_initialize.c up_idle.c up_interruptcontext.c up_exit.c
 CMN_CSRCS += up_createstack.c up_releasestack.c up_usestack.c up_vfork.c
 CMN_CSRCS += up_mdelay.c up_udelay.c
@@ -52,6 +67,8 @@ CMN_CSRCS += arm_releasepending.c arm_reprioritizertr.c
 CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c arm_syscall.c
 CMN_CSRCS += arm_unblocktask.c arm_undefinedinsn.c
 
+# Configuration dependent C and assembly language files
+
 ifeq ($(CONFIG_PAGING),y)
 CMN_CSRCS += arm_allocpage.c arm_checkmapping.c arm_pginitialize.c
 CMN_CSRCS += arm_va2pte.c
@@ -66,7 +83,13 @@ CMN_ASRCS += arm_savefpu.S arm_restorefpu.S
 CMN_CSRCS += arm_copyarmstate.c
 endif
 
+# SAMA5-specific assembly language files
+
 CHIP_ASRCS =
+# SAMA5-specific C source files
+
 CHIP_CSRCS = sam_allocateheap.c sam_boot.c sam_clockconfig.c sam_gpio.c
 CHIP_CSRCS += sam_irq.c sam_lowputc.c sam_serial.c sam_timerisr.c
+
+# Configuration dependent C and assembly language files