From 66259bfc5367cb17be0210c8f88c67f2aae44e33 Mon Sep 17 00:00:00 2001 From: Gregory Nutt Date: Sat, 20 Jul 2013 13:06:00 -0600 Subject: [PATCH] Misc Cortex-A5 changes include new file for cache operations --- ChangeLog | 3 + arch/arm/src/armv7-a/arm.h | 10 +- arch/arm/src/armv7-a/arm_cache.S | 318 +++++++++ arch/arm/src/armv7-a/arm_dataabort.c | 37 +- arch/arm/src/armv7-a/arm_head.S | 48 +- arch/arm/src/armv7-a/arm_initialstate.c | 6 +- arch/arm/src/armv7-a/arm_prefetchabort.c | 30 +- arch/arm/src/armv7-a/arm_schedulesigaction.c | 4 +- arch/arm/src/armv7-a/arm_vectors.S | 23 +- arch/arm/src/armv7-a/cache.h | 640 +++++++++++++++---- arch/arm/src/armv7-a/mmu.h | 16 +- arch/arm/src/lpc31xx/lpc31_memorymap.h | 2 +- arch/arm/src/sama5/Make.defs | 5 +- arch/arm/src/sama5/chip/sama5d3x_memorymap.h | 2 +- 14 files changed, 961 insertions(+), 183 deletions(-) create mode 100755 arch/arm/src/armv7-a/arm_cache.S diff --git a/ChangeLog b/ChangeLog index a6f7eb438d..7e9dd7af71 100644 --- a/ChangeLog +++ b/ChangeLog @@ -5141,3 +5141,6 @@ the SAMA5D3x-EK board(s) in particular. There is very little here on the first check-in, this structure is being used now primarily to create the Cortex-A5 support (2013-7-19). + * arch/arm/src/armv7-a/arm_cache.S: Cortex-A5 cache operations + (2013-7-20). + diff --git a/arch/arm/src/armv7-a/arm.h b/arch/arm/src/armv7-a/arm.h index 7c23d739df..79804909ac 100644 --- a/arch/arm/src/armv7-a/arm.h +++ b/arch/arm/src/armv7-a/arm.h @@ -58,15 +58,15 @@ #define PSR_MODE_SHIFT (1) /* Bits 0-4: Mode fields */ #define PSR_MODE_MASK (31 << PSR_MODE_SHIFT) -# define PSR_MODE_USER (16 << PSR_MODE_SHIFT) /* User mode */ +# define PSR_MODE_USR (16 << PSR_MODE_SHIFT) /* User mode */ # define PSR_MODE_FIQ (17 << PSR_MODE_SHIFT) /* FIQ mode */ # define PSR_MODE_IRQ (18 << PSR_MODE_SHIFT) /* IRQ mode */ -# define PSR_MODE_SUPER (19 << PSR_MODE_SHIFT) /* Supervisor mode */ +# define PSR_MODE_SVC (19 << PSR_MODE_SHIFT) /* Supervisor mode */ # define PSR_MODE_MON (22 << PSR_MODE_SHIFT) /* Monitor mode */ -# define PSR_MODE_ABORT (23 << PSR_MODE_SHIFT) /* Abort mode */ +# define PSR_MODE_ABT (23 << PSR_MODE_SHIFT) /* Abort mode */ # define PSR_MODE_HYP (26 << PSR_MODE_SHIFT) /* Hyp mode */ -# define PSR_MODE_UNDEF (27 << PSR_MODE_SHIFT) /* Undefined mode */ -# define PSR_MODE_SYSTEM (31 << PSR_MODE_SHIFT) /* System mode */ +# define PSR_MODE_UND (27 << PSR_MODE_SHIFT) /* Undefined mode */ +# define PSR_MODE_SYS (31 << PSR_MODE_SHIFT) /* System mode */ #define PSR_T_BIT (1 << 5) /* Bit 5: Thumb execution state bit */ #define PSR_MASK_SHIFT (6) /* Bits 6-8: Mask Bits */ #define PSR_MASK_MASK (7 << PSR_GE_SHIFT) diff --git a/arch/arm/src/armv7-a/arm_cache.S b/arch/arm/src/armv7-a/arm_cache.S new file mode 100755 index 0000000000..2b6b93a561 --- /dev/null +++ b/arch/arm/src/armv7-a/arm_cache.S @@ -0,0 +1,318 @@ +/**************************************************************************** + * arch/arm/src/armv7-a/arm_cache.S + * + * Copyright (C) 2013 Gregory Nutt. All rights reserved. + * Author: Gregory Nutt + * + * References: + * + * "Cortex-A5™ MPCore, Technical Reference Manual", Revision: r0p1, + * Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810) + * "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition", + * Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. 
ARM + * DDI 0406C.b (ID072512) + * + * Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5 + * which also has a modified BSD-style license: + * + * Copyright (c) 2012, Atmel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name NuttX nor Atmel nor the names of the contributors may + * be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + ****************************************************************************/ + +/* cp15_cache Cache Operations + * + * Usage + * + * They are performed as MCR instructions and only operate on a level 1 cache + * associated with ARM v7 processor. + * + * The supported operations are: + * + * 1. Any of these operations can be applied to any data cache or any + * unified cache. + * 2. Invalidate by MVA. Performs an invalidate of a data or unified cache + * line + * based on the address it contains. + * 3. Invalidate by set/way. Performs an invalidate of a data or unified + * cache line based on its location in the cache hierarchy. + * 4. Clean by MVA. Performs a clean of a data or unified cache line based + * on the address it contains. + * 5. Clean by set/way. Performs a clean of a data or unified cache line + * based on its location in the cache hierarchy. + * 6. Clean and Invalidate by MVA. Performs a clean and invalidate of a + * data or unified cache line based on the address it contains. + * 7. Clean and Invalidate by set/way. Performs a clean and invalidate of + * a data or unified cache line based on its location in the cache + * hierarchy. + * + * NOTE: Many of these operations are implemented as assembly language + * macros or as C inline functions in the file cache.h. The larger functions + * are implemented here as C-callable functions. 
+ */ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + + .file "arm_cache.S" + +/**************************************************************************** + * Preprocessor Definitions + ****************************************************************************/ + +/**************************************************************************** + * Public Symbols + ****************************************************************************/ + + .globl cp15_coherent_dcache_for_dma + .globl cp15_invalidate_dcache_for_dma + .globl cp15_clean_dcache_for_dma + .globl cp15_flush_dcache_for_dma + .globl cp15_flush_kern_dcache_for_dma + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + + .text + +/**************************************************************************** + * Name: cp15_coherent_dcache_for_dma + * + * Description: + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + + .globl cp15_coherent_dcache_for_dma + .type cp15_coherent_dcache_for_dma, function + +cp15_coherent_dcache_for_dma: + + mrc p15, 0, r3, c0, c0, 1 + lsr r3, r3, #16 + and r3, r3, #0xf + mov r2, #4 + mov r2, r2, lsl r3 + + sub r3, r2, #1 + bic r12, r0, r3 +1: + mcr p15, 0, r12, c7, c11, 1 + add r12, r12, r2 + cmp r12, r1 + blo 1b + dsb + + mrc p15, 0, r3, c0, c0, 1 + and r3, r3, #0xf + mov r2, #4 + mov r2, r2, lsl r3 + + sub r3, r2, #1 + bic r12, r0, r3 +2: + mcr p15, 0, r12, c7, c5, 1 + add r12, r12, r2 + cmp r12, r1 + blo 2b + mov r0, #0 + mcr p15, 0, r0, c7, c1, 6 + mcr p15, 0, r0, c7, c5, 6 + dsb + isb + bx lr + .size cp15_coherent_dcache_for_dma, . - cp15_coherent_dcache_for_dma + +/**************************************************************************** + * Name: cp15_invalidate_dcache_for_dma + * + * Description: + * Invalidate the data cache within the specified region; we will be + * performing a DMA operation in this region and we want to purge old data + * in the cache. + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + + .globl cp15_invalidate_dcache_for_dma + .type cp15_invalidate_dcache_for_dma, function + +cp15_invalidate_dcache_for_dma: + + mrc p15, 0, r3, c0, c0, 1 + lsr r3, r3, #16 + and r3, r3, #0xf + mov r2, #4 + mov r2, r2, lsl r3 + + sub r3, r2, #1 + tst r0, r3 + bic r0, r0, r3 + + mcrne p15, 0, r0, c7, c14, 1 + + tst r1, r3 + bic r1, r1, r3 + mcrne p15, 0, r1, c7, c14, 1 +3: + mcr p15, 0, r0, c7, c6, 1 + add r0, r0, r2 + cmp r0, r1 + blo 3b + dsb + bx lr + .size cp15_coherent_dcache_for_dma, . 
- cp15_coherent_dcache_for_dma + +/**************************************************************************** + * Name: cp15_clean_dcache_for_dma + * + * Description: + * Clean the data cache within the specified region + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + + .globl cp15_clean_dcache_for_dma + .type cp15_clean_dcache_for_dma, function + +cp15_clean_dcache_for_dma: + mrc p15, 0, r3, c0, c0, 1 + lsr r3, r3, #16 + and r3, r3, #0xf + mov r2, #4 + mov r2, r2, lsl r3 + + sub r3, r2, #1 + bic r0, r0, r3 +4: + mcr p15, 0, r0, c7, c10, 1 + add r0, r0, r2 + cmp r0, r1 + blo 4b + dsb + bx lr + .size cp15_clean_dcache_for_dma, . - cp15_clean_dcache_for_dma + +/**************************************************************************** + * Name: cp15_flush_dcache_for_dma + * + * Description: + * Flush the data cache within the specified region + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + + .globl cp15_flush_dcache_for_dma + .type cp15_flush_dcache_for_dma, function + +cp15_flush_dcache_for_dma: + mrc p15, 0, r3, c0, c0, 1 + lsr r3, r3, #16 + and r3, r3, #0xf + mov r2, #4 + mov r2, r2, lsl r3 + sub r3, r2, #1 + bic r0, r0, r3 +5: + mcr p15, 0, r0, c7, c14, 1 + add r0, r0, r2 + cmp r0, r1 + blo 5b + dsb + bx lr + .size cp15_flush_dcache_for_dma, . - cp15_flush_dcache_for_dma + +/**************************************************************************** + * Name: cp15_flush_kern_dcache_for_dma + * + * Description: + * Ensure that the data held in the page kaddr is written back to the page + * in question. + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + + .globl cp15_flush_kern_dcache_for_dma + .type cp15_flush_kern_dcache_for_dma, function + +cp15_flush_kern_dcache_for_dma: + mrc p15, 0, r3, c0, c0, 1 + lsr r3, r3, #16 + and r3, r3, #0xf + mov r2, #4 + mov r2, r2, lsl r3 + + add r1, r0, r1 + sub r3, r2, #1 + bic r0, r0, r3 + + mcr p15, 0, r0, c7, c14, 1 + add r0, r0, r2 + cmp r0, r1 + blo 1b + dsb + bx lr + .size cp15_flush_kern_dcache_for_dma, . - cp15_flush_kern_dcache_for_dma + .end diff --git a/arch/arm/src/armv7-a/arm_dataabort.c b/arch/arm/src/armv7-a/arm_dataabort.c index dc39ac2c38..25df4f8f2e 100644 --- a/arch/arm/src/armv7-a/arm_dataabort.c +++ b/arch/arm/src/armv7-a/arm_dataabort.c @@ -86,12 +86,12 @@ * If CONFIG_PAGING is selected in the NuttX configuration file, then these * additional input values are expected: * - * far - Fault address register. On a data abort, the ARM MMU places the - * miss virtual address (MVA) into the FAR register. This is the address + * dfar - Fault address register. On a data abort, the ARM MMU places the + * miss virtual address (MVA) into the DFAR register. This is the address * of the data which, when accessed, caused the fault. - * fsr - Fault status register. On a data a abort, the ARM MMU places an + * dfsr - Fault status register. 
On a data a abort, the ARM MMU places an * encoded four-bit value, the fault status, along with the four-bit - * encoded domain number, in the data FSR + * encoded domain number, in the data DFSR * * Description: * This is the data abort exception handler. The ARM data abort exception @@ -100,10 +100,10 @@ ****************************************************************************/ #ifdef CONFIG_PAGING -void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr) + +void arm_dataabort(uint32_t *regs, uint32_t dfar, uint32_t dfsr) { - FAR struct tcb_s *tcb = (FAR struct tcb_s *)g_readytorun.head; -#ifdef CONFIG_PAGING + DFAR struct tcb_s *tcb = (DFAR struct tcb_s *)g_readytorun.head; uint32_t *savestate; /* Save the saved processor context in current_regs where it can be accessed @@ -112,10 +112,8 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr) savestate = (uint32_t*)current_regs; -#endif current_regs = regs; -#ifdef CONFIG_PAGING /* In the NuttX on-demand paging implementation, only the read-only, .text * section is paged. However, the ARM compiler generated PC-relative data * fetches from within the .text sections. Also, it is customary to locate @@ -129,19 +127,19 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr) * fatal error. */ - pglldbg("FSR: %08x FAR: %08x\n", fsr, far); - if ((fsr & FSR_MASK) != FSR_PAGE) + pglldbg("DFSR: %08x DFAR: %08x\n", dfsr, dfar); + if ((dfsr & FSR_MASK) != FSR_PAGE) { goto segfault; } /* Check the (virtual) address of data that caused the data abort. When - * the exception occurred, this address was provided in the FAR register. + * the exception occurred, this address was provided in the DFAR register. * (It has not yet been saved in the register context save area). */ pgllvdbg("VBASE: %08x VEND: %08x\n", PG_PAGED_VBASE, PG_PAGED_VEND); - if (far < PG_PAGED_VBASE || far >= PG_PAGED_VEND) + if (dfar < PG_PAGED_VBASE || dfar >= PG_PAGED_VEND) { goto segfault; } @@ -152,7 +150,7 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr) * prefetch and data aborts. */ - tcb->xcp.far = regs[REG_R15]; + tcb->xcp.dfar = regs[REG_R15]; /* Call pg_miss() to schedule the page fill. A consequences of this * call are: @@ -177,14 +175,14 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr) return; segfault: -#endif - lldbg("Data abort. PC: %08x FAR: %08x FSR: %08x\n", regs[REG_PC], far, fsr); + lldbg("Data abort. PC: %08x DFAR: %08x DFSR: %08x\n", + regs[REG_PC], dfar, dfsr); PANIC(); } #else /* CONFIG_PAGING */ -void arm_dataabort(uint32_t *regs) +void arm_dataabort(uint32_t *regs, uint32_t dfar, uint32_t dfsr) { /* Save the saved processor context in current_regs where it can be accessed * for register dumps and possibly context switching. @@ -192,9 +190,10 @@ void arm_dataabort(uint32_t *regs) current_regs = regs; - /* Crash -- possibly showing diagnost debug information. */ + /* Crash -- possibly showing diagnostic debug information. */ - lldbg("Data abort. PC: %08x\n", regs[REG_PC]); + lldbg("Data abort. 
PC: %08x DFAR: %08x DFSR: %08x\n", + regs[REG_PC], dfar, dfsr); PANIC(); } diff --git a/arch/arm/src/armv7-a/arm_head.S b/arch/arm/src/armv7-a/arm_head.S index 71d61c556b..fdd139b360 100644 --- a/arch/arm/src/armv7-a/arm_head.S +++ b/arch/arm/src/armv7-a/arm_head.S @@ -230,7 +230,7 @@ __start: /* Make sure that we are in SVC mode with all IRQs disabled */ - mov r0, #(PSR_MODE_SUPER | PSR_I_BIT | PSR_F_BIT) + mov r0, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT) msr cpsr_c, r0 /* Initialize DRAM using a macro provided by board-specific logic. @@ -341,15 +341,49 @@ __start: * r4 = Address of the base of the L1 table */ + /* Invalidate caches and TLBs. + * + * NOTE: "The ARMv7 Virtual Memory System Architecture (VMSA) does not + * support a CP15 operation to invalidate the entire data cache. ... + * In normal usage the only time the entire data cache has to be + * invalidated is on reset." + * + * REVISIT: This could be an issue if NuttX is every started in a + * context where the DCache could be dirty. + */ + mov r0, #0 - mcr p15, 0, r0, c7, c7 /* Invalidate I,D caches */ - mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */ - mcr p15, 0, r0, c8, c7 /* Invalidate I,D TLBs */ - mcr p15, 0, r4, c2, c0 /* Load page table pointer */ + mcr CP15_ICIALLUIS(r0) /* Invalidate entire instruction cache Inner Shareable */ + + /* Load the page table address. + * + * NOTES: + * - Here we assume that the page table address is aligned to at least + * least a 16KB boundary (bits 0-13 are zero). No masking is provided + * to protect against an unaligned page table address. + * - The Cortex-A5 has two page table address registers, TTBR0 and 1. + * Only TTBR0 is used in this implementation but both are initialized. + * + * Here we expect to have: + * r0 = Zero + * r4 = Address of the base of the L1 table + */ + + mcr CP15_TTBR0(r4) + mcr CP15_TTBR1(r4) + + /* Clear the TTB control register (TTBCR) to indicate that we are using + * TTBR0. r0 still holds the value of zero. + */ + + mcr CP15_TTBCR(r0) + + /* Enable DCache write-through if so configured. + * + * The Cortex-A5 MPCore data cache only supports a write-back policy. + */ #ifdef CPU_DCACHE_WRITETHROUGH - mov r0, #4 /* Disable write-back on caches explicitly */ - mcr p15, 7, r0, c15, c0, 0 #endif /* Enable the MMU and caches diff --git a/arch/arm/src/armv7-a/arm_initialstate.c b/arch/arm/src/armv7-a/arm_initialstate.c index 110faf3057..f9d7f234c2 100644 --- a/arch/arm/src/armv7-a/arm_initialstate.c +++ b/arch/arm/src/armv7-a/arm_initialstate.c @@ -119,20 +119,20 @@ void up_initial_state(struct tcb_s *tcb) { /* It is a kernel thread.. set supervisor mode */ - cpsr = PSR_MODE_SUPER | PSR_F_BIT; + cpsr = PSR_MODE_SVC | PSR_F_BIT; } else { /* It is a normal task or a pthread. Set user mode */ - cpsr = PSR_MODE_USER | PSR_F_BIT; + cpsr = PSR_MODE_USR | PSR_F_BIT; } #else /* If the kernel build is not selected, then all threads run in * supervisor-mode. 
*/ - cpsr = PSR_MODE_SUPER | PSR_F_BIT; + cpsr = PSR_MODE_SVC | PSR_F_BIT; #endif /* Enable or disable interrupts, based on user configuration */ diff --git a/arch/arm/src/armv7-a/arm_prefetchabort.c b/arch/arm/src/armv7-a/arm_prefetchabort.c index c98bc46ec8..3f5c55c70e 100644 --- a/arch/arm/src/armv7-a/arm_prefetchabort.c +++ b/arch/arm/src/armv7-a/arm_prefetchabort.c @@ -87,9 +87,10 @@ * ****************************************************************************/ -void arm_prefetchabort(uint32_t *regs) -{ #ifdef CONFIG_PAGING + +void arm_prefetchabort(uint32_t *regs, uint32_t ifar, uint32_t ifsr) +{ uint32_t *savestate; /* Save the saved processor context in current_regs where it can be accessed @@ -97,10 +98,8 @@ void arm_prefetchabort(uint32_t *regs) */ savestate = (uint32_t*)current_regs; -#endif current_regs = regs; -#ifdef CONFIG_PAGING /* Get the (virtual) address of instruction that caused the prefetch abort. * When the exception occurred, this address was provided in the lr register * and this value was saved in the context save area as the PC at the @@ -146,9 +145,28 @@ void arm_prefetchabort(uint32_t *regs) current_regs = savestate; } else -#endif { - lldbg("Prefetch abort. PC: %08x\n", regs[REG_PC]); + lldbg("Prefetch abort. PC: %08x IFAR: %08x IFSR: %08x\n", + regs[REG_PC], ifar, ifsr); PANIC(); } } + +#else /* CONFIG_PAGING */ + +void arm_prefetchabort(uint32_t *regs, uint32_t ifar, uint32_t ifsr) +{ + /* Save the saved processor context in current_regs where it can be accessed + * for register dumps and possibly context switching. + */ + + current_regs = regs; + + /* Crash -- possibly showing diagnostic debug information. */ + + lldbg("Prefetch abort. PC: %08x IFAR: %08x IFSR: %08x\n", + regs[REG_PC], ifar, ifsr); + PANIC(); +} + +#endif /* CONFIG_PAGING */ diff --git a/arch/arm/src/armv7-a/arm_schedulesigaction.c b/arch/arm/src/armv7-a/arm_schedulesigaction.c index 6516af7913..119d50e0ba 100644 --- a/arch/arm/src/armv7-a/arm_schedulesigaction.c +++ b/arch/arm/src/armv7-a/arm_schedulesigaction.c @@ -162,7 +162,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) */ current_regs[REG_PC] = (uint32_t)up_sigdeliver; - current_regs[REG_CPSR] = PSR_MODE_SUPER | PSR_I_BIT | PSR_F_BIT; + current_regs[REG_CPSR] = PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT; /* And make sure that the saved context in the TCB * is the same as the interrupt return context. @@ -194,7 +194,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) */ tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver; - tcb->xcp.regs[REG_CPSR] = PSR_MODE_SUPER | PSR_I_BIT | PSR_F_BIT; + tcb->xcp.regs[REG_CPSR] = PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT; } irqrestore(flags); diff --git a/arch/arm/src/armv7-a/arm_vectors.S b/arch/arm/src/armv7-a/arm_vectors.S index 6ba12cd88e..3960133062 100644 --- a/arch/arm/src/armv7-a/arm_vectors.S +++ b/arch/arm/src/armv7-a/arm_vectors.S @@ -41,6 +41,7 @@ #include #include "arm.h" +#include "cp15.h" /************************************************************************************ * Definitions @@ -87,6 +88,7 @@ g_aborttmp: .globl arm_vectorirq .type arm_vectorirq, %function + arm_vectorirq: /* On entry, we are in IRQ mode. We are free to use * the IRQ mode r13 and r14. @@ -101,7 +103,7 @@ arm_vectorirq: /* Then switch back to SVC mode */ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */ - orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT) + orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT) msr cpsr_c, lr /* Switch to SVC mode */ /* Create a context structure. 
First set aside a stack frame @@ -163,6 +165,7 @@ arm_vectorirq: .globl arm_vectorswi .type arm_vectorswi, %function + arm_vectorswi: /* Create a context structure. First set aside a stack frame @@ -213,6 +216,7 @@ arm_vectorswi: .globl arm_vectordata .type arm_vectordata, %function + arm_vectordata: /* On entry we are free to use the ABORT mode registers * r13 and r14 @@ -227,7 +231,7 @@ arm_vectordata: /* Then switch back to SVC mode */ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */ - orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT) + orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT) msr cpsr_c, lr /* Switch to SVC mode */ /* Create a context structure. First set aside a stack frame @@ -256,10 +260,8 @@ arm_vectordata: mov fp, #0 /* Init frame pointer */ mov r0, sp /* Get r0=xcp */ -#ifdef CONFIG_PAGING - mrc p15, 0, r2, c5, c0, 0 /* Get r2=FSR */ - mrc p15, 0, r1, c6, c0, 0 /* Get R1=FAR */ -#endif + mrc CP15_DFAR(r1) /* Get R1=DFAR */ + mrc CP15_DFSR(r2) /* Get r2=DFSR */ bl arm_dataabort /* Call the handler */ /* Restore the CPSR, SVC modr registers and return */ @@ -287,6 +289,7 @@ arm_vectordata: .globl arm_vectorprefetch .type arm_vectorprefetch, %function + arm_vectorprefetch: /* On entry we are free to use the ABORT mode registers * r13 and r14 @@ -301,7 +304,7 @@ arm_vectorprefetch: /* Then switch back to SVC mode */ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */ - orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT) + orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT) msr cpsr_c, lr /* Switch to SVC mode */ /* Create a context structure. First set aside a stack frame @@ -330,6 +333,8 @@ arm_vectorprefetch: mov fp, #0 /* Init frame pointer */ mov r0, sp /* Get r0=xcp */ + mrc CP15_IFAR(r1) /* Get R1=IFAR */ + mrc CP15_IFSR(r2) /* Get r2=IFSR */ bl arm_prefetchabort /* Call the handler */ /* Restore the CPSR, SVC modr registers and return */ @@ -355,6 +360,7 @@ arm_vectorprefetch: .globl arm_vectorundefinsn .type arm_vectorundefinsn, %function + arm_vectorundefinsn: /* On entry we are free to use the UND mode registers * r13 and r14 @@ -368,7 +374,7 @@ arm_vectorundefinsn: /* Then switch back to SVC mode */ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */ - orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT) + orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT) msr cpsr_c, lr /* Switch to SVC mode */ /* Create a context structure. First set aside a stack frame @@ -421,6 +427,7 @@ arm_vectorundefinsn: .globl arm_vectorfiq .type arm_vectorfiq, %function + arm_vectorfiq: subs pc, lr, #4 .size arm_vectorfiq, . - arm_vectorfiq diff --git a/arch/arm/src/armv7-a/cache.h b/arch/arm/src/armv7-a/cache.h index 74aaebccb1..b5a1210018 100644 --- a/arch/arm/src/armv7-a/cache.h +++ b/arch/arm/src/armv7-a/cache.h @@ -11,6 +11,12 @@ * "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition", Copyright © * 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM DDI 0406C.b (ID072512) * + * Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5 + * which also has a modified BSD-style license: + * + * Copyright (c) 2012, Atmel Corporation + * All rights reserved. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: @@ -21,8 +27,8 @@ * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. - * 3. Neither the name NuttX nor the names of its contributors may be - * used to endorse or promote products derived from this software + * 3. 
Neither the name NuttX nor Atmel nor the names of the contributors may + * be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS @@ -40,8 +46,8 @@ * ************************************************************************************/ -#ifndef __ARCH_ARM_SRC_ARMV7_A_CPSR_H -#define __ARCH_ARM_SRC_ARMV7_A_CPSR_H +#ifndef __ARCH_ARM_SRC_ARMV7_A_CACHE_H +#define __ARCH_ARM_SRC_ARMV7_A_CACHE_H /************************************************************************************ * Included Files @@ -50,7 +56,12 @@ /************************************************************************************ * Pre-processor Definitions ************************************************************************************/ +/* Cache definitions ****************************************************************/ +/* L1 Memory */ +#define CP15_L1_LINESIZE 32 + +/* CP15 Registers *******************************************************************/ /* Reference: Cortex-A5™ MPCore Paragraph 4.1.5, "Cache Operations Registers." * * Terms: @@ -66,16 +77,16 @@ * * CP15 Register: ICIALLUIS * Description: Invalidate entire instruction cache Inner Shareable. - * Register Format: SBZ + * Register Format: Should be zero (SBZ) * Instruction: MCR p15, 0, , c7, c1, 0 * CP15 Register: BPIALLIS * Description: Invalidate entire branch predictor array Inner Shareable. - * Register Format: SBZ + * Register Format: Should be zero (SBZ) * Instruction: MCR p15, 0, , c7, c1, 6 * CP15 Register: ICIALLU * Description: Invalidate all instruction caches to PoU. Also flushes branch * target cache. - * Register Format: SBZ + * Register Format: Should be zero (SBZ) * Instruction: MCR p15, 0, , c7, c5, 0 * CP15 Register: ICIMVAU * Description: Invalidate instruction cache by VA to PoU. @@ -83,11 +94,11 @@ * Instruction: MCR p15, 0, , c7, c5, 1 * CP15 Register: BPIALL * Description: Invalidate entire branch predictor array. - * Register Format: SBZ + * Register Format: Should be zero (SBZ) * Instruction: MCR p15, 0, , c7, c5, 6 * CP15 Register: BPIMVA * Description: Invalidate VA from branch predictor array. - * Register Format: SBZ + * Register Format: Should be zero (SBZ) * Instruction: MCR p15, 0, , c7, c5, 7 * CP15 Register: DCIMVAC * Description: Invalidate data cache line by VA to PoC. 
@@ -142,7 +153,7 @@ /* VA and SBZ format */ -#define CACHE_SBZ_SHIFT (4) /* Bits 0-4: SBZ */ +#define CACHE_SBZ_SHIFT (4) /* Bits 0-4: Should be zero (SBZ) */ #define CACHE_SBZ_MASK (31 << TLB_SBZ_SHIFT) #define CACHE_VA_MASK (0xfffffffe0) /* Bits 5-31: Virtual address */ @@ -152,95 +163,243 @@ #ifdef __ASSEMBLY__ -/* Invalidate I cache predictor array inner sharable */ +/************************************************************************************ + * Name: cp15_invalidate_icache_inner_sharable + * + * Description: + * Invalidate I cache predictor array inner sharable + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_invalidate_icache_inner_sharable, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c1, 0 + .macro cp15_invalidate_icache_inner_sharable, tmp + mov \tmp, #0 + mrc p15, 0, \tmp, c7, c1, 0 .endm -/* Invalidate entire branch predictor array inner sharable */ +/************************************************************************************ + * Name: cp15_invalidate_btb_inner_sharable + * + * Description: + * Invalidate entire branch predictor array inner sharable + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_invalidate_btb_inner_sharable, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c1, 6 + .macro cp15_invalidate_btb_inner_sharable, tmp + mov \tmp, #0 + mrc p15, 0, \tmp, c7, c1, 6 .endm -/* Invalidate all instruction caches to PoU, also flushes branch target cache */ +/************************************************************************************ + * Name: cp15_invalidate_icache + * + * Description: + * Invalidate all instruction caches to PoU, also flushes branch target cache + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_invalidate_icache, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c5, 0 + .macro cp15_invalidate_icache, tmp + mov \tmp, #0 + mrc p15, 0, \tmp, c7, c5, 0 .endm -/* Invalidate instruction caches by VA to PoU */ +/************************************************************************************ + * Name: cp15_invalidate_icache_bymva + * + * Description: + * Invalidate instruction caches by VA to PoU + * + * Input Parameters: + * va - Register with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_invalidate_icache_bymva, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c5, 1 + .macro cp15_invalidate_icache_bymva, va + mrc p15, 0, \va, c7, c5, 1 .endm -/* Flush entire branch predictor array */ +/************************************************************************************ + * Name: cp15_flush_btb + * + * Description: + * Invalidate entire branch predictor array + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_flush_btb, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c5, 6 + .macro cp15_flush_btb, tmp + mov \tmp, #0 + mrc p15, 0, \tmp, c7, c5, 6 .endm -/* Flush branch predictor array entry by MVA */ +/************************************************************************************ + * Name: 
cp15_flush_btb_bymva + * + * Description: + * Invalidate branch predictor array entry by MVA + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_flush_btb_bymva, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c5, 7 + .macro cp15_flush_btb_bymva, tmp + mov \tmp, #0 + mrc p15, 0, \tmp, c7, c5, 7 .endm -/* Invalidate data cache line by VA to PoC */ +/************************************************************************************ + * Name: cp15_invalidate_dcacheline_bymva + * + * Description: + * Invalidate data cache line by VA to PoC + * + * Input Parameters: + * va - Register with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_invalidate_dcacheline_bymva, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c6, 1 + .macro cp15_invalidate_dcacheline_bymva, va + mrc p15, 0, \va, c7, c6, 1 .endm -/* Invalidate data cache line by set/way */ +/************************************************************************************ + * Name: cp15_invalidate_dcacheline_bysetway + * + * Description: + * Invalidate data cache line by set/way + * + * Input Parameters: + * setway - Register with Set/Way format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_invalidate_dcacheline_bysetway, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c6, 2 + .macro cp15_invalidate_dcacheline_bysetway, setway + mrc p15, 0, \setway, c7, c6, 2 .endm -/* Clean data cache line by MVA */ +/************************************************************************************ + * Name: cp15_clean_dcache_bymva + * + * Description: + * Clean data cache line by MVA + * + * Input Parameters: + * va - Register with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_clean_dcache_bymva, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c10, 1 + .macro cp15_clean_dcache_bymva, va + mrc p15, 0, \va, c7, c10, 1 .endm -/* Clean data cache line by Set/way */ +/************************************************************************************ + * Name: cp15_clean_dcache_bysetway + * + * Description: + * Clean data cache line by Set/way + * + * Input Parameters: + * setway - Register with Set/Way format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_clean_dcache_bysetway, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c10, 2 + .macro cp15_clean_dcache_bysetway, setway + mrc p15, 0, \setway, c7, c10, 2 .endm -/* Clean unified cache line by MVA */ +/************************************************************************************ + * Name: cp15_clean_dcache_bymva + * + * Description: + * Clean unified cache line by MVA + * + * Input Parameters: + * setway - Register with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_clean_dcache_bymva, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c11, 1 + .macro cp15_clean_dcache_bymva, setway + mrc p15, 0, \setway, c7, c11, 1 .endm -/* Clean and invalidate data cache line by VA to PoC */ 
+/************************************************************************************ + * Name: cp15_cleaninvalidate_dcacheline_bymva + * + * Description: + * Clean and invalidate data cache line by VA to PoC + * + * Input Parameters: + * va - Register with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_cleaninvalidate_dcacheline_bymva, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c14, 1 + .macro cp15_cleaninvalidate_dcacheline_bymva, va + mrc p15, 0, \va, c7, c14, 1 .endm -/* Clean and Incalidate data cache line by Set/Way */ +/************************************************************************************ + * Name: cp15_cleaninvalidate_dcacheline + * + * Description: + * Clean and Incalidate data cache line by Set/Way + * + * Input Parameters: + * setway - Register with Set/Way format + * + * Returned Value: + * None + * + ************************************************************************************/ - .macro cp15_cleaninvalidate_dcacheline, scratch - mov \scratch, #0 - mrc p15, 0, \scratch, c7, c14, 2 + .macro cp15_cleaninvalidate_dcacheline, setway + mrc p15, 0, \setway, c7, c14, 2 .endm #endif /* __ASSEMBLY__ */ @@ -251,7 +410,19 @@ #ifndef __ASSEMBLY__ -/* Invalidate I cache predictor array inner sharable */ +/************************************************************************************ + * Name: cp15_invalidate_icache_inner_sharable + * + * Description: + * Invalidate I cache predictor array inner sharable + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ static inline void cp15_invalidate_icache_inner_sharable(void) { @@ -260,11 +431,24 @@ static inline void cp15_invalidate_icache_inner_sharable(void) "\tmov r0, #0\n" "\tmcr p15, 0, r0, c7, c1, 0\n" : - : "r" (ttb) + : : "r0", "memory" ); +} -/* Invalidate entire branch predictor array inner sharable */ +/************************************************************************************ + * Name: cp15_invalidate_btb_inner_sharable + * + * Description: + * Invalidate entire branch predictor array inner sharable + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ static inline void cp15_invalidate_btb_inner_sharable(void) { @@ -273,11 +457,24 @@ static inline void cp15_invalidate_btb_inner_sharable(void) "\tmov r0, #0\n" "\tmcr p15, 0, r0, c7, c1, 6\n" : - : "r" (ttb) + : : "r0", "memory" ); +} -/* Invalidate all instruction caches to PoU, also flushes branch target cache */ +/************************************************************************************ + * Name: cp15_invalidate_icache + * + * Description: + * Invalidate all instruction caches to PoU, also flushes branch target cache + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ static inline void cp15_invalidate_icache(void) { @@ -286,24 +483,49 @@ static inline void cp15_invalidate_icache(void) "\tmov r0, #0\n" "\tmcr p15, 0, r0, c7, c5, 0\n" : - : "r" (ttb) + : : "r0", "memory" ); +} -/* Invalidate instruction caches by VA to PoU */ +/************************************************************************************ + * Name: cp15_invalidate_icache_bymva + * + * Description: + * Invalidate instruction caches by VA 
to PoU + * + * Input Parameters: + * va - 32-bit value with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ -static inline void cp15_invalidate_icache_bymva(void) +static inline void cp15_invalidate_icache_bymva(unsigned int va) { __asm__ __volatile__ ( - "\tmov r0, #0\n" - "\tmcr p15, 0, r0, c7, c5, 1\n" + "\tmcr p15, 0, %0, c7, c5, 1\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (va) + : "memory" ); +} -/* Flush entire branch predictor array */ +/************************************************************************************ + * Name: cp15_flush_btb + * + * Description: + * Invalidate entire branch predictor array + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ static inline void cp15_flush_btb(void) { @@ -312,11 +534,24 @@ static inline void cp15_flush_btb(void) "\tmov r0, #0\n" "\tmcr p15, 0, r0, c7, c5, 6\n" : - : "r" (ttb) + : : "r0", "memory" ); +} -/* Flush branch predictor array entry by MVA */ +/************************************************************************************ + * Name: cp15_flush_btb_bymva + * + * Description: + * Invalidate branch predictor array entry by MVA + * + * Input Parameters: + * None + * + * Returned Value: + * None + * + ************************************************************************************/ static inline void cp15_flush_btb_bymva(void) { @@ -325,100 +560,191 @@ static inline void cp15_flush_btb_bymva(void) "\tmov r0, #0\n" "\tmcr p15, 0, r0, c7, c5, 7\n" : - : "r" (ttb) + : : "r0", "memory" ); +} + +/************************************************************************************ + * Name: cp15_invalidate_dcacheline_bymva + * + * Description: + * Invalidate data cache line by VA to PoC + * + * Input Parameters: + * va - 32-bit value with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ /* Invalidate data cache line by VA to PoC */ -static inline void cp15_invalidate_dcacheline_bymva(void) +static inline void cp15_invalidate_dcacheline_bymva(unsigned int va) { __asm__ __volatile__ ( - "\tmov r0, #0\n" - "\tmcr p15, 0, r0, c7, c6, 1\n" + "\tmcr p15, 0, %0, c7, c6, 1\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (va) + : "memory" ); +} + +/************************************************************************************ + * Name: cp15_invalidate_dcacheline_bysetway + * + * Description: + * Invalidate data cache line by set/way + * + * Input Parameters: + * setway - 32-bit value with Set/Way format + * + * Returned Value: + * None + * + ************************************************************************************/ /* Invalidate data cache line by set/way */ -static inline void cp15_invalidate_dcacheline_bysetway(void) +static inline void cp15_invalidate_dcacheline_bysetway(unsigned int setway) { __asm__ __volatile__ ( - "\tmov r0, #0\n" - "\tmcr p15, 0, r0, c7, c6, 2\n" + "\tmcr p15, 0, %0, c7, c6, 2\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (setway) + : "memory" ); +} + +/************************************************************************************ + * Name: cp15_clean_dcache_bymva + * + * Description: + * Clean data cache line by MVA + * + * Input Parameters: + * va - 32-bit value with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ /* Clean 
data cache line by MVA */ -static inline void cp15_clean_dcache_bymva(void) +static inline void cp15_clean_dcache_bymva(unsigned int va) { __asm__ __volatile__ ( - "\tmov r0, #0\n" - "\tmcr p15, 0, r0, c7, c10, 1\n" + "\tmcr p15, 0, %0, c7, c10, 1\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (va) + : "memory" ); +} -/* Clean data cache line by Set/way */ +/************************************************************************************ + * Name: cp15_clean_dcache_bysetway + * + * Description: + * Clean data cache line by Set/way + * + * Input Parameters: + * setway - 32-bit value with Set/Way format + * + * Returned Value: + * None + * + ************************************************************************************/ -static inline void cp15_clean_dcache_bysetway(void) +static inline void cp15_clean_dcache_bysetway(unsigned int setway) { __asm__ __volatile__ ( - "\tmov r0, #0\n" - "\tmcr p15, 0, r0, c7, c10, 2\n" + "\tmcr p15, 0, %0, c7, c10, 2\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (setway) + : "memory" ); +} -/* Clean unified cache line by MVA */ +/************************************************************************************ + * Name: cp15_clean_dcache_bymva + * + * Description: + * Clean unified cache line by MVA + * + * Input Parameters: + * setway - 32-bit value with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ -static inline void cp15_clean_dcache_bymva(void) +static inline void cp15_clean_dcache_bymva(unsigned int setway) { __asm__ __volatile__ ( - "\tmov r0, #0\n" - "\tmcr p15, 0, r0, c7, c11, 1\n" + "\tmcr p15, 0, %0, c7, c11, 1\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (setway) + : "memory" ); +} -/* Clean and invalidate data cache line by VA to PoC */ +/************************************************************************************ + * Name: cp15_cleaninvalidate_dcacheline_bymva + * + * Description: + * Clean and invalidate data cache line by VA to PoC + * + * Input Parameters: + * va - 32-bit value with VA format + * + * Returned Value: + * None + * + ************************************************************************************/ -static inline void cp15_cleaninvalidate_dcacheline_bymva(void) +static inline void cp15_cleaninvalidate_dcacheline_bymva(unsigned int va) { __asm__ __volatile__ ( - "\tmov r0, #0\n" "\tmcr p15, 0, r0, c7, c14, 1\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (va) + : "memory" ); +} -/* Clean and Incalidate data cache line by Set/Way */ +/************************************************************************************ + * Name: cp15_cleaninvalidate_dcacheline + * + * Description: + * Clean and Incalidate data cache line by Set/Way + * + * Input Parameters: + * setway - 32-bit value with Set/Way format + * + * Returned Value: + * None + * + ************************************************************************************/ -static inline void cp15_cleaninvalidate_dcacheline(void) +static inline void cp15_cleaninvalidate_dcacheline(unsigned int setway) { __asm__ __volatile__ ( - "\tmov r0, #0\n" - "\tmcr p15, 0, r0, c7, c14, 2\n" + "\tmcr p15, 0, %0, c7, c14, 2\n" : - : "r" (ttb) - : "r0", "memory" + : "r" (setway) + : "memory" ); +} #endif /* __ASSEMBLY__ */ @@ -426,10 +752,6 @@ static inline void cp15_cleaninvalidate_dcacheline(void) * Public Variables ****************************************************************************/ -/**************************************************************************** - * Public 
Function Prototypes - ****************************************************************************/ - #ifndef __ASSEMBLY__ #ifdef __cplusplus #define EXTERN extern "C" @@ -438,10 +760,86 @@ extern "C" { #define EXTERN extern #endif +/**************************************************************************** + * Public Function Prototypes + ****************************************************************************/ + +/**************************************************************************** + * Name: cp15_coherent_dcache_for_dma + * + * Description: + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + +void cp15_coherent_dcache_for_dma(uintptr_t start, uintptr_t end); + +/**************************************************************************** + * Name: cp15_invalidate_dcache_for_dma + * + * Description: + * Invalidate the data cache within the specified region; we will be + * performing a DMA operation in this region and we want to purge old data + * in the cache. + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + +void cp15_invalidate_dcache_for_dma(uintptr_t start, uintptr_t end); + +/**************************************************************************** + * Name: cp15_clean_dcache_for_dma + * + * Description: + * Clean the data cache within the specified region + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + +void cp15_clean_dcache_for_dma(uintptr_t start, uintptr_t end); + +/**************************************************************************** + * Name: cp15_flush_dcache_for_dma + * + * Description: + * Flush the data cache within the specified region + * + * Input Parameters: + * start - virtual start address of region + * end - virtual end address of region + * + * Returned Value: + * None + * + ****************************************************************************/ + +void cp15_flush_dcache_for_dma(uintptr_t start, uintptr_t end); + #undef EXTERN #ifdef __cplusplus } #endif #endif /* __ASSEMBLY__ */ -#endif /* __ARCH_ARM_SRC_ARMV7_A_CPSR_H */ +#endif /* __ARCH_ARM_SRC_ARMV7_A_CACHE_H */ diff --git a/arch/arm/src/armv7-a/mmu.h b/arch/arm/src/armv7-a/mmu.h index 47e0d7cd75..25d19016c3 100644 --- a/arch/arm/src/armv7-a/mmu.h +++ b/arch/arm/src/armv7-a/mmu.h @@ -238,11 +238,11 @@ nop .endm -/* The ARMv7-aA architecture supports two translation tables. This +/* The ARMv7-aA architecture supports two translation tables. This * implementation, however, uses only translation table 0. This - * functions clears the TTB control register (TTBCR), indicating that - * we are using TTB 0. This is it writes the value of the page table - * to Translation Table Base Register 0 (TTBR0). + * macro writes the address of the page table to the Translation + * Table Base Register 0 (TTBR0) . Then it clears the TTB control + * register (TTBCR), indicating that we are using TTB 0it. 
*/ .macro cp14_wrttb, ttb, scratch @@ -288,11 +288,11 @@ static inline void cp15_wrdacr(unsigned int dacr) ); } -/* The ARMv7-aA architecture supports two translation tables. This +/* The ARMv7-aA architecture supports two translation tables. This * implementation, however, uses only translation table 0. This - * functions clears the TTB control register (TTBCR), indicating that - * we are using TTB 0. This is it writes the value of the page table - * to Translation Table Base Register 0 (TTBR0). + * function writes the address of the page table to the Translation + * Table Base Register 0 (TTBR0). Then it clears the TTB control + * register (TTBCR), indicating that we are using TTBR0. */ static inline void cp14_wrttb(unsigned int ttb) diff --git a/arch/arm/src/lpc31xx/lpc31_memorymap.h b/arch/arm/src/lpc31xx/lpc31_memorymap.h index 797ad60200..6337f9e83f 100644 --- a/arch/arm/src/lpc31xx/lpc31_memorymap.h +++ b/arch/arm/src/lpc31xx/lpc31_memorymap.h @@ -154,7 +154,7 @@ /* Sizes of sections/regions. The boot logic in lpc31_boot.c, will select * 1Mb level 1 MMU mappings to span the entire physical address space. - * The definitiions below specify the number of 1Mb entries that are + * The definitions below specify the number of 1Mb entries that are * required to span a particular address region. */ diff --git a/arch/arm/src/sama5/Make.defs b/arch/arm/src/sama5/Make.defs index 7d6390a26f..a9657c3f55 100644 --- a/arch/arm/src/sama5/Make.defs +++ b/arch/arm/src/sama5/Make.defs @@ -35,8 +35,9 @@ HEAD_ASRC = arm_head.S -CMN_ASRCS = arm_vectors.S arm_vectortab.S arm_fullcontextrestore.S -CMN_ASRCS += arm_saveusercontext.S arm_vectoraddrexcptn.S arm_vfork.S +CMN_ASRCS = arm_vectors.S arm_vectortab.S arm_cache.S +CMN_ASRCS += arm_fullcontextrestore.S arm_saveusercontext.S +CMN_ASRCS += arm_vectoraddrexcptn.S arm_vfork.S CMN_CSRCS = up_initialize.c up_idle.c up_interruptcontext.c up_exit.c CMN_CSRCS += up_createstack.c up_releasestack.c up_usestack.c up_vfork.c diff --git a/arch/arm/src/sama5/chip/sama5d3x_memorymap.h b/arch/arm/src/sama5/chip/sama5d3x_memorymap.h index d55ba3110d..70bc3c28f1 100644 --- a/arch/arm/src/sama5/chip/sama5d3x_memorymap.h +++ b/arch/arm/src/sama5/chip/sama5d3x_memorymap.h @@ -173,7 +173,7 @@ /* Sizes of sections/regions. The boot logic in sam_boot.c, will select * 1Mb level 1 MMU mappings to span the entire physical address space. - * The definitiions below specify the number of 1Mb entries that are + * The definitions below specify the number of 1Mb entries that are * required to span a particular address region. */
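
Editorial illustration, not part of the patch: a minimal sketch of how a DMA-capable driver might call the new C-callable cache maintenance functions that this patch adds in arch/arm/src/armv7-a/arm_cache.S and declares in cache.h. The dma_txstart()/dma_rxstart() helpers and buffer handling are hypothetical placeholders; only the cp15_*_dcache_for_dma() calls come from this patch.

    #include <stdint.h>
    #include <stddef.h>

    #include "cache.h"   /* cp15_clean_dcache_for_dma(), cp15_invalidate_dcache_for_dma() */

    extern void dma_txstart(const uint8_t *buffer, size_t nbytes);  /* Hypothetical helper */
    extern void dma_rxstart(uint8_t *buffer, size_t nbytes);        /* Hypothetical helper */

    static void sample_dma_send(const uint8_t *txbuf, size_t nbytes)
    {
      uintptr_t start = (uintptr_t)txbuf;
      uintptr_t end   = start + nbytes;

      /* Clean (write back) any dirty D-cache lines covering the TX buffer so
       * that the DMA engine reads the data the CPU just wrote.
       */

      cp15_clean_dcache_for_dma(start, end);
      dma_txstart(txbuf, nbytes);
    }

    static void sample_dma_receive(uint8_t *rxbuf, size_t nbytes)
    {
      uintptr_t start = (uintptr_t)rxbuf;
      uintptr_t end   = start + nbytes;

      /* Purge (invalidate) old cached data over the RX buffer before the
       * peripheral writes it, so that later CPU reads fetch the DMA'd data
       * from memory rather than stale cache lines.
       */

      cp15_invalidate_dcache_for_dma(start, end);
      dma_rxstart(rxbuf, nbytes);
    }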
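
A second illustration, also not part of the patch: a C rendering of the per-line loop that the new assembly helpers perform internally, using the line-granular inline operations and CP15_L1_LINESIZE added to cache.h. In practice the C-callable cp15_invalidate_dcache_for_dma() shown above would normally be used instead; note that the assembly version additionally cleans-and-invalidates any partially covered lines at the region boundaries, which this sketch omits.

    #include <stdint.h>

    #include "cache.h"   /* CP15_L1_LINESIZE, cp15_invalidate_dcacheline_bymva() */

    static void sample_invalidate_dcache_range(uintptr_t start, uintptr_t end)
    {
      uintptr_t line;

      /* Align the start address down to a cache line boundary, then
       * invalidate one line at a time by MVA until the end of the region.
       */

      for (line = start & ~((uintptr_t)CP15_L1_LINESIZE - 1);
           line < end;
           line += CP15_L1_LINESIZE)
        {
          cp15_invalidate_dcacheline_bymva(line);
        }

      /* A DSB would normally follow so the invalidation completes before
       * the region is accessed (the assembly helper ends with dsb).
       */
    }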