diff --git a/arch/arm/src/armv7-a/arm_cpustart.c b/arch/arm/src/armv7-a/arm_cpustart.c
index 0cb0981f46..00addb818d 100644
--- a/arch/arm/src/armv7-a/arm_cpustart.c
+++ b/arch/arm/src/armv7-a/arm_cpustart.c
@@ -73,7 +73,7 @@
 
 int arm_start_handler(int irq, FAR void *context)
 {
-  FAR struct tcb_s *tcb = this_task();
+  FAR struct tcb_s *tcb;
 
   /* Invalidate CPUn L1 so that is will be reloaded from coherent L2. */
 
@@ -81,6 +81,7 @@ int arm_start_handler(int irq, FAR void *context)
 
   /* Reset scheduler parameters */
 
+  tcb = this_task();
   sched_resume_scheduler(tcb);
 
   /* Then switch contexts.  This instantiates the exception context of the
diff --git a/arch/arm/src/imx6/imx_cpuboot.c b/arch/arm/src/imx6/imx_cpuboot.c
index cc8494782e..5aca8d33bb 100644
--- a/arch/arm/src/imx6/imx_cpuboot.c
+++ b/arch/arm/src/imx6/imx_cpuboot.c
@@ -51,6 +51,7 @@
 #include "chip/imx_src.h"
 #include "sctlr.h"
 #include "smp.h"
+#include "fpu.h"
 #include "gic.h"
 
 #ifdef CONFIG_SMP
@@ -258,6 +259,12 @@ void imx_cpu_enable(void)
 
 void arm_cpu_boot(int cpu)
 {
+#ifdef CONFIG_ARCH_FPU
+  /* Initialize the FPU */
+
+  arm_fpuconfig();
+#endif
+
   /* Initialize the Generic Interrupt Controller (GIC) for CPUn (n != 0) */
 
   arm_gic_initialize();
diff --git a/configs/sabre-6quad/README.txt b/configs/sabre-6quad/README.txt
index f80ebce643..5b2c57d91f 100644
--- a/configs/sabre-6quad/README.txt
+++ b/configs/sabre-6quad/README.txt
@@ -468,12 +468,41 @@ Open Issues:
    CPU (which may not be CPU0).  Perhaps that should be a spinlock to prohibit
    execution of interrupts on CPU0 when other CPUs are in a critical section?
 
-2. Cache Concurency.  This is a difficult problem.  There is logic in place now to
+2. Cache Concurrency.  This is a complex problem.  There is logic in place now to
    clean CPU0 D-cache before starting a new CPU and for invalidating the D-Cache
-   when the new CPU is started.
+   when the new CPU is started.  REVISIT: It seems that this should not be
+   necessary.  If the Shareable bit is set in the MMU mappings, my understanding
+   is that this should keep cache coherency, at least within a cluster.  I need
+   to study more how the inner and outer shareable attributes work to control
+   caching.
 
-   But there are many, many more cache coherency issues.  This could, in face, be
-   a showstopping issue.
+   But there may be many, many more such cache coherency issues if I cannot find
+   a systematic way to manage cache coherency.
+
+   http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dht0008a/CJABEHDA.html
+   http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/CEGDBEJE.html
+
+   Try:
+
+   --- mmu.h.orig  2016-05-20 13:09:34.773462000 -0600
+   +++ mmu.h       2016-05-20 13:03:13.261978100 -0600
+   @@ -572,8 +572,14 @@
+
+    #define MMU_ROMFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_R1 | PMD_CACHEABLE | \
+                          PMD_SECT_DOM(0))
+   -#define MMU_MEMFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \
+   +#ifdef CONFIG_SMP
+   +#  define MMU_MEMFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \
+   +                        PMD_SECT_S | PMD_SECT_DOM(0))
+   +#else
+   +#  define MMU_MEMFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \
+                           PMD_SECT_DOM(0))
+   +#endif
+    #define MMU_IOFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_DEVICE | \
+                         PMD_SECT_DOM(0) | PMD_SECT_XN)
+    #define MMU_STRONGLY_ORDERED (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | \
+
+3. Assertions.  On a fatal assertion, other CPUs need to be stopped.
 
 Configurations
 ==============
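
For reference, the standalone sketch below illustrates what the proposed
MMU_MEMFLAGS change in the README hunk above computes.  The PMD_* encodings
are assumed values for the ARMv7-A short-descriptor L1 section format,
written out here only for illustration (the authoritative definitions live
in arch/arm/src/armv7-a/mmu.h), and the 1MB base address is hypothetical:

/* Sketch only: ARMv7-A short-descriptor section attribute bits, assumed
 * here for illustration.  The real definitions are in armv7-a/mmu.h.
 */

#include <stdint.h>
#include <stdio.h>

#define PMD_TYPE_SECT    (2 << 0)     /* Bits[1:0] = 10: section entry */
#define PMD_SECT_B       (1 << 2)     /* Bufferable */
#define PMD_SECT_C       (1 << 3)     /* Cacheable */
#define PMD_SECT_DOM(n)  ((n) << 5)   /* Domain, bits[8:5] */
#define PMD_SECT_AP_RW1  (1 << 10)    /* AP[1:0] = 01: PL1 read/write */
#define PMD_SECT_S       (1 << 16)    /* Shareable */

#define PMD_CACHEABLE    (PMD_SECT_B | PMD_SECT_C)

/* With CONFIG_SMP, normal memory sections are also marked Shareable so
 * that the hardware can keep the per-CPU L1 D-caches coherent.
 */

#ifdef CONFIG_SMP
#  define MMU_MEMFLAGS   (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \
                          PMD_SECT_S | PMD_SECT_DOM(0))
#else
#  define MMU_MEMFLAGS   (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \
                          PMD_SECT_DOM(0))
#endif

int main(void)
{
  /* An L1 section descriptor maps one 1MB region: attributes OR'ed with
   * the (1MB-aligned) physical base address of the region.
   */

  uint32_t pbase = 0x10000000;        /* Hypothetical 1MB-aligned base */
  uint32_t desc  = MMU_MEMFLAGS | pbase;

  printf("L1 section descriptor: 0x%08x\n", (unsigned int)desc);
  return 0;
}

With CONFIG_SMP defined, the only difference is the PMD_SECT_S bit (bit 16),
which marks the section Shareable so that the snoop hardware (the SCU on the
Cortex-A9) can keep the per-CPU L1 data caches coherent within the cluster,
as the README entry above anticipates.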