diff --git a/include/nuttx/init.h b/include/nuttx/init.h index cc1f4f72a9..54eb9033d0 100644 --- a/include/nuttx/init.h +++ b/include/nuttx/init.h @@ -65,21 +65,22 @@ enum os_initstate_e { - OSINIT_POWERUP = 0, /* Power-up. No initialization yet performed. - * Depends on .bss initialization logic for this - * value. */ - OSINIT_BOOT = 1, /* Basic boot up initialization is complete. OS - * services and hardware resources are not yet - * available. */ - OSINIT_MEMORY = 2, /* The memory manager has been initialized */ - OSINIT_HARDWARE = 3, /* MCU-specific hardware is initialized. Hardware - * resources such as timers and device drivers are - * now avaiable. Low-level OS services sufficient - * to support the hardware are also available but - * the OS has not yet completed its full - * initialization. */ - OSINIT_OSREADY = 4 /* The OS is fully initialized and multi-tasking is - * active. */ + OSINIT_POWERUP = 0, /* Power-up. No initialization yet performed. + * Depends on .bss initialization logic for this + * value. */ + OSINIT_BOOT = 1, /* Basic boot up initialization is complete. OS + * services and hardware resources are not yet + * available. */ + OSINIT_TASKLISTS = 2, /* Head of ready-to-run/assigned task lists valid */ + OSINIT_MEMORY = 3, /* The memory manager has been initialized */ + OSINIT_HARDWARE = 4, /* MCU-specific hardware is initialized. Hardware + * resources such as timers and device drivers are + * now available. Low-level OS services sufficient + * to support the hardware are also available but + * the OS has not yet completed its full + * initialization. */ + OSINIT_OSREADY = 5 /* The OS is fully initialized and multi-tasking is + * active. 
*/ }; /**************************************************************************** diff --git a/sched/init/os_start.c b/sched/init/os_start.c index a8456c008a..5a50c1b2fc 100644 --- a/sched/init/os_start.c +++ b/sched/init/os_start.c @@ -530,6 +530,10 @@ void os_start(void) up_initial_state(&g_idletcb[cpu].cmn); } + /* Task lists are initialized */ + + g_os_initstate = OSINIT_TASKLISTS; + /* Initialize RTOS facilities *********************************************/ /* Initialize the semaphore facility. This has to be done very early * because many subsystems depend upon fully functional semaphores. diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c index 573e193039..4cc2412ae7 100644 --- a/sched/irq/irq_csection.c +++ b/sched/irq/irq_csection.c @@ -41,6 +41,7 @@ #include +#include #include #include #include @@ -86,54 +87,50 @@ irqstate_t enter_critical_section(void) { FAR struct tcb_s *rtcb; - /* Do nothing if called from an interrupt handler */ + /* Check if we were called from an interrupt handler and that the task + * lists have been initialized. + */ - if (up_interrupt_context()) + if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS) { - /* The value returned does not matter. We assume only that it is a - * scalar here. - */ + /* Do we already have interrupts disabled? */ - return (irqstate_t)0; - } + rtcb = this_task(); + DEBUGASSERT(rtcb != NULL); - /* Do we already have interrupts disabled? */ + if (rtcb->irqcount > 0) + { + /* Yes... make sure that the spinlock is set and increment the IRQ + * lock count. + */ - rtcb = this_task(); + DEBUGASSERT(rtcb != NULL); + DEBUGASSERT(g_cpu_irqlock == SP_LOCKED && rtcb->irqcount < INT16_MAX); + rtcb->irqcount++; + } + else + { + /* NO.. Take the spinlock to get exclusive access and set the lock + * count to 1. + * + * We must avoid that case where a context switch occurs between taking the + * g_cpu_irqlock and disabling interrupts. Also interrupt disables + * must follow a stacked order. 
We cannot other context switches to + * re-order the enabling/disabling of interrupts. + * + * The scheduler accomplishes this by treating the irqcount like + * lockcount: Both will disable pre-emption. + */ - if (rtcb->irqcount > 0) - { - /* Yes... make sure that the spinlock is set and increment the IRQ - * lock count. - */ - - DEBUGASSERT(g_cpu_irqlock == SP_LOCKED && rtcb->irqcount < INT16_MAX); - rtcb->irqcount++; - } - else - { - /* NO.. Take the spinlock to get exclusive access and set the lock - * count to 1. - * - * We must avoid that case where a context occurs between taking the - * g_cpu_irqlock and disabling interrupts. Also interrupts disables - * must follow a stacked order. We cannot other context switches to - * re-order the enabling/disabling of interrupts. - * - * The scheduler accomplishes this by treating the irqcount like - * lockcount: Both will disable pre-emption. - */ - - spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock, - &g_cpu_irqlock); - rtcb->irqcount = 1; + spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock, + &g_cpu_irqlock); + rtcb->irqcount = 1; #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION - /* Note that we have entered the critical section */ + /* Note that we have entered the critical section */ - sched_note_csection(rtcb, true); + sched_note_csection(rtcb, true); #endif + } } /* Then disable interrupts (they may already be disabled, be we need to @@ -145,9 +142,11 @@ irqstate_t enter_critical_section(void) #else /* defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) */ irqstate_t enter_critical_section(void) { - /* Check if we were called from an interrupt handler */ + /* Check if we were called from an interrupt handler and that the tasks + * lists have been initialized. 
+ */ - if (!up_interrupt_context()) + if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS) { FAR struct tcb_s *rtcb = this_task(); DEBUGASSERT(rtcb != NULL); @@ -175,9 +174,11 @@ irqstate_t enter_critical_section(void) #ifdef CONFIG_SMP void leave_critical_section(irqstate_t flags) { - /* Do nothing if called from an interrupt handler */ + /* Check if we were called from an interrupt handler and that the tasks + * lists have been initialized. + */ - if (!up_interrupt_context()) + if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS) { FAR struct tcb_s *rtcb = this_task(); DEBUGASSERT(rtcb != 0 && rtcb->irqcount > 0); @@ -229,20 +230,22 @@ void leave_critical_section(irqstate_t flags) } } } - - /* Restore the previous interrupt state which may still be interrupts - * disabled (but we don't have a mechanism to verify that now) - */ - - up_irq_restore(flags); } + + /* Restore the previous interrupt state which may still be interrupts + * disabled (but we don't have a mechanism to verify that now) + */ + + up_irq_restore(flags); } #else /* defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) */ void leave_critical_section(irqstate_t flags) { - /* Check if we were called from an interrupt handler */ + /* Check if we were called from an interrupt handler and that the tasks + * lists have been initialized. + */ - if (!up_interrupt_context()) + if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS) { FAR struct tcb_s *rtcb = this_task(); DEBUGASSERT(rtcb != NULL);