diff --git a/binfmt/binfmt_execmodule.c b/binfmt/binfmt_execmodule.c
index 5b9f2fe694..e1813b7034 100644
--- a/binfmt/binfmt_execmodule.c
+++ b/binfmt/binfmt_execmodule.c
@@ -136,13 +136,13 @@ static void exec_swap(FAR struct tcb_s *ptcb, FAR struct tcb_s *chtcb)
   pndx = PIDHASH(ptcb->pid);
   chndx = PIDHASH(chtcb->pid);

-  DEBUGASSERT(nxsched_pidhash()[pndx]);
-  DEBUGASSERT(nxsched_pidhash()[chndx]);
+  DEBUGASSERT(g_pidhash[pndx]);
+  DEBUGASSERT(g_pidhash[chndx]);

-  /* Exchange nxsched_pidhash() index */
+  /* Exchange g_pidhash index */

-  nxsched_pidhash()[pndx] = chtcb;
-  nxsched_pidhash()[chndx] = ptcb;
+  g_pidhash[pndx] = chtcb;
+  g_pidhash[chndx] = ptcb;

   /* Exchange pid */
diff --git a/binfmt/libelf/libelf_coredump.c b/binfmt/libelf/libelf_coredump.c
index 4e3e85783a..725992a9ae 100644
--- a/binfmt/libelf/libelf_coredump.c
+++ b/binfmt/libelf/libelf_coredump.c
@@ -187,9 +187,9 @@ static int elf_get_ntcb(void)
   int count = 0;
   int i;

-  for (i = 0; i < nxsched_npidhash(); i++)
+  for (i = 0; i < g_npidhash; i++)
     {
-      if (nxsched_pidhash()[i] != NULL)
+      if (g_pidhash[i] != NULL)
         {
           count++;
         }
@@ -313,11 +313,11 @@ static void elf_emit_note(FAR struct elf_dumpinfo_s *cinfo)

   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i] != NULL)
+          if (g_pidhash[i] != NULL)
             {
-              elf_emit_tcb_note(cinfo, nxsched_pidhash()[i]);
+              elf_emit_tcb_note(cinfo, g_pidhash[i]);
             }
         }
     }
@@ -395,11 +395,11 @@ static void elf_emit_stack(FAR struct elf_dumpinfo_s *cinfo)

   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i] != NULL)
+          if (g_pidhash[i] != NULL)
             {
-              elf_emit_tcb_stack(cinfo, nxsched_pidhash()[i]);
+              elf_emit_tcb_stack(cinfo, g_pidhash[i]);
             }
         }
     }
@@ -520,11 +520,11 @@ static void elf_emit_phdr(FAR struct elf_dumpinfo_s *cinfo,

   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i] != NULL)
+          if (g_pidhash[i] != NULL)
             {
-              elf_emit_tcb_phdr(cinfo, nxsched_pidhash()[i], &phdr, &offset);
+              elf_emit_tcb_phdr(cinfo, g_pidhash[i], &phdr, &offset);
             }
         }
     }
diff --git a/drivers/segger/note_sysview.c b/drivers/segger/note_sysview.c
index fc0b00bd8b..24fe2f4c74 100644
--- a/drivers/segger/note_sysview.c
+++ b/drivers/segger/note_sysview.c
@@ -164,11 +164,11 @@ static void note_sysview_send_tasklist(void)
 {
   int i;

-  for (i = 0; i < nxsched_npidhash(); i++)
+  for (i = 0; i < g_npidhash; i++)
     {
-      if (nxsched_pidhash()[i] != NULL)
+      if (g_pidhash[i] != NULL)
         {
-          note_sysview_send_taskinfo(nxsched_pidhash()[i]);
+          note_sysview_send_taskinfo(g_pidhash[i]);
         }
     }
 }
diff --git a/fs/inode/fs_foreachinode.c b/fs/inode/fs_foreachinode.c
index 4fe1ef4e72..0db28289b5 100644
--- a/fs/inode/fs_foreachinode.c
+++ b/fs/inode/fs_foreachinode.c
@@ -187,7 +187,7 @@ int foreach_inode(foreach_inode_t handler, FAR void *arg)
   ret = inode_lock();
   if (ret >= 0)
     {
-      ret = foreach_inodelevel(inode_root()->i_child, info);
+      ret = foreach_inodelevel(g_root_inode->i_child, info);
       inode_unlock();
     }

@@ -211,7 +211,7 @@ int foreach_inode(foreach_inode_t handler, FAR void *arg)
   ret = inode_lock();
   if (ret >= 0)
     {
-      ret = foreach_inodelevel(inode_root()->i_child, &info);
+      ret = foreach_inodelevel(g_root_inode->i_child, &info);
       inode_unlock();
     }

diff --git a/fs/inode/fs_inodereserve.c b/fs/inode/fs_inodereserve.c
index babc5e8de7..6f6ce6a2e0 100644
--- a/fs/inode/fs_inodereserve.c
+++ b/fs/inode/fs_inodereserve.c
@@ -141,7 +141,7 @@ static void inode_insert(FAR struct inode *node,

 void inode_root_reserve(void)
 {
-  inode_root() = inode_alloc("", 0777);
+  g_root_inode = inode_alloc("", 0777);
 }

 /****************************************************************************
diff --git a/fs/inode/fs_inodesearch.c b/fs/inode/fs_inodesearch.c
index f3f718328a..0ae596b14d 100644
--- a/fs/inode/fs_inodesearch.c
+++ b/fs/inode/fs_inodesearch.c
@@ -214,7 +214,7 @@ static int _inode_linktarget(FAR struct inode *node,
 static int _inode_search(FAR struct inode_search_s *desc)
 {
   FAR const char *name;
-  FAR struct inode *node = inode_root();
+  FAR struct inode *node = g_root_inode;
   FAR struct inode *left = NULL;
   FAR struct inode *above = NULL;
   FAR const char *relpath = NULL;
diff --git a/fs/inode/inode.h b/fs/inode/inode.h
index c11d8f6d0e..3518676765 100644
--- a/fs/inode/inode.h
+++ b/fs/inode/inode.h
@@ -65,8 +65,6 @@
     } \
   while (0)

-#define inode_root() g_root_inode
-
 /****************************************************************************
  * Public Types
  ****************************************************************************/
diff --git a/include/nuttx/init.h b/include/nuttx/init.h
index ec461492b1..af3dce335f 100644
--- a/include/nuttx/init.h
+++ b/include/nuttx/init.h
@@ -38,14 +38,11 @@
  * initialization.
  */

-#define nxsched_set_initstate(s) g_nx_initstate = (s)
-#define nxsched_get_initstate()  g_nx_initstate
-
-#define OSINIT_MM_READY()        (nxsched_get_initstate() >= OSINIT_MEMORY)
-#define OSINIT_HW_READY()        (nxsched_get_initstate() >= OSINIT_HARDWARE)
-#define OSINIT_OS_READY()        (nxsched_get_initstate() >= OSINIT_OSREADY)
-#define OSINIT_IDLELOOP()        (nxsched_get_initstate() >= OSINIT_IDLELOOP)
-#define OSINIT_OS_INITIALIZING() (nxsched_get_initstate() < OSINIT_OSREADY)
+#define OSINIT_MM_READY()        (g_nx_initstate >= OSINIT_MEMORY)
+#define OSINIT_HW_READY()        (g_nx_initstate >= OSINIT_HARDWARE)
+#define OSINIT_OS_READY()        (g_nx_initstate >= OSINIT_OSREADY)
+#define OSINIT_IDLELOOP()        (g_nx_initstate >= OSINIT_IDLELOOP)
+#define OSINIT_OS_INITIALIZING() (g_nx_initstate < OSINIT_OSREADY)

 /****************************************************************************
  * Public Types
diff --git a/sched/init/nx_start.c b/sched/init/nx_start.c
index 26fd5400e0..1252ff9e6e 100644
--- a/sched/init/nx_start.c
+++ b/sched/init/nx_start.c
@@ -470,7 +470,7 @@ static void idle_group_initialize(void)
       tcb = &g_idletcb[i];
       hashndx = PIDHASH(i);

-      nxsched_pidhash()[hashndx] = &tcb->cmn;
+      g_pidhash[hashndx] = &tcb->cmn;

       /* Allocate the IDLE group */

@@ -539,7 +539,7 @@ void nx_start(void)

   /* Boot up is complete */

-  nxsched_set_initstate(OSINIT_BOOT);
+  g_nx_initstate = OSINIT_BOOT;

   /* Initialize RTOS Data ***************************************************/

@@ -555,7 +555,7 @@ void nx_start(void)

   /* Task lists are initialized */

-  nxsched_set_initstate(OSINIT_TASKLISTS);
+  g_nx_initstate = OSINIT_TASKLISTS;

   /* Initialize RTOS facilities *********************************************/

@@ -623,25 +623,24 @@ void nx_start(void)

   /* Initialize the logic that determine unique process IDs.
   */

-  nxsched_npidhash() = 4;
-  while (nxsched_npidhash() <= CONFIG_SMP_NCPUS)
+  g_npidhash = 4;
+  while (g_npidhash <= CONFIG_SMP_NCPUS)
     {
-      nxsched_npidhash() <<= 1;
+      g_npidhash <<= 1;
     }

-  nxsched_pidhash() =
-    kmm_zalloc(sizeof(*nxsched_pidhash()) * nxsched_npidhash());
-  DEBUGASSERT(nxsched_pidhash());
+  g_pidhash = kmm_zalloc(sizeof(*g_pidhash) * g_npidhash);
+  DEBUGASSERT(g_pidhash);

   /* IDLE Group Initialization **********************************************/

   idle_group_initialize();

-  nxsched_lastpid() = CONFIG_SMP_NCPUS - 1;
+  g_lastpid = CONFIG_SMP_NCPUS - 1;

   /* The memory manager is available */

-  nxsched_set_initstate(OSINIT_MEMORY);
+  g_nx_initstate = OSINIT_MEMORY;

   /* Initialize tasking data structures */
@@ -721,7 +720,7 @@ void nx_start(void)

   /* Hardware resources are now available */

-  nxsched_set_initstate(OSINIT_HARDWARE);
+  g_nx_initstate = OSINIT_HARDWARE;

   /* Setup for Multi-Tasking ************************************************/

@@ -767,7 +766,7 @@ void nx_start(void)

   /* The OS is fully initialized and we are beginning multi-tasking */

-  nxsched_set_initstate(OSINIT_OSREADY);
+  g_nx_initstate = OSINIT_OSREADY;

   /* Create initial tasks and bring-up the system */

@@ -775,7 +774,7 @@ void nx_start(void)

   /* Enter to idleloop */

-  nxsched_set_initstate(OSINIT_IDLELOOP);
+  g_nx_initstate = OSINIT_IDLELOOP;

   /* Let other threads have access to the memory manager */

diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index 49faca75df..2fc69db3a6 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -117,10 +117,6 @@
 #  define CRITMONITOR_PANIC(fmt, ...) _alert(fmt, ##__VA_ARGS__)
 #endif

-#define nxsched_pidhash()  g_pidhash
-#define nxsched_npidhash() g_npidhash
-#define nxsched_lastpid()  g_lastpid
-
 /****************************************************************************
  * Public Type Definitions
  ****************************************************************************/
diff --git a/sched/sched/sched_cpuload.c b/sched/sched/sched_cpuload.c
index 1eb7cb8906..6d46b387d1 100644
--- a/sched/sched/sched_cpuload.c
+++ b/sched/sched/sched_cpuload.c
@@ -111,12 +111,12 @@ void nxsched_process_taskload_ticks(FAR struct tcb_s *tcb, clock_t ticks)
        * total.
        */

-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i])
+          if (g_pidhash[i])
             {
-              nxsched_pidhash()[i]->ticks >>= 1;
-              total += nxsched_pidhash()[i]->ticks;
+              g_pidhash[i]->ticks >>= 1;
+              total += g_pidhash[i]->ticks;
             }
         }

@@ -209,11 +209,10 @@ int clock_cpuload(int pid, FAR struct cpuload_s *cpuload)
    * do this too, but this would require a little more overhead.
    */

-  if (nxsched_pidhash()[hash_index] &&
-      nxsched_pidhash()[hash_index]->pid == pid)
+  if (g_pidhash[hash_index] && g_pidhash[hash_index]->pid == pid)
     {
       cpuload->total  = g_cpuload_total;
-      cpuload->active = nxsched_pidhash()[hash_index]->ticks;
+      cpuload->active = g_pidhash[hash_index]->ticks;

       ret = OK;
     }
diff --git a/sched/sched/sched_foreach.c b/sched/sched/sched_foreach.c
index 5b54b2bb99..ed69e979ec 100644
--- a/sched/sched/sched_foreach.c
+++ b/sched/sched/sched_foreach.c
@@ -64,13 +64,13 @@ void nxsched_foreach(nxsched_foreach_t handler, FAR void *arg)
   flags = enter_critical_section();
   sched_lock();

-  for (ndx = 0; ndx < nxsched_npidhash(); ndx++)
+  for (ndx = 0; ndx < g_npidhash; ndx++)
     {
       /* This test and the function call must be atomic */

-      if (nxsched_pidhash()[ndx])
+      if (g_pidhash[ndx])
         {
-          handler(nxsched_pidhash()[ndx], arg);
+          handler(g_pidhash[ndx], arg);
         }
     }

diff --git a/sched/sched/sched_gettcb.c b/sched/sched/sched_gettcb.c
index 3030355dcf..323db70b8d 100644
--- a/sched/sched/sched_gettcb.c
+++ b/sched/sched/sched_gettcb.c
@@ -62,7 +62,7 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)
    * whether the PID is within range.
    */

-  if (nxsched_pidhash() != NULL && pid >= 0)
+  if (g_pidhash != NULL && pid >= 0)
     {
       /* The test and the return setup should be atomic.  This still does
        * not provide proper protection if the recipient of the TCB does not
@@ -76,12 +76,11 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)

       /* Verify that the correct TCB was found. */

-      if (nxsched_pidhash()[hash_ndx] != NULL &&
-          pid == nxsched_pidhash()[hash_ndx]->pid)
+      if (g_pidhash[hash_ndx] != NULL && pid == g_pidhash[hash_ndx]->pid)
         {
           /* Return the TCB associated with this pid (if any) */

-          ret = nxsched_pidhash()[hash_ndx];
+          ret = g_pidhash[hash_ndx];
         }
     }

diff --git a/sched/sched/sched_idletask.c b/sched/sched/sched_idletask.c
index 3c8edeccbd..a402a2b39a 100644
--- a/sched/sched/sched_idletask.c
+++ b/sched/sched/sched_idletask.c
@@ -62,7 +62,7 @@ bool sched_idletask(void)
    * have been initialized and, in that case, rtcb may be NULL.
    */

-  DEBUGASSERT(rtcb != NULL || nxsched_get_initstate() < OSINIT_TASKLISTS);
+  DEBUGASSERT(rtcb != NULL || g_nx_initstate < OSINIT_TASKLISTS);
   if (rtcb != NULL)
     {
       /* The IDLE task TCB is distinguishable by a few things:
diff --git a/sched/sched/sched_releasetcb.c b/sched/sched/sched_releasetcb.c
index e5745116fa..9c74bf1154 100644
--- a/sched/sched/sched_releasetcb.c
+++ b/sched/sched/sched_releasetcb.c
@@ -57,7 +57,7 @@ static void nxsched_releasepid(pid_t pid)
    * total for all threads.
    */

-  g_cpuload_total -= nxsched_pidhash()[hash_ndx]->ticks;
+  g_cpuload_total -= g_pidhash[hash_ndx]->ticks;
 #endif

   /* Make any pid associated with this hash available.  Note:
@@ -65,7 +65,7 @@ static void nxsched_releasepid(pid_t pid)
    * following action is atomic
    */

-  nxsched_pidhash()[hash_ndx] = NULL;
+  g_pidhash[hash_ndx] = NULL;

   leave_critical_section(flags);
 }
diff --git a/sched/sched/sched_verifytcb.c b/sched/sched/sched_verifytcb.c
index 1dbe79f1d4..01a6f19bd9 100644
--- a/sched/sched/sched_verifytcb.c
+++ b/sched/sched/sched_verifytcb.c
@@ -72,7 +72,7 @@ bool nxsched_verify_tcb(FAR struct tcb_s *tcb)
   bool valid;

   flags = enter_critical_section();
-  valid = tcb == nxsched_pidhash()[PIDHASH(tcb->pid)];
+  valid = tcb == g_pidhash[PIDHASH(tcb->pid)];
   leave_critical_section(flags);

   return valid;
diff --git a/sched/task/task_setup.c b/sched/task/task_setup.c
index 61a8b61564..d74200590e 100644
--- a/sched/task/task_setup.c
+++ b/sched/task/task_setup.c
@@ -88,12 +88,12 @@ static int nxtask_assign_pid(FAR struct tcb_s *tcb)
   int i;

   /* NOTE:
-   * ERROR means that the nxsched_pidhash()[] table is completely full.
+   * ERROR means that the g_pidhash[] table is completely full.
    * We cannot allow another task to be started.
    */

   /* Protect the following operation with a critical section
-   * because nxsched_pidhash() is accessed from an interrupt context
+   * because g_pidhash is accessed from an interrupt context
    */

   irqstate_t flags = enter_critical_section();
@@ -104,8 +104,8 @@ retry:

   /* Get the next process ID candidate */

-  next_pid = nxsched_lastpid() + 1;
-  for (i = 0; i < nxsched_npidhash(); i++)
+  next_pid = g_lastpid + 1;
+  for (i = 0; i < g_npidhash; i++)
     {
       /* Verify that the next_pid is in the valid range */

@@ -120,13 +120,13 @@ retry:

       /* Check if there is a (potential) duplicate of this pid */

-      if (!nxsched_pidhash()[hash_ndx])
+      if (!g_pidhash[hash_ndx])
         {
           /* Assign this PID to the task */

-          nxsched_pidhash()[hash_ndx] = tcb;
+          g_pidhash[hash_ndx] = tcb;
           tcb->pid = next_pid;
-          nxsched_lastpid() = next_pid;
+          g_lastpid = next_pid;

           leave_critical_section(flags);
           return OK;
@@ -135,35 +135,35 @@ retry:
       next_pid++;
     }

-  /* If we get here, then the nxsched_pidhash()[] table is completely full.
-   * We will alloc new space and copy original nxsched_pidhash() to it to
+  /* If we get here, then the g_pidhash[] table is completely full.
+   * We will alloc new space and copy original g_pidhash to it to
    * expand space.
    */

-  pidhash = kmm_zalloc(nxsched_npidhash() * 2 * sizeof(*pidhash));
+  pidhash = kmm_zalloc(g_npidhash * 2 * sizeof(*pidhash));
   if (pidhash == NULL)
     {
       leave_critical_section(flags);
       return -ENOMEM;
     }

-  nxsched_npidhash() *= 2;
+  g_npidhash *= 2;

   /* All original pid and hash_ndx are mismatch,
    * so we need to rebuild their relationship
    */

-  for (i = 0; i < nxsched_npidhash() / 2; i++)
+  for (i = 0; i < g_npidhash / 2; i++)
     {
-      hash_ndx = PIDHASH(nxsched_pidhash()[i]->pid);
+      hash_ndx = PIDHASH(g_pidhash[i]->pid);
       DEBUGASSERT(pidhash[hash_ndx] == NULL);
-      pidhash[hash_ndx] = nxsched_pidhash()[i];
+      pidhash[hash_ndx] = g_pidhash[i];
     }

   /* Release resource for original g_pidhash, using new g_pidhash */

-  temp = nxsched_pidhash();
-  nxsched_pidhash() = pidhash;
+  temp = g_pidhash;
+  g_pidhash = pidhash;
   kmm_free(temp);

   /* Let's try every allowable pid again */
diff --git a/sched/wqueue/kwork_cancel.c b/sched/wqueue/kwork_cancel.c
index 66d0822227..332d3fa90a 100644
--- a/sched/wqueue/kwork_cancel.c
+++ b/sched/wqueue/kwork_cancel.c
@@ -148,7 +148,7 @@ int work_cancel(int qid, FAR struct work_s *work)
     {
       /* Cancel high priority work */

-      return work_qcancel((FAR struct kwork_wqueue_s *)&hpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_hpwork,
                           -1, work);
     }
   else
@@ -158,7 +158,7 @@ int work_cancel(int qid, FAR struct work_s *work)
     {
       /* Cancel low priority work */

-      return work_qcancel((FAR struct kwork_wqueue_s *)&lpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_lpwork,
                           -1, work);
     }
   else
@@ -196,7 +196,7 @@ int work_cancel_sync(int qid, FAR struct work_s *work)
     {
       /* Cancel high priority work */

-      return work_qcancel((FAR struct kwork_wqueue_s *)&hpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_hpwork,
                           CONFIG_SCHED_HPNTHREADS, work);
     }
   else
@@ -206,7 +206,7 @@ int work_cancel_sync(int qid, FAR struct work_s *work)
     {
       /* Cancel low priority work */

-      return work_qcancel((FAR struct kwork_wqueue_s *)&lpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_lpwork,
                           CONFIG_SCHED_LPNTHREADS, work);
     }
   else
diff --git a/sched/wqueue/kwork_inherit.c b/sched/wqueue/kwork_inherit.c
index 920fcc094c..8dbc274271 100644
--- a/sched/wqueue/kwork_inherit.c
+++ b/sched/wqueue/kwork_inherit.c
@@ -224,7 +224,7 @@ void lpwork_boostpriority(uint8_t reqprio)

   for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
     {
-      lpwork_boostworker(lpwork().worker[wndx].pid, reqprio);
+      lpwork_boostworker(g_lpwork.worker[wndx].pid, reqprio);
     }

   leave_critical_section(flags);
@@ -268,7 +268,7 @@ void lpwork_restorepriority(uint8_t reqprio)

   for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
     {
-      lpwork_restoreworker(lpwork().worker[wndx].pid, reqprio);
+      lpwork_restoreworker(g_lpwork.worker[wndx].pid, reqprio);
     }

   leave_critical_section(flags);
diff --git a/sched/wqueue/kwork_queue.c b/sched/wqueue/kwork_queue.c
index 4b60398d32..f850cad0a8 100644
--- a/sched/wqueue/kwork_queue.c
+++ b/sched/wqueue/kwork_queue.c
@@ -67,7 +67,7 @@ static void hp_work_timer_expiry(wdparm_t arg)
 {
   irqstate_t flags = enter_critical_section();

-  queue_work(hpwork(), arg);
+  queue_work(g_hpwork, arg);
   leave_critical_section(flags);
 }
 #endif
@@ -80,7 +80,7 @@ static void hp_work_timer_expiry(wdparm_t arg)
 static void lp_work_timer_expiry(wdparm_t arg)
 {
   irqstate_t flags = enter_critical_section();
-  queue_work(lpwork(), arg);
+  queue_work(g_lpwork, arg);
   leave_critical_section(flags);
 }
 #endif
@@ -152,7 +152,7 @@ int work_queue(int qid, FAR struct work_s *work, worker_t worker,

       if (!delay)
         {
-          queue_work(hpwork(), work);
+          queue_work(g_hpwork, work);
         }
       else
         {
@@ -169,7 +169,7 @@ int work_queue(int qid, FAR struct work_s *work, worker_t worker,

       if (!delay)
         {
-          queue_work(lpwork(), work);
+          queue_work(g_lpwork, work);
         }
       else
         {
diff --git a/sched/wqueue/kwork_thread.c b/sched/wqueue/kwork_thread.c
index 879202c007..a0a9ea5406 100644
--- a/sched/wqueue/kwork_thread.c
+++ b/sched/wqueue/kwork_thread.c
@@ -305,7 +305,7 @@ void work_foreach(int qid, work_foreach_t handler, FAR void *arg)
 #ifdef CONFIG_SCHED_HPWORK
   if (qid == HPWORK)
     {
-      wqueue  = (FAR struct kwork_wqueue_s *)&hpwork();
+      wqueue  = (FAR struct kwork_wqueue_s *)&g_hpwork;
       nthread = CONFIG_SCHED_HPNTHREADS;
     }
   else
@@ -313,7 +313,7 @@ void work_foreach(int qid, work_foreach_t handler, FAR void *arg)
 #ifdef CONFIG_SCHED_LPWORK
   if (qid == LPWORK)
     {
-      wqueue  = (FAR struct kwork_wqueue_s *)&lpwork();
+      wqueue  = (FAR struct kwork_wqueue_s *)&g_lpwork;
       nthread = CONFIG_SCHED_LPNTHREADS;
     }
   else
@@ -352,7 +352,7 @@ int work_start_highpri(void)
   return work_thread_create(HPWORKNAME, CONFIG_SCHED_HPWORKPRIORITY,
                             CONFIG_SCHED_HPWORKSTACKSIZE,
                             CONFIG_SCHED_HPNTHREADS,
-                            (FAR struct kwork_wqueue_s *)&hpwork());
+                            (FAR struct kwork_wqueue_s *)&g_hpwork);
 }
 #endif /* CONFIG_SCHED_HPWORK */

@@ -380,7 +380,7 @@ int work_start_lowpri(void)
   return work_thread_create(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
                             CONFIG_SCHED_LPWORKSTACKSIZE,
                             CONFIG_SCHED_LPNTHREADS,
-                            (FAR struct kwork_wqueue_s *)&lpwork());
+                            (FAR struct kwork_wqueue_s *)&g_lpwork);
 }
 #endif /* CONFIG_SCHED_LPWORK */

diff --git a/sched/wqueue/wqueue.h b/sched/wqueue/wqueue.h
index 58b23f1112..66f2ed5a4c 100644
--- a/sched/wqueue/wqueue.h
+++ b/sched/wqueue/wqueue.h
@@ -105,16 +105,12 @@ struct lp_wqueue_s

 #ifdef CONFIG_SCHED_HPWORK
 /* The state of the kernel mode, high priority work queue. */

-#define hpwork() g_hpwork
-
 extern struct hp_wqueue_s g_hpwork;
 #endif

 #ifdef CONFIG_SCHED_LPWORK
 /* The state of the kernel mode, low priority work queue(s). */

-#define lpwork() g_lpwork
-
 extern struct lp_wqueue_s g_lpwork;
 #endif
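
Most of the hunks above touch the same data structure: a power-of-two sized array `g_pidhash` of TCB pointers indexed by `PIDHASH(pid)`, with `g_npidhash` holding the current table size and `g_lastpid` the last pid handed out. The sketch below is an editor's illustration of that pattern, not the NuttX code itself: it assumes `PIDHASH()` masks the pid with `g_npidhash - 1`, swaps `kmm_zalloc()`/`kmm_free()` for `calloc()`/`free()`, and reduces `struct tcb_s` to a bare pid, but it follows the same grow-and-rehash sequence that `nxtask_assign_pid()` in `sched/task/task_setup.c` performs once every slot is occupied.

```c
/* Illustrative sketch only -- simplified from the pattern in the patch.
 * Assumptions: PIDHASH() masks the pid with the power-of-two table size,
 * and calloc()/free() stand in for kmm_zalloc()/kmm_free().
 */

#include <stdio.h>
#include <stdlib.h>

struct tcb_s
{
  int pid;                         /* Only the field this sketch needs */
};

static struct tcb_s **g_pidhash;   /* One slot per hash value */
static int g_npidhash;             /* Current table size (power of two) */
static int g_lastpid;              /* Last pid that was assigned */

#define PIDHASH(pid) ((pid) & (g_npidhash - 1))

/* Double the table and rehash every entry, as nxtask_assign_pid() does
 * when no free slot is left.  The kernel path skips the NULL check
 * because it only expands a completely full table.
 */

static int pidhash_expand(void)
{
  struct tcb_s **newhash = calloc(g_npidhash * 2, sizeof(*newhash));
  int i;

  if (newhash == NULL)
    {
      return -1;
    }

  g_npidhash *= 2;

  for (i = 0; i < g_npidhash / 2; i++)
    {
      if (g_pidhash[i] != NULL)
        {
          newhash[PIDHASH(g_pidhash[i]->pid)] = g_pidhash[i];
        }
    }

  free(g_pidhash);
  g_pidhash = newhash;
  return 0;
}

int main(void)
{
  static struct tcb_s tasks[4];
  int i;

  /* Mirror nx_start(): start with a small power-of-two table */

  g_npidhash = 4;
  g_pidhash  = calloc(g_npidhash, sizeof(*g_pidhash));

  for (i = 0; i < 4; i++)
    {
      tasks[i].pid = i;
      g_pidhash[PIDHASH(i)] = &tasks[i];
      g_lastpid = i;
    }

  /* Table is full; grow it and confirm the pid -> slot mapping survives */

  pidhash_expand();
  printf("g_npidhash=%d, pid %d is in slot %d\n",
         g_npidhash, g_lastpid, PIDHASH(g_lastpid));

  return 0;
}
```

The patch does not change this behaviour; it only spells the globals out directly instead of going through the removed one-line accessor macros, which is why the header hunks in `sched/sched/sched.h`, `fs/inode/inode.h`, `include/nuttx/init.h`, and `sched/wqueue/wqueue.h` are pure deletions.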