libc: Move pthread_cleanup to user space

Signed-off-by: Huang Qi <huangqi3@xiaomi.com>
This commit is contained in:
Huang Qi 2021-04-30 18:12:09 +08:00 committed by patacongo
parent 103925b2b7
commit f1a92e9cbb
12 changed files with 55 additions and 162 deletions

View File

@ -168,21 +168,24 @@ int nx_pthread_create(pthread_trampoline_t trampoline, FAR pthread_t *thread,
void nx_pthread_exit(FAR void *exit_value) noreturn_function;
/****************************************************************************
* Name: pthread_cleanup_poplist
* Name: pthread_cleanup_popall
*
* Description:
* The pthread_cleanup_poplist() is a function that will pop all clean-up
* functions. This function is only called from within the pthread_exit()
* The pthread_cleanup_popall() is an internal function that will pop and
* execute all clean-up functions. This function is only called from
* within the pthread_exit() and pthread_cancellation() logic
*
* Input Parameters:
* cleanup - The array of struct pthread_cleanup_s to fetch callbacks
* tcb - The TCB of the pthread that is exiting or being canceled.
*
* Returned Value:
* The index to the next available entry at the top of the stack
* None
*
****************************************************************************/
int pthread_cleanup_poplist(FAR struct pthread_cleanup_s *cleanup);
#ifdef CONFIG_PTHREAD_CLEANUP
void pthread_cleanup_popall(void);
#endif
#undef EXTERN
#ifdef __cplusplus

View File

@ -690,17 +690,6 @@ struct tcb_s
FAR struct pthread_mutex_s *mhead; /* List of mutexes held by thread */
#endif
/* Clean-up stack *************************************************************/
#ifdef CONFIG_PTHREAD_CLEANUP
/* tos - The index to the next available entry at the top of the stack.
* stack - The pre-allocated clean-up stack memory.
*/
uint8_t tos;
struct pthread_cleanup_s stack[CONFIG_PTHREAD_CLEANUP_STACKSIZE];
#endif
/* Pre-emption monitor support ************************************************/
#ifdef CONFIG_SCHED_CRITMONITOR

View File

@ -27,6 +27,7 @@
#include <nuttx/config.h>
#include <nuttx/sched.h>
#include <nuttx/lib/getopt.h>
/****************************************************************************
@ -98,6 +99,16 @@ struct tls_info_s
#if CONFIG_TLS_NELEM > 0
uintptr_t tl_elem[CONFIG_TLS_NELEM]; /* TLS elements */
#endif
#ifdef CONFIG_PTHREAD_CLEANUP
/* tos - The index to the next available entry at the top of the stack.
* stack - The pre-allocated clean-up stack memory.
*/
uint8_t tos;
struct pthread_cleanup_s stack[CONFIG_PTHREAD_CLEANUP_STACKSIZE];
#endif
int tl_errno; /* Per-thread error number */
};

View File

@ -327,11 +327,6 @@ SYSCALL_LOOKUP(telldir, 1)
SYSCALL_LOOKUP(pthread_cond_clockwait, 4)
SYSCALL_LOOKUP(pthread_kill, 2)
SYSCALL_LOOKUP(pthread_sigmask, 3)
#ifdef CONFIG_PTHREAD_CLEANUP
SYSCALL_LOOKUP(pthread_cleanup_push, 2)
SYSCALL_LOOKUP(pthread_cleanup_pop, 1)
SYSCALL_LOOKUP(pthread_cleanup_poplist, 1)
#endif
#endif
/* The following are defined only if message queues are enabled */

View File

@ -61,6 +61,10 @@ ifeq ($(CONFIG_PTHREAD_SPINLOCKS),y)
CSRCS += pthread_spinlock.c
endif
ifeq ($(CONFIG_PTHREAD_CLEANUP),y)
CSRCS += pthread_cleanup.c
endif
endif # CONFIG_DISABLE_PTHREAD
# Add the pthread directory to the build

View File

@ -1,5 +1,5 @@
/****************************************************************************
* sched/pthread/pthread_cleanup.c
* libs/libc/pthread/pthread_cleanup.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -27,10 +27,11 @@
#include <pthread.h>
#include <sched.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "pthread/pthread.h"
#include <nuttx/tls.h>
#include <nuttx/pthread.h>
#include <arch/tls.h>
#ifdef CONFIG_PTHREAD_CLEANUP
@ -39,7 +40,7 @@
****************************************************************************/
/****************************************************************************
* Name: pthread_cleanup_pop_tcb
* Name: pthread_cleanup_pop_tls
*
* Description:
* The pthread_cleanup_pop_tls() function will remove the routine at the
@ -57,15 +58,15 @@
*
****************************************************************************/
static void pthread_cleanup_pop_tcb(FAR struct tcb_s *tcb, int execute)
static void pthread_cleanup_pop_tls(FAR struct tls_info_s *tls, int execute)
{
if (tcb->tos > 0)
if (tls->tos > 0)
{
unsigned int ndx;
/* Get the index to the last cleaner function pushed onto the stack */
ndx = tcb->tos - 1;
ndx = tls->tos - 1;
DEBUGASSERT(ndx >= 0 && ndx < CONFIG_PTHREAD_CLEANUP_STACKSIZE);
/* Should we execute the cleanup routine at the top of the stack? */
@ -81,11 +82,11 @@ static void pthread_cleanup_pop_tcb(FAR struct tcb_s *tcb, int execute)
* mode! See also on_exit() and atexit() callbacks.
*/
cb = &tcb->stack[ndx];
cb = &tls->stack[ndx];
cb->pc_cleaner(cb->pc_arg);
}
tcb->tos = ndx;
tls->tos = ndx;
}
}
@ -123,9 +124,9 @@ static void pthread_cleanup_pop_tcb(FAR struct tcb_s *tcb, int execute)
void pthread_cleanup_pop(int execute)
{
FAR struct tcb_s *tcb = this_task();
FAR struct tls_info_s *tls = up_tls_info();
DEBUGASSERT(tcb != NULL);
DEBUGASSERT(tls != NULL);
/* sched_lock() should provide sufficient protection. We only need to
* have this TCB stationary; the pthread cleanup stack should never be
@ -133,20 +134,16 @@ void pthread_cleanup_pop(int execute)
*/
sched_lock();
if ((tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_KERNEL)
{
pthread_cleanup_pop_tcb(tcb, execute);
}
pthread_cleanup_pop_tls(tls, execute);
sched_unlock();
}
void pthread_cleanup_push(pthread_cleanup_t routine, FAR void *arg)
{
FAR struct tcb_s *tcb = this_task();
FAR struct tls_info_s *tls = up_tls_info();
DEBUGASSERT(tcb != NULL);
DEBUGASSERT(tcb->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE);
DEBUGASSERT(tls != NULL);
DEBUGASSERT(tls->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE);
/* sched_lock() should provide sufficient protection. We only need to
* have this TCB stationary; the pthread cleanup stack should never be
@ -154,14 +151,13 @@ void pthread_cleanup_push(pthread_cleanup_t routine, FAR void *arg)
*/
sched_lock();
if ((tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_KERNEL &&
tcb->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE)
if (tls->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE)
{
unsigned int ndx = tcb->tos;
unsigned int ndx = tls->tos;
tcb->tos++;
tcb->stack[ndx].pc_cleaner = routine;
tcb->stack[ndx].pc_arg = arg;
tls->tos++;
tls->stack[ndx].pc_cleaner = routine;
tls->stack[ndx].pc_arg = arg;
}
sched_unlock();
@ -176,89 +172,26 @@ void pthread_cleanup_push(pthread_cleanup_t routine, FAR void *arg)
* within the pthread_exit() and pthread_cancellation() logic
*
* Input Parameters:
* tcb - The TCB of the pthread that is exiting or being canceled.
* None
*
* Returned Value:
* None
*
****************************************************************************/
void pthread_cleanup_popall(FAR struct tcb_s *tcb)
void pthread_cleanup_popall(void)
{
DEBUGASSERT(tcb != NULL);
FAR struct tls_info_s *tls = up_tls_info();
/* Kernel threads do not support pthread APIs */
DEBUGASSERT(tls != NULL);
if ((tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_KERNEL)
sched_lock();
while (tls->tos > 0)
{
/* Pop and execute each cleanup routine.
*
* sched_lock() should provide sufficient protection. We only need to
* have this TCB stationary; the pthread cleanup stack should never be
* modified by interrupt level logic.
*/
sched_lock();
while (tcb->tos > 0)
{
pthread_cleanup_pop_tcb(tcb, 1);
}
sched_unlock();
}
}
/****************************************************************************
* Name: pthread_cleanup_poplist
*
* Description:
* The pthread_cleanup_poplist() is a function that will pop all clean-up
* functions. This function is only called from within the pthread_exit()
*
* Input Parameters:
* cleanup - The array of struct pthread_cleanup_s to fetch callbacks
*
* Returned Value:
* The index to the next available entry at the top of the stack
*
****************************************************************************/
int pthread_cleanup_poplist(FAR struct pthread_cleanup_s *cleanup)
{
uint8_t tos = 0;
uint8_t ndx = 0;
FAR struct tcb_s *tcb = this_task();
DEBUGASSERT(cleanup != NULL);
DEBUGASSERT(tcb != NULL);
DEBUGASSERT(tcb->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE);
tos = tcb->tos;
/* Kernel threads do not support pthread APIs */
if ((tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_KERNEL)
{
/* Pop cleanup routine list
*
* sched_lock() should provide sufficient protection. We only need to
* have this TCB stationary; the pthread cleanup stack should never be
* modified by interrupt level logic.
*/
sched_lock();
while (tcb->tos > 0)
{
ndx = tcb->tos - 1;
DEBUGASSERT(ndx >= 0 && ndx < CONFIG_PTHREAD_CLEANUP_STACKSIZE);
cleanup[ndx] = tcb->stack[ndx];
tcb->tos = ndx;
}
sched_unlock();
pthread_cleanup_pop_tls(tls, 1);
}
return tos;
sched_unlock();
}
#endif /* CONFIG_PTHREAD_CLEANUP */

View File

@ -52,19 +52,7 @@
void pthread_exit(FAR void *exit_value)
{
#ifdef CONFIG_PTHREAD_CLEANUP
int cnt;
struct pthread_cleanup_s cleanup[CONFIG_PTHREAD_CLEANUP_STACKSIZE];
cnt = pthread_cleanup_poplist(cleanup);
sched_lock();
while (cnt-- > 0)
{
struct pthread_cleanup_s cp = cleanup[cnt];
if (cp.pc_cleaner)
cp.pc_cleaner(cp.pc_arg);
}
sched_unlock();
pthread_cleanup_popall();
#endif
nx_pthread_exit(exit_value);

View File

@ -38,10 +38,6 @@ ifeq ($(CONFIG_SMP),y)
CSRCS += pthread_setaffinity.c pthread_getaffinity.c
endif
ifeq ($(CONFIG_PTHREAD_CLEANUP),y)
CSRCS += pthread_cleanup.c
endif
# Include pthread build support
DEPPATH += --dep-path pthread

View File

@ -81,10 +81,6 @@ struct task_group_s; /* Forward reference */
int pthread_setup_scheduler(FAR struct pthread_tcb_s *tcb, int priority,
start_t start, pthread_startroutine_t entry);
#ifdef CONFIG_PTHREAD_CLEANUP
void pthread_cleanup_popall(FAR struct tcb_s *tcb);
#endif
int pthread_completejoin(pid_t pid, FAR void *exit_value);
void pthread_destroyjoin(FAR struct task_group_s *group,
FAR struct join_s *pjoin);

View File

@ -85,19 +85,6 @@ int pthread_cancel(pthread_t thread)
pthread_exit(PTHREAD_CANCELED);
}
#ifdef CONFIG_PTHREAD_CLEANUP
/* Perform any stack pthread clean-up callbacks.
*
* REVISIT: In this case, the clean-up callback will execute on the
* thread of the caller of pthread cancel, not on the thread of
* the thread-to-be-canceled. This is a problem when deferred
* cancellation is not supported because, for example, the clean-up
* function will be unable to unlock its own mutexes.
*/
pthread_cleanup_popall(tcb);
#endif
/* Complete pending join operations */
pthread_completejoin((pid_t)thread, PTHREAD_CANCELED);

View File

@ -87,12 +87,6 @@ void exit(int status)
group_kill_children(tcb);
#endif
#ifdef CONFIG_PTHREAD_CLEANUP
/* Perform any stack pthread clean-up callbacks */
pthread_cleanup_popall(tcb);
#endif
#if !defined(CONFIG_DISABLE_PTHREAD) && !defined(CONFIG_PTHREAD_MUTEX_UNSAFE)
/* Recover any mutexes still held by the canceled thread */

View File

@ -83,9 +83,6 @@
"pread","unistd.h","","ssize_t","int","FAR void *","size_t","off_t"
"pselect","sys/select.h","","int","int","FAR fd_set *","FAR fd_set *","FAR fd_set *","FAR const struct timespec *","FAR const sigset_t *"
"pthread_cancel","pthread.h","!defined(CONFIG_DISABLE_PTHREAD)","int","pthread_t"
"pthread_cleanup_pop","pthread.h","defined(CONFIG_PTHREAD_CLEANUP)","void","int"
"pthread_cleanup_poplist","nuttx/pthread.h","defined(CONFIG_PTHREAD_CLEANUP)","int","struct pthread_cleanup_s *"
"pthread_cleanup_push","pthread.h","defined(CONFIG_PTHREAD_CLEANUP)","void","pthread_cleanup_t","FAR void *"
"pthread_cond_broadcast","pthread.h","!defined(CONFIG_DISABLE_PTHREAD)","int","FAR pthread_cond_t *"
"pthread_cond_clockwait","pthread.h","!defined(CONFIG_DISABLE_PTHREAD)","int","FAR pthread_cond_t *","FAR pthread_mutex_t *","clockid_t","FAR const struct timespec *"
"pthread_cond_signal","pthread.h","!defined(CONFIG_DISABLE_PTHREAD)","int","FAR pthread_cond_t *"

Can't render this file because it has a wrong number of fields in line 2.