diff --git a/include/nuttx/sched.h b/include/nuttx/sched.h index f618ac94ce..f8bc539e3d 100644 --- a/include/nuttx/sched.h +++ b/include/nuttx/sched.h @@ -699,7 +699,9 @@ struct pthread_tcb_s /* Robust mutex support *******************************************************/ +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE FAR struct pthread_mutex_s *mhead; /* List of mutexes held by thread */ +#endif /* Clean-up stack *************************************************************/ diff --git a/include/pthread.h b/include/pthread.h index 305bb8db21..214ad454fe 100644 --- a/include/pthread.h +++ b/include/pthread.h @@ -240,15 +240,19 @@ typedef struct pthread_mutexattr_s pthread_mutexattr_t; struct pthread_mutex_s { +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE /* Supports a singly linked list */ FAR struct pthread_mutex_s *flink; +#endif /* Payload */ sem_t sem; /* Semaphore underlying the implementation of the mutex */ pid_t pid; /* ID of the holder of the mutex */ +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE uint8_t flags; /* See _PTHREAD_MFLAGS_* */ +#endif #ifdef CONFIG_MUTEX_TYPES uint8_t type; /* Type of the mutex. See PTHREAD_MUTEX_* definitions */ int16_t nlocks; /* The number of recursive locks held */ @@ -438,9 +442,11 @@ int pthread_mutex_lock(FAR pthread_mutex_t *mutex); int pthread_mutex_trylock(FAR pthread_mutex_t *mutex); int pthread_mutex_unlock(FAR pthread_mutex_t *mutex); +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE /* Make sure that the pthread mutex is in a consistent state */ int pthread_mutex_consistent(FAR pthread_mutex_t *mutex); +#endif /* Operations on condition variables */ diff --git a/sched/Kconfig b/sched/Kconfig index 3a5807d4dc..369e9ac7cd 100644 --- a/sched/Kconfig +++ b/sched/Kconfig @@ -531,6 +531,26 @@ config MUTEX_TYPES: Set to enable support for recursive and errorcheck mutexes. Enables pthread_mutexattr_settype(). 
+choice + prompt "pthread mutex robustness" + default PTHREAD_MUTEX_ROBUST if !DEFAULT_SMALL + default PTHREAD_MUTEX_UNSAFE if DEFAULT_SMALL + + config PTHREAD_MUTEX_ROBUST + bool "Robust mutexes" + ---help--- + Support only the robust form of the NORMAL mutex. + + config PTHREAD_MUTEX_UNSAFE + bool "Traditional unsafe mutexes" + ---help--- + Support only the traditional non-robust form of the NORMAL mutex. + You should select this option only for backward compatibility with + software you may be porting or, perhaps, if you are trying to minimize + footprint. + +endchoice # pthread mutex robustness + config NPTHREAD_KEYS int "Maximum number of pthread keys" default 4 diff --git a/sched/pthread/Make.defs b/sched/pthread/Make.defs index a39a35d208..20bc7e8d6c 100644 --- a/sched/pthread/Make.defs +++ b/sched/pthread/Make.defs @@ -37,9 +37,8 @@ ifneq ($(CONFIG_DISABLE_PTHREAD),y) CSRCS += pthread_create.c pthread_exit.c pthread_join.c pthread_detach.c CSRCS += pthread_yield.c pthread_getschedparam.c pthread_setschedparam.c -CSRCS += pthread_mutexinit.c pthread_mutexdestroy.c pthread_mutex.c +CSRCS += pthread_mutexinit.c pthread_mutexdestroy.c CSRCS += pthread_mutexlock.c pthread_mutextrylock.c pthread_mutexunlock.c -CSRCS += pthread_mutexconsistent.c pthread_mutexinconsistent.c CSRCS += pthread_condinit.c pthread_conddestroy.c CSRCS += pthread_condwait.c pthread_condsignal.c pthread_condbroadcast.c CSRCS += pthread_barrierinit.c pthread_barrierdestroy.c pthread_barrierwait.c @@ -49,6 +48,10 @@ CSRCS += pthread_keydelete.c CSRCS += pthread_initialize.c pthread_completejoin.c pthread_findjoininfo.c CSRCS += pthread_once.c pthread_release.c pthread_setschedprio.c +ifneq ($(CONFIG_PTHREAD_MUTEX_UNSAFE),y) +CSRCS += pthread_mutex.c pthread_mutexconsistent.c pthread_mutexinconsistent.c +endif + ifneq ($(CONFIG_DISABLE_SIGNALS),y) CSRCS += pthread_condtimedwait.c pthread_kill.c pthread_sigmask.c endif diff --git a/sched/pthread/pthread.h b/sched/pthread/pthread.h index
6ab044a3ba..2e7106495f 100644 --- a/sched/pthread/pthread.h +++ b/sched/pthread/pthread.h @@ -109,9 +109,15 @@ FAR struct join_s *pthread_findjoininfo(FAR struct task_group_s *group, void pthread_release(FAR struct task_group_s *group); int pthread_takesemaphore(sem_t *sem, bool intr); int pthread_givesemaphore(sem_t *sem); + +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE int pthread_mutex_take(FAR struct pthread_mutex_s *mutex, bool intr); int pthread_mutex_give(FAR struct pthread_mutex_s *mutex); void pthread_mutex_inconsistent(FAR struct pthread_tcb_s *tcb); +#else +# define pthread_mutex_take(m,i) pthread_takesemaphore(&(m)->sem,(i)) +# define pthread_mutex_give(m) pthread_givesemaphore(&(m)->sem) +#endif #ifdef CONFIG_MUTEX_TYPES int pthread_mutexattr_verifytype(int type); diff --git a/sched/pthread/pthread_cancel.c b/sched/pthread/pthread_cancel.c index 3a520b1f93..e2cad60ebb 100644 --- a/sched/pthread/pthread_cancel.c +++ b/sched/pthread/pthread_cancel.c @@ -162,9 +162,11 @@ int pthread_cancel(pthread_t thread) (void)pthread_completejoin((pid_t)thread, PTHREAD_CANCELED); +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE /* Recover any mutexes still held by the canceled thread */ pthread_mutex_inconsistent(tcb); +#endif /* Then let task_terminate do the real work */ diff --git a/sched/pthread/pthread_exit.c b/sched/pthread/pthread_exit.c index 4654d86371..bcc8e79c85 100644 --- a/sched/pthread/pthread_exit.c +++ b/sched/pthread/pthread_exit.c @@ -123,9 +123,11 @@ void pthread_exit(FAR void *exit_value) exit(EXIT_FAILURE); } +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE /* Recover any mutexes still held by the canceled thread */ - pthread_mutex_inconsistent(tcb); + pthread_mutex_inconsistent((FAR struct pthread_tcb_s *)tcb); +#endif /* Perform common task termination logic. This will get called again later * through logic kicked off by _exit(). 
However, we need to call it before diff --git a/sched/pthread/pthread_mutexinit.c b/sched/pthread/pthread_mutexinit.c index b521137683..46c9e67356 100644 --- a/sched/pthread/pthread_mutexinit.c +++ b/sched/pthread/pthread_mutexinit.c @@ -123,14 +123,17 @@ int pthread_mutex_init(FAR pthread_mutex_t *mutex, ret = get_errno(); } #endif + +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE /* Initial internal fields of the mutex */ mutex->flink = NULL; mutex->flags = 0; - - /* Set up attributes unique to the mutex type */ +#endif #ifdef CONFIG_MUTEX_TYPES + /* Set up attributes unique to the mutex type */ + mutex->type = type; mutex->nlocks = 0; #endif diff --git a/sched/pthread/pthread_mutexlock.c b/sched/pthread/pthread_mutexlock.c index 67b668bf9d..ffc8eadad2 100644 --- a/sched/pthread/pthread_mutexlock.c +++ b/sched/pthread/pthread_mutexlock.c @@ -120,13 +120,15 @@ int pthread_mutex_lock(FAR pthread_mutex_t *mutex) sched_lock(); - /* Does this thread already hold the semaphore? */ +#ifdef CONFIG_MUTEX_TYPES + /* All mutex types except for NORMAL (and DEFAULT) will return + * an error if the caller does not hold the mutex. + */ - if (mutex->pid == mypid) + if (mutex->type != PTHREAD_MUTEX_NORMAL && mutex->pid == mypid) { /* Yes.. Is this a recursive mutex? */ -#ifdef CONFIG_MUTEX_TYPES if (mutex->type == PTHREAD_MUTEX_RECURSIVE) { /* Yes... just increment the number of locks held and return @@ -144,7 +146,6 @@ int pthread_mutex_lock(FAR pthread_mutex_t *mutex) } } else -#endif { /* No, then we would deadlock... return an error (default * behavior is like PTHREAD_MUTEX_ERRORCHECK) @@ -160,14 +161,17 @@ int pthread_mutex_lock(FAR pthread_mutex_t *mutex) ret = EDEADLK; } } + else +#endif /* CONFIG_MUTEX_TYPES */ + +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE /* The calling thread does not hold the semaphore. The correct * behavior for the 'robust' mutex is to verify that the holder of the * mutex is still valid.
This is protection from the case * where the holder of the mutex has exitted without unlocking it. */ - else if (mutex->pid > 0 && sched_gettcb(mutex->pid) == NULL) + if (mutex->pid > 0 && sched_gettcb(mutex->pid) == NULL) { DEBUGASSERT(mutex->pid != 0); /* < 0: available, >0 owned, ==0 error */ DEBUGASSERT((mutex->flags & _PTHREAD_MFLAGS_INCONSISTENT) != 0); @@ -182,8 +186,13 @@ int pthread_mutex_lock(FAR pthread_mutex_t *mutex) ret = EOWNERDEAD; } else +#endif /* !CONFIG_PTHREAD_MUTEX_UNSAFE */ + { - /* Take the underlying semaphore, waiting if necessary */ + /* Take the underlying semaphore, waiting if necessary. NOTE that it + * is required to deadlock for the case of the non-robust NORMAL or + * default mutex. + */ ret = pthread_mutex_take(mutex, true); diff --git a/sched/pthread/pthread_mutextrylock.c b/sched/pthread/pthread_mutextrylock.c index 683909c859..6e21c74b44 100644 --- a/sched/pthread/pthread_mutextrylock.c +++ b/sched/pthread/pthread_mutextrylock.c @@ -124,7 +124,7 @@ int pthread_mutex_trylock(FAR pthread_mutex_t *mutex) else { - /* Did it fail because the semaphore was not avaialabl? */ + /* Did it fail because the semaphore was not available? */ int errcode = get_errno(); if (errcode == EAGAIN) @@ -148,6 +148,8 @@ int pthread_mutex_trylock(FAR pthread_mutex_t *mutex) } else #endif + +#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE /* The calling thread does not hold the semaphore. The correct * behavior for the 'robust' mutex is to verify that the holder of * the mutex is still valid.
This is protection from the case @@ -172,6 +174,8 @@ int pthread_mutex_trylock(FAR pthread_mutex_t *mutex) /* The mutex is locked by another, active thread */ else +#endif /* !CONFIG_PTHREAD_MUTEX_UNSAFE */ + { ret = EBUSY; } diff --git a/sched/pthread/pthread_mutexunlock.c b/sched/pthread/pthread_mutexunlock.c index 7ab0c90469..9efd9ed847 100644 --- a/sched/pthread/pthread_mutexunlock.c +++ b/sched/pthread/pthread_mutexunlock.c @@ -93,9 +93,20 @@ int pthread_mutex_unlock(FAR pthread_mutex_t *mutex) sched_lock(); - /* Does the calling thread own the semaphore? */ +#if !defined(CONFIG_PTHREAD_MUTEX_UNSAFE) || !defined(CONFIG_MUTEX_TYPES) + /* Does the calling thread own the semaphore? Should we report the + * EPERM error? This applies to robust NORMAL (and DEFAULT) mutexes + * as well as ERRORCHECK and RECURSIVE mutexes. + */ if (mutex->pid != (int)getpid()) +#else + /* Does the calling thread own the semaphore? Should we report the + * EPERM error? This applies to ERRORCHECK and RECURSIVE mutexes. + */ + + if (mutex->type != PTHREAD_MUTEX_NORMAL && mutex->pid != (int)getpid()) +#endif { /* No... return an EPERM error. * @@ -111,11 +122,12 @@ int pthread_mutex_unlock(FAR pthread_mutex_t *mutex) serr("ERROR: Holder=%d returning EPERM\n", mutex->pid); ret = EPERM; } - - /* Yes, the caller owns the semaphore.. Is this a recursive mutex? */ + else #ifdef CONFIG_MUTEX_TYPES - else if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->nlocks > 1) + /* Yes, the caller owns the semaphore.. Is this a recursive mutex? */ + + if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->nlocks > 1) { /* This is a recursive mutex and we there are multiple locks held. Retain * the mutex lock, just decrement the count of locks held, and return @@ -125,13 +137,19 @@ int pthread_mutex_unlock(FAR pthread_mutex_t *mutex) mutex->nlocks--; ret = OK; } -#endif + else + +#endif /* CONFIG_MUTEX_TYPES */ /* This is either a non-recursive mutex or is the outermost unlock of * a recursive mutex.
+ * + * In the case where the calling thread is NOT the holder of the mutex, + * the behavior is undefined per POSIX. Here we do the same as GLIBC: + * We allow the other thread to release the mutex even though it does + * not own it. */ - else { /* Nullify the pid and lock count then post the semaphore */