From 417b1213b262bbe6d34c708537dff4b062920bfa Mon Sep 17 00:00:00 2001 From: dlg <> Date: Sat, 12 Jul 2025 23:59:44 +0000 Subject: add libc specific __cmtx and __rcmtx locks for internal use. __cmtx provides mutual exclusion using futex(2) and cas on archs that support it, or _spinlocks on the rest. __rcmtx is a recursive mutex built on top of __cmtx, so it inherits the use of futex and cas/spinlock from __cmtx. until now the options we had for locking between threads in libc were spinlocks or pthread mutexes. spinlocks use sched_yield to relax if they have to spin on a contended lock, which we are trying to minimise the use of as much as possible. pthread_mutex is relatively large in memory and offers a lot of complicated features which are unnecessary for most of libc. the non cas/futex version of pthread_mutexes currently relies on __thrsleep and __thrwakeup, which we also want to deprecate. having a small futex based lock available everywhere will help us move away from overuse of spinlocks, and deprecate __thrsleep and __thrwakeup. ok kettenis@ jca@ provided his eyes too. --- src/lib/libc/include/thread_private.h | 56 ++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) (limited to 'src/lib/libc/include') diff --git a/src/lib/libc/include/thread_private.h b/src/lib/libc/include/thread_private.h index 1ec1071161..61cc83db60 100644 --- a/src/lib/libc/include/thread_private.h +++ b/src/lib/libc/include/thread_private.h @@ -1,4 +1,4 @@ -/* $OpenBSD: thread_private.h,v 1.37 2024/08/18 02:25:51 guenther Exp $ */ +/* $OpenBSD: thread_private.h,v 1.38 2025/07/12 23:59:44 dlg Exp $ */ /* PUBLIC DOMAIN: No Rights Reserved. 
Marco S Hyman */ @@ -292,6 +292,12 @@ TAILQ_HEAD(pthread_queue, pthread); #ifdef FUTEX +/* + * CAS based implementations + */ + +#define __CMTX_CAS + struct pthread_mutex { volatile unsigned int lock; int type; @@ -312,6 +318,10 @@ struct pthread_rwlock { #else +/* + * spinlock based implementations + */ + struct pthread_mutex { _atomic_lock_t lock; struct pthread_queue lockers; @@ -336,6 +346,40 @@ struct pthread_rwlock { }; #endif /* FUTEX */ +/* libc mutex */ + +#define __CMTX_UNLOCKED 0 +#define __CMTX_LOCKED 1 +#define __CMTX_CONTENDED 2 + +#ifdef __CMTX_CAS +struct __cmtx { + volatile unsigned int lock; +}; + +#define __CMTX_INITIALIZER() { \ + .lock = __CMTX_UNLOCKED, \ +} +#else /* __CMTX_CAS */ +struct __cmtx { + _atomic_lock_t spin; + volatile unsigned int lock; +}; + +#define __CMTX_INITIALIZER() { \ + .spin = _SPINLOCK_UNLOCKED, \ + .lock = __CMTX_UNLOCKED, \ +} +#endif /* __CMTX_CAS */ + +/* libc recursive mutex */ + +struct __rcmtx { + volatile pthread_t owner; + struct __cmtx mtx; + unsigned int depth; +}; + struct pthread_mutex_attr { int ma_type; int ma_protocol; @@ -410,6 +454,16 @@ void _spinlock(volatile _atomic_lock_t *); int _spinlocktry(volatile _atomic_lock_t *); void _spinunlock(volatile _atomic_lock_t *); +void __cmtx_init(struct __cmtx *); +int __cmtx_enter_try(struct __cmtx *); +void __cmtx_enter(struct __cmtx *); +void __cmtx_leave(struct __cmtx *); + +void __rcmtx_init(struct __rcmtx *); +int __rcmtx_enter_try(struct __rcmtx *); +void __rcmtx_enter(struct __rcmtx *); +void __rcmtx_leave(struct __rcmtx *); + void _rthread_debug(int, const char *, ...) __attribute__((__format__ (printf, 2, 3))); pid_t _thread_dofork(pid_t (*_sys_fork)(void)); -- cgit v1.2.3-55-g6feb