author     dlg <>  2025-07-12 23:59:44 +0000
committer  dlg <>  2025-07-12 23:59:44 +0000
commit     8082d2222ff57c8446f00bdd20755af6c4f93747 (patch)
tree       32c204acfdb9e0e6ebf5b8e75fbee0e3d49e2228 /src
parent     8fef6d309283073d27442541f73fdf94afe6cd44 (diff)
add libc specific __cmtx and __rcmtx locks for internal use.
__cmtx provides mutual exclusion using futex(2) and cas on archs that
support it, or _spinlocks on the rest. __rcmtx is a recursive mutex built
on top of __cmtx, so it inherits the use of futex and cas/spinlock from
__cmtx.

until now the options we had for locking between threads in libc were
spinlocks or pthread mutexes. spinlocks use sched_yield to relax if they
have to spin on a contended lock, which we are trying to minimise the use
of as much as possible. pthread_mutex is relatively large in memory and
offers a lot of complicated features which are unnecessary for most of
libc. the non cas/futex version of pthread_mutexes currently relies on
__thrsleep and __thrwakeup, which we also want to deprecate.

having a small futex based lock available everywhere will help us move
away from overuse of spinlocks, and deprecate __thrsleep and __thrwakeup.

ok kettenis@
jca@ provided his eyes too.
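The lock implementation itself is not part of this header diff, but the
__CMTX_UNLOCKED/LOCKED/CONTENDED values it introduces match the classic
three-state futex mutex. The sketch below is illustrative only and assumes
that protocol: the cmtx_sketch_* names are invented here, C11 atomics stand
in for whatever cas primitive libc actually uses, and futex(2) is called as
documented in its manual page. The point of the three states is that an
uncontended enter/leave never makes a syscall, and leave only issues a
FUTEX_WAKE when a waiter may actually be sleeping.

/* illustrative sketch only; not the libc code */
#include <sys/time.h>
#include <sys/futex.h>		/* futex(2), FUTEX_WAIT, FUTEX_WAKE */
#include <stdatomic.h>
#include <stdint.h>

#define CM_UNLOCKED	0	/* free */
#define CM_LOCKED	1	/* held, no waiters */
#define CM_CONTENDED	2	/* held, at least one sleeping waiter */

struct cmtx_sketch {
	_Atomic uint32_t lock;
};

static void
cmtx_sketch_enter(struct cmtx_sketch *m)
{
	uint32_t c = CM_UNLOCKED;

	/* fast path: uncontended cas from UNLOCKED to LOCKED, no syscall */
	if (atomic_compare_exchange_strong(&m->lock, &c, CM_LOCKED))
		return;

	/* slow path: advertise contention, then sleep until we take it */
	if (c != CM_CONTENDED)
		c = atomic_exchange(&m->lock, CM_CONTENDED);
	while (c != CM_UNLOCKED) {
		/* sleep only while the word still reads CONTENDED */
		futex((volatile uint32_t *)&m->lock, FUTEX_WAIT,
		    CM_CONTENDED, NULL, NULL);
		c = atomic_exchange(&m->lock, CM_CONTENDED);
	}
}

static void
cmtx_sketch_leave(struct cmtx_sketch *m)
{
	/* only pay for the wakeup syscall if someone may be asleep */
	if (atomic_exchange(&m->lock, CM_UNLOCKED) == CM_CONTENDED)
		futex((volatile uint32_t *)&m->lock, FUTEX_WAKE, 1,
		    NULL, NULL);
}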
Diffstat (limited to 'src')
-rw-r--r--  src/lib/libc/include/thread_private.h | 56
1 file changed, 55 insertions(+), 1 deletion(-)
diff --git a/src/lib/libc/include/thread_private.h b/src/lib/libc/include/thread_private.h
index 1ec1071161..61cc83db60 100644
--- a/src/lib/libc/include/thread_private.h
+++ b/src/lib/libc/include/thread_private.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: thread_private.h,v 1.37 2024/08/18 02:25:51 guenther Exp $ */
+/*	$OpenBSD: thread_private.h,v 1.38 2025/07/12 23:59:44 dlg Exp $ */
 
 /* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
 
@@ -292,6 +292,12 @@ TAILQ_HEAD(pthread_queue, pthread);
 
 #ifdef FUTEX
 
+/*
+ * CAS based implementations
+ */
+
+#define __CMTX_CAS
+
 struct pthread_mutex {
 	volatile unsigned int lock;
 	int type;
@@ -312,6 +318,10 @@ struct pthread_rwlock {
 
 #else
 
+/*
+ * spinlock based implementations
+ */
+
 struct pthread_mutex {
 	_atomic_lock_t lock;
 	struct pthread_queue lockers;
@@ -336,6 +346,40 @@ struct pthread_rwlock {
 };
 #endif /* FUTEX */
 
+/* libc mutex */
+
+#define __CMTX_UNLOCKED		0
+#define __CMTX_LOCKED		1
+#define __CMTX_CONTENDED	2
+
+#ifdef __CMTX_CAS
+struct __cmtx {
+	volatile unsigned int	lock;
+};
+
+#define __CMTX_INITIALIZER() {						\
+	.lock = __CMTX_UNLOCKED,					\
+}
+#else /* __CMTX_CAS */
+struct __cmtx {
+	_atomic_lock_t		spin;
+	volatile unsigned int	lock;
+};
+
+#define __CMTX_INITIALIZER() {						\
+	.spin = _SPINLOCK_UNLOCKED,					\
+	.lock = __CMTX_UNLOCKED,					\
+}
+#endif /* __CMTX_CAS */
+
+/* libc recursive mutex */
+
+struct __rcmtx {
+	volatile pthread_t	owner;
+	struct __cmtx		mtx;
+	unsigned int		depth;
+};
+
 struct pthread_mutex_attr {
 	int ma_type;
 	int ma_protocol;
@@ -410,6 +454,16 @@ void _spinlock(volatile _atomic_lock_t *);
 int _spinlocktry(volatile _atomic_lock_t *);
 void _spinunlock(volatile _atomic_lock_t *);
 
+void	__cmtx_init(struct __cmtx *);
+int	__cmtx_enter_try(struct __cmtx *);
+void	__cmtx_enter(struct __cmtx *);
+void	__cmtx_leave(struct __cmtx *);
+
+void	__rcmtx_init(struct __rcmtx *);
+int	__rcmtx_enter_try(struct __rcmtx *);
+void	__rcmtx_enter(struct __rcmtx *);
+void	__rcmtx_leave(struct __rcmtx *);
+
 void	_rthread_debug(int, const char *, ...)
 		__attribute__((__format__ (printf, 2, 3)));
 pid_t	_thread_dofork(pid_t (*_sys_fork)(void));
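For context, a hypothetical libc-internal caller of the API declared above
might look like the sketch below. Only struct __cmtx, struct __rcmtx,
__CMTX_INITIALIZER() and the __cmtx_*/__rcmtx_* prototypes come from the
header; every other name is invented for illustration. The diff only shows
a static initializer for the plain mutex, so the recursive one is set up at
runtime here.

/* hypothetical usage sketch; names other than the lock API are invented */
#include "thread_private.h"

static struct __cmtx fake_state_mtx = __CMTX_INITIALIZER();
static struct __rcmtx fake_reentrant_mtx;

void
_libc_fake_init(void)
{
	/* no static initializer for __rcmtx is shown above, so init here */
	__rcmtx_init(&fake_reentrant_mtx);
}

void
_libc_fake_update(void)
{
	/* plain mutual exclusion around shared libc state */
	__cmtx_enter(&fake_state_mtx);
	/* ... touch the shared state ... */
	__cmtx_leave(&fake_state_mtx);
}

void
_libc_fake_recurse(int depth)
{
	/*
	 * __rcmtx records the owner and a depth count, so the owning
	 * thread can re-enter without deadlocking against itself.
	 */
	__rcmtx_enter(&fake_reentrant_mtx);
	if (depth > 0)
		_libc_fake_recurse(depth - 1);
	__rcmtx_leave(&fake_reentrant_mtx);
}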