diff options
| author | dlg <> | 2025-07-12 23:59:44 +0000 |
|---|---|---|
| committer | dlg <> | 2025-07-12 23:59:44 +0000 |
| commit | 417b1213b262bbe6d34c708537dff4b062920bfa (patch) | |
| tree | 32c204acfdb9e0e6ebf5b8e75fbee0e3d49e2228 /src | |
| parent | 69147ea8445a511462c02e8ac88d4f75fec3fa1b (diff) | |
| download | openbsd-417b1213b262bbe6d34c708537dff4b062920bfa.tar.gz openbsd-417b1213b262bbe6d34c708537dff4b062920bfa.tar.bz2 openbsd-417b1213b262bbe6d34c708537dff4b062920bfa.zip | |
add libc specific __cmtx and __rcmtx locks for internal use.
__cmtx provides mutual exclusion using futex(2) and cas on archs
that support it, or _spinlocks on the rest. __rcmtx is a recursive
mutex built on top of __cmtx, so it inherits the use of futex and
cas/spinlock from __cmtx.
until now the options we had for locking between threads in libc
were spinlocks or pthread mutexes. spinlocks use sched_yield to
relax if they have to spin on a contended lock, which we are trying
to minimise the use of as much as possible. pthread_mutex is
relatively large in memory and offers a lot of complicated features
which are unnecessary for most of libc. the non cas/futex version
of pthread_mutexes currently relies on __thrsleep and __thrwakeup,
which we also want to deprecate.
having a small futex based lock available everywhere will help us
move away from overuse of spinlocks, and deprecate __thrsleep and
__thrwakeup.
ok kettenis@
jca@ provided his eyes too.
Diffstat (limited to 'src')
| -rw-r--r-- | src/lib/libc/include/thread_private.h | 56 |
1 files changed, 55 insertions, 1 deletions
diff --git a/src/lib/libc/include/thread_private.h b/src/lib/libc/include/thread_private.h index 1ec1071161..61cc83db60 100644 --- a/src/lib/libc/include/thread_private.h +++ b/src/lib/libc/include/thread_private.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* $OpenBSD: thread_private.h,v 1.37 2024/08/18 02:25:51 guenther Exp $ */ | 1 | /* $OpenBSD: thread_private.h,v 1.38 2025/07/12 23:59:44 dlg Exp $ */ |
| 2 | 2 | ||
| 3 | /* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */ | 3 | /* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */ |
| 4 | 4 | ||
| @@ -292,6 +292,12 @@ TAILQ_HEAD(pthread_queue, pthread); | |||
| 292 | 292 | ||
| 293 | #ifdef FUTEX | 293 | #ifdef FUTEX |
| 294 | 294 | ||
| 295 | /* | ||
| 296 | * CAS based implementations | ||
| 297 | */ | ||
| 298 | |||
| 299 | #define __CMTX_CAS | ||
| 300 | |||
| 295 | struct pthread_mutex { | 301 | struct pthread_mutex { |
| 296 | volatile unsigned int lock; | 302 | volatile unsigned int lock; |
| 297 | int type; | 303 | int type; |
| @@ -312,6 +318,10 @@ struct pthread_rwlock { | |||
| 312 | 318 | ||
| 313 | #else | 319 | #else |
| 314 | 320 | ||
| 321 | /* | ||
| 322 | * spinlock based implementations | ||
| 323 | */ | ||
| 324 | |||
| 315 | struct pthread_mutex { | 325 | struct pthread_mutex { |
| 316 | _atomic_lock_t lock; | 326 | _atomic_lock_t lock; |
| 317 | struct pthread_queue lockers; | 327 | struct pthread_queue lockers; |
| @@ -336,6 +346,40 @@ struct pthread_rwlock { | |||
| 336 | }; | 346 | }; |
| 337 | #endif /* FUTEX */ | 347 | #endif /* FUTEX */ |
| 338 | 348 | ||
| 349 | /* libc mutex */ | ||
| 350 | |||
| 351 | #define __CMTX_UNLOCKED 0 | ||
| 352 | #define __CMTX_LOCKED 1 | ||
| 353 | #define __CMTX_CONTENDED 2 | ||
| 354 | |||
| 355 | #ifdef __CMTX_CAS | ||
| 356 | struct __cmtx { | ||
| 357 | volatile unsigned int lock; | ||
| 358 | }; | ||
| 359 | |||
| 360 | #define __CMTX_INITIALIZER() { \ | ||
| 361 | .lock = __CMTX_UNLOCKED, \ | ||
| 362 | } | ||
| 363 | #else /* __CMTX_CAS */ | ||
| 364 | struct __cmtx { | ||
| 365 | _atomic_lock_t spin; | ||
| 366 | volatile unsigned int lock; | ||
| 367 | }; | ||
| 368 | |||
| 369 | #define __CMTX_INITIALIZER() { \ | ||
| 370 | .spin = _SPINLOCK_UNLOCKED, \ | ||
| 371 | .lock = __CMTX_UNLOCKED, \ | ||
| 372 | } | ||
| 373 | #endif /* __CMTX_CAS */ | ||
| 374 | |||
| 375 | /* libc recursive mutex */ | ||
| 376 | |||
| 377 | struct __rcmtx { | ||
| 378 | volatile pthread_t owner; | ||
| 379 | struct __cmtx mtx; | ||
| 380 | unsigned int depth; | ||
| 381 | }; | ||
| 382 | |||
| 339 | struct pthread_mutex_attr { | 383 | struct pthread_mutex_attr { |
| 340 | int ma_type; | 384 | int ma_type; |
| 341 | int ma_protocol; | 385 | int ma_protocol; |
| @@ -410,6 +454,16 @@ void _spinlock(volatile _atomic_lock_t *); | |||
| 410 | int _spinlocktry(volatile _atomic_lock_t *); | 454 | int _spinlocktry(volatile _atomic_lock_t *); |
| 411 | void _spinunlock(volatile _atomic_lock_t *); | 455 | void _spinunlock(volatile _atomic_lock_t *); |
| 412 | 456 | ||
| 457 | void __cmtx_init(struct __cmtx *); | ||
| 458 | int __cmtx_enter_try(struct __cmtx *); | ||
| 459 | void __cmtx_enter(struct __cmtx *); | ||
| 460 | void __cmtx_leave(struct __cmtx *); | ||
| 461 | |||
| 462 | void __rcmtx_init(struct __rcmtx *); | ||
| 463 | int __rcmtx_enter_try(struct __rcmtx *); | ||
| 464 | void __rcmtx_enter(struct __rcmtx *); | ||
| 465 | void __rcmtx_leave(struct __rcmtx *); | ||
| 466 | |||
| 413 | void _rthread_debug(int, const char *, ...) | 467 | void _rthread_debug(int, const char *, ...) |
| 414 | __attribute__((__format__ (printf, 2, 3))); | 468 | __attribute__((__format__ (printf, 2, 3))); |
| 415 | pid_t _thread_dofork(pid_t (*_sys_fork)(void)); | 469 | pid_t _thread_dofork(pid_t (*_sys_fork)(void)); |
