Diffstat:
 src/lib/libc/include/thread_private.h | 66
 1 file changed, 65 insertions(+), 1 deletion(-)
diff --git a/src/lib/libc/include/thread_private.h b/src/lib/libc/include/thread_private.h
index 1ec1071161..3e1dbcdf6e 100644
--- a/src/lib/libc/include/thread_private.h
+++ b/src/lib/libc/include/thread_private.h
@@ -1,10 +1,13 @@
-/* $OpenBSD: thread_private.h,v 1.37 2024/08/18 02:25:51 guenther Exp $ */
+/* $OpenBSD: thread_private.h,v 1.40 2025/08/04 01:44:33 dlg Exp $ */
 
 /* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
 
 #ifndef _THREAD_PRIVATE_H_
 #define _THREAD_PRIVATE_H_
 
+#include <sys/types.h>
+#include <sys/gmon.h>
+
 extern int __isthreaded;
 
 #define _MALLOC_MUTEXES 32
@@ -292,6 +295,12 @@ TAILQ_HEAD(pthread_queue, pthread);
 
 #ifdef FUTEX
 
+/*
+ * CAS based implementations
+ */
+
+#define __CMTX_CAS
+
 struct pthread_mutex {
 	volatile unsigned int lock;
 	int type;
@@ -312,6 +321,10 @@ struct pthread_rwlock {
 
 #else
 
+/*
+ * spinlock based implementations
+ */
+
 struct pthread_mutex {
 	_atomic_lock_t lock;
 	struct pthread_queue lockers;
@@ -336,6 +349,46 @@ struct pthread_rwlock {
 };
 #endif /* FUTEX */
 
+/* libc mutex */
+
+#define __CMTX_UNLOCKED		0
+#define __CMTX_LOCKED		1
+#define __CMTX_CONTENDED	2
+
+#ifdef __CMTX_CAS
+struct __cmtx {
+	volatile unsigned int	lock;
+};
+
+#define __CMTX_INITIALIZER() { \
+	.lock = __CMTX_UNLOCKED, \
+}
+#else /* __CMTX_CAS */
+struct __cmtx {
+	_atomic_lock_t		spin;
+	volatile unsigned int	lock;
+};
+
+#define __CMTX_INITIALIZER() { \
+	.spin = _SPINLOCK_UNLOCKED, \
+	.lock = __CMTX_UNLOCKED, \
+}
+#endif /* __CMTX_CAS */
+
+/* libc recursive mutex */
+
+struct __rcmtx {
+	volatile pthread_t	owner;
+	struct __cmtx		mtx;
+	unsigned int		depth;
+};
+
+#define __RCMTX_INITIALIZER() { \
+	.owner = NULL, \
+	.mtx = __CMTX_INITIALIZER(), \
+	.depth = 0, \
+}
+
 struct pthread_mutex_attr {
 	int ma_type;
 	int ma_protocol;
@@ -390,6 +443,7 @@ struct pthread {
 
 	/* cancel received in a delayed cancel block? */
 	int delayed_cancel;
+	struct gmonparam *gmonparam;
 };
 /* flags in pthread->flags */
 #define THREAD_DONE		0x001
@@ -410,6 +464,16 @@ void _spinlock(volatile _atomic_lock_t *);
 int	_spinlocktry(volatile _atomic_lock_t *);
 void	_spinunlock(volatile _atomic_lock_t *);
 
+void	__cmtx_init(struct __cmtx *);
+int	__cmtx_enter_try(struct __cmtx *);
+void	__cmtx_enter(struct __cmtx *);
+void	__cmtx_leave(struct __cmtx *);
+
+void	__rcmtx_init(struct __rcmtx *);
+int	__rcmtx_enter_try(struct __rcmtx *);
+void	__rcmtx_enter(struct __rcmtx *);
+void	__rcmtx_leave(struct __rcmtx *);
+
 void	_rthread_debug(int, const char *, ...)
 			__attribute__((__format__ (printf, 2, 3)));
 pid_t	_thread_dofork(pid_t (*_sys_fork)(void));
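
Notes on the new __cmtx interface:

The three values given to the lock word above (__CMTX_UNLOCKED, __CMTX_LOCKED,
__CMTX_CONTENDED) are the usual states of a futex-backed mutex: free, held with
no waiters, and held with at least one sleeper. The code below is a minimal
sketch of how __cmtx_enter() and __cmtx_leave() could be layered on those
states when __CMTX_CAS is defined; it uses compiler atomic builtins and
futex(2) directly, and the names and local definitions are stand-ins for
illustration, not the implementation this header actually pairs with.

#include <sys/time.h>
#include <sys/futex.h>
#include <stddef.h>

/* stand-ins for the definitions this diff adds to thread_private.h */
#define CMTX_UNLOCKED	0
#define CMTX_LOCKED	1
#define CMTX_CONTENDED	2

struct cmtx {
	volatile unsigned int lock;
};

static void
cmtx_enter_sketch(struct cmtx *cm)
{
	unsigned int old = CMTX_UNLOCKED;

	/* fast path: take a free lock with a single CAS */
	if (__atomic_compare_exchange_n(&cm->lock, &old, CMTX_LOCKED,
	    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return;

	/*
	 * slow path: mark the lock contended so the holder knows it
	 * must wake somebody, then sleep until the word reads free.
	 */
	for (;;) {
		old = __atomic_exchange_n(&cm->lock, CMTX_CONTENDED,
		    __ATOMIC_ACQUIRE);
		if (old == CMTX_UNLOCKED)
			return;
		futex(&cm->lock, FUTEX_WAIT, CMTX_CONTENDED, NULL, NULL);
	}
}

static void
cmtx_leave_sketch(struct cmtx *cm)
{
	/* release; wake one sleeper only if contention was advertised */
	if (__atomic_exchange_n(&cm->lock, CMTX_UNLOCKED,
	    __ATOMIC_RELEASE) == CMTX_CONTENDED)
		futex(&cm->lock, FUTEX_WAKE, 1, NULL, NULL);
}

A thread that obtains the lock on the slow path leaves the word marked
CMTX_CONTENDED even if it turns out to be the only waiter, so the matching
leave may issue one unnecessary wake; that is the usual trade-off for never
losing a wakeup.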
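
The recursive __rcmtx only has to track ownership and nesting depth on top of
a plain __cmtx. Below is a sketch of the enter/leave pair built on the cmtx
sketch above, assuming the calling thread identifies itself with
pthread_self(); internally libc would presumably use its own TCB accessor
rather than the public call.

#include <pthread.h>

/* stand-in for the struct __rcmtx added above, built on the cmtx sketch */
struct rcmtx {
	volatile pthread_t owner;	/* thread currently holding the lock */
	struct cmtx mtx;		/* underlying non-recursive mutex */
	unsigned int depth;		/* nesting level of the owner */
};

static void
rcmtx_enter_sketch(struct rcmtx *rm)
{
	pthread_t self = pthread_self();

	/* re-entry by the current owner just bumps the depth */
	if (rm->owner == self) {
		rm->depth++;
		return;
	}

	/* everyone else queues on the underlying mutex */
	cmtx_enter_sketch(&rm->mtx);
	rm->owner = self;
	rm->depth = 1;
}

static void
rcmtx_leave_sketch(struct rcmtx *rm)
{
	/* drop the real lock only when the outermost enter is undone */
	if (--rm->depth == 0) {
		rm->owner = NULL;
		cmtx_leave_sketch(&rm->mtx);
	}
}

Checking owner before taking the mutex works because only the holding thread
ever stores its own identity there and clears it before releasing, so a
non-owner can never read its own pthread_t out of the field.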