path: root/src/lib/libc/include/thread_private.h
Diffstat (limited to 'src/lib/libc/include/thread_private.h')
-rw-r--r--	src/lib/libc/include/thread_private.h	426
1 file changed, 0 insertions(+), 426 deletions(-)
diff --git a/src/lib/libc/include/thread_private.h b/src/lib/libc/include/thread_private.h
deleted file mode 100644
index 237c3fbd03..0000000000
--- a/src/lib/libc/include/thread_private.h
+++ /dev/null
@@ -1,426 +0,0 @@
/*	$OpenBSD: thread_private.h,v 1.36 2021/01/06 19:54:17 otto Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#ifndef _THREAD_PRIVATE_H_
#define _THREAD_PRIVATE_H_

#include <stdio.h>		/* for FILE and __isthreaded */

#define _MALLOC_MUTEXES 32
void _malloc_init(int);
#ifdef __LIBC__
PROTO_NORMAL(_malloc_init);
#endif /* __LIBC__ */

/*
 * The callbacks needed by libc to handle the threaded case.
 * NOTE: Bump the version when you change the struct contents!
 *
 * tc_canceled:
 *	If not NULL, what to do when canceled (otherwise _exit(0))
 *
 * tc_flockfile, tc_ftrylockfile, and tc_funlockfile:
 *	If not NULL, these implement the flockfile() family.
 *	XXX In theory, you should be able to lock a FILE before
 *	XXX loading libpthread and have that be a real lock on it,
 *	XXX but that doesn't work without the libc base version
 *	XXX tracking the recursion count.
 *
 * tc_malloc_lock and tc_malloc_unlock:
 * tc_atexit_lock and tc_atexit_unlock:
 * tc_atfork_lock and tc_atfork_unlock:
 * tc_arc4_lock and tc_arc4_unlock:
 *	The locks used by the malloc, atexit, atfork, and arc4 subsystems.
 *	These have to be ordered specially in the fork/vfork wrappers
 *	and may be implemented differently than the general mutexes
 *	in the callbacks below.
 *
 * tc_mutex_lock and tc_mutex_unlock:
 *	Lock and unlock the given mutex.  If the given mutex is NULL,
 *	a mutex is allocated and initialized automatically.
 *
 * tc_mutex_destroy:
 *	Destroy/deallocate the given mutex.
 *
 * tc_tag_lock and tc_tag_unlock:
 *	Lock and unlock the mutex associated with the given tag.
 *	If the given tag is NULL, a tag is allocated and initialized
 *	automatically.
 *
 * tc_tag_storage:
 *	Returns a pointer to the per-thread instance of data associated
 *	with the given tag.  If the given tag is NULL, a tag is
 *	allocated and initialized automatically.
 *
 * tc_fork, tc_vfork:
 *	If not NULL, they are called instead of the syscall stub, so that
 *	the thread library can do necessary locking and reinitialization.
 *
 * tc_thread_release:
 *	Handles the release of a thread's TIB and struct pthread and the
 *	notification of other threads...when there are other threads.
 *
 * tc_thread_key_zero:
 *	For each thread, zero out the key data associated with the given key.
 *
 * If <machine/tcb.h> doesn't define TCB_GET(), then locating the TCB in a
 * threaded process requires a syscall (__get_tcb(2)), which is too much
 * overhead for single-threaded processes.  For those archs, there are two
 * additional callbacks, though they are placed first in the struct for
 * convenience in ASM:
 *
 * tc_errnoptr:
 *	Returns the address of the thread's errno.
 *
 * tc_tcb:
 *	Returns the address of the thread's TCB.
 */

struct pthread;
struct thread_callbacks {
	int	*(*tc_errnoptr)(void);		/* MUST BE FIRST */
	void	*(*tc_tcb)(void);
	__dead void	(*tc_canceled)(void);
	void	(*tc_flockfile)(FILE *);
	int	(*tc_ftrylockfile)(FILE *);
	void	(*tc_funlockfile)(FILE *);
	void	(*tc_malloc_lock)(int);
	void	(*tc_malloc_unlock)(int);
	void	(*tc_atexit_lock)(void);
	void	(*tc_atexit_unlock)(void);
	void	(*tc_atfork_lock)(void);
	void	(*tc_atfork_unlock)(void);
	void	(*tc_arc4_lock)(void);
	void	(*tc_arc4_unlock)(void);
	void	(*tc_mutex_lock)(void **);
	void	(*tc_mutex_unlock)(void **);
	void	(*tc_mutex_destroy)(void **);
	void	(*tc_tag_lock)(void **);
	void	(*tc_tag_unlock)(void **);
	void	*(*tc_tag_storage)(void **, void *, size_t, void (*)(void *),
	    void *);
	__pid_t	(*tc_fork)(void);
	__pid_t	(*tc_vfork)(void);
	void	(*tc_thread_release)(struct pthread *);
	void	(*tc_thread_key_zero)(int);
};

__BEGIN_PUBLIC_DECLS
/*
 * Set the callbacks used by libc.
 */
void	_thread_set_callbacks(const struct thread_callbacks *_cb, size_t _len);
__END_PUBLIC_DECLS
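
/*
 * Illustrative sketch (not part of the original header): a thread
 * library fills in a struct thread_callbacks with its own
 * implementations and registers it once, early in its initialization.
 * The _len argument lets libc verify that both sides agree on the
 * struct version.  The _rthread_* names below are hypothetical:
 *
 *	static const struct thread_callbacks _rthread_cb = {
 *		.tc_canceled	 = _rthread_canceled,
 *		.tc_mutex_lock	 = _rthread_mutex_lock,
 *		.tc_mutex_unlock = _rthread_mutex_unlock,
 *		...
 *	};
 *
 *	_thread_set_callbacks(&_rthread_cb, sizeof(_rthread_cb));
 */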

#ifdef __LIBC__
__BEGIN_HIDDEN_DECLS
/* the current set */
extern struct thread_callbacks _thread_cb;
__END_HIDDEN_DECLS
#endif /* __LIBC__ */

/*
 * Helper macro to make unique names in the thread namespace.
 */
#define __THREAD_NAME(name)	__CONCAT(_thread_tagname_,name)

/*
 * Macros used in libc to access thread mutexes, keys, and per-thread storage.
 * _THREAD_PRIVATE_KEY and _THREAD_PRIVATE_MUTEX are different macros for
 * historical reasons.  They do the same thing: define a static variable
 * keyed by 'name' that identifies a mutex and a key to identify per-thread
 * data.
 */
#define _THREAD_PRIVATE_KEY(name)					\
	static void *__THREAD_NAME(name)
#define _THREAD_PRIVATE_MUTEX(name)					\
	static void *__THREAD_NAME(name)
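
/*
 * Illustrative sketch (not part of the original header): how libc code
 * might use these macros for per-thread storage.  With a thread library
 * loaded, each thread gets its own copy of the static buffer via
 * tc_tag_storage(); in a single-threaded process the static buffer
 * itself is returned.  The third argument is what to return if
 * allocation fails.  The names gamma_key, _gamma_buf, and
 * _gamma_signgam_p are hypothetical:
 *
 *	_THREAD_PRIVATE_KEY(gamma_key);
 *	static int _gamma_buf;
 *
 *	int *
 *	_gamma_signgam_p(void)
 *	{
 *		return _THREAD_PRIVATE(gamma_key, _gamma_buf, NULL);
 *	}
 */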


#ifndef __LIBC__	/* building some sort of reach around */

#define _THREAD_PRIVATE_MUTEX_LOCK(name)		do {} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name)		do {} while (0)
#define _THREAD_PRIVATE(keyname, storage, error)	&(storage)
#define _THREAD_PRIVATE_DT(keyname, storage, dt, error)	&(storage)
#define _MUTEX_LOCK(mutex)				do {} while (0)
#define _MUTEX_UNLOCK(mutex)				do {} while (0)
#define _MUTEX_DESTROY(mutex)				do {} while (0)
#define _MALLOC_LOCK(n)					do {} while (0)
#define _MALLOC_UNLOCK(n)				do {} while (0)
#define _ATEXIT_LOCK()					do {} while (0)
#define _ATEXIT_UNLOCK()				do {} while (0)
#define _ATFORK_LOCK()					do {} while (0)
#define _ATFORK_UNLOCK()				do {} while (0)
#define _ARC4_LOCK()					do {} while (0)
#define _ARC4_UNLOCK()					do {} while (0)

#else /* building libc */
#define _THREAD_PRIVATE_MUTEX_LOCK(name)				\
	do {								\
		if (_thread_cb.tc_tag_lock != NULL)			\
			_thread_cb.tc_tag_lock(&(__THREAD_NAME(name)));	\
	} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name)				\
	do {								\
		if (_thread_cb.tc_tag_unlock != NULL)			\
			_thread_cb.tc_tag_unlock(&(__THREAD_NAME(name))); \
	} while (0)
#define _THREAD_PRIVATE(keyname, storage, error)			\
	(_thread_cb.tc_tag_storage == NULL ? &(storage) :		\
	    _thread_cb.tc_tag_storage(&(__THREAD_NAME(keyname)),	\
		&(storage), sizeof(storage), NULL, (error)))

#define _THREAD_PRIVATE_DT(keyname, storage, dt, error)			\
	(_thread_cb.tc_tag_storage == NULL ? &(storage) :		\
	    _thread_cb.tc_tag_storage(&(__THREAD_NAME(keyname)),	\
		&(storage), sizeof(storage), (dt), (error)))

/*
 * Macros used in libc to access mutexes.
 */
#define _MUTEX_LOCK(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_lock(mutex);		\
	} while (0)
#define _MUTEX_UNLOCK(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_unlock(mutex);		\
	} while (0)
#define _MUTEX_DESTROY(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_destroy(mutex);		\
	} while (0)
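
/*
 * Illustrative sketch (not part of the original header): a libc
 * subsystem guarding shared state with these macros.  The static
 * pointer starts out NULL and tc_mutex_lock() allocates and
 * initializes the mutex on first use; while the process is
 * single-threaded, the __isthreaded test skips the callback entirely.
 * The _rand_* names are hypothetical:
 *
 *	static void *_rand_mutex;
 *	static unsigned int _rand_seed;
 *
 *	void
 *	_srand_threadsafe(unsigned int seed)
 *	{
 *		_MUTEX_LOCK(&_rand_mutex);
 *		_rand_seed = seed;
 *		_MUTEX_UNLOCK(&_rand_mutex);
 *	}
 */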

/*
 * malloc lock/unlock prototypes and definitions
 */
#define _MALLOC_LOCK(n)							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_malloc_lock(n);			\
	} while (0)
#define _MALLOC_UNLOCK(n)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_malloc_unlock(n);			\
	} while (0)

#define _ATEXIT_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atexit_lock();			\
	} while (0)
#define _ATEXIT_UNLOCK()						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atexit_unlock();			\
	} while (0)

#define _ATFORK_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atfork_lock();			\
	} while (0)
#define _ATFORK_UNLOCK()						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atfork_unlock();			\
	} while (0)

#define _ARC4_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_arc4_lock();			\
	} while (0)
#define _ARC4_UNLOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_arc4_unlock();			\
	} while (0)
#endif /* __LIBC__ */


/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Private data structures that back up the typedefs in pthread.h.
 * Since only the thread library cares about their size or arrangement,
 * it should be possible to switch libraries without relinking.
 *
 * Do not reorder _atomic_lock_t and sem_t variables in the structs.
 * This is due to alignment requirements of certain arches like hppa.
 * The current requirement is 16 bytes.
 *
 * THE MACHINE DEPENDENT CERROR CODE HAS HARD CODED OFFSETS INTO PTHREAD_T!
 */

#include <sys/queue.h>
#include <pthread.h>
#include <semaphore.h>
#include <machine/spinlock.h>

#define _SPINLOCK_UNLOCKED _ATOMIC_LOCK_UNLOCKED

struct __sem {
	_atomic_lock_t lock;
	volatile int waitcount;
	volatile int value;
	int shared;
};

TAILQ_HEAD(pthread_queue, pthread);

#ifdef FUTEX

struct pthread_mutex {
	volatile unsigned int lock;
	int type;
	pthread_t owner;
	int count;
	int prioceiling;
};

struct pthread_cond {
	volatile unsigned int seq;
	clockid_t clock;
	struct pthread_mutex *mutex;
};

struct pthread_rwlock {
	volatile unsigned int value;
};

#else

struct pthread_mutex {
	_atomic_lock_t lock;
	struct pthread_queue lockers;
	int type;
	pthread_t owner;
	int count;
	int prioceiling;
};

struct pthread_cond {
	_atomic_lock_t lock;
	struct pthread_queue waiters;
	struct pthread_mutex *mutex;
	clockid_t clock;
};

struct pthread_rwlock {
	_atomic_lock_t lock;
	pthread_t owner;
	struct pthread_queue writers;
	int readers;
};
#endif /* FUTEX */
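
/*
 * Illustrative sketch (not part of the original header): the
 * uncontended fast path of a futex-style lock over the FUTEX variant
 * of struct pthread_mutex above.  Acquisition is a single
 * compare-and-swap on the lock word; only contended paths enter the
 * kernel.  The helper name and the use of the GCC __atomic builtin are
 * assumptions, not the actual rthread implementation:
 *
 *	static int
 *	_mutex_trylock_fast(struct pthread_mutex *m)
 *	{
 *		unsigned int unlocked = 0;
 *
 *		return __atomic_compare_exchange_n(&m->lock, &unlocked, 1,
 *		    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 *	}
 */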

struct pthread_mutex_attr {
	int ma_type;
	int ma_protocol;
	int ma_prioceiling;
};

struct pthread_cond_attr {
	clockid_t ca_clock;
};

struct pthread_attr {
	void *stack_addr;
	size_t stack_size;
	size_t guard_size;
	int detach_state;
	int contention_scope;
	int sched_policy;
	struct sched_param sched_param;
	int sched_inherit;
};

struct rthread_storage {
	int keyid;
	struct rthread_storage *next;
	void *data;
};

struct rthread_cleanup_fn {
	void (*fn)(void *);
	void *arg;
	struct rthread_cleanup_fn *next;
};

struct tib;
struct stack;
struct pthread {
	struct __sem donesem;
	unsigned int flags;
	_atomic_lock_t flags_lock;
	struct tib *tib;
	void *retval;
	void *(*fn)(void *);
	void *arg;
	char name[32];
	struct stack *stack;
	LIST_ENTRY(pthread) threads;
	TAILQ_ENTRY(pthread) waiting;
	pthread_cond_t blocking_cond;
	struct pthread_attr attr;
	struct rthread_storage *local_storage;
	struct rthread_cleanup_fn *cleanup_fns;

	/* cancel received in a delayed cancel block? */
	int delayed_cancel;
};
/* flags in pthread->flags */
#define	THREAD_DONE		0x001
#define	THREAD_DETACHED		0x002

/* flags in tib->tib_thread_flags */
#define	TIB_THREAD_ASYNC_CANCEL		0x001
#define	TIB_THREAD_INITIAL_STACK	0x002	/* has stack from exec */

#define ENTER_DELAYED_CANCEL_POINT(tib, self)				\
	(self)->delayed_cancel = 0;					\
	ENTER_CANCEL_POINT_INNER(tib, 1, 1)

/*
 * Internal functions exported from libc's thread bits for use by libpthread
 */
void	_spinlock(volatile _atomic_lock_t *);
int	_spinlocktry(volatile _atomic_lock_t *);
void	_spinunlock(volatile _atomic_lock_t *);
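
/*
 * Illustrative sketch (not part of the original header): typical
 * spinlock usage in the thread library.  _spinlock() busy-waits for
 * the lock, _spinlocktry() attempts it once without spinning, and
 * _spinunlock() releases it.  The _thread_list names and the helper
 * are hypothetical; 'threads' is the LIST_ENTRY in struct pthread:
 *
 *	static _atomic_lock_t _thread_list_lock = _SPINLOCK_UNLOCKED;
 *	static LIST_HEAD(, pthread) _thread_list;
 *
 *	void
 *	_thread_list_insert(struct pthread *thread)
 *	{
 *		_spinlock(&_thread_list_lock);
 *		LIST_INSERT_HEAD(&_thread_list, thread, threads);
 *		_spinunlock(&_thread_list_lock);
 *	}
 */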

void	_rthread_debug(int, const char *, ...)
	__attribute__((__format__ (printf, 2, 3)));
pid_t	_thread_dofork(pid_t (*_sys_fork)(void));
void	_thread_finalize(void);

/*
 * Threading syscalls not declared in system headers
 */
__dead void __threxit(pid_t *);
int	__thrsleep(const volatile void *, clockid_t,
	    const struct timespec *, volatile void *, const int *);
int	__thrwakeup(const volatile void *, int n);
int	__thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);
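
/*
 * Illustrative sketch (not part of the original header): the
 * sleep/wakeup pattern __thrsleep(2) and __thrwakeup(2) support.  A
 * waiter queues itself under a spinlock and passes that spinlock to
 * __thrsleep(), which releases it once the thread is committed to
 * sleeping; the waker calls __thrwakeup() on the same identifier.  The
 * condition-variable shape here is hypothetical:
 *
 *	waiter:
 *		_spinlock(&cond->lock);
 *		TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
 *		__thrsleep(self, CLOCK_REALTIME, NULL, &cond->lock, NULL);
 *
 *	waker:
 *		__thrwakeup(self, 1);
 */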

#endif /* _THREAD_PRIVATE_H_ */