/* Private libc-internal interface for mutex locks.  NPTL version.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#ifndef _LIBC_LOCKP_H
#define _LIBC_LOCKP_H 1

#include <pthread.h>
#define __need_NULL
#include <stddef.h>


/* Fortunately Linux now has a means to do locking which is realtime
   safe without the aid of the thread library.  We also need no fancy
   options like error-checking mutexes etc.  We only need simple
   locks, maybe recursive.  This can be easily and cheaply implemented
   using futexes.  We will use them everywhere except in ld.so since
   ld.so might be used on old kernels with a different libc.so.  */
#include <lowlevellock.h>
#include <tls.h>
#include <pthread-functions.h>

#if IS_IN (libpthread)
/* This gets us the declarations of the __pthread_* internal names,
   and hidden_proto for them.  */
# include <nptl/pthreadP.h>
#endif

/* Mutex type.  */
#if !IS_IN (libc) && !IS_IN (libpthread)
typedef pthread_mutex_t __libc_lock_t;
#else
typedef int __libc_lock_t;
#endif
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
typedef pthread_rwlock_t __libc_rwlock_t;

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;

/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
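
/* Illustrative sketch (hypothetical names): a lock defined in one
   translation unit, declared in another, and embedded by pointer in a
   structure whose layout is visible outside libc:

     __libc_lock_define (, foo_lock)          definition in foo.c
     __libc_lock_define (extern, foo_lock)    declaration used in bar.c

     struct foo_public
     {
       __libc_lock_define (, *foo_lockp)      pointer form for public use
     };

   The pointer form is needed because sizeof (__libc_lock_t) differs
   between libc itself and other components; a lock defined this way still
   has to be set up with __libc_lock_init before first use.  */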

/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  */

_Static_assert (LLL_LOCK_INITIALIZER == 0, "LLL_LOCK_INITIALIZER != 0");
#define _LIBC_LOCK_INITIALIZER LLL_LOCK_INITIALIZER
#define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
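
/* Illustrative sketch (hypothetical name): because LLL_LOCK_INITIALIZER is
   zero, the line

     __libc_lock_define_initialized (static, foo_lock)

   expands inside libc to a plain `static int foo_lock;' with no
   initializer, so the lock lands in .bss yet is already in the unlocked
   state; a later __libc_lock_init (foo_lock) merely stores the same zero
   value again.  */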

/* If we check for a weakly referenced symbol and then perform a
   normal jump to it, the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif
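
/* Illustrative sketch (hypothetical variable `m'): code outside libc and
   libpthread that may or may not be linked against the thread library can
   write

     __libc_lock_t m;
     int r = __libc_maybe_call (__pthread_mutex_lock, (&m), 0);

   If libpthread is absent, the weak reference __pthread_mutex_lock is a
   null pointer and the ELSE value 0 is used; otherwise the function is
   called through a plain function pointer, avoiding the extra PLT
   indirection described above.  */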

/* Call thread functions through the function pointer table.  */
#if defined SHARED && IS_IN (libc)
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  PTHFCT_CALL (ptr_##FUNC, ARGS)
#elif IS_IN (libpthread)
# define PTFAVAIL(NAME) 1
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  FUNC ARGS
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#else
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#endif


/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if IS_IN (libc) || IS_IN (libpthread)
# define __libc_lock_init(NAME) \
  ((void) ((NAME) = LLL_LOCK_INITIALIZER))
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#if defined SHARED && IS_IN (libc)
/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER) is inefficient.  */
# define __libc_rwlock_init(NAME) \
  ((void) __builtin_memset (&(NAME), '\0', sizeof (NAME)))
#else
# define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
#endif

/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if IS_IN (libc) || IS_IN (libpthread)
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#if defined SHARED && IS_IN (libc)
# define __libc_rwlock_fini(NAME) ((void) 0)
#else
# define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
#endif

/* Lock the named lock variable.  */
#if IS_IN (libc) || IS_IN (libpthread)
# ifndef __libc_lock_lock
#  define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
# endif
#else
# undef __libc_lock_lock
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Try to lock the named lock variable.  */
#if IS_IN (libc) || IS_IN (libpthread)
# ifndef __libc_lock_trylock
#  define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
# endif
#else
# undef __libc_lock_trylock
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)

/* Unlock the named lock variable.  */
#if IS_IN (libc) || IS_IN (libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
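
/* Illustrative sketch (hypothetical names): the usual pattern inside libc
   for protecting module-local state with the macros above:

     __libc_lock_define_initialized (static, foo_lock)
     static int foo_counter;

     static int
     foo_next (void)
     {
       int r;
       __libc_lock_lock (foo_lock);
       r = ++foo_counter;
       __libc_lock_unlock (foo_lock);
       return r;
     }

   __libc_lock_trylock (foo_lock) returns 0 on success and nonzero when the
   lock is already held.  */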

#ifdef SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif

/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {                                                                \
    if (PTFAVAIL (__pthread_once))                                    \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),       \
                                               INIT_FUNCTION));       \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                   \
      INIT_FUNCTION ();                                               \
      (ONCE_CONTROL) |= 2;                                            \
    }                                                                 \
  } while (0)

/* Get once control variable.  */
#define __libc_once_get(ONCE_CONTROL) ((ONCE_CONTROL) != PTHREAD_ONCE_INIT)
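
/* Illustrative sketch (hypothetical names): one-time initialization of
   module state using the macros above:

     __libc_once_define (static, foo_once);

     static void
     foo_init (void)
     {
       ... set up tables, allocate buffers, etc. ...
     }

     void
     foo_use (void)
     {
       __libc_once (foo_once, foo_init);
       ... foo_init has run exactly once by this point ...
     }

   __libc_once_get (foo_once) can afterwards be used to test whether the
   initialization has already happened.  */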

/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {                                                             \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)                                                          \
      _buffer.__routine (_buffer.__arg)


/* Normal cleanup handling, based on C cleanup attribute.  */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}

#define __libc_cleanup_push(fct, arg) \
  do {                                                                \
    struct __pthread_cleanup_frame __clframe                          \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine)))          \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg),           \
          .__do_it = 1 };

#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute);                                    \
  } while (0)
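
/* Illustrative sketch (hypothetical names): the push/pop pair above must
   appear in the same block, since __libc_cleanup_push opens a `do {' that
   __libc_cleanup_pop closes:

     void *buf = malloc (len);
     __libc_cleanup_push (free, buf);
     ... code that may be cancelled ...
     __libc_cleanup_pop (0);
     free (buf);

   Passing 0 to __libc_cleanup_pop disarms the handler; passing 1 would run
   `free (buf)' as soon as the cleanup frame goes out of scope.  */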


/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
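
/* Illustrative sketch (hypothetical names): per-thread state keyed by a
   __libc_key_t.  The key is created once; callers must cope with
   __libc_key_create failing (it returns nonzero when no thread library is
   available):

     static __libc_key_t foo_key;
     static int foo_key_ok;

     static void
     foo_key_init (void)
     {
       foo_key_ok = __libc_key_create (&foo_key, free) == 0;
     }

     static void *
     foo_get_buffer (void)
     {
       void *buf;
       if (!foo_key_ok)
         return NULL;
       buf = __libc_getspecific (foo_key);
       if (buf == NULL)
         {
           buf = malloc (FOO_BUFSIZE);
           __libc_setspecific (foo_key, buf);
         }
       return buf;
     }
*/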


/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
extern int __register_atfork (void (*__prepare) (void),
                              void (*__parent) (void),
                              void (*__child) (void),
                              void *__dso_handle);
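
/* Illustrative sketch (hypothetical handler names): a libc module that must
   reset its lock in the child after fork registers handlers once, passing
   NULL as the DSO handle since libc handlers are never removed:

     static void
     foo_prepare (void)
     {
       __libc_lock_lock (foo_lock);
     }

     static void
     foo_parent (void)
     {
       __libc_lock_unlock (foo_lock);
     }

     static void
     foo_child (void)
     {
       __libc_lock_init (foo_lock);
     }

     ...
     __register_atfork (foo_prepare, foo_parent, foo_child, NULL);
*/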

/* Functions that are used by this file and are internal to the GNU C
   library.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
                                 const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
                                        int __kind);

extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
                                  const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);

extern int __pthread_key_create (pthread_key_t *__key,
                                 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
                                  const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
                           void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
                             void (*__parent) (void),
                             void (*__child) (void));

extern int __pthread_setcancelstate (int state, int *oldstate);


/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
weak_extern (__pthread_mutex_init)
weak_extern (__pthread_mutex_destroy)
weak_extern (__pthread_mutex_lock)
weak_extern (__pthread_mutex_trylock)
weak_extern (__pthread_mutex_unlock)
weak_extern (__pthread_mutexattr_init)
weak_extern (__pthread_mutexattr_destroy)
weak_extern (__pthread_mutexattr_settype)
weak_extern (__pthread_rwlock_init)
weak_extern (__pthread_rwlock_destroy)
weak_extern (__pthread_rwlock_rdlock)
weak_extern (__pthread_rwlock_tryrdlock)
weak_extern (__pthread_rwlock_wrlock)
weak_extern (__pthread_rwlock_trywrlock)
weak_extern (__pthread_rwlock_unlock)
weak_extern (__pthread_key_create)
weak_extern (__pthread_setspecific)
weak_extern (__pthread_getspecific)
weak_extern (__pthread_once)
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (__pthread_setcancelstate)
weak_extern (_pthread_cleanup_push_defer)
weak_extern (_pthread_cleanup_pop_restore)
# else
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_init
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak __pthread_setcancelstate
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
# endif
#endif

#endif  /* libc-lockP.h */
