/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>
#include <shlib-compat.h>

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    /* No other thread or process can observe the lock word, so a plain
       store is sufficient to mark the mutex acquired.  */
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}

/* LLL_MUTEX_LOCK always takes the low-level lock;
   LLL_MUTEX_LOCK_OPTIMIZED first applies the single-thread short cut
   above.  */
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif

#ifndef LLL_MUTEX_READ_LOCK
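/* Plain relaxed load of the lock word; the adaptive spin loop below
   polls with this instead of a trylock so that spinning does not write
   to, and thus bounce, the cache line while the lock is held.  */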
# define LLL_MUTEX_READ_LOCK(mutex) \
  atomic_load_relaxed (&(mutex)->__data.__lock)
#endif

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  /* Robust, PI, and PP mutexes take the slow path below; the fast path
     handles only the standard kinds, optionally with elision.  */
  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
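      /* On targets with lock elision, FORCE_ELISION may upgrade the
         mutex to the elision kind and jump to the elision path.  */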
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
    elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions do not allow
         the elision flags to be set.  */
      /* Don't record owner or users for the elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          int spin_count, exp_backoff = 1;
          unsigned int jitter = get_jitter ();
          do
            {
              /* In each iteration, the spin count is the exponential
                 backoff value plus a random jitter in [0, exp_backoff-1];
                 the mask below yields that range, assuming
                 get_next_backoff keeps exp_backoff a power of two.  */
              spin_count = exp_backoff + (jitter & (exp_backoff - 1));
              cnt += spin_count;
              if (cnt >= max_cnt)
                {
                  /* The maximum spin count has been reached; stop
                     spinning and block on the wait queue.  */
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              do
                atomic_spin_nop ();
              while (--spin_count > 0);
              /* Prepare for the next iteration.  */
              exp_backoff = get_next_backoff (exp_backoff);
            }
          while (LLL_MUTEX_READ_LOCK (mutex) != 0
                 || LLL_MUTEX_TRYLOCK (mutex) != 0);

          /* Update the spin estimate as an exponential moving average:
             new = old + (sample - old) / 8.  */
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
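  /* NO_INCR is defined when this file is included by
     pthread_mutex_cond_lock.c: when the mutex is reacquired after a
     condition wait, __nusers still accounts for the original lock
     operation.  */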
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
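      /* Robust mutexes are linked on a per-thread list that the kernel
         walks when the thread exits, marking the futex word of a held
         mutex with FUTEX_OWNER_DIED; list_op_pending covers the window
         in which a mutex is acquired or released but not yet enqueued
         or dequeued.  */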
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          /* We cannot acquire the mutex, nor has its owner died.  Thus,
             try to block using futexes.  Set FUTEX_WAITERS if necessary
             so that other threads are aware that there are potentially
             threads blocked on the futex.  Restart if oldval changed in
             the meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              int val = atomic_compare_and_exchange_val_acq
                (&mutex->__data.__lock, oldval | FUTEX_WAITERS, oldval);
              if (val != oldval)
                {
                  oldval = val;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload the current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
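      /* For priority-inheritance mutexes the kernel arbitrates contended
         acquisition: the FUTEX_LOCK_PI operation used below boosts the
         priority of the current owner as needed.  */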
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        /* Try an uncontended userland acquisition first; if the mutex is
           already locked, let the kernel handle blocking and priority
           inheritance.  */
        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = __futex_lock_pi64 (&mutex->__data.__lock, 0 /* unused */,
                                       NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* There is no way to make progress in this case, so
                   delay the thread indefinitely.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
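            /* The previous owner died; clear FUTEX_OWNER_DIED and claim
               the mutex, reporting EOWNERDEAD below.  */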
            atomic_fetch_and_acquire (&mutex->__data.__lock,
                                      ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
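      /* Priority-protection (priority ceiling) mutexes: while it holds
         the lock, the owner runs at least at the mutex's priority
         ceiling, which the TPP code below establishes before the lock
         word is touched.  */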
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
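        /* The lock word of a PP mutex encodes the priority ceiling in
           its upper bits and the lock state in the low bits: ceilval
           means unlocked, ceilval | 1 locked without waiters, and
           ceilval | 2 locked with possible waiters.  */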
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                /* POSIX specifies EINVAL when the caller's priority is
                   higher than the mutex's priority ceiling.  Undo any
                   earlier TPP priority change before returning.  */
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

#if PTHREAD_MUTEX_VERSIONS
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
# ifndef SHARED
strong_alias (___pthread_mutex_lock, __pthread_mutex_lock)
# endif
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
                  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
               GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */


#ifdef NO_INCR
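/* Adjust the bookkeeping for a PI mutex that the kernel has already
   acquired on this thread's behalf (e.g. via requeue-PI in the
   condition-variable code); only the owner and, for recursive mutexes,
   the count remain to be updated.  */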
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif