/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>
#include <shlib-compat.h>

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}
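
/* For illustration: in a process that has never created a second
   thread,

     pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
     pthread_mutex_lock (&m);     <- plain store, no atomic RMW
     pthread_mutex_unlock (&m);

   takes the fast path above: SINGLE_THREAD_P is true, the mutex is
   private, and __lock is 0, so acquisition is a simple store of 1
   with no atomic read-modify-write and no futex syscall.  */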

# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
                      PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif
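
/* A sketch of the alternative definitions (see pthread_mutex_cond_lock.c
   for the authoritative set): when included from there, LLL_MUTEX_LOCK
   roughly maps to lll_cond_lock, LLL_ROBUST_MUTEX_LOCK_MODIFIER is
   FUTEX_WAITERS because the lock is acquired on behalf of a condition
   variable waiter that may share the waiters flag, NO_INCR is defined
   so __nusers is not incremented a second time, and PTHREAD_MUTEX_LOCK
   becomes __pthread_mutex_cond_lock.  */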

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow the elision flags to be set.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
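          /* The update above keeps __spins as an exponentially weighted
             moving average of the observed spin counts, with weight 1/8
             for the newest sample; the spin bound for the next
             acquisition is then 2 * __spins + 10, capped by
             max_adaptive_count ().  */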
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
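
/* For example, with an error-checking mutex the self-deadlock case
   above is reported instead of hanging:

     pthread_mutex_t m;
     pthread_mutexattr_t a;
     pthread_mutexattr_init (&a);
     pthread_mutexattr_settype (&a, PTHREAD_MUTEX_ERRORCHECK);
     pthread_mutex_init (&m, &a);
     pthread_mutex_lock (&m);     <- returns 0
     pthread_mutex_lock (&m);     <- returns EDEADLK

   A PTHREAD_MUTEX_RECURSIVE mutex would instead return 0 and bump
   __count, and a normal mutex would deadlock.  */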

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
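      /* Sketch of the robust lock word (per the Linux robust-futex ABI):
         the low 30 bits (FUTEX_TID_MASK) hold the owner's TID, or 0 if
         unlocked; FUTEX_WAITERS marks possibly blocked waiters; and
         FUTEX_OWNER_DIED is set by the kernel when the owner exited
         without unlocking.  */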
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
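
          /* A caller receiving EOWNERDEAD is expected to repair the
             protected state and mark the mutex consistent before
             unlocking, e.g.:

               int e = pthread_mutex_lock (&m);
               if (e == EOWNERDEAD)
                 {
                   repair_shared_state ();      <- hypothetical helper
                   pthread_mutex_consistent (&m);
                 }
               pthread_mutex_unlock (&m);

             Unlocking without pthread_mutex_consistent makes the mutex
             permanently unusable; later lock attempts then return
             ENOTRECOVERABLE.  */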

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }
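
        /* Background (kernel robust-futex handling, roughly): when a
           thread exits, the kernel walks its registered robust list;
           for each lock word that still contains the dead thread's TID
           it sets FUTEX_OWNER_DIED and wakes one waiter.  That is what
           makes the FUTEX_OWNER_DIED paths in this function
           reachable.  */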

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi64 (&mutex->__data.__lock, NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
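
        /* On this slow path the FUTEX_LOCK_PI operation itself writes
           our TID into the lock word once we own the lock (keeping
           FUTEX_WAITERS set while there is contention), and the
           kernel's rt-mutex code lends the waiters' priority to the
           current owner; user space therefore does not CAS again after
           the syscall, it only reloads the lock word.  */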

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
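
        /* Sketch of the priority-protection lock word: the bits under
           PTHREAD_MUTEX_PRIO_CEILING_MASK hold the current ceiling,
           and the low bits encode the lock state relative to ceilval:
           ceilval + 0 is unlocked, ceilval + 1 is locked with no
           waiters, and ceilval + 2 is locked with possible waiters
           (the value the futex_wait below blocks on).  */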

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
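
      /* For example, an application creates such a mutex with the
         POSIX priority-protect protocol:

           pthread_mutex_t m;
           pthread_mutexattr_t a;
           pthread_mutexattr_init (&a);
           pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_PROTECT);
           pthread_mutexattr_setprioceiling (&a, 30);
           pthread_mutex_init (&m, &a);

         While the mutex is held, __pthread_tpp_change_priority above
         raises the owner to the ceiling priority (30 here) so that no
         thread that may lock the mutex can preempt the owner.  */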

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

#if PTHREAD_MUTEX_VERSIONS
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
# ifndef SHARED
strong_alias (___pthread_mutex_lock, __pthread_mutex_lock)
# endif
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
                  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
               GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */


#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
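
/* A note on intent (inferred from the asserts above): this adjustment
   handles the case where the kernel has already acquired a private,
   non-robust PI mutex on the thread's behalf, for instance through a
   requeue-PI operation on a condition variable, so only the user-space
   bookkeeping (owner, and count for recursive PI mutexes) remains to
   be done here.  */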