1 | /* Copyright (C) 2002-2021 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <https://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <assert.h> |
20 | #include <errno.h> |
21 | #include <time.h> |
22 | #include <sys/param.h> |
23 | #include <sys/time.h> |
24 | #include "pthreadP.h" |
25 | #include <atomic.h> |
26 | #include <lowlevellock.h> |
27 | #include <not-cancel.h> |
28 | #include <futex-internal.h> |
29 | |
30 | #include <stap-probe.h> |
31 | |
32 | int |
33 | __pthread_mutex_clocklock_common (pthread_mutex_t *mutex, |
34 | clockid_t clockid, |
35 | const struct __timespec64 *abstime) |
36 | { |
37 | int oldval; |
38 | pid_t id = THREAD_GETMEM (THREAD_SELF, tid); |
39 | int result = 0; |
40 | |
  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for validity.  */
43 | |
44 | /* See concurrency notes regarding mutex type which is loaded from __kind |
45 | in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ |
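  /* The __builtin_expect below biases the generated code toward the
     common case, a plain PTHREAD_MUTEX_TIMED_NP mutex.  */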
46 | switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex), |
47 | PTHREAD_MUTEX_TIMED_NP)) |
48 | { |
49 | /* Recursive mutex. */ |
50 | case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP: |
51 | case PTHREAD_MUTEX_RECURSIVE_NP: |
52 | /* Check whether we already hold the mutex. */ |
53 | if (mutex->__data.__owner == id) |
54 | { |
55 | /* Just bump the counter. */ |
56 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
57 | /* Overflow of the counter. */ |
58 | return EAGAIN; |
59 | |
60 | ++mutex->__data.__count; |
61 | |
62 | goto out; |
63 | } |
64 | |
65 | /* We have to get the mutex. */ |
66 | result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime, |
67 | PTHREAD_MUTEX_PSHARED (mutex)); |
68 | |
69 | if (result != 0) |
70 | goto out; |
71 | |
72 | /* Only locked once so far. */ |
73 | mutex->__data.__count = 1; |
74 | break; |
75 | |
76 | /* Error checking mutex. */ |
77 | case PTHREAD_MUTEX_ERRORCHECK_NP: |
78 | /* Check whether we already hold the mutex. */ |
79 | if (__glibc_unlikely (mutex->__data.__owner == id)) |
80 | return EDEADLK; |
81 | |
82 | /* Don't do lock elision on an error checking mutex. */ |
83 | goto simple; |
84 | |
85 | case PTHREAD_MUTEX_TIMED_NP: |
86 | FORCE_ELISION (mutex, goto elision); |
87 | simple: |
88 | /* Normal mutex. */ |
89 | result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime, |
90 | PTHREAD_MUTEX_PSHARED (mutex)); |
91 | break; |
92 | |
93 | case PTHREAD_MUTEX_TIMED_ELISION_NP: |
94 | elision: __attribute__((unused)) |
      /* Don't record ownership: the elision path does not track an
	 owner.  */
96 | return lll_clocklock_elision (mutex->__data.__lock, |
97 | mutex->__data.__spins, |
98 | clockid, abstime, |
99 | PTHREAD_MUTEX_PSHARED (mutex)); |
100 | |
101 | |
102 | case PTHREAD_MUTEX_ADAPTIVE_NP: |
103 | if (lll_trylock (mutex->__data.__lock) != 0) |
104 | { |
105 | int cnt = 0; |
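	  /* Bound the spinning by the system-wide maximum and by twice
	     the historical spin count plus a small constant.  */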
106 | int max_cnt = MIN (max_adaptive_count (), |
107 | mutex->__data.__spins * 2 + 10); |
108 | do |
109 | { |
110 | if (cnt++ >= max_cnt) |
111 | { |
112 | result = __futex_clocklock64 (&mutex->__data.__lock, |
113 | clockid, abstime, |
114 | PTHREAD_MUTEX_PSHARED (mutex)); |
115 | break; |
116 | } |
117 | atomic_spin_nop (); |
118 | } |
119 | while (lll_trylock (mutex->__data.__lock) != 0); |
120 | |
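	  /* Let __spins approach the observed spin count: each update
	     moves it one eighth of the remaining distance, a cheap
	     exponential moving average that tracks contention.  */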
121 | mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8; |
122 | } |
123 | break; |
124 | |
125 | case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: |
126 | case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP: |
127 | case PTHREAD_MUTEX_ROBUST_NORMAL_NP: |
128 | case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: |
129 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
130 | &mutex->__data.__list.__next); |
131 | /* We need to set op_pending before starting the operation. Also |
132 | see comments at ENQUEUE_MUTEX. */ |
133 | __asm ("" ::: "memory" ); |
134 | |
135 | oldval = mutex->__data.__lock; |
136 | /* This is set to FUTEX_WAITERS iff we might have shared the |
137 | FUTEX_WAITERS flag with other threads, and therefore need to keep it |
138 | set to avoid lost wake-ups. We have the same requirement in the |
139 | simple mutex algorithm. */ |
140 | unsigned int assume_other_futex_waiters = 0; |
141 | while (1) |
142 | { |
143 | /* Try to acquire the lock through a CAS from 0 (not acquired) to |
144 | our TID | assume_other_futex_waiters. */ |
145 | if (__glibc_likely (oldval == 0)) |
146 | { |
147 | oldval |
148 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
149 | id | assume_other_futex_waiters, 0); |
150 | if (__glibc_likely (oldval == 0)) |
151 | break; |
152 | } |
153 | |
154 | if ((oldval & FUTEX_OWNER_DIED) != 0) |
155 | { |
156 | /* The previous owner died. Try locking the mutex. */ |
157 | int newval = id | (oldval & FUTEX_WAITERS) |
158 | | assume_other_futex_waiters; |
159 | |
160 | newval |
161 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
162 | newval, oldval); |
163 | if (newval != oldval) |
164 | { |
165 | oldval = newval; |
166 | continue; |
167 | } |
168 | |
169 | /* We got the mutex. */ |
170 | mutex->__data.__count = 1; |
171 | /* But it is inconsistent unless marked otherwise. */ |
172 | mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; |
173 | |
174 | /* We must not enqueue the mutex before we have acquired it. |
175 | Also see comments at ENQUEUE_MUTEX. */ |
176 | __asm ("" ::: "memory" ); |
177 | ENQUEUE_MUTEX (mutex); |
178 | /* We need to clear op_pending after we enqueue the mutex. */ |
179 | __asm ("" ::: "memory" ); |
180 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
181 | |
	      /* Note that we deliberately exit here.  If we fell through
		 to the end of the function, __nusers would be
		 incremented, which is not correct because the old owner
		 has to be discounted.  */
186 | return EOWNERDEAD; |
187 | } |
188 | |
189 | /* Check whether we already hold the mutex. */ |
190 | if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) |
191 | { |
192 | int kind = PTHREAD_MUTEX_TYPE (mutex); |
193 | if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP) |
194 | { |
195 | /* We do not need to ensure ordering wrt another memory |
196 | access. Also see comments at ENQUEUE_MUTEX. */ |
197 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
198 | NULL); |
199 | return EDEADLK; |
200 | } |
201 | |
202 | if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP) |
203 | { |
204 | /* We do not need to ensure ordering wrt another memory |
205 | access. */ |
206 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
207 | NULL); |
208 | |
209 | /* Just bump the counter. */ |
210 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
211 | /* Overflow of the counter. */ |
212 | return EAGAIN; |
213 | |
214 | ++mutex->__data.__count; |
215 | |
216 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
217 | |
218 | return 0; |
219 | } |
220 | } |
221 | |
222 | /* We are about to block; check whether the timeout is invalid. */ |
223 | if (! valid_nanoseconds (abstime->tv_nsec)) |
224 | return EINVAL; |
225 | /* Work around the fact that the kernel rejects negative timeout |
226 | values despite them being valid. */ |
227 | if (__glibc_unlikely (abstime->tv_sec < 0)) |
228 | return ETIMEDOUT; |
229 | |
230 | /* We cannot acquire the mutex nor has its owner died. Thus, try |
231 | to block using futexes. Set FUTEX_WAITERS if necessary so that |
232 | other threads are aware that there are potentially threads |
233 | blocked on the futex. Restart if oldval changed in the |
234 | meantime. */ |
235 | if ((oldval & FUTEX_WAITERS) == 0) |
236 | { |
237 | if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock, |
238 | oldval | FUTEX_WAITERS, |
239 | oldval) |
240 | != 0) |
241 | { |
242 | oldval = mutex->__data.__lock; |
243 | continue; |
244 | } |
245 | oldval |= FUTEX_WAITERS; |
246 | } |
247 | |
248 | /* It is now possible that we share the FUTEX_WAITERS flag with |
249 | another thread; therefore, update assume_other_futex_waiters so |
250 | that we do not forget about this when handling other cases |
251 | above and thus do not cause lost wake-ups. */ |
252 | assume_other_futex_waiters |= FUTEX_WAITERS; |
253 | |
254 | /* Block using the futex. */ |
255 | int err = __futex_abstimed_wait64 ( |
256 | (unsigned int *) &mutex->__data.__lock, |
257 | oldval, clockid, abstime, |
258 | PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); |
259 | /* The futex call timed out. */ |
260 | if (err == ETIMEDOUT || err == EOVERFLOW) |
261 | return err; |
262 | /* Reload current lock value. */ |
263 | oldval = mutex->__data.__lock; |
264 | } |
265 | |
266 | /* We have acquired the mutex; check if it is still consistent. */ |
267 | if (__builtin_expect (mutex->__data.__owner |
268 | == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) |
269 | { |
270 | /* This mutex is now not recoverable. */ |
271 | mutex->__data.__count = 0; |
272 | int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex); |
273 | lll_unlock (mutex->__data.__lock, private); |
274 | /* FIXME This violates the mutex destruction requirements. See |
275 | __pthread_mutex_unlock_full. */ |
276 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
277 | return ENOTRECOVERABLE; |
278 | } |
279 | |
280 | mutex->__data.__count = 1; |
281 | /* We must not enqueue the mutex before we have acquired it. |
282 | Also see comments at ENQUEUE_MUTEX. */ |
283 | __asm ("" ::: "memory" ); |
284 | ENQUEUE_MUTEX (mutex); |
285 | /* We need to clear op_pending after we enqueue the mutex. */ |
286 | __asm ("" ::: "memory" ); |
287 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
288 | break; |
289 | |
290 | /* The PI support requires the Linux futex system call. If that's not |
291 | available, pthread_mutex_init should never have allowed the type to |
292 | be set. So it will get the default case for an invalid type. */ |
293 | #ifdef __NR_futex |
294 | case PTHREAD_MUTEX_PI_RECURSIVE_NP: |
295 | case PTHREAD_MUTEX_PI_ERRORCHECK_NP: |
296 | case PTHREAD_MUTEX_PI_NORMAL_NP: |
297 | case PTHREAD_MUTEX_PI_ADAPTIVE_NP: |
298 | case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP: |
299 | case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP: |
300 | case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: |
301 | case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: |
302 | { |
	/* The futex FUTEX_LOCK_PI operation currently supports only
	   CLOCK_REALTIME.  Emulating another clock by converting a
	   CLOCK_MONOTONIC timeout to CLOCK_REALTIME would be skewed by
	   any intervening changes to the wall clock.  */
307 | if (__glibc_unlikely (clockid != CLOCK_REALTIME)) |
308 | return EINVAL; |
309 | |
310 | int kind, robust; |
311 | { |
312 | /* See concurrency notes regarding __kind in struct __pthread_mutex_s |
313 | in sysdeps/nptl/bits/thread-shared-types.h. */ |
314 | int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); |
315 | kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP; |
316 | robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; |
317 | } |
318 | |
319 | if (robust) |
320 | { |
321 | /* Note: robust PI futexes are signaled by setting bit 0. */ |
322 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
323 | (void *) (((uintptr_t) &mutex->__data.__list.__next) |
324 | | 1)); |
325 | /* We need to set op_pending before starting the operation. Also |
326 | see comments at ENQUEUE_MUTEX. */ |
327 | __asm ("" ::: "memory" ); |
328 | } |
329 | |
330 | oldval = mutex->__data.__lock; |
331 | |
332 | /* Check whether we already hold the mutex. */ |
333 | if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) |
334 | { |
335 | if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) |
336 | { |
337 | /* We do not need to ensure ordering wrt another memory |
338 | access. */ |
339 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
340 | return EDEADLK; |
341 | } |
342 | |
343 | if (kind == PTHREAD_MUTEX_RECURSIVE_NP) |
344 | { |
345 | /* We do not need to ensure ordering wrt another memory |
346 | access. */ |
347 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
348 | |
349 | /* Just bump the counter. */ |
350 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
351 | /* Overflow of the counter. */ |
352 | return EAGAIN; |
353 | |
354 | ++mutex->__data.__count; |
355 | |
356 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
357 | |
358 | return 0; |
359 | } |
360 | } |
361 | |
362 | oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
363 | id, 0); |
364 | |
365 | if (oldval != 0) |
366 | { |
	    /* The mutex is locked.  The kernel will now take care of
	       everything; FUTEX_LOCK_PI blocks until the absolute
	       timeout expires.  */
370 | int private = (robust |
371 | ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) |
372 | : PTHREAD_MUTEX_PSHARED (mutex)); |
373 | int e = futex_lock_pi64 (&mutex->__data.__lock, abstime, private); |
374 | if (e == ETIMEDOUT) |
375 | return ETIMEDOUT; |
376 | else if (e == ESRCH || e == EDEADLK) |
377 | { |
378 | assert (e != EDEADLK |
379 | || (kind != PTHREAD_MUTEX_ERRORCHECK_NP |
380 | && kind != PTHREAD_MUTEX_RECURSIVE_NP)); |
381 | /* ESRCH can happen only for non-robust PI mutexes where |
382 | the owner of the lock died. */ |
383 | assert (e != ESRCH || !robust); |
384 | |
385 | /* Delay the thread until the timeout is reached. Then return |
386 | ETIMEDOUT. */ |
387 | do |
388 | e = __futex_abstimed_wait64 (&(unsigned int){0}, 0, clockid, |
389 | abstime, private); |
390 | while (e != ETIMEDOUT); |
391 | return ETIMEDOUT; |
392 | } |
393 | else if (e != 0) |
394 | return e; |
395 | |
396 | oldval = mutex->__data.__lock; |
397 | |
398 | assert (robust || (oldval & FUTEX_OWNER_DIED) == 0); |
399 | } |
400 | |
401 | if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED)) |
402 | { |
403 | atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED); |
404 | |
405 | /* We got the mutex. */ |
406 | mutex->__data.__count = 1; |
407 | /* But it is inconsistent unless marked otherwise. */ |
408 | mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; |
409 | |
410 | /* We must not enqueue the mutex before we have acquired it. |
411 | Also see comments at ENQUEUE_MUTEX. */ |
412 | __asm ("" ::: "memory" ); |
413 | ENQUEUE_MUTEX_PI (mutex); |
414 | /* We need to clear op_pending after we enqueue the mutex. */ |
415 | __asm ("" ::: "memory" ); |
416 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
417 | |
	    /* Note that we deliberately exit here.  If we fell through
	       to the end of the function, __nusers would be incremented,
	       which is not correct because the old owner has to be
	       discounted.  */
422 | return EOWNERDEAD; |
423 | } |
424 | |
425 | if (robust |
426 | && __builtin_expect (mutex->__data.__owner |
427 | == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) |
428 | { |
429 | /* This mutex is now not recoverable. */ |
430 | mutex->__data.__count = 0; |
431 | |
432 | futex_unlock_pi ((unsigned int *) &mutex->__data.__lock, |
433 | PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); |
434 | |
435 | /* To the kernel, this will be visible after the kernel has |
436 | acquired the mutex in the syscall. */ |
437 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
438 | return ENOTRECOVERABLE; |
439 | } |
440 | |
441 | mutex->__data.__count = 1; |
442 | if (robust) |
443 | { |
444 | /* We must not enqueue the mutex before we have acquired it. |
445 | Also see comments at ENQUEUE_MUTEX. */ |
446 | __asm ("" ::: "memory" ); |
447 | ENQUEUE_MUTEX_PI (mutex); |
448 | /* We need to clear op_pending after we enqueue the mutex. */ |
449 | __asm ("" ::: "memory" ); |
450 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
451 | } |
452 | } |
453 | break; |
454 | #endif /* __NR_futex. */ |
455 | |
456 | case PTHREAD_MUTEX_PP_RECURSIVE_NP: |
457 | case PTHREAD_MUTEX_PP_ERRORCHECK_NP: |
458 | case PTHREAD_MUTEX_PP_NORMAL_NP: |
459 | case PTHREAD_MUTEX_PP_ADAPTIVE_NP: |
460 | { |
461 | /* See concurrency notes regarding __kind in struct __pthread_mutex_s |
462 | in sysdeps/nptl/bits/thread-shared-types.h. */ |
463 | int kind = atomic_load_relaxed (&(mutex->__data.__kind)) |
464 | & PTHREAD_MUTEX_KIND_MASK_NP; |
465 | |
466 | oldval = mutex->__data.__lock; |
467 | |
468 | /* Check whether we already hold the mutex. */ |
469 | if (mutex->__data.__owner == id) |
470 | { |
471 | if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) |
472 | return EDEADLK; |
473 | |
474 | if (kind == PTHREAD_MUTEX_RECURSIVE_NP) |
475 | { |
476 | /* Just bump the counter. */ |
477 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
478 | /* Overflow of the counter. */ |
479 | return EAGAIN; |
480 | |
481 | ++mutex->__data.__count; |
482 | |
483 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
484 | |
485 | return 0; |
486 | } |
487 | } |
488 | |
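	/* The lock word encodes the priority ceiling in the bits covered
	   by PTHREAD_MUTEX_PRIO_CEILING_MASK; the remaining low bits are
	   0 while unlocked, 1 while locked, and 2 while locked with
	   potential waiters.  */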
489 | int oldprio = -1, ceilval; |
490 | do |
491 | { |
492 | int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) |
493 | >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; |
494 | |
495 | if (__pthread_current_priority () > ceiling) |
496 | { |
497 | result = EINVAL; |
498 | failpp: |
499 | if (oldprio != -1) |
500 | __pthread_tpp_change_priority (oldprio, -1); |
501 | return result; |
502 | } |
503 | |
504 | result = __pthread_tpp_change_priority (oldprio, ceiling); |
505 | if (result) |
506 | return result; |
507 | |
508 | ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT; |
509 | oldprio = ceiling; |
510 | |
511 | oldval |
512 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
513 | ceilval | 1, ceilval); |
514 | |
515 | if (oldval == ceilval) |
516 | break; |
517 | |
518 | do |
519 | { |
520 | oldval |
521 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
522 | ceilval | 2, |
523 | ceilval | 1); |
524 | |
525 | if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval) |
526 | break; |
527 | |
528 | if (oldval != ceilval) |
529 | { |
530 | /* Reject invalid timeouts. */ |
531 | if (! valid_nanoseconds (abstime->tv_nsec)) |
532 | { |
533 | result = EINVAL; |
534 | goto failpp; |
535 | } |
536 | |
537 | int e = __futex_abstimed_wait64 ( |
538 | (unsigned int *) &mutex->__data.__lock, ceilval | 2, |
539 | clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex)); |
540 | if (e == ETIMEDOUT || e == EOVERFLOW) |
541 | return e; |
542 | } |
543 | } |
544 | while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
545 | ceilval | 2, ceilval) |
546 | != ceilval); |
547 | } |
548 | while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval); |
549 | |
550 | assert (mutex->__data.__owner == 0); |
551 | mutex->__data.__count = 1; |
552 | } |
553 | break; |
554 | |
555 | default: |
556 | /* Correct code cannot set any other type. */ |
557 | return EINVAL; |
558 | } |
559 | |
560 | if (result == 0) |
561 | { |
562 | /* Record the ownership. */ |
563 | mutex->__data.__owner = id; |
564 | ++mutex->__data.__nusers; |
565 | |
566 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
567 | } |
568 | |
569 | out: |
570 | return result; |
571 | } |
572 | |
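/* A caller computes an absolute deadline against the clock of its
   choice and passes it in.  A minimal sketch (illustrative only; the
   mutex M and HANDLE_TIMEOUT are hypothetical):

     struct timespec deadline;
     clock_gettime (CLOCK_MONOTONIC, &deadline);
     deadline.tv_sec += 2;       // Give up after roughly two seconds.
     int err = pthread_mutex_clocklock (&m, CLOCK_MONOTONIC, &deadline);
     if (err == ETIMEDOUT)
       handle_timeout ();
*/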
573 | int |
574 | ___pthread_mutex_clocklock64 (pthread_mutex_t *mutex, |
575 | clockid_t clockid, |
576 | const struct __timespec64 *abstime) |
577 | { |
578 | if (__glibc_unlikely (!futex_abstimed_supported_clockid (clockid))) |
579 | return EINVAL; |
580 | |
581 | LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime); |
582 | return __pthread_mutex_clocklock_common (mutex, clockid, abstime); |
583 | } |
584 | |
585 | #if __TIMESIZE == 64 |
586 | strong_alias (___pthread_mutex_clocklock64, ___pthread_mutex_clocklock) |
#else /* __TIMESIZE != 64 */
588 | strong_alias (___pthread_mutex_clocklock64, __pthread_mutex_clocklock64) |
589 | libc_hidden_def (__pthread_mutex_clocklock64) |
590 | |
591 | int |
592 | ___pthread_mutex_clocklock (pthread_mutex_t *mutex, |
593 | clockid_t clockid, |
594 | const struct timespec *abstime) |
595 | { |
596 | struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime); |
597 | |
598 | return ___pthread_mutex_clocklock64 (mutex, clockid, &ts64); |
599 | } |
#endif /* __TIMESIZE != 64 */
601 | libc_hidden_ver (___pthread_mutex_clocklock, __pthread_mutex_clocklock) |
602 | #ifndef SHARED |
603 | strong_alias (___pthread_mutex_clocklock, __pthread_mutex_clocklock) |
604 | #endif |
605 | versioned_symbol (libc, ___pthread_mutex_clocklock, |
606 | pthread_mutex_clocklock, GLIBC_2_34); |
607 | #if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_30, GLIBC_2_34) |
608 | compat_symbol (libpthread, ___pthread_mutex_clocklock, |
609 | pthread_mutex_clocklock, GLIBC_2_30); |
610 | #endif |
611 | |
612 | int |
613 | ___pthread_mutex_timedlock64 (pthread_mutex_t *mutex, |
614 | const struct __timespec64 *abstime) |
615 | { |
616 | LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime); |
617 | return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime); |
618 | } |
619 | |
620 | #if __TIMESIZE == 64 |
621 | strong_alias (___pthread_mutex_timedlock64, ___pthread_mutex_timedlock) |
#else /* __TIMESIZE != 64 */
strong_alias (___pthread_mutex_timedlock64, __pthread_mutex_timedlock64)
624 | libc_hidden_def (__pthread_mutex_timedlock64) |
625 | |
626 | int |
627 | ___pthread_mutex_timedlock (pthread_mutex_t *mutex, |
628 | const struct timespec *abstime) |
629 | { |
630 | struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime); |
631 | |
632 | return __pthread_mutex_timedlock64 (mutex, &ts64); |
633 | } |
#endif /* __TIMESIZE != 64 */
635 | versioned_symbol (libc, ___pthread_mutex_timedlock, |
636 | pthread_mutex_timedlock, GLIBC_2_34); |
637 | libc_hidden_ver (___pthread_mutex_timedlock, __pthread_mutex_timedlock) |
638 | #ifndef SHARED |
639 | strong_alias (___pthread_mutex_timedlock, __pthread_mutex_timedlock) |
640 | #endif |
641 | |
642 | #if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34) |
643 | compat_symbol (libpthread, ___pthread_mutex_timedlock, |
644 | pthread_mutex_timedlock, GLIBC_2_2); |
645 | #endif |
646 | |