1 | /* Copyright (C) 2002-2018 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <http://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <assert.h> |
20 | #include <errno.h> |
21 | #include <time.h> |
22 | #include <sys/param.h> |
23 | #include <sys/time.h> |
24 | #include "pthreadP.h" |
25 | #include <atomic.h> |
26 | #include <lowlevellock.h> |
27 | #include <not-cancel.h> |
28 | |
29 | #include <stap-probe.h> |
30 | |
31 | #ifndef lll_timedlock_elision |
32 | #define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c) |
33 | #endif |
34 | |
35 | #ifndef lll_trylock_elision |
36 | #define lll_trylock_elision(a,t) lll_trylock(a) |
37 | #endif |
38 | |
39 | #ifndef FORCE_ELISION |
40 | #define FORCE_ELISION(m, s) |
41 | #endif |
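
/* On targets without lock elision support, the macros above fall back
   to the plain lowlevellock primitives.  For example, on such a target

     lll_timedlock_elision (mutex->__data.__lock, mutex->__data.__spins,
                            abstime, PTHREAD_MUTEX_PSHARED (mutex))

   simply expands to

     lll_timedlock (mutex->__data.__lock, abstime,
                    PTHREAD_MUTEX_PSHARED (mutex))

   and FORCE_ELISION expands to nothing.  */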
42 | |
43 | int |
44 | __pthread_mutex_timedlock (pthread_mutex_t *mutex, |
45 | const struct timespec *abstime) |
46 | { |
47 | int oldval; |
48 | pid_t id = THREAD_GETMEM (THREAD_SELF, tid); |
49 | int result = 0; |
50 | |
51 | LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime); |
52 | |
  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for validity; the check is done only
     just before blocking.  */
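
  /* Illustrative caller-side usage (not part of this file; m is a
     hypothetical mutex): the caller passes an absolute CLOCK_REALTIME
     deadline, e.g.

       struct timespec ts;
       clock_gettime (CLOCK_REALTIME, &ts);
       ts.tv_sec += 2;
       int err = pthread_mutex_timedlock (&m, &ts);

     and a return value of ETIMEDOUT means the deadline passed before
     the mutex could be acquired.  */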
55 | |
56 | switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex), |
57 | PTHREAD_MUTEX_TIMED_NP)) |
58 | { |
59 | /* Recursive mutex. */ |
60 | case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP: |
61 | case PTHREAD_MUTEX_RECURSIVE_NP: |
62 | /* Check whether we already hold the mutex. */ |
63 | if (mutex->__data.__owner == id) |
64 | { |
65 | /* Just bump the counter. */ |
66 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
67 | /* Overflow of the counter. */ |
68 | return EAGAIN; |
69 | |
70 | ++mutex->__data.__count; |
71 | |
72 | goto out; |
73 | } |
74 | |
75 | /* We have to get the mutex. */ |
76 | result = lll_timedlock (mutex->__data.__lock, abstime, |
77 | PTHREAD_MUTEX_PSHARED (mutex)); |
78 | |
79 | if (result != 0) |
80 | goto out; |
81 | |
82 | /* Only locked once so far. */ |
83 | mutex->__data.__count = 1; |
84 | break; |
85 | |
86 | /* Error checking mutex. */ |
87 | case PTHREAD_MUTEX_ERRORCHECK_NP: |
88 | /* Check whether we already hold the mutex. */ |
89 | if (__glibc_unlikely (mutex->__data.__owner == id)) |
90 | return EDEADLK; |
91 | |
92 | /* Don't do lock elision on an error checking mutex. */ |
93 | goto simple; |
94 | |
95 | case PTHREAD_MUTEX_TIMED_NP: |
96 | FORCE_ELISION (mutex, goto elision); |
97 | simple: |
98 | /* Normal mutex. */ |
99 | result = lll_timedlock (mutex->__data.__lock, abstime, |
100 | PTHREAD_MUTEX_PSHARED (mutex)); |
101 | break; |
102 | |
103 | case PTHREAD_MUTEX_TIMED_ELISION_NP: |
104 | elision: __attribute__((unused)) |
    /* Don't record ownership: with elision the lock is not actually
       acquired.  */
106 | return lll_timedlock_elision (mutex->__data.__lock, |
107 | mutex->__data.__spins, |
108 | abstime, |
109 | PTHREAD_MUTEX_PSHARED (mutex)); |
110 | |
111 | |
112 | case PTHREAD_MUTEX_ADAPTIVE_NP: |
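      /* Spinning only makes sense when another processor can release
	 the lock while we spin; on a uniprocessor, fall back to
	 blocking immediately.  */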
113 | if (! __is_smp) |
114 | goto simple; |
115 | |
116 | if (lll_trylock (mutex->__data.__lock) != 0) |
117 | { |
118 | int cnt = 0; |
119 | int max_cnt = MIN (MAX_ADAPTIVE_COUNT, |
120 | mutex->__data.__spins * 2 + 10); |
121 | do |
122 | { |
123 | if (cnt++ >= max_cnt) |
124 | { |
125 | result = lll_timedlock (mutex->__data.__lock, abstime, |
126 | PTHREAD_MUTEX_PSHARED (mutex)); |
127 | break; |
128 | } |
129 | atomic_spin_nop (); |
130 | } |
131 | while (lll_trylock (mutex->__data.__lock) != 0); |
132 | |
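	  /* Update the spin estimate with an exponentially weighted
	     moving average: move __spins one eighth of the way towards
	     the spin count just observed.  E.g., with __spins == 10 and
	     cnt == 50, __spins becomes 10 + (50 - 10) / 8 == 15.  */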
133 | mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8; |
134 | } |
135 | break; |
136 | |
137 | case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: |
138 | case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP: |
139 | case PTHREAD_MUTEX_ROBUST_NORMAL_NP: |
140 | case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: |
141 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
142 | &mutex->__data.__list.__next); |
143 | /* We need to set op_pending before starting the operation. Also |
144 | see comments at ENQUEUE_MUTEX. */ |
145 | __asm ("" ::: "memory" ); |
146 | |
147 | oldval = mutex->__data.__lock; |
148 | /* This is set to FUTEX_WAITERS iff we might have shared the |
149 | FUTEX_WAITERS flag with other threads, and therefore need to keep it |
150 | set to avoid lost wake-ups. We have the same requirement in the |
151 | simple mutex algorithm. */ |
152 | unsigned int assume_other_futex_waiters = 0; |
153 | while (1) |
154 | { |
155 | /* Try to acquire the lock through a CAS from 0 (not acquired) to |
156 | our TID | assume_other_futex_waiters. */ |
157 | if (__glibc_likely (oldval == 0)) |
158 | { |
159 | oldval |
160 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
161 | id | assume_other_futex_waiters, 0); |
162 | if (__glibc_likely (oldval == 0)) |
163 | break; |
164 | } |
165 | |
166 | if ((oldval & FUTEX_OWNER_DIED) != 0) |
167 | { |
168 | /* The previous owner died. Try locking the mutex. */ |
169 | int newval = id | (oldval & FUTEX_WAITERS) |
170 | | assume_other_futex_waiters; |
171 | |
172 | newval |
173 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
174 | newval, oldval); |
175 | if (newval != oldval) |
176 | { |
177 | oldval = newval; |
178 | continue; |
179 | } |
180 | |
181 | /* We got the mutex. */ |
182 | mutex->__data.__count = 1; |
183 | /* But it is inconsistent unless marked otherwise. */ |
184 | mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; |
185 | |
186 | /* We must not enqueue the mutex before we have acquired it. |
187 | Also see comments at ENQUEUE_MUTEX. */ |
188 | __asm ("" ::: "memory" ); |
189 | ENQUEUE_MUTEX (mutex); |
190 | /* We need to clear op_pending after we enqueue the mutex. */ |
191 | __asm ("" ::: "memory" ); |
192 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
193 | |
	      /* Note that we deliberately return here.  If we fell
		 through to the end of the function, __nusers would be
		 incremented, which is not correct because the old
		 owner has to be discounted.  */
198 | return EOWNERDEAD; |
199 | } |
200 | |
201 | /* Check whether we already hold the mutex. */ |
202 | if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) |
203 | { |
204 | int kind = PTHREAD_MUTEX_TYPE (mutex); |
205 | if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP) |
206 | { |
207 | /* We do not need to ensure ordering wrt another memory |
208 | access. Also see comments at ENQUEUE_MUTEX. */ |
209 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
210 | NULL); |
211 | return EDEADLK; |
212 | } |
213 | |
214 | if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP) |
215 | { |
216 | /* We do not need to ensure ordering wrt another memory |
217 | access. */ |
218 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
219 | NULL); |
220 | |
221 | /* Just bump the counter. */ |
222 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
223 | /* Overflow of the counter. */ |
224 | return EAGAIN; |
225 | |
226 | ++mutex->__data.__count; |
227 | |
228 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
229 | |
230 | return 0; |
231 | } |
232 | } |
233 | |
234 | /* We are about to block; check whether the timeout is invalid. */ |
235 | if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) |
236 | return EINVAL; |
237 | /* Work around the fact that the kernel rejects negative timeout |
238 | values despite them being valid. */ |
239 | if (__glibc_unlikely (abstime->tv_sec < 0)) |
240 | return ETIMEDOUT; |
241 | #if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \ |
242 | || !defined lll_futex_timed_wait_bitset) |
243 | struct timeval tv; |
244 | struct timespec rt; |
245 | |
246 | /* Get the current time. */ |
247 | (void) __gettimeofday (&tv, NULL); |
248 | |
249 | /* Compute relative timeout. */ |
250 | rt.tv_sec = abstime->tv_sec - tv.tv_sec; |
251 | rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000; |
252 | if (rt.tv_nsec < 0) |
253 | { |
254 | rt.tv_nsec += 1000000000; |
255 | --rt.tv_sec; |
256 | } |
257 | |
258 | /* Already timed out? */ |
259 | if (rt.tv_sec < 0) |
260 | return ETIMEDOUT; |
261 | #endif |
262 | |
	  /* We cannot acquire the mutex, nor has its owner died.  Thus, try
264 | to block using futexes. Set FUTEX_WAITERS if necessary so that |
265 | other threads are aware that there are potentially threads |
266 | blocked on the futex. Restart if oldval changed in the |
267 | meantime. */ |
268 | if ((oldval & FUTEX_WAITERS) == 0) |
269 | { |
270 | if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock, |
271 | oldval | FUTEX_WAITERS, |
272 | oldval) |
273 | != 0) |
274 | { |
275 | oldval = mutex->__data.__lock; |
276 | continue; |
277 | } |
278 | oldval |= FUTEX_WAITERS; |
279 | } |
280 | |
281 | /* It is now possible that we share the FUTEX_WAITERS flag with |
282 | another thread; therefore, update assume_other_futex_waiters so |
283 | that we do not forget about this when handling other cases |
284 | above and thus do not cause lost wake-ups. */ |
285 | assume_other_futex_waiters |= FUTEX_WAITERS; |
286 | |
287 | /* Block using the futex. */ |
288 | #if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \ |
289 | || !defined lll_futex_timed_wait_bitset) |
	  lll_futex_timed_wait (&mutex->__data.__lock, oldval,
				&rt, PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
292 | #else |
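	  /* lll_futex_timed_wait_bitset with FUTEX_CLOCK_REALTIME makes
	     the kernel interpret the timeout as an absolute
	     CLOCK_REALTIME value, so abstime can be passed through
	     without conversion.  */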
293 | int err = lll_futex_timed_wait_bitset (&mutex->__data.__lock, |
294 | oldval, abstime, FUTEX_CLOCK_REALTIME, |
295 | PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); |
296 | /* The futex call timed out. */ |
297 | if (err == -ETIMEDOUT) |
298 | return -err; |
299 | #endif |
300 | /* Reload current lock value. */ |
301 | oldval = mutex->__data.__lock; |
302 | } |
303 | |
304 | /* We have acquired the mutex; check if it is still consistent. */ |
305 | if (__builtin_expect (mutex->__data.__owner |
306 | == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) |
307 | { |
308 | /* This mutex is now not recoverable. */ |
309 | mutex->__data.__count = 0; |
310 | int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex); |
311 | lll_unlock (mutex->__data.__lock, private); |
312 | /* FIXME This violates the mutex destruction requirements. See |
313 | __pthread_mutex_unlock_full. */ |
314 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
315 | return ENOTRECOVERABLE; |
316 | } |
317 | |
318 | mutex->__data.__count = 1; |
319 | /* We must not enqueue the mutex before we have acquired it. |
320 | Also see comments at ENQUEUE_MUTEX. */ |
321 | __asm ("" ::: "memory" ); |
322 | ENQUEUE_MUTEX (mutex); |
323 | /* We need to clear op_pending after we enqueue the mutex. */ |
324 | __asm ("" ::: "memory" ); |
325 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
326 | break; |
327 | |
328 | /* The PI support requires the Linux futex system call. If that's not |
329 | available, pthread_mutex_init should never have allowed the type to |
330 | be set. So it will get the default case for an invalid type. */ |
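    /* A caller obtains a PI mutex through the standard attribute
       interface, roughly (illustrative only; error handling omitted,
       attr and m are hypothetical):

	 pthread_mutexattr_t attr;
	 pthread_mutexattr_init (&attr);
	 pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
	 pthread_mutex_init (&m, &attr);  */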
331 | #ifdef __NR_futex |
332 | case PTHREAD_MUTEX_PI_RECURSIVE_NP: |
333 | case PTHREAD_MUTEX_PI_ERRORCHECK_NP: |
334 | case PTHREAD_MUTEX_PI_NORMAL_NP: |
335 | case PTHREAD_MUTEX_PI_ADAPTIVE_NP: |
336 | case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP: |
337 | case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP: |
338 | case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: |
339 | case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: |
340 | { |
341 | int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; |
342 | int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; |
343 | |
344 | if (robust) |
345 | { |
346 | /* Note: robust PI futexes are signaled by setting bit 0. */ |
347 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
348 | (void *) (((uintptr_t) &mutex->__data.__list.__next) |
349 | | 1)); |
350 | /* We need to set op_pending before starting the operation. Also |
351 | see comments at ENQUEUE_MUTEX. */ |
352 | __asm ("" ::: "memory" ); |
353 | } |
354 | |
355 | oldval = mutex->__data.__lock; |
356 | |
357 | /* Check whether we already hold the mutex. */ |
358 | if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) |
359 | { |
360 | if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) |
361 | { |
362 | /* We do not need to ensure ordering wrt another memory |
363 | access. */ |
364 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
365 | return EDEADLK; |
366 | } |
367 | |
368 | if (kind == PTHREAD_MUTEX_RECURSIVE_NP) |
369 | { |
370 | /* We do not need to ensure ordering wrt another memory |
371 | access. */ |
372 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
373 | |
374 | /* Just bump the counter. */ |
375 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
376 | /* Overflow of the counter. */ |
377 | return EAGAIN; |
378 | |
379 | ++mutex->__data.__count; |
380 | |
381 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
382 | |
383 | return 0; |
384 | } |
385 | } |
386 | |
387 | oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
388 | id, 0); |
389 | |
390 | if (oldval != 0) |
391 | { |
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  FUTEX_LOCK_PI takes an absolute
	       CLOCK_REALTIME timeout, so abstime is passed through
	       unconverted.  */
395 | int private = (robust |
396 | ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) |
397 | : PTHREAD_MUTEX_PSHARED (mutex)); |
398 | INTERNAL_SYSCALL_DECL (__err); |
399 | |
400 | int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock, |
401 | __lll_private_flag (FUTEX_LOCK_PI, |
402 | private), 1, |
403 | abstime); |
404 | if (INTERNAL_SYSCALL_ERROR_P (e, __err)) |
405 | { |
406 | if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT) |
407 | return ETIMEDOUT; |
408 | |
409 | if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH |
410 | || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK) |
411 | { |
412 | assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK |
413 | || (kind != PTHREAD_MUTEX_ERRORCHECK_NP |
414 | && kind != PTHREAD_MUTEX_RECURSIVE_NP)); |
415 | /* ESRCH can happen only for non-robust PI mutexes where |
416 | the owner of the lock died. */ |
417 | assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH |
418 | || !robust); |
419 | |
420 | /* Delay the thread until the timeout is reached. |
421 | Then return ETIMEDOUT. */ |
422 | struct timespec reltime; |
423 | struct timespec now; |
424 | |
425 | INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME, |
426 | &now); |
427 | reltime.tv_sec = abstime->tv_sec - now.tv_sec; |
428 | reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec; |
429 | if (reltime.tv_nsec < 0) |
430 | { |
431 | reltime.tv_nsec += 1000000000; |
432 | --reltime.tv_sec; |
433 | } |
434 | if (reltime.tv_sec >= 0) |
435 | while (__nanosleep_nocancel (&reltime, &reltime) != 0) |
436 | continue; |
437 | |
438 | return ETIMEDOUT; |
439 | } |
440 | |
441 | return INTERNAL_SYSCALL_ERRNO (e, __err); |
442 | } |
443 | |
444 | oldval = mutex->__data.__lock; |
445 | |
446 | assert (robust || (oldval & FUTEX_OWNER_DIED) == 0); |
447 | } |
448 | |
449 | if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED)) |
450 | { |
451 | atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED); |
452 | |
453 | /* We got the mutex. */ |
454 | mutex->__data.__count = 1; |
455 | /* But it is inconsistent unless marked otherwise. */ |
456 | mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; |
457 | |
458 | /* We must not enqueue the mutex before we have acquired it. |
459 | Also see comments at ENQUEUE_MUTEX. */ |
460 | __asm ("" ::: "memory" ); |
461 | ENQUEUE_MUTEX_PI (mutex); |
462 | /* We need to clear op_pending after we enqueue the mutex. */ |
463 | __asm ("" ::: "memory" ); |
464 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
465 | |
	    /* Note that we deliberately return here.  If we fell
	       through to the end of the function, __nusers would be
	       incremented, which is not correct because the old owner
	       has to be discounted.  */
470 | return EOWNERDEAD; |
471 | } |
472 | |
473 | if (robust |
474 | && __builtin_expect (mutex->__data.__owner |
475 | == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) |
476 | { |
477 | /* This mutex is now not recoverable. */ |
478 | mutex->__data.__count = 0; |
479 | |
480 | INTERNAL_SYSCALL_DECL (__err); |
481 | INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock, |
482 | __lll_private_flag (FUTEX_UNLOCK_PI, |
483 | PTHREAD_ROBUST_MUTEX_PSHARED (mutex)), |
484 | 0, 0); |
485 | |
486 | /* To the kernel, this will be visible after the kernel has |
487 | acquired the mutex in the syscall. */ |
488 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
489 | return ENOTRECOVERABLE; |
490 | } |
491 | |
492 | mutex->__data.__count = 1; |
493 | if (robust) |
494 | { |
495 | /* We must not enqueue the mutex before we have acquired it. |
496 | Also see comments at ENQUEUE_MUTEX. */ |
497 | __asm ("" ::: "memory" ); |
498 | ENQUEUE_MUTEX_PI (mutex); |
499 | /* We need to clear op_pending after we enqueue the mutex. */ |
500 | __asm ("" ::: "memory" ); |
501 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
502 | } |
503 | } |
504 | break; |
505 | #endif /* __NR_futex. */ |
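
    /* Priority-protected mutexes are created analogously, roughly
       (illustrative only, with attr as in the PI example above and
       ceiling a hypothetical priority value):

	 pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);
	 pthread_mutexattr_setprioceiling (&attr, ceiling);  */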
506 | |
507 | case PTHREAD_MUTEX_PP_RECURSIVE_NP: |
508 | case PTHREAD_MUTEX_PP_ERRORCHECK_NP: |
509 | case PTHREAD_MUTEX_PP_NORMAL_NP: |
510 | case PTHREAD_MUTEX_PP_ADAPTIVE_NP: |
511 | { |
512 | int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; |
513 | |
514 | oldval = mutex->__data.__lock; |
515 | |
516 | /* Check whether we already hold the mutex. */ |
517 | if (mutex->__data.__owner == id) |
518 | { |
519 | if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) |
520 | return EDEADLK; |
521 | |
522 | if (kind == PTHREAD_MUTEX_RECURSIVE_NP) |
523 | { |
524 | /* Just bump the counter. */ |
525 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
526 | /* Overflow of the counter. */ |
527 | return EAGAIN; |
528 | |
529 | ++mutex->__data.__count; |
530 | |
531 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
532 | |
533 | return 0; |
534 | } |
535 | } |
536 | |
537 | int oldprio = -1, ceilval; |
538 | do |
539 | { |
540 | int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) |
541 | >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; |
542 | |
543 | if (__pthread_current_priority () > ceiling) |
544 | { |
545 | result = EINVAL; |
546 | failpp: |
547 | if (oldprio != -1) |
548 | __pthread_tpp_change_priority (oldprio, -1); |
549 | return result; |
550 | } |
551 | |
552 | result = __pthread_tpp_change_priority (oldprio, ceiling); |
553 | if (result) |
554 | return result; |
555 | |
556 | ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT; |
557 | oldprio = ceiling; |
558 | |
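	  /* The low bits of the lock word encode the lock state on top
	     of the ceiling value: ceilval means unlocked, ceilval | 1
	     locked with no waiters, and ceilval | 2 locked with
	     possible waiters.  */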
559 | oldval |
560 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
561 | ceilval | 1, ceilval); |
562 | |
563 | if (oldval == ceilval) |
564 | break; |
565 | |
566 | do |
567 | { |
568 | oldval |
569 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
570 | ceilval | 2, |
571 | ceilval | 1); |
572 | |
573 | if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval) |
574 | break; |
575 | |
576 | if (oldval != ceilval) |
577 | { |
578 | /* Reject invalid timeouts. */ |
579 | if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) |
580 | { |
581 | result = EINVAL; |
582 | goto failpp; |
583 | } |
584 | |
585 | struct timeval tv; |
586 | struct timespec rt; |
587 | |
588 | /* Get the current time. */ |
589 | (void) __gettimeofday (&tv, NULL); |
590 | |
591 | /* Compute relative timeout. */ |
592 | rt.tv_sec = abstime->tv_sec - tv.tv_sec; |
593 | rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000; |
594 | if (rt.tv_nsec < 0) |
595 | { |
596 | rt.tv_nsec += 1000000000; |
597 | --rt.tv_sec; |
598 | } |
599 | |
600 | /* Already timed out? */ |
601 | if (rt.tv_sec < 0) |
602 | { |
603 | result = ETIMEDOUT; |
604 | goto failpp; |
605 | } |
606 | |
607 | lll_futex_timed_wait (&mutex->__data.__lock, |
608 | ceilval | 2, &rt, |
609 | PTHREAD_MUTEX_PSHARED (mutex)); |
610 | } |
611 | } |
612 | while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
613 | ceilval | 2, ceilval) |
614 | != ceilval); |
615 | } |
616 | while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval); |
617 | |
618 | assert (mutex->__data.__owner == 0); |
619 | mutex->__data.__count = 1; |
620 | } |
621 | break; |
622 | |
623 | default: |
624 | /* Correct code cannot set any other type. */ |
625 | return EINVAL; |
626 | } |
627 | |
628 | if (result == 0) |
629 | { |
630 | /* Record the ownership. */ |
631 | mutex->__data.__owner = id; |
632 | ++mutex->__data.__nusers; |
633 | |
634 | LIBC_PROBE (mutex_timedlock_acquired, 1, mutex); |
635 | } |
636 | |
637 | out: |
638 | return result; |
639 | } |
640 | weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock) |
641 | |