/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>

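/* If the architecture does not provide lock-elision variants of the
   low-level lock operations, fall back to the plain primitives and make
   FORCE_ELISION a no-op.  */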
#ifndef lll_timedlock_elision
#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

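/* Illustrative usage sketch (caller-side code, not part of this file;
   the variable names are made up for the example): callers pass an
   absolute CLOCK_REALTIME deadline, e.g.

     struct timespec deadline;
     clock_gettime (CLOCK_REALTIME, &deadline);
     deadline.tv_sec += 2;          (give up after roughly two seconds)
     int err = pthread_mutex_timedlock (&some_mutex, &deadline);
     if (err == ETIMEDOUT)
       ... the lock could not be acquired before the deadline ...

   where some_mutex stands for any initialized pthread_mutex_t.  */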
int
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for a valid value.  */

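  /* Dispatch on the mutex type.  PTHREAD_MUTEX_TIMED_NP, the default
     kind, is the common case and is therefore used as the prediction
     hint for __builtin_expect.  */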
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_timedlock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));


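      /* Adaptive mutex: spin for a bounded number of iterations in the
         hope that the current owner releases the lock soon, and only
         then fall back to blocking.  The spin limit adapts via __spins,
         which tracks how much spinning recent acquisitions needed.  */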
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
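      /* Robust mutexes: record in the thread's robust list that an
         acquisition of this mutex is in progress (list_op_pending), so
         the lock can still be recovered if this thread dies in the
         middle of the operation.  */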
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
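      /* Acquisition loop: try to CAS the lock word from 0 to our TID;
         handle an owner that died (FUTEX_OWNER_DIED); diagnose attempts
         to relock a mutex we already own; otherwise set FUTEX_WAITERS
         and block on the futex until the lock word changes or the
         timeout expires.  */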
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely ((oldval == 0)
                              && (atomic_compare_and_exchange_bool_acq
                                  (&mutex->__data.__lock,
                                   id | assume_other_futex_waiters, 0) == 0)))
            break;

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                           | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;
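          /* Without FUTEX_CLOCK_REALTIME bitset support the futex wait
             below takes a relative timeout, so the absolute deadline has
             to be converted to the time remaining first.  */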
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
     || !defined lll_futex_timed_wait_bitset)
          struct timeval tv;
          struct timespec rt;

          /* Get the current time.  */
          (void) __gettimeofday (&tv, NULL);

          /* Compute relative timeout.  */
          rt.tv_sec = abstime->tv_sec - tv.tv_sec;
          rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
          if (rt.tv_nsec < 0)
            {
              rt.tv_nsec += 1000000000;
              --rt.tv_sec;
            }

          /* Already timed out?  */
          if (rt.tv_sec < 0)
            return ETIMEDOUT;
#endif

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
     || !defined lll_futex_timed_wait_bitset)
          lll_futex_timed_wait (&mutex->__data.__lock, oldval,
                                &rt, PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
#else
          int err = lll_futex_timed_wait_bitset (&mutex->__data.__lock,
              oldval, abstime, FUTEX_CLOCK_REALTIME,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == -ETIMEDOUT)
            return -err;
#endif
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
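        /* Priority-inheritance mutexes: the uncontended path is a single
           CAS from 0 to our TID; on contention the FUTEX_LOCK_PI futex
           operation lets the kernel queue this thread and boost the
           owner's priority until the lock is released.  */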
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI uses an absolute CLOCK_REALTIME
               timeout, so ABSTIME can be passed to it unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
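            /* The previous owner died while holding the mutex.  We own it
               now, so clear FUTEX_OWNER_DIED from the lock word; the
               inconsistency is reported to the caller through EOWNERDEAD
               and the PTHREAD_MUTEX_INCONSISTENT owner marker instead.  */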
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
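        /* Priority-protection (priority ceiling) mutexes: the upper bits
           of the lock word hold the current priority ceiling, and the low
           bits encode the lock state (the bare ceiling value means free,
           | 1 means locked, | 2 means locked with potential waiters).  */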
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

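        /* Acquisition loop: read the current ceiling, raise this thread's
           priority to it with __pthread_tpp_change_priority, then try to
           CAS the lock word from the free state (the bare ceiling value)
           to the locked state.  If the ceiling changed in the meantime,
           restart with the new value; if the lock is already held, wait
           on the futex with a relative timeout derived from ABSTIME.  */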
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}