/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <futex-internal.h>

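/* Try once to acquire MUTEX without blocking.  Returns 0 on success
   and EBUSY if the mutex is held by another thread; the recursive,
   error-checking, robust, PI and priority-protected variants handled
   below can additionally return EDEADLK, EAGAIN, EOWNERDEAD,
   ENOTRECOVERABLE and EINVAL, mirroring pthread_mutex_lock.  */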
int
___pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
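  /* The default (timed) type is by far the most common, so tell the
     compiler to expect it when dispatching on the type.  */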
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership: with elision the mutex is not
         really acquired and several threads may run the critical
         section at the same time, so the owner and usage bookkeeping
         must stay untouched.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
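      /* On architectures without lock elision FORCE_ELISION expands to
         nothing; where elision is available it may convert this mutex
         to the elision type and take the elision path above.  */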
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

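      /* Try to acquire the lock word with a CAS, dealing with a
         previous owner that may have died: in that case we take the
         mutex over and report EOWNERDEAD to the caller.  */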
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex,
                 preserving the FUTEX_WAITERS bit so that any waiters
                 are still woken by the eventual unlock.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt another
                 memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

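      /* Leaving the loop without having returned means the CAS above
         succeeded with oldval == 0: the mutex is now ours.  */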
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
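            /* The kernel extracts bit 0 of each robust-list pointer
               when it walks the list after a thread death: it marks the
               entry as a PI futex, which must be handed over using the
               PI protocol.  */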
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

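        /* An uncontended PI mutex is acquired entirely in user space:
           a CAS of the lock word from 0 to our TID.  The kernel only
           gets involved on contention or when the owner died.  */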
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt
                   another memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
                                           __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                               private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e)
                && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            /* The kernel handed us a mutex whose previous owner died.
               Clear the flag in the lock word; the inconsistency is
               recorded in __owner below.  */
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

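        /* The lock word of a priority-protected mutex encodes the
           priority ceiling under PTHREAD_MUTEX_PRIO_CEILING_MASK and
           the lock state in the low bits: CEILVAL means unlocked at
           that ceiling, CEILVAL | 1 locked.  Raise our recorded
           priority to the ceiling before trying to take the lock.  */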
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                /* Our priority is above the ceiling, so locking this
                   mutex would violate the protocol.  Undo an earlier
                   ceiling boost before failing.  */
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

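        /* Leaving the loop with oldval != ceilval means the mutex is
           locked at our ceiling; drop the priority boost again and
           fail with EBUSY below.  */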
        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
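
/* pthread_mutex_trylock was moved from libpthread into libc in glibc
   2.34.  The default symbol is the libc version; the compat symbols
   below keep binaries linked against the old libpthread working.  */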
versioned_symbol (libc, ___pthread_mutex_trylock,
                  pthread_mutex_trylock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_trylock, __pthread_mutex_trylock)
#ifndef SHARED
strong_alias (___pthread_mutex_trylock, __pthread_mutex_trylock)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_trylock,
               pthread_mutex_trylock, GLIBC_2_0);
compat_symbol (libpthread, ___pthread_mutex_trylock,
               __pthread_mutex_trylock, GLIBC_2_0);
#endif