/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <futex-internal.h>

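/* Try to acquire MUTEX without ever blocking.  Returns 0 on success,
   EBUSY if the mutex is currently held by another thread, and the
   type-specific error codes (EDEADLK, EAGAIN, EOWNERDEAD,
   ENOTRECOVERABLE, EINVAL) handled in the cases below.

   Illustrative caller-side sketch (not part of this file; the helper
   name do_other_work is made up).  Instead of blocking, a caller falls
   back to other work when the lock is unavailable:

     pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

     if (pthread_mutex_trylock (&m) == 0)
       {
         ...critical section...
         pthread_mutex_unlock (&m);
       }
     else
       do_other_work ();
*/
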
int
___pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

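      /* Elided mutex: first try to run the critical section as a hardware
         transaction via lll_trylock_elision; ownership is not recorded in
         that case because no thread actually holds the lock.  */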
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

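      /* FORCE_ELISION may rewrite __kind on first use so that a plain timed
         mutex starts using lock elision; in that case we take the elision
         path above.  */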
    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

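      /* Robust mutexes: each thread keeps a list of the robust mutexes it
         holds.  If the thread dies, the kernel walks this list and sets
         FUTEX_OWNER_DIED in each lock word so that the next acquirer can
         recover the state.  list_op_pending publishes the mutex we are
         about to operate on, covering a death in the middle of this
         function.  */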
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
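      /* Here and below a plain compiler barrier is sufficient (no hardware
         fence): the robust list and op_pending are read asynchronously only
         by the kernel, and only for this very thread, so we merely have to
         keep the compiler from reordering the stores.  */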

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately return here.  If we fell through
                 to the end of the function, __nusers would be incremented,
                 which is not correct because the old owner has to be
                 discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We have not acquired the lock, because it is already held
                 by another owner.  We do not need to ensure ordering wrt
                 another memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
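    /* Priority-inheritance mutexes: the lock word stores the owner's TID,
       and contended operations go through the kernel (FUTEX_TRYLOCK_PI
       etc.) so the owner can inherit the priority of the highest-priority
       waiter.  */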
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We have not acquired the lock, because it is already held
                   by another owner.  We do not need to ensure ordering wrt
                   another memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
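            /* FUTEX_TRYLOCK_PI asks the kernel to attempt the acquisition on
               our behalf; for robust PI mutexes it also performs the pending
               owner-died handling.  */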
            int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
                                           __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                               private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e)
                && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
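            /* We own the mutex now, but the owner-died flag is still set in
               the lock word; clear it before handing the mutex to the caller
               for recovery.  */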
            atomic_fetch_and_acquire (&mutex->__data.__lock,
                                      ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately return here.  If we fell through
               to the end of the function, __nusers would be incremented,
               which is not correct because the old owner has to be
               discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */

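    /* Priority-protected (priority ceiling) mutexes: the upper bits of the
       lock word hold the ceiling and the low bit the locked state.  Before
       taking the lock, the thread boosts its own priority to the ceiling
       via __pthread_tpp_change_priority, so that it runs at least at the
       ceiling priority for as long as it holds the mutex.  */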
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

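        /* Retry loop: read the current ceiling out of the lock word, boost
           this thread's priority to it, then try to CAS the lock word from
           "unlocked at that ceiling" (ceilval) to "locked at that ceiling"
           (ceilval | 1).  If the ceiling changed in the meantime, retry with
           the new ceiling; if the mutex is simply locked, undo the boost and
           fail with EBUSY below.  */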
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
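
/* pthread_mutex_trylock moved from libpthread into libc in glibc 2.34:
   export the implementation at the new version and keep compatibility
   symbols for binaries linked against the older libpthread.  */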
versioned_symbol (libc, ___pthread_mutex_trylock,
                  pthread_mutex_trylock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_trylock, __pthread_mutex_trylock)
#ifndef SHARED
strong_alias (___pthread_mutex_trylock, __pthread_mutex_trylock)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_trylock,
               pthread_mutex_trylock, GLIBC_2_0);
compat_symbol (libpthread, ___pthread_mutex_trylock,
               __pthread_mutex_trylock, GLIBC_2_0);
#endif