1 | /* Copyright (C) 2002-2018 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <http://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <assert.h> |
20 | #include <errno.h> |
21 | #include <stdlib.h> |
22 | #include "pthreadP.h" |
23 | #include <lowlevellock.h> |
24 | |
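/* Architectures that support lock elision provide their own versions of
   these macros; otherwise fall back to the non-elided trylock and a
   no-op.  */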
25 | #ifndef lll_trylock_elision |
26 | #define lll_trylock_elision(a,t) lll_trylock(a) |
27 | #endif |
28 | |
29 | #ifndef FORCE_ELISION |
30 | #define FORCE_ELISION(m, s) |
31 | #endif |
32 | |
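/* Try to lock MUTEX without blocking.  Returns 0 on success, EBUSY if
   the mutex is already locked, and a type-specific error (EAGAIN,
   EDEADLK, EOWNERDEAD, ENOTRECOVERABLE, EINVAL) otherwise.  A minimal
   usage sketch from a caller's point of view (illustrative only, not
   part of this file):

     pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
     if (pthread_mutex_trylock (&m) == 0)
       {
         ... critical section ...
         pthread_mutex_unlock (&m);
       }
     else
       ... the lock was busy; do other work ...  */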
33 | int |
34 | __pthread_mutex_trylock (pthread_mutex_t *mutex) |
35 | { |
36 | int oldval; |
37 | pid_t id = THREAD_GETMEM (THREAD_SELF, tid); |
38 | |
39 | switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex), |
40 | PTHREAD_MUTEX_TIMED_NP)) |
41 | { |
42 | /* Recursive mutex. */ |
43 | case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP: |
44 | case PTHREAD_MUTEX_RECURSIVE_NP: |
45 | /* Check whether we already hold the mutex. */ |
46 | if (mutex->__data.__owner == id) |
47 | { |
48 | /* Just bump the counter. */ |
49 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
50 | /* Overflow of the counter. */ |
51 | return EAGAIN; |
52 | |
53 | ++mutex->__data.__count; |
54 | return 0; |
55 | } |
56 | |
57 | if (lll_trylock (mutex->__data.__lock) == 0) |
58 | { |
59 | /* Record the ownership. */ |
60 | mutex->__data.__owner = id; |
61 | mutex->__data.__count = 1; |
62 | ++mutex->__data.__nusers; |
63 | return 0; |
64 | } |
65 | break; |
66 | |
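    /* Elided mutex.  On success the lock may have been acquired only
       transactionally, which is why ownership is deliberately not
       recorded below.  */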
67 | case PTHREAD_MUTEX_TIMED_ELISION_NP: |
68 | elision: __attribute__((unused)) |
69 | if (lll_trylock_elision (mutex->__data.__lock, |
70 | mutex->__data.__elision) != 0) |
71 | break; |
72 | /* Don't record the ownership. */ |
73 | return 0; |
74 | |
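    /* The default mutex type.  On architectures with lock elision,
       FORCE_ELISION may convert the mutex to the elided variant and
       jump to the elision path above.  */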
75 | case PTHREAD_MUTEX_TIMED_NP: |
76 | FORCE_ELISION (mutex, goto elision); |
      /* FALL THROUGH */
78 | case PTHREAD_MUTEX_ADAPTIVE_NP: |
79 | case PTHREAD_MUTEX_ERRORCHECK_NP: |
80 | if (lll_trylock (mutex->__data.__lock) != 0) |
81 | break; |
82 | |
83 | /* Record the ownership. */ |
84 | mutex->__data.__owner = id; |
85 | ++mutex->__data.__nusers; |
86 | |
87 | return 0; |
88 | |
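    /* Robust mutexes.  Record the mutex in list_op_pending before
       touching the lock word, so the kernel can finish or undo the
       operation if this thread dies in the middle of it.  */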
89 | case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: |
90 | case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP: |
91 | case PTHREAD_MUTEX_ROBUST_NORMAL_NP: |
92 | case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: |
93 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
94 | &mutex->__data.__list.__next); |
95 | |
96 | oldval = mutex->__data.__lock; |
97 | do |
98 | { |
99 | again: |
100 | if ((oldval & FUTEX_OWNER_DIED) != 0) |
101 | { |
102 | /* The previous owner died. Try locking the mutex. */ |
103 | int newval = id | (oldval & FUTEX_WAITERS); |
104 | |
105 | newval |
106 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
107 | newval, oldval); |
108 | |
109 | if (newval != oldval) |
110 | { |
111 | oldval = newval; |
112 | goto again; |
113 | } |
114 | |
115 | /* We got the mutex. */ |
116 | mutex->__data.__count = 1; |
117 | /* But it is inconsistent unless marked otherwise. */ |
118 | mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; |
119 | |
120 | ENQUEUE_MUTEX (mutex); |
121 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
122 | |
              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
127 | return EOWNERDEAD; |
128 | } |
129 | |
130 | /* Check whether we already hold the mutex. */ |
131 | if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) |
132 | { |
133 | int kind = PTHREAD_MUTEX_TYPE (mutex); |
134 | if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP) |
135 | { |
136 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
137 | NULL); |
138 | return EDEADLK; |
139 | } |
140 | |
141 | if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP) |
142 | { |
143 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
144 | NULL); |
145 | |
146 | /* Just bump the counter. */ |
147 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
148 | /* Overflow of the counter. */ |
149 | return EAGAIN; |
150 | |
151 | ++mutex->__data.__count; |
152 | |
153 | return 0; |
154 | } |
155 | } |
156 | |
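          /* The mutex is unlocked or held by a live owner.  Try the
             uncontended transition from 0 to our TID.  */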
157 | oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
158 | id, 0); |
159 | if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0) |
160 | { |
161 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
162 | |
163 | return EBUSY; |
164 | } |
165 | |
166 | if (__builtin_expect (mutex->__data.__owner |
167 | == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) |
168 | { |
169 | /* This mutex is now not recoverable. */ |
170 | mutex->__data.__count = 0; |
171 | if (oldval == id) |
172 | lll_unlock (mutex->__data.__lock, |
173 | PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); |
174 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
175 | return ENOTRECOVERABLE; |
176 | } |
177 | } |
178 | while ((oldval & FUTEX_OWNER_DIED) != 0); |
179 | |
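      /* We took an unlocked, consistent mutex: enqueue it in the robust
         list before clearing the pending marker.  */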
180 | ENQUEUE_MUTEX (mutex); |
181 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
182 | |
183 | mutex->__data.__owner = id; |
184 | ++mutex->__data.__nusers; |
185 | mutex->__data.__count = 1; |
186 | |
187 | return 0; |
188 | |
189 | /* The PI support requires the Linux futex system call. If that's not |
190 | available, pthread_mutex_init should never have allowed the type to |
191 | be set. So it will get the default case for an invalid type. */ |
192 | #ifdef __NR_futex |
193 | case PTHREAD_MUTEX_PI_RECURSIVE_NP: |
194 | case PTHREAD_MUTEX_PI_ERRORCHECK_NP: |
195 | case PTHREAD_MUTEX_PI_NORMAL_NP: |
196 | case PTHREAD_MUTEX_PI_ADAPTIVE_NP: |
197 | case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP: |
198 | case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP: |
199 | case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: |
200 | case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: |
201 | { |
202 | int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; |
203 | int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; |
204 | |
205 | if (robust) |
206 | /* Note: robust PI futexes are signaled by setting bit 0. */ |
207 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, |
208 | (void *) (((uintptr_t) &mutex->__data.__list.__next) |
209 | | 1)); |
210 | |
211 | oldval = mutex->__data.__lock; |
212 | |
213 | /* Check whether we already hold the mutex. */ |
214 | if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) |
215 | { |
216 | if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) |
217 | { |
218 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
219 | return EDEADLK; |
220 | } |
221 | |
222 | if (kind == PTHREAD_MUTEX_RECURSIVE_NP) |
223 | { |
224 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
225 | |
226 | /* Just bump the counter. */ |
227 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
228 | /* Overflow of the counter. */ |
229 | return EAGAIN; |
230 | |
231 | ++mutex->__data.__count; |
232 | |
233 | return 0; |
234 | } |
235 | } |
236 | |
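        /* Uncontended fast path: try to move the lock word from 0 to
           our TID in user space, without entering the kernel.  */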
237 | oldval |
238 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
239 | id, 0); |
240 | |
241 | if (oldval != 0) |
242 | { |
243 | if ((oldval & FUTEX_OWNER_DIED) == 0) |
244 | { |
245 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
246 | |
247 | return EBUSY; |
248 | } |
249 | |
250 | assert (robust); |
251 | |
252 | /* The mutex owner died. The kernel will now take care of |
253 | everything. */ |
254 | int private = (robust |
255 | ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) |
256 | : PTHREAD_MUTEX_PSHARED (mutex)); |
257 | INTERNAL_SYSCALL_DECL (__err); |
258 | int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock, |
259 | __lll_private_flag (FUTEX_TRYLOCK_PI, |
260 | private), 0, 0); |
261 | |
262 | if (INTERNAL_SYSCALL_ERROR_P (e, __err) |
263 | && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK) |
264 | { |
265 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
266 | |
267 | return EBUSY; |
268 | } |
269 | |
270 | oldval = mutex->__data.__lock; |
271 | } |
272 | |
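        /* Either the CAS above or FUTEX_TRYLOCK_PI acquired the lock.
           If the previous owner died, the lock word still carries
           FUTEX_OWNER_DIED; clear it and report the inconsistency.  */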
273 | if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED)) |
274 | { |
275 | atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED); |
276 | |
277 | /* We got the mutex. */ |
278 | mutex->__data.__count = 1; |
279 | /* But it is inconsistent unless marked otherwise. */ |
280 | mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; |
281 | |
282 | ENQUEUE_MUTEX (mutex); |
283 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
284 | |
285 | /* Note that we deliberately exit here. If we fall |
286 | through to the end of the function __nusers would be |
287 | incremented which is not correct because the old owner |
288 | has to be discounted. */ |
289 | return EOWNERDEAD; |
290 | } |
291 | |
292 | if (robust |
293 | && __builtin_expect (mutex->__data.__owner |
294 | == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) |
295 | { |
296 | /* This mutex is now not recoverable. */ |
297 | mutex->__data.__count = 0; |
298 | |
299 | INTERNAL_SYSCALL_DECL (__err); |
300 | INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock, |
301 | __lll_private_flag (FUTEX_UNLOCK_PI, |
302 | PTHREAD_ROBUST_MUTEX_PSHARED (mutex)), |
303 | 0, 0); |
304 | |
305 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
306 | return ENOTRECOVERABLE; |
307 | } |
308 | |
309 | if (robust) |
310 | { |
311 | ENQUEUE_MUTEX_PI (mutex); |
312 | THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); |
313 | } |
314 | |
315 | mutex->__data.__owner = id; |
316 | ++mutex->__data.__nusers; |
317 | mutex->__data.__count = 1; |
318 | |
319 | return 0; |
320 | } |
321 | #endif /* __NR_futex. */ |
322 | |
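    /* Priority-protected mutexes: acquiring the lock also raises the
       thread's priority to the mutex's priority ceiling.  */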
323 | case PTHREAD_MUTEX_PP_RECURSIVE_NP: |
324 | case PTHREAD_MUTEX_PP_ERRORCHECK_NP: |
325 | case PTHREAD_MUTEX_PP_NORMAL_NP: |
326 | case PTHREAD_MUTEX_PP_ADAPTIVE_NP: |
327 | { |
328 | int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; |
329 | |
330 | oldval = mutex->__data.__lock; |
331 | |
332 | /* Check whether we already hold the mutex. */ |
333 | if (mutex->__data.__owner == id) |
334 | { |
335 | if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) |
336 | return EDEADLK; |
337 | |
338 | if (kind == PTHREAD_MUTEX_RECURSIVE_NP) |
339 | { |
340 | /* Just bump the counter. */ |
341 | if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) |
342 | /* Overflow of the counter. */ |
343 | return EAGAIN; |
344 | |
345 | ++mutex->__data.__count; |
346 | |
347 | return 0; |
348 | } |
349 | } |
350 | |
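        /* Raise our priority to the current ceiling, then try to set
           the locked bit while the ceiling is unchanged; retry if the
           ceiling changed underneath us.  */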
351 | int oldprio = -1, ceilval; |
352 | do |
353 | { |
354 | int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) |
355 | >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; |
356 | |
357 | if (__pthread_current_priority () > ceiling) |
358 | { |
359 | if (oldprio != -1) |
360 | __pthread_tpp_change_priority (oldprio, -1); |
361 | return EINVAL; |
362 | } |
363 | |
364 | int retval = __pthread_tpp_change_priority (oldprio, ceiling); |
365 | if (retval) |
366 | return retval; |
367 | |
368 | ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT; |
369 | oldprio = ceiling; |
370 | |
371 | oldval |
372 | = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, |
373 | ceilval | 1, ceilval); |
374 | |
375 | if (oldval == ceilval) |
376 | break; |
377 | } |
378 | while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval); |
379 | |
380 | if (oldval != ceilval) |
381 | { |
382 | __pthread_tpp_change_priority (oldprio, -1); |
383 | break; |
384 | } |
385 | |
386 | assert (mutex->__data.__owner == 0); |
387 | /* Record the ownership. */ |
388 | mutex->__data.__owner = id; |
389 | ++mutex->__data.__nusers; |
390 | mutex->__data.__count = 1; |
391 | |
392 | return 0; |
393 | } |
394 | break; |
395 | |
396 | default: |
397 | /* Correct code cannot set any other type. */ |
398 | return EINVAL; |
399 | } |
400 | |
401 | return EBUSY; |
402 | } |
403 | |
404 | #ifndef __pthread_mutex_trylock |
405 | #ifndef pthread_mutex_trylock |
406 | weak_alias (__pthread_mutex_trylock, pthread_mutex_trylock) |
407 | hidden_def (__pthread_mutex_trylock) |
408 | #endif |
409 | #endif |
410 | |