/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

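/* Architectures without lock-elision support leave these hooks undefined:
   an elided trylock then degrades to a plain lll_trylock and FORCE_ELISION
   expands to nothing, so the elision paths below become ordinary trylock
   attempts.  */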
#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

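      /* Elided mutexes: the lock may have been acquired transactionally,
         with the lock word still appearing free, so no ownership
         information is recorded for this mutex type.  */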
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /*FALL THROUGH*/
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

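      /* Robust mutexes: the mutex is published in the thread's
         robust_head.list_op_pending slot before the lock word is touched,
         so that, should this thread die in the middle of the operation,
         the kernel still finds the lock and can mark it with
         FUTEX_OWNER_DIED for the next owner.  On success the mutex is
         moved onto the robust list proper via ENQUEUE_MUTEX.  */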
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
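    /* For PI mutexes the lock word holds the owner's TID; acquisitions that
       cannot be completed with a simple CAS (e.g. after the previous owner
       died) are delegated to the kernel via FUTEX_TRYLOCK_PI.  */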
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */

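    /* Priority-protected mutexes keep the priority ceiling in the
       PTHREAD_MUTEX_PRIO_CEILING_MASK bits of the lock word.  The thread's
       priority is raised to the ceiling via __pthread_tpp_change_priority
       before the lock word is CASed from the free value (the bare ceiling)
       to ceiling | 1.  */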
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}

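/* Unless a variant implementation that includes this file has already
   defined these names, export the public symbol as a weak alias of the
   internal one.  */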
#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
weak_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
hidden_def (__pthread_mutex_trylock)
#endif
#endif