/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif
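
/* On configurations without lock elision support the fallback definition
   above makes lll_unlock_elision a plain lll_unlock, so the elision branch
   in __pthread_mutex_unlock_usercnt below simply performs a normal futex
   unlock there.  */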

static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

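/* Unlock MUTEX.  DECR is nonzero if the user count in __nusers should be
   decremented as well; internal callers (the condition variable code, for
   instance, when it releases the mutex around a wait) can pass 0 so that
   the user count is kept.  The common, non-robust mutex kinds are handled
   inline here; everything else is delegated to __pthread_mutex_unlock_full.  */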
int
attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type &
        ~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
                                 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}


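/* Slow path for the less common mutex kinds: robust, priority-inheritance
   (PI), and priority-protection (PP) mutexes.  All of them keep extra state
   in __lock and/or on the per-thread robust list, so the unlock protocol is
   more involved than for the plain kinds handled above.  */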
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

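    /* For robust mutexes the owner's TID is stored in the futex word and the
       mutex sits on the owner's robust list, which lets the kernel mark the
       lock with FUTEX_OWNER_DIED if the owner exits without unlocking.  The
       list_op_pending field covers the window in which the mutex is being
       removed from that list.  */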
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware of this.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
         comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
         FUTEX_WAITERS set previously, then wake any waiters.
         The unlock operation must be the last access to the mutex to not
         violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
                             & FUTEX_WAITERS) != 0))
        lll_futex_wake (&mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
         FIXME However, this violates the mutex destruction requirements
         because another thread could acquire the mutex, destroy it, and
         reuse the memory for something else; then, if this thread crashes,
         and the memory happens to have a value equal to the TID, the kernel
         will believe it is still related to the mutex (which has been
         destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

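    /* For PI mutexes the futex word holds the owner's TID directly.  As long
       as there are no waiters and the word still equals our TID, the unlock
       code further down can clear it with a plain CAS; otherwise the
       FUTEX_UNLOCK_PI operation lets the kernel hand the lock to the
       highest-priority waiter and undo the priority boost.  */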
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware of this.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          /* We must set op_pending before we dequeue the mutex.  Also see
             comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
                 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              INTERNAL_SYSCALL_DECL (__err);
              INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                                __lll_private_flag (FUTEX_UNLOCK_PI, private));
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));

      /* This happens after the kernel releases the mutex but violates the
         mutex destruction requirements; see comments in the code handling
         PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */

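    /* Priority-protection (PP) mutexes keep the priority ceiling in the
       PTHREAD_MUTEX_PRIO_CEILING_MASK bits of __lock.  Unlocking clears the
       lock bits while preserving the ceiling, wakes one waiter if any were
       queued, and restores the thread's previous priority through
       __pthread_tpp_change_priority.  */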
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
         lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &oldval, newval));

      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}


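/* Public entry point.  DECR is 1 here so that the user count is dropped
   along with the lock; the weak alias below exports this function as
   pthread_mutex_unlock.  */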
int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)