/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
#include <futex-internal.h>
#include <shlib-compat.h>

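/* This file implements pthread_mutex_unlock and its internal entry
   points.  The common case (normal/timed mutexes) is handled inline in
   __pthread_mutex_unlock_usercnt; all other mutex types go through
   __pthread_mutex_unlock_full below.  */
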
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

/* lll_unlock with single-thread optimization.  */
static inline void
lll_mutex_unlock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P)
    mutex->__data.__lock = 0;
  else
    lll_unlock (mutex->__data.__lock, private);
}

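/* Unlock MUTEX; DECR is nonzero if the user count in __nusers should be
   decremented as well.  (The condition variable code unlocks with
   DECR == 0 so that the user count is preserved across the wait.)  */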
int
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type
                        & ~(PTHREAD_MUTEX_KIND_MASK_NP
                            | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

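  /* The second argument of __builtin_expect is merely the value TYPE is
     expected to have; this hints that a plain timed mutex is the common
     case without changing the comparison below.  */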
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_mutex_unlock_optimized (mutex);

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
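      /* If the critical section ran as a hardware transaction, the lock
         was never actually acquired and __owner/__nusers were never
         written; lll_unlock_elision commits the transaction or, on the
         fallback path, performs a real unlock of __lock.  */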
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
                                 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}
libc_hidden_def (__pthread_mutex_unlock_usercnt)


static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

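      /* For reference, the application-side recovery protocol is roughly
         the following sketch (repair_state is a hypothetical application
         helper, not glibc code):

           if (pthread_mutex_lock (&m) == EOWNERDEAD)
             {
               repair_state ();
               pthread_mutex_consistent (&m);  // else this unlock marks m
             }                                 // PTHREAD_MUTEX_NOTRECOVERABLE
           pthread_mutex_unlock (&m);  */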
    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
         comments at ENQUEUE_MUTEX.  */
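      /* A compiler barrier suffices here: the kernel inspects the robust
         list and op_pending only when this same thread exits, so no
         cross-CPU ordering of these stores is required.  */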
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
         FUTEX_WAITERS set previously, then wake any waiters.
         The unlock operation must be the last access to the mutex to not
         violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
                             & FUTEX_WAITERS) != 0))
        futex_wake ((unsigned int *) &mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
         FIXME However, this violates the mutex destruction requirements
         because another thread could acquire the mutex, destroy it, and
         reuse the memory for something else; then, if this thread crashes,
         and the memory happens to have a value equal to the TID, the kernel
         will believe it is still related to the mutex (which has been
         destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          /* We must set op_pending before we dequeue the mutex.  Also see
             comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
                 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
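              /* FUTEX_UNLOCK_PI makes the kernel wake the
                 highest-priority waiter, hand the lock over to it, and
                 undo any priority boost this thread received.  */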
              futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                               private);
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));

      /* This happens after the kernel releases the mutex but violates the
         mutex destruction requirements; see comments in the code handling
         PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

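      /* For priority-protected mutexes, __lock stores the priority
         ceiling in the bits covered by PTHREAD_MUTEX_PRIO_CEILING_MASK;
         the remaining bits hold the lock state (0 free, 1 locked, > 1
         locked with waiters), which is why the CAS below keeps only the
         ceiling bits.  */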
      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
         lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &oldval, newval));

      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        futex_wake ((unsigned int *) &mutex->__data.__lock, 1,
                    PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

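      /* Passing -1 as the new ceiling tells __pthread_tpp_change_priority
         that this thread left the ceiling OLDPRIO, so any temporary
         priority raise for this mutex is dropped.  */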
      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}


int
___pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
libc_hidden_ver (___pthread_mutex_unlock, __pthread_mutex_unlock)
#ifndef SHARED
strong_alias (___pthread_mutex_unlock, __pthread_mutex_unlock)
#endif
versioned_symbol (libpthread, ___pthread_mutex_unlock, pthread_mutex_unlock,
                  GLIBC_2_0);

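/* Before glibc 2.34 pthread_mutex_unlock lived in libpthread; it is now
   part of libc proper.  The compat symbol below keeps binaries that were
   linked against the old libpthread working.  */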
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_unlock, __pthread_mutex_unlock,
               GLIBC_2_0);
#endif