1 | /* Copyright (C) 2002-2021 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <https://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <assert.h> |
20 | #include <errno.h> |
21 | #include <stdbool.h> |
22 | #include <string.h> |
23 | #include <kernel-features.h> |
24 | #include "pthreadP.h" |
25 | #include <atomic.h> |
26 | #include <pthread-offsets.h> |
27 | #include <futex-internal.h> |
28 | #include <shlib-compat.h> |
29 | |
30 | #include <stap-probe.h> |
31 | |
32 | static const struct pthread_mutexattr default_mutexattr = |
33 | { |
34 | /* Default is a normal mutex, not shared between processes. */ |
35 | .mutexkind = PTHREAD_MUTEX_NORMAL |
36 | }; |
37 | |
38 | |
/* Return true if the kernel lacks priority-inheritance futex support.
   The answer is probed once and cached in TPI_SUPPORTED:
   0 = not yet probed, 1 = supported, -1 = unsupported.  */
static bool
prio_inherit_missing (void)
{
  static int tpi_supported;

  int state = atomic_load_relaxed (&tpi_supported);
  if (__glibc_unlikely (state == 0))
    {
      /* Probe by issuing FUTEX_UNLOCK_PI on a dummy futex word; a
	 kernel without PI futexes fails with ENOSYS.  Concurrent
	 probes all compute the same answer, so a relaxed store
	 suffices.  */
      int err = futex_unlock_pi (&(unsigned int){0}, 0);
      state = err == ENOSYS ? -1 : 1;
      atomic_store_relaxed (&tpi_supported, state);
    }

  return __glibc_unlikely (state < 0);
}
50 | |
/* Initialize MUTEX according to MUTEXATTR (or the process-private
   normal-mutex defaults when MUTEXATTR is null).  Returns 0 on
   success, or ENOTSUP when the requested protocol/robustness
   combination is not available on this kernel.  */
int
___pthread_mutex_init (pthread_mutex_t *mutex,
		       const pthread_mutexattr_t *mutexattr)
{
  const struct pthread_mutexattr *imutexattr;

  ASSERT_TYPE_SIZE (pthread_mutex_t, __SIZEOF_PTHREAD_MUTEX_T);

  /* __kind is the only field where its offset should be checked to
     avoid ABI breakage with static initializers.  */
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__kind,
				  __PTHREAD_MUTEX_KIND_OFFSET);
  ASSERT_PTHREAD_INTERNAL_MEMBER_SIZE (pthread_mutex_t, __data.__kind, int);

  /* A null attribute pointer selects the default attributes.  */
  imutexattr = ((const struct pthread_mutexattr *) mutexattr
		?: &default_mutexattr);

  /* Sanity checks.  The protocol bits are extracted from the packed
     mutexkind word; PTHREAD_PRIO_NONE is the expected common case.  */
  switch (__builtin_expect (imutexattr->mutexkind
			    & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
			    PTHREAD_PRIO_NONE
			    << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
    {
    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      break;

    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      /* Priority inheritance requires kernel PI-futex support.  */
      if (__glibc_unlikely (prio_inherit_missing ()))
	return ENOTSUP;
      break;

    default:
      /* XXX: For now we don't support robust priority protected mutexes.  */
      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
	return ENOTSUP;
      break;
    }

  /* Clear the whole variable.  */
  memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);

  /* Copy the values from the attribute.  Start from the basic type
     bits with the attribute-only flag bits masked off; protocol and
     robust bits are then OR-ed in below.  */
  int mutex_kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
    {
#ifndef __ASSUME_SET_ROBUST_LIST
      /* Process-shared robust mutexes need the set_robust_list system
	 call so the kernel can wake waiters if the owner dies.  */
      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
	  && !__nptl_set_robust_list_avail)
	return ENOTSUP;
#endif

      mutex_kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
    }

  /* Translate the attribute's protocol selection into the internal
     kind bits.  */
  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
    {
    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex_kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
      break;

    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex_kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;

      /* Unpack the priority ceiling stored in the attribute.  A zero
	 ceiling means "unset" and is clamped up to the minimum
	 SCHED_FIFO priority.  */
      int ceiling = (imutexattr->mutexkind
		     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
		    >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
      if (! ceiling)
	{
	  /* See __init_sched_fifo_prio.  */
	  if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
	    __init_sched_fifo_prio ();
	  if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
	    ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
	}
      /* The ceiling lives in the upper bits of the lock word itself.  */
      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      break;

    default:
      break;
    }

  /* The kernel when waking robust mutexes on exit never uses
     FUTEX_PRIVATE_FLAG FUTEX_WAKE.  Hence robust mutexes must use the
     shared futex protocol even when not process-shared.  */
  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
				| PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
    mutex_kind |= PTHREAD_MUTEX_PSHARED_BIT;

  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  atomic_store_relaxed (&(mutex->__data.__kind), mutex_kind);

  /* Default values: mutex not used yet.  */
  // mutex->__count = 0;	already done by memset
  // mutex->__owner = 0;	already done by memset
  // mutex->__nusers = 0;	already done by memset
  // mutex->__spins = 0;	already done by memset
  // mutex->__next = NULL;	already done by memset

  /* SystemTap probe point for tracing mutex initialization.  */
  LIBC_PROBE (mutex_init, 1, mutex);

  return 0;
}
/* Export ___pthread_mutex_init as the public pthread_mutex_init at
   version GLIBC_2.0, and make the internal __pthread_mutex_init alias
   available for intra-libc calls.  */
versioned_symbol (libpthread, ___pthread_mutex_init, pthread_mutex_init,
		  GLIBC_2_0);
libc_hidden_ver (___pthread_mutex_init, __pthread_mutex_init)
#ifndef SHARED
strong_alias (___pthread_mutex_init, __pthread_mutex_init)
#endif

/* Compatibility export of __pthread_mutex_init for binaries linked
   against libpthread between GLIBC_2.0 and GLIBC_2.34 (before the
   libpthread merge into libc).  */
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_init, __pthread_mutex_init,
	       GLIBC_2_0);
#endif
165 | |