/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <kernel-features.h>
#include "pthreadP.h"
#include <atomic.h>
#include <pthread-offsets.h>

#include <stap-probe.h>

static const struct pthread_mutexattr default_mutexattr =
  {
    /* Default is a normal mutex, not shared between processes.  */
    .mutexkind = PTHREAD_MUTEX_NORMAL
  };


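/* Return true if the kernel lacks support for priority-inheritance
   (PI) futexes.  The result of the one-time FUTEX_UNLOCK_PI probe is
   cached in tpi_supported: 0 means not yet probed, 1 means PI futexes
   are available, -1 means the kernel returned ENOSYS.  Without
   __NR_futex the answer is always true.  */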
static bool
prio_inherit_missing (void)
{
#ifdef __NR_futex
  static int tpi_supported;
  if (__glibc_unlikely (tpi_supported == 0))
    {
      int lock = 0;
      INTERNAL_SYSCALL_DECL (err);
      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI, 0, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
      tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
    }
  return __glibc_unlikely (tpi_supported < 0);
#endif
  return true;
}

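/* Initialize *MUTEX using the attributes in *MUTEXATTR, or the
   defaults if MUTEXATTR is NULL.  Returns 0 on success, or ENOTSUP if
   the requested protocol or robustness combination is not
   supported.  */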
int
__pthread_mutex_init (pthread_mutex_t *mutex,
                      const pthread_mutexattr_t *mutexattr)
{
  const struct pthread_mutexattr *imutexattr;

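  /* Consistency checks: the public pthread_mutex_t must have the size
     and internal member offsets that the rest of the library (and the
     pre-computed constants from pthread-offsets.h) rely on.  */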
  ASSERT_TYPE_SIZE (pthread_mutex_t, __SIZEOF_PTHREAD_MUTEX_T);

  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__nusers,
                                  __PTHREAD_MUTEX_NUSERS_OFFSET);
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__kind,
                                  __PTHREAD_MUTEX_KIND_OFFSET);
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__spins,
                                  __PTHREAD_MUTEX_SPINS_OFFSET);
#if __PTHREAD_MUTEX_LOCK_ELISION
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__elision,
                                  __PTHREAD_MUTEX_ELISION_OFFSET);
#endif
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__list,
                                  __PTHREAD_MUTEX_LIST_OFFSET);

  imutexattr = ((const struct pthread_mutexattr *) mutexattr
                ?: &default_mutexattr);

  /* Sanity checks: fail early for protocols the system cannot support.  */
  switch (__builtin_expect (imutexattr->mutexkind
                            & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
                            PTHREAD_PRIO_NONE
                            << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
    {
    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      break;

    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      if (__glibc_unlikely (prio_inherit_missing ()))
        return ENOTSUP;
      break;

    default:
      /* XXX: For now we don't support robust priority protected mutexes.  */
      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
        return ENOTSUP;
      break;
    }

  /* Clear the whole variable.  */
  memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);

  /* Copy the values from the attribute.  */
  mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
    {
#ifndef __ASSUME_SET_ROBUST_LIST
      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
          && __set_robust_list_avail < 0)
        return ENOTSUP;
#endif

      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
    }

  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
    {
    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
      break;

    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;

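      /* Extract the priority ceiling from the attribute.  A ceiling of
         zero means none was set; in that case raise it to the minimum
         SCHED_FIFO priority.  The ceiling is stored shifted into the
         __lock word.  */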
      int ceiling = (imutexattr->mutexkind
                     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
                    >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
      if (! ceiling)
        {
          /* See __init_sched_fifo_prio.  */
          if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
            __init_sched_fifo_prio ();
          if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
            ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
        }
      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      break;

    default:
      break;
    }

  /* The kernel never uses FUTEX_PRIVATE_FLAG with FUTEX_WAKE when it
     wakes waiters of robust mutexes during exit processing, so robust
     mutexes must use the shared futex even when they are not
     process-shared.  */
  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
                                | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;

  /* Default values: mutex not used yet.  */
  // mutex->__count = 0;        already done by memset
  // mutex->__owner = 0;        already done by memset
  // mutex->__nusers = 0;       already done by memset
  // mutex->__spins = 0;        already done by memset
  // mutex->__next = NULL;      already done by memset

  LIBC_PROBE (mutex_init, 1, mutex);

  return 0;
}
weak_alias (__pthread_mutex_init, pthread_mutex_init)
hidden_def (__pthread_mutex_init)