1 | /* Copyright (C) 2002-2021 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <https://www.gnu.org/licenses/>. */ |
18 | |
19 | #ifndef _DESCR_H |
20 | #define _DESCR_H 1 |
21 | |
22 | #include <limits.h> |
23 | #include <sched.h> |
24 | #include <setjmp.h> |
25 | #include <stdbool.h> |
26 | #include <sys/types.h> |
27 | #include <hp-timing.h> |
28 | #include <list_t.h> |
29 | #include <lowlevellock.h> |
30 | #include <pthreaddef.h> |
31 | #include <dl-sysdep.h> |
32 | #include <thread_db.h> |
33 | #include <tls.h> |
34 | #include <unwind.h> |
35 | #include <bits/types/res_state.h> |
36 | #include <kernel-features.h> |
37 | #include <tls-internal-struct.h> |
38 | |
39 | #ifndef TCB_ALIGNMENT |
40 | # define TCB_ALIGNMENT sizeof (double) |
41 | #endif |
42 | |
43 | |
/* We keep thread-specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays of a certain number of data pointers.  This lets us implement a
   sparse array.  Each dynamic second-level array has
   PTHREAD_KEY_2NDLEVEL_SIZE entries.  This value shouldn't be too
   large.  */
50 | #define PTHREAD_KEY_2NDLEVEL_SIZE 32 |
51 | |
/* We need to address PTHREAD_KEYS_MAX keys with subarrays of
   PTHREAD_KEY_2NDLEVEL_SIZE keys each.  */
54 | #define PTHREAD_KEY_1STLEVEL_SIZE \ |
55 | ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \ |
56 | / PTHREAD_KEY_2NDLEVEL_SIZE) |
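
/* Illustrative sketch (not the actual accessor code) of how a key KEY
   is located in the two-level array:

     idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
     idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
     level2 = THREAD_SELF->specific[idx1st];
     value  = level2 == NULL ? NULL : level2[idx2nd].data;

   Second-level blocks are allocated on demand, which keeps the array
   sparse; keys in the first block are stored in specific_1stblock in
   the thread descriptor and need no allocation at all.  */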
57 | |
58 | |
59 | |
60 | |
61 | /* Internal version of the buffer to store cancellation handler |
62 | information. */ |
63 | struct pthread_unwind_buf |
64 | { |
65 | struct |
66 | { |
67 | __jmp_buf jmp_buf; |
68 | int mask_was_saved; |
69 | } cancel_jmp_buf[1]; |
70 | |
71 | union |
72 | { |
73 | /* This is the placeholder of the public version. */ |
74 | void *pad[4]; |
75 | |
76 | struct |
77 | { |
78 | /* Pointer to the previous cleanup buffer. */ |
79 | struct pthread_unwind_buf *prev; |
80 | |
81 | /* Backward compatibility: state of the old-style cleanup |
82 | handler at the time of the previous new-style cleanup handler |
83 | installment. */ |
84 | struct _pthread_cleanup_buffer *cleanup; |
85 | |
86 | /* Cancellation type before the push call. */ |
87 | int canceltype; |
88 | } data; |
89 | } priv; |
90 | }; |
91 | |
92 | |
93 | /* Opcodes and data types for communication with the signal handler to |
94 | change user/group IDs. */ |
95 | struct xid_command |
96 | { |
97 | int syscall_no; |
98 | /* Enforce zero-extension for the pointer argument in |
99 | |
100 | int setgroups (size_t size, const gid_t *list); |
101 | |
102 | The kernel XID arguments are unsigned and do not require sign |
103 | extension. */ |
104 | unsigned long int id[3]; |
105 | volatile int cntr; |
106 | volatile int error; /* -1: no call yet, 0: success seen, >0: error seen. */ |
107 | }; |
108 | |
109 | |
110 | /* Data structure used by the kernel to find robust futexes. */ |
111 | struct robust_list_head |
112 | { |
113 | void *list; |
114 | long int futex_offset; |
115 | void *list_op_pending; |
116 | }; |
117 | |
118 | |
/* Data structure used to handle thread priority protection.  */
120 | struct priority_protection_data |
121 | { |
122 | int priomax; |
123 | unsigned int priomap[]; |
124 | }; |
125 | |
126 | |
127 | /* Thread descriptor data structure. */ |
128 | struct pthread |
129 | { |
130 | union |
131 | { |
132 | #if !TLS_DTV_AT_TP |
133 | /* This overlaps the TCB as used for TLS without threads (see tls.h). */ |
    tcbhead_t header;
135 | #else |
136 | struct |
137 | { |
      /* multiple_threads is enabled either when the process has spawned at
         least one thread or when a single-threaded process cancels itself.
         This enables additional code to introduce locking before doing some
         compare_and_exchange operations and also enables cancellation points.
         The concepts of multiple threads and cancellation points ideally
         should be separate, since it is not necessary for multiple threads
         to have been created for cancellation points to be enabled, as is
         the case when a single-threaded process cancels itself.

         Since enabling multiple_threads enables additional code in
         cancellation points and compare_and_exchange operations, there is a
         potential for an unneeded performance hit when it is enabled in a
         single-threaded, self-canceling process.  This is OK though, since a
         single-threaded process will enable async cancellation only when it
         looks to cancel itself and is hence going to end anyway.  */
153 | int multiple_threads; |
154 | int gscope_flag; |
155 | } header; |
156 | #endif |
157 | |
158 | /* This extra padding has no special purpose, and this structure layout |
159 | is private and subject to change without affecting the official ABI. |
160 | We just have it here in case it might be convenient for some |
161 | implementation-specific instrumentation hack or suchlike. */ |
162 | void *__padding[24]; |
163 | }; |
164 | |
165 | /* This descriptor's link on the GL (dl_stack_used) or |
166 | GL (dl_stack_user) list. */ |
167 | list_t list; |
168 | |
  /* Thread ID - which is also an 'is this thread descriptor (and
     therefore stack) used' flag.  */
171 | pid_t tid; |
172 | |
  /* Unused.  */
174 | pid_t pid_ununsed; |
175 | |
176 | /* List of robust mutexes the thread is holding. */ |
177 | #if __PTHREAD_MUTEX_HAVE_PREV |
178 | void *robust_prev; |
179 | struct robust_list_head robust_head; |
180 | |
  /* The list above is strange.  It is basically a doubly-linked list
     but the pointer to the next/previous element of the list points
     in the middle of the object, the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.
     These operations are effectively concurrent code in that the thread
     can get killed at any point in time and the kernel takes over.  Thus,
     the __next elements are a kind of concurrent list and we need to
     enforce using compiler barriers that the individual operations happen
     in such a way that the kernel always sees a consistent list.  The
     backward links (i.e., the __prev elements) are not used by the kernel.
     FIXME We should use relaxed MO atomic operations here and signal fences
     because this kind of concurrency is similar to synchronizing with a
     signal handler.  */
195 | # define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next)) |
196 | |
197 | # define ENQUEUE_MUTEX_BOTH(mutex, val) \ |
198 | do { \ |
199 | __pthread_list_t *next = (__pthread_list_t *) \ |
200 | ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \ |
201 | - QUEUE_PTR_ADJUST); \ |
202 | next->__prev = (void *) &mutex->__data.__list.__next; \ |
203 | mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \ |
204 | robust_head.list); \ |
205 | mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \ |
206 | /* Ensure that the new list entry is ready before we insert it. */ \ |
207 | __asm ("" ::: "memory"); \ |
208 | THREAD_SETMEM (THREAD_SELF, robust_head.list, \ |
209 | (void *) (((uintptr_t) &mutex->__data.__list.__next) \ |
210 | | val)); \ |
211 | } while (0) |
212 | # define DEQUEUE_MUTEX(mutex) \ |
213 | do { \ |
214 | __pthread_list_t *next = (__pthread_list_t *) \ |
215 | ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \ |
216 | - QUEUE_PTR_ADJUST); \ |
217 | next->__prev = mutex->__data.__list.__prev; \ |
218 | __pthread_list_t *prev = (__pthread_list_t *) \ |
219 | ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \ |
220 | - QUEUE_PTR_ADJUST); \ |
221 | prev->__next = mutex->__data.__list.__next; \ |
222 | /* Ensure that we remove the entry from the list before we change the \ |
223 | __next pointer of the entry, which is read by the kernel. */ \ |
224 | __asm ("" ::: "memory"); \ |
225 | mutex->__data.__list.__prev = NULL; \ |
226 | mutex->__data.__list.__next = NULL; \ |
227 | } while (0) |
228 | #else |
229 | union |
230 | { |
231 | __pthread_slist_t robust_list; |
232 | struct robust_list_head robust_head; |
233 | }; |
234 | |
235 | # define ENQUEUE_MUTEX_BOTH(mutex, val) \ |
236 | do { \ |
237 | mutex->__data.__list.__next \ |
238 | = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \ |
239 | /* Ensure that the new list entry is ready before we insert it. */ \ |
240 | __asm ("" ::: "memory"); \ |
241 | THREAD_SETMEM (THREAD_SELF, robust_list.__next, \ |
242 | (void *) (((uintptr_t) &mutex->__data.__list) | val)); \ |
243 | } while (0) |
244 | # define DEQUEUE_MUTEX(mutex) \ |
245 | do { \ |
246 | __pthread_slist_t *runp = (__pthread_slist_t *) \ |
247 | (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \ |
248 | if (runp == &mutex->__data.__list) \ |
249 | THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \ |
250 | else \ |
251 | { \ |
252 | __pthread_slist_t *next = (__pthread_slist_t *) \ |
253 | (((uintptr_t) runp->__next) & ~1ul); \ |
254 | while (next != &mutex->__data.__list) \ |
255 | { \ |
256 | runp = next; \ |
257 | next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \ |
258 | } \ |
259 | \ |
260 | runp->__next = next->__next; \ |
261 | /* Ensure that we remove the entry from the list before we change the \ |
262 | __next pointer of the entry, which is read by the kernel. */ \ |
263 | __asm ("" ::: "memory"); \ |
264 | mutex->__data.__list.__next = NULL; \ |
265 | } \ |
266 | } while (0) |
267 | #endif |
268 | #define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0) |
269 | #define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1) |
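
  /* Illustrative usage sketch (cf. pthread_mutex_lock.c): a robust
     mutex is published on the kernel-visible list while it is held.
     The operation in flight is announced in list_op_pending first so
     the kernel can complete or discard it if the thread dies
     mid-update:

       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) &mutex->__data.__list.__next);
       ... acquire the futex ...
       ENQUEUE_MUTEX (mutex);
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

     Unlocking runs DEQUEUE_MUTEX symmetrically.  ENQUEUE_MUTEX_PI tags
     the entry's low bit so the kernel knows the mutex uses priority
     inheritance.  */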
270 | |
271 | /* List of cleanup buffers. */ |
272 | struct _pthread_cleanup_buffer *cleanup; |
273 | |
274 | /* Unwind information. */ |
275 | struct pthread_unwind_buf *cleanup_jmp_buf; |
276 | #define HAVE_CLEANUP_JMP_BUF |
277 | |
278 | /* Flags determining processing of cancellation. */ |
279 | int cancelhandling; |
280 | /* Bit set if cancellation is disabled. */ |
281 | #define CANCELSTATE_BIT 0 |
282 | #define CANCELSTATE_BITMASK (0x01 << CANCELSTATE_BIT) |
283 | /* Bit set if asynchronous cancellation mode is selected. */ |
284 | #define CANCELTYPE_BIT 1 |
285 | #define CANCELTYPE_BITMASK (0x01 << CANCELTYPE_BIT) |
286 | /* Bit set if canceling has been initiated. */ |
287 | #define CANCELING_BIT 2 |
288 | #define CANCELING_BITMASK (0x01 << CANCELING_BIT) |
289 | /* Bit set if canceled. */ |
290 | #define CANCELED_BIT 3 |
291 | #define CANCELED_BITMASK (0x01 << CANCELED_BIT) |
292 | /* Bit set if thread is exiting. */ |
293 | #define EXITING_BIT 4 |
294 | #define EXITING_BITMASK (0x01 << EXITING_BIT) |
295 | /* Bit set if thread terminated and TCB is freed. */ |
296 | #define TERMINATED_BIT 5 |
297 | #define TERMINATED_BITMASK (0x01 << TERMINATED_BIT) |
298 | /* Bit set if thread is supposed to change XID. */ |
299 | #define SETXID_BIT 6 |
300 | #define SETXID_BITMASK (0x01 << SETXID_BIT) |
301 | /* Mask for the rest. Helps the compiler to optimize. */ |
302 | #define CANCEL_RESTMASK 0xffffff80 |
303 | |
304 | #define CANCEL_ENABLED_AND_CANCELED(value) \ |
305 | (((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK \ |
306 | | CANCEL_RESTMASK | TERMINATED_BITMASK)) == CANCELED_BITMASK) |
307 | #define CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS(value) \ |
308 | (((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK \ |
309 | | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK)) \ |
310 | == (CANCELTYPE_BITMASK | CANCELED_BITMASK)) |
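
  /* Illustrative sketch of how a cancellation point tests these bits
     (cf. the CANCELLATION_P macro in pthreadP.h):

       struct pthread *self = THREAD_SELF;
       int cancelhandling = THREAD_GETMEM (self, cancelhandling);
       if (CANCEL_ENABLED_AND_CANCELED (cancelhandling))
         {
           THREAD_SETMEM (self, result, PTHREAD_CANCELED);
           __do_cancel ();
         }

     __do_cancel then unwinds the stack using cleanup_jmp_buf above.  */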
311 | |
  /* Flags, including those copied from the thread attribute.  */
313 | int flags; |
314 | |
315 | /* We allocate one block of references here. This should be enough |
316 | to avoid allocating any memory dynamically for most applications. */ |
317 | struct pthread_key_data |
318 | { |
319 | /* Sequence number. We use uintptr_t to not require padding on |
320 | 32- and 64-bit machines. On 64-bit machines it helps to avoid |
321 | wrapping, too. */ |
322 | uintptr_t seq; |
323 | |
324 | /* Data pointer. */ |
325 | void *data; |
326 | } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE]; |
327 | |
328 | /* Two-level array for the thread-specific data. */ |
329 | struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE]; |
330 | |
331 | /* Flag which is set when specific data is set. */ |
332 | bool specific_used; |
333 | |
334 | /* True if events must be reported. */ |
335 | bool report_events; |
336 | |
337 | /* True if the user provided the stack. */ |
338 | bool user_stack; |
339 | |
340 | /* True if thread must stop at startup time. */ |
341 | bool stopped_start; |
342 | |
343 | /* Formerly used for dealing with cancellation. */ |
344 | int parent_cancelhandling_unsed; |
345 | |
346 | /* Lock to synchronize access to the descriptor. */ |
347 | int lock; |
348 | |
349 | /* Lock for synchronizing setxid calls. */ |
350 | unsigned int setxid_futex; |
351 | |
352 | #if HP_TIMING_INLINE |
353 | hp_timing_t cpuclock_offset_ununsed; |
354 | #endif |
355 | |
  /* If the thread waits to join another one the ID of the latter is
     stored here.

     In case a thread is detached this field contains a pointer to the
     TCB of the thread itself.  This is something which cannot happen
     in normal operation.  */
362 | struct pthread *joinid; |
363 | /* Check whether a thread is detached. */ |
364 | #define IS_DETACHED(pd) ((pd)->joinid == (pd)) |
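
  /* The field can thus hold three kinds of value (a summary; see
     pthread_join_common.c and pthread_detach.c for the exact protocol):
       NULL       - joinable, nobody waiting to join yet;
       PD itself  - detached;
       other PD   - descriptor of the thread blocked in pthread_join.  */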
365 | |
366 | /* The result of the thread function. */ |
367 | void *result; |
368 | |
369 | /* Scheduling parameters for the new thread. */ |
370 | struct sched_param schedparam; |
371 | int schedpolicy; |
372 | |
373 | /* Start position of the code to be executed and the argument passed |
374 | to the function. */ |
375 | void *(*start_routine) (void *); |
376 | void *arg; |
377 | |
378 | /* Debug state. */ |
379 | td_eventbuf_t eventbuf; |
380 | /* Next descriptor with a pending event. */ |
381 | struct pthread *nextevent; |
382 | |
383 | /* Machine-specific unwind info. */ |
384 | struct _Unwind_Exception exc; |
385 | |
386 | /* If nonzero, pointer to the area allocated for the stack and guard. */ |
387 | void *stackblock; |
388 | /* Size of the stackblock area including the guard. */ |
389 | size_t stackblock_size; |
390 | /* Size of the included guard area. */ |
391 | size_t guardsize; |
392 | /* This is what the user specified and what we will report. */ |
393 | size_t reported_guardsize; |
394 | |
395 | /* Thread Priority Protection data. */ |
396 | struct priority_protection_data *tpp; |
397 | |
398 | /* Resolver state. */ |
399 | struct __res_state res; |
400 | |
401 | /* Signal mask for the new thread. Used during thread startup to |
402 | restore the signal mask. (Threads are launched with all signals |
403 | masked.) */ |
404 | sigset_t sigmask; |
405 | |
  /* Indicates whether this is a C11 thread created by thrd_create.  */
407 | bool c11; |
408 | |
  /* Used by strsignal.  */
410 | struct tls_internal_t tls_state; |
411 | |
412 | /* This member must be last. */ |
413 | char end_padding[]; |
414 | |
415 | #define PTHREAD_STRUCT_END_PADDING \ |
416 | (sizeof (struct pthread) - offsetof (struct pthread, end_padding)) |
417 | } __attribute ((aligned (TCB_ALIGNMENT))); |
418 | |
419 | |
420 | #endif /* descr.h */ |
421 | |