1 | /* Copyright (C) 2002-2017 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <http://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <assert.h> |
20 | #include <errno.h> |
21 | #include <signal.h> |
22 | #include <stdint.h> |
23 | #include <string.h> |
24 | #include <unistd.h> |
25 | #include <sys/mman.h> |
26 | #include <sys/param.h> |
27 | #include <dl-sysdep.h> |
28 | #include <dl-tls.h> |
29 | #include <tls.h> |
30 | #include <list.h> |
31 | #include <lowlevellock.h> |
32 | #include <futex-internal.h> |
33 | #include <kernel-features.h> |
34 | #include <stack-aliasing.h> |
35 | |
36 | |
37 | #ifndef NEED_SEPARATE_REGISTER_STACK |
38 | |
39 | /* Most architectures have exactly one stack pointer. Some have more. */ |
40 | # define STACK_VARIABLES void *stackaddr = NULL |
41 | |
42 | /* How to pass the values to the 'create_thread' function. */ |
43 | # define STACK_VARIABLES_ARGS stackaddr |
44 | |
/* How to declare the function that receives these parameters.  */
46 | # define STACK_VARIABLES_PARMS void *stackaddr |
47 | |
48 | /* How to declare allocate_stack. */ |
49 | # define ALLOCATE_STACK_PARMS void **stack |
50 | |
51 | /* This is how the function is called. We do it this way to allow |
52 | other variants of the function to have more parameters. */ |
53 | # define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr) |
54 | |
55 | #else |
56 | |
57 | /* We need two stacks. The kernel will place them but we have to tell |
58 | the kernel about the size of the reserved address space. */ |
59 | # define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0 |
60 | |
61 | /* How to pass the values to the 'create_thread' function. */ |
62 | # define STACK_VARIABLES_ARGS stackaddr, stacksize |
63 | |
/* How to declare the function that receives these parameters.  */
65 | # define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize |
66 | |
67 | /* How to declare allocate_stack. */ |
68 | # define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize |
69 | |
70 | /* This is how the function is called. We do it this way to allow |
71 | other variants of the function to have more parameters. */ |
72 | # define ALLOCATE_STACK(attr, pd) \ |
73 | allocate_stack (attr, pd, &stackaddr, &stacksize) |
74 | |
75 | #endif |
76 | |
77 | |
78 | /* Default alignment of stack. */ |
79 | #ifndef STACK_ALIGN |
80 | # define STACK_ALIGN __alignof__ (long double) |
81 | #endif |
82 | |
83 | /* Default value for minimal stack size after allocating thread |
84 | descriptor and guard. */ |
85 | #ifndef MINIMAL_REST_STACK |
86 | # define MINIMAL_REST_STACK 4096 |
87 | #endif |
88 | |
89 | |
90 | /* Newer kernels have the MAP_STACK flag to indicate a mapping is used for |
91 | a stack. Use it when possible. */ |
92 | #ifndef MAP_STACK |
93 | # define MAP_STACK 0 |
94 | #endif |
95 | |
/* This yields the pointer that the TLS support code uses as the thread
   pointer.  */
97 | #if TLS_TCB_AT_TP |
98 | # define TLS_TPADJ(pd) (pd) |
99 | #elif TLS_DTV_AT_TP |
100 | # define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE)) |
101 | #endif |
102 | |
/* Cache handling for stacks which are no longer in use but not yet
   freed.  */
104 | |
/* Maximum size of the stack cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
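/* Current combined size of all stacks in the cache, in bytes.  */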
107 | static size_t stack_cache_actsize; |
108 | |
/* Low-level lock protecting the stack cache, the lists below, and
   stack_cache_actsize.  */
110 | static int stack_cache_lock = LLL_LOCK_INITIALIZER; |
111 | |
/* List of cached stacks which are currently not in use.  */
113 | static LIST_HEAD (stack_cache); |
114 | |
115 | /* List of the stacks in use. */ |
116 | static LIST_HEAD (stack_used); |
117 | |
/* We need to record which list operation is in progress so that, in
   case of an asynchronous interruption due to a fork() call, we can
   repair the lists in the child.  The low bit marks an add operation
   (as opposed to a delete); the remaining bits hold the list element
   involved.  */
121 | static uintptr_t in_flight_stack; |
122 | |
123 | /* List of the threads with user provided stacks in use. No need to |
124 | initialize this, since it's done in __pthread_initialize_minimal. */ |
125 | list_t __stack_user __attribute__ ((nocommon)); |
126 | hidden_data_def (__stack_user) |
127 | |
128 | #if COLORING_INCREMENT != 0 |
129 | /* Number of threads created. */ |
130 | static unsigned int nptl_ncreated; |
131 | #endif |
132 | |
133 | |
/* Check whether a stack descriptor is free, i.e. not in use by any
   thread anymore.  */
135 | #define FREE_P(descr) ((descr)->tid <= 0) |
136 | |
137 | |
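/* Remove ELEM from the list it is on, recording the operation in
   in_flight_stack so that __reclaim_stacks can replay it if a fork()
   happens in the middle.  */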
138 | static void |
139 | stack_list_del (list_t *elem) |
140 | { |
141 | in_flight_stack = (uintptr_t) elem; |
142 | |
143 | atomic_write_barrier (); |
144 | |
145 | list_del (elem); |
146 | |
147 | atomic_write_barrier (); |
148 | |
149 | in_flight_stack = 0; |
150 | } |
151 | |
152 | |
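/* Add ELEM to the front of LIST, recording the operation (with the low
   bit set to mark it as an add) in in_flight_stack for fork recovery.  */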
153 | static void |
154 | stack_list_add (list_t *elem, list_t *list) |
155 | { |
156 | in_flight_stack = (uintptr_t) elem | 1; |
157 | |
158 | atomic_write_barrier (); |
159 | |
160 | list_add (elem, list); |
161 | |
162 | atomic_write_barrier (); |
163 | |
164 | in_flight_stack = 0; |
165 | } |
166 | |
167 | |
/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */
170 | |
171 | |
/* Get a stack from the cache.  We have to match by size since some
   blocks might be too small or far too large.  */
174 | static struct pthread * |
175 | get_cached_stack (size_t *sizep, void **memp) |
176 | { |
177 | size_t size = *sizep; |
178 | struct pthread *result = NULL; |
179 | list_t *entry; |
180 | |
181 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
182 | |
  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
189 | list_for_each (entry, &stack_cache) |
190 | { |
191 | struct pthread *curr; |
192 | |
193 | curr = list_entry (entry, struct pthread, list); |
194 | if (FREE_P (curr) && curr->stackblock_size >= size) |
195 | { |
196 | if (curr->stackblock_size == size) |
197 | { |
198 | result = curr; |
199 | break; |
200 | } |
201 | |
202 | if (result == NULL |
203 | || result->stackblock_size > curr->stackblock_size) |
204 | result = curr; |
205 | } |
206 | } |
207 | |
208 | if (__builtin_expect (result == NULL, 0) |
      /* Make sure the cached stack is not excessively larger than the
         requested size; in that case we do not use the block.  */
211 | || __builtin_expect (result->stackblock_size > 4 * size, 0)) |
212 | { |
213 | /* Release the lock. */ |
214 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
215 | |
216 | return NULL; |
217 | } |
218 | |
219 | /* Don't allow setxid until cloned. */ |
220 | result->setxid_futex = -1; |
221 | |
222 | /* Dequeue the entry. */ |
223 | stack_list_del (&result->list); |
224 | |
225 | /* And add to the list of stacks in use. */ |
226 | stack_list_add (&result->list, &stack_used); |
227 | |
228 | /* And decrease the cache size. */ |
229 | stack_cache_actsize -= result->stackblock_size; |
230 | |
231 | /* Release the lock early. */ |
232 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
233 | |
234 | /* Report size and location of the stack to the caller. */ |
235 | *sizep = result->stackblock_size; |
236 | *memp = result->stackblock; |
237 | |
238 | /* Cancellation handling is back to the default. */ |
239 | result->cancelhandling = 0; |
240 | result->cleanup = NULL; |
241 | |
242 | /* No pending event. */ |
243 | result->nextevent = NULL; |
244 | |
245 | /* Clear the DTV. */ |
246 | dtv_t *dtv = GET_DTV (TLS_TPADJ (result)); |
247 | for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt) |
248 | free (dtv[1 + cnt].pointer.to_free); |
249 | memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t)); |
250 | |
251 | /* Re-initialize the TLS. */ |
252 | _dl_allocate_tls_init (TLS_TPADJ (result)); |
253 | |
254 | return result; |
255 | } |
256 | |
257 | |
258 | /* Free stacks until cache size is lower than LIMIT. */ |
259 | void |
260 | __free_stacks (size_t limit) |
261 | { |
262 | /* We reduce the size of the cache. Remove the last entries until |
263 | the size is below the limit. */ |
264 | list_t *entry; |
265 | list_t *prev; |
266 | |
267 | /* Search from the end of the list. */ |
268 | list_for_each_prev_safe (entry, prev, &stack_cache) |
269 | { |
270 | struct pthread *curr; |
271 | |
272 | curr = list_entry (entry, struct pthread, list); |
273 | if (FREE_P (curr)) |
274 | { |
275 | /* Unlink the block. */ |
276 | stack_list_del (entry); |
277 | |
278 | /* Account for the freed memory. */ |
279 | stack_cache_actsize -= curr->stackblock_size; |
280 | |
281 | /* Free the memory associated with the ELF TLS. */ |
282 | _dl_deallocate_tls (TLS_TPADJ (curr), false); |
283 | |
284 | /* Remove this block. This should never fail. If it does |
285 | something is really wrong. */ |
286 | if (munmap (curr->stackblock, curr->stackblock_size) != 0) |
287 | abort (); |
288 | |
289 | /* Maybe we have freed enough. */ |
290 | if (stack_cache_actsize <= limit) |
291 | break; |
292 | } |
293 | } |
294 | } |
295 | |
296 | |
/* Add a stack which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
299 | static inline void |
300 | __attribute ((always_inline)) |
301 | queue_stack (struct pthread *stack) |
302 | { |
303 | /* We unconditionally add the stack to the list. The memory may |
304 | still be in use but it will not be reused until the kernel marks |
305 | the stack as not used anymore. */ |
306 | stack_list_add (&stack->list, &stack_cache); |
307 | |
308 | stack_cache_actsize += stack->stackblock_size; |
309 | if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize)) |
310 | __free_stacks (stack_cache_maxsize); |
311 | } |
312 | |
313 | |
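/* Change the protection of the thread stack described by PD so that it
   also allows execution, as required when executable stacks are
   enabled.  Returns 0 on success, otherwise an errno value.  */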
314 | static int |
315 | internal_function |
316 | change_stack_perm (struct pthread *pd |
317 | #ifdef NEED_SEPARATE_REGISTER_STACK |
318 | , size_t pagemask |
319 | #endif |
320 | ) |
321 | { |
322 | #ifdef NEED_SEPARATE_REGISTER_STACK |
323 | void *stack = (pd->stackblock |
324 | + (((((pd->stackblock_size - pd->guardsize) / 2) |
325 | & pagemask) + pd->guardsize) & pagemask)); |
326 | size_t len = pd->stackblock + pd->stackblock_size - stack; |
327 | #elif _STACK_GROWS_DOWN |
328 | void *stack = pd->stackblock + pd->guardsize; |
329 | size_t len = pd->stackblock_size - pd->guardsize; |
330 | #elif _STACK_GROWS_UP |
331 | void *stack = pd->stackblock; |
332 | size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock; |
333 | #else |
334 | # error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP" |
335 | #endif |
336 | if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) |
337 | return errno; |
338 | |
339 | return 0; |
340 | } |
341 | |
342 | |
343 | /* Returns a usable stack for a new thread either by allocating a |
344 | new stack or reusing a cached stack of sufficient size. |
345 | ATTR must be non-NULL and point to a valid pthread_attr. |
346 | PDP must be non-NULL. */ |
347 | static int |
348 | allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, |
349 | ALLOCATE_STACK_PARMS) |
350 | { |
351 | struct pthread *pd; |
352 | size_t size; |
353 | size_t pagesize_m1 = __getpagesize () - 1; |
354 | |
355 | assert (powerof2 (pagesize_m1 + 1)); |
356 | assert (TCB_ALIGNMENT >= STACK_ALIGN); |
357 | |
358 | /* Get the stack size from the attribute if it is set. Otherwise we |
359 | use the default we determined at start time. */ |
360 | if (attr->stacksize != 0) |
361 | size = attr->stacksize; |
362 | else |
363 | { |
364 | lll_lock (__default_pthread_attr_lock, LLL_PRIVATE); |
365 | size = __default_pthread_attr.stacksize; |
366 | lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE); |
367 | } |
368 | |
369 | /* Get memory for the stack. */ |
370 | if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR)) |
371 | { |
372 | uintptr_t adj; |
373 | char *stackaddr = (char *) attr->stackaddr; |
374 | |
375 | /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct |
376 | pthread at the top of the stack block. Later we adjust the guard |
377 | location and stack address to match the _STACK_GROWS_UP case. */ |
378 | if (_STACK_GROWS_UP) |
379 | stackaddr += attr->stacksize; |
380 | |
381 | /* If the user also specified the size of the stack make sure it |
382 | is large enough. */ |
383 | if (attr->stacksize != 0 |
384 | && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK)) |
385 | return EINVAL; |
386 | |
387 | /* Adjust stack size for alignment of the TLS block. */ |
388 | #if TLS_TCB_AT_TP |
389 | adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE) |
390 | & __static_tls_align_m1; |
391 | assert (size > adj + TLS_TCB_SIZE); |
392 | #elif TLS_DTV_AT_TP |
393 | adj = ((uintptr_t) stackaddr - __static_tls_size) |
394 | & __static_tls_align_m1; |
395 | assert (size > adj); |
396 | #endif |
397 | |
398 | /* The user provided some memory. Let's hope it matches the |
399 | size... We do not allocate guard pages if the user provided |
400 | the stack. It is the user's responsibility to do this if it |
401 | is wanted. */ |
402 | #if TLS_TCB_AT_TP |
403 | pd = (struct pthread *) ((uintptr_t) stackaddr |
404 | - TLS_TCB_SIZE - adj); |
405 | #elif TLS_DTV_AT_TP |
406 | pd = (struct pthread *) (((uintptr_t) stackaddr |
407 | - __static_tls_size - adj) |
408 | - TLS_PRE_TCB_SIZE); |
409 | #endif |
410 | |
411 | /* The user provided stack memory needs to be cleared. */ |
412 | memset (pd, '\0', sizeof (struct pthread)); |
413 | |
414 | /* The first TSD block is included in the TCB. */ |
415 | pd->specific[0] = pd->specific_1stblock; |
416 | |
417 | /* Remember the stack-related values. */ |
418 | pd->stackblock = (char *) stackaddr - size; |
419 | pd->stackblock_size = size; |
420 | |
421 | /* This is a user-provided stack. It will not be queued in the |
422 | stack cache nor will the memory (except the TLS memory) be freed. */ |
423 | pd->user_stack = true; |
424 | |
425 | /* This is at least the second thread. */ |
426 | pd->header.multiple_threads = 1; |
427 | #ifndef TLS_MULTIPLE_THREADS_IN_TCB |
428 | __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1; |
429 | #endif |
430 | |
431 | #ifndef __ASSUME_PRIVATE_FUTEX |
      /* The thread must know whether private futexes are supported.  */
433 | pd->header.private_futex = THREAD_GETMEM (THREAD_SELF, |
434 | header.private_futex); |
435 | #endif |
436 | |
437 | #ifdef NEED_DL_SYSINFO |
438 | SETUP_THREAD_SYSINFO (pd); |
439 | #endif |
440 | |
441 | /* Don't allow setxid until cloned. */ |
442 | pd->setxid_futex = -1; |
443 | |
444 | /* Allocate the DTV for this thread. */ |
445 | if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL) |
446 | { |
447 | /* Something went wrong. */ |
448 | assert (errno == ENOMEM); |
449 | return errno; |
450 | } |
451 | |
452 | |
453 | /* Prepare to modify global data. */ |
454 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
455 | |
456 | /* And add to the list of stacks in use. */ |
457 | list_add (&pd->list, &__stack_user); |
458 | |
459 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
460 | } |
461 | else |
462 | { |
463 | /* Allocate some anonymous memory. If possible use the cache. */ |
464 | size_t guardsize; |
465 | size_t reqsize; |
466 | void *mem; |
467 | const int prot = (PROT_READ | PROT_WRITE |
468 | | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0)); |
469 | |
470 | #if COLORING_INCREMENT != 0 |
471 | /* Add one more page for stack coloring. Don't do it for stacks |
472 | with 16 times pagesize or larger. This might just cause |
473 | unnecessary misalignment. */ |
474 | if (size <= 16 * pagesize_m1) |
475 | size += pagesize_m1 + 1; |
476 | #endif |
477 | |
478 | /* Adjust the stack size for alignment. */ |
479 | size &= ~__static_tls_align_m1; |
480 | assert (size != 0); |
481 | |
      /* Make sure the size of the stack is large enough for the guard
         and also the thread descriptor.  */
484 | guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1; |
485 | if (__builtin_expect (size < ((guardsize + __static_tls_size |
486 | + MINIMAL_REST_STACK + pagesize_m1) |
487 | & ~pagesize_m1), |
488 | 0)) |
489 | /* The stack is too small (or the guard too large). */ |
490 | return EINVAL; |
491 | |
492 | /* Try to get a stack from the cache. */ |
493 | reqsize = size; |
494 | pd = get_cached_stack (&size, &mem); |
495 | if (pd == NULL) |
496 | { |
497 | /* To avoid aliasing effects on a larger scale than pages we |
498 | adjust the allocated stack size if necessary. This way |
499 | allocations directly following each other will not have |
500 | aliasing problems. */ |
501 | #if MULTI_PAGE_ALIASING != 0 |
502 | if ((size % MULTI_PAGE_ALIASING) == 0) |
503 | size += pagesize_m1 + 1; |
504 | #endif |
505 | |
506 | mem = mmap (NULL, size, prot, |
507 | MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); |
508 | |
509 | if (__glibc_unlikely (mem == MAP_FAILED)) |
510 | return errno; |
511 | |
512 | /* SIZE is guaranteed to be greater than zero. |
513 | So we can never get a null pointer back from mmap. */ |
514 | assert (mem != NULL); |
515 | |
516 | #if COLORING_INCREMENT != 0 |
517 | /* Atomically increment NCREATED. */ |
518 | unsigned int ncreated = atomic_increment_val (&nptl_ncreated); |
519 | |
          /* We choose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
526 | size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1; |
527 | |
          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
530 | if (__glibc_unlikely ((coloring & __static_tls_align_m1) != 0)) |
531 | coloring = (((coloring + __static_tls_align_m1) |
532 | & ~(__static_tls_align_m1)) |
533 | & ~pagesize_m1); |
534 | #else |
535 | /* Unless specified we do not make any adjustments. */ |
536 | # define coloring 0 |
537 | #endif |
538 | |
539 | /* Place the thread descriptor at the end of the stack. */ |
540 | #if TLS_TCB_AT_TP |
541 | pd = (struct pthread *) ((char *) mem + size - coloring) - 1; |
542 | #elif TLS_DTV_AT_TP |
543 | pd = (struct pthread *) ((((uintptr_t) mem + size - coloring |
544 | - __static_tls_size) |
545 | & ~__static_tls_align_m1) |
546 | - TLS_PRE_TCB_SIZE); |
547 | #endif |
548 | |
549 | /* Remember the stack-related values. */ |
550 | pd->stackblock = mem; |
551 | pd->stackblock_size = size; |
552 | |
          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this descriptor.  */
556 | pd->specific[0] = pd->specific_1stblock; |
557 | |
558 | /* This is at least the second thread. */ |
559 | pd->header.multiple_threads = 1; |
560 | #ifndef TLS_MULTIPLE_THREADS_IN_TCB |
561 | __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1; |
562 | #endif |
563 | |
564 | #ifndef __ASSUME_PRIVATE_FUTEX |
          /* The thread must know whether private futexes are supported.  */
566 | pd->header.private_futex = THREAD_GETMEM (THREAD_SELF, |
567 | header.private_futex); |
568 | #endif |
569 | |
570 | #ifdef NEED_DL_SYSINFO |
571 | SETUP_THREAD_SYSINFO (pd); |
572 | #endif |
573 | |
574 | /* Don't allow setxid until cloned. */ |
575 | pd->setxid_futex = -1; |
576 | |
577 | /* Allocate the DTV for this thread. */ |
578 | if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL) |
579 | { |
580 | /* Something went wrong. */ |
581 | assert (errno == ENOMEM); |
582 | |
583 | /* Free the stack memory we just allocated. */ |
584 | (void) munmap (mem, size); |
585 | |
586 | return errno; |
587 | } |
588 | |
589 | |
590 | /* Prepare to modify global data. */ |
591 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
592 | |
593 | /* And add to the list of stacks in use. */ |
594 | stack_list_add (&pd->list, &stack_used); |
595 | |
596 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
597 | |
598 | |
599 | /* There might have been a race. Another thread might have |
600 | caused the stacks to get exec permission while this new |
601 | stack was prepared. Detect if this was possible and |
602 | change the permission if necessary. */ |
603 | if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0 |
604 | && (prot & PROT_EXEC) == 0, 0)) |
605 | { |
606 | int err = change_stack_perm (pd |
607 | #ifdef NEED_SEPARATE_REGISTER_STACK |
608 | , ~pagesize_m1 |
609 | #endif |
610 | ); |
611 | if (err != 0) |
612 | { |
613 | /* Free the stack memory we just allocated. */ |
614 | (void) munmap (mem, size); |
615 | |
616 | return err; |
617 | } |
618 | } |
619 | |
620 | |
          /* Note that all of the stack and the thread descriptor are
             zeroed.  This means we do not have to initialize fields
             with an initial value of zero.  This is specifically true
             for the 'tid' field which is always set back to zero once
             the stack is not used anymore and for the 'guardsize'
             field which will be read next.  */
627 | } |
628 | |
629 | /* Create or resize the guard area if necessary. */ |
630 | if (__glibc_unlikely (guardsize > pd->guardsize)) |
631 | { |
632 | #ifdef NEED_SEPARATE_REGISTER_STACK |
633 | char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1); |
634 | #elif _STACK_GROWS_DOWN |
635 | char *guard = mem; |
636 | #elif _STACK_GROWS_UP |
637 | char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1); |
638 | #endif |
639 | if (mprotect (guard, guardsize, PROT_NONE) != 0) |
640 | { |
641 | mprot_error: |
642 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
643 | |
644 | /* Remove the thread from the list. */ |
645 | stack_list_del (&pd->list); |
646 | |
647 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
648 | |
649 | /* Get rid of the TLS block we allocated. */ |
650 | _dl_deallocate_tls (TLS_TPADJ (pd), false); |
651 | |
              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we had better not use it
                 anymore.  We also ignore possible errors; there is
                 nothing we could do.  */
657 | (void) munmap (mem, size); |
658 | |
659 | return errno; |
660 | } |
661 | |
662 | pd->guardsize = guardsize; |
663 | } |
664 | else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize, |
665 | 0)) |
666 | { |
667 | /* The old guard area is too large. */ |
668 | |
669 | #ifdef NEED_SEPARATE_REGISTER_STACK |
670 | char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1); |
671 | char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1); |
672 | |
673 | if (oldguard < guard |
674 | && mprotect (oldguard, guard - oldguard, prot) != 0) |
675 | goto mprot_error; |
676 | |
677 | if (mprotect (guard + guardsize, |
678 | oldguard + pd->guardsize - guard - guardsize, |
679 | prot) != 0) |
680 | goto mprot_error; |
681 | #elif _STACK_GROWS_DOWN |
682 | if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize, |
683 | prot) != 0) |
684 | goto mprot_error; |
685 | #elif _STACK_GROWS_UP |
686 | if (mprotect ((char *) pd - pd->guardsize, |
687 | pd->guardsize - guardsize, prot) != 0) |
688 | goto mprot_error; |
689 | #endif |
690 | |
691 | pd->guardsize = guardsize; |
692 | } |
      /* pthread_getattr_np() needs to report the guard size requested
         in the attribute, regardless of how large the guard actually
         in use is.  */
696 | pd->reported_guardsize = guardsize; |
697 | } |
698 | |
699 | /* Initialize the lock. We have to do this unconditionally since the |
700 | stillborn thread could be canceled while the lock is taken. */ |
701 | pd->lock = LLL_LOCK_INITIALIZER; |
702 | |
703 | /* The robust mutex lists also need to be initialized |
704 | unconditionally because the cleanup for the previous stack owner |
705 | might have happened in the kernel. */ |
706 | pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock) |
707 | - offsetof (pthread_mutex_t, |
708 | __data.__list.__next)); |
709 | pd->robust_head.list_op_pending = NULL; |
710 | #ifdef __PTHREAD_MUTEX_HAVE_PREV |
711 | pd->robust_prev = &pd->robust_head; |
712 | #endif |
713 | pd->robust_head.list = &pd->robust_head; |
714 | |
715 | /* We place the thread descriptor at the end of the stack. */ |
716 | *pdp = pd; |
717 | |
718 | #if _STACK_GROWS_DOWN |
719 | void *stacktop; |
720 | |
721 | # if TLS_TCB_AT_TP |
722 | /* The stack begins before the TCB and the static TLS block. */ |
723 | stacktop = ((char *) (pd + 1) - __static_tls_size); |
724 | # elif TLS_DTV_AT_TP |
725 | stacktop = (char *) (pd - 1); |
726 | # endif |
727 | |
728 | # ifdef NEED_SEPARATE_REGISTER_STACK |
729 | *stack = pd->stackblock; |
730 | *stacksize = stacktop - *stack; |
731 | # else |
732 | *stack = stacktop; |
733 | # endif |
734 | #else |
735 | *stack = pd->stackblock; |
736 | #endif |
737 | |
738 | return 0; |
739 | } |
740 | |
741 | |
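/* Return the stack of the terminated thread PD to the cache, or, for a
   user-provided stack, just free the memory associated with its TLS.  */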
742 | void |
743 | internal_function |
744 | __deallocate_stack (struct pthread *pd) |
745 | { |
746 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
747 | |
  /* Remove the thread from the list of threads with stacks in use.  */
750 | stack_list_del (&pd->list); |
751 | |
752 | /* Not much to do. Just free the mmap()ed memory. Note that we do |
753 | not reset the 'used' flag in the 'tid' field. This is done by |
754 | the kernel. If no thread has been created yet this field is |
755 | still zero. */ |
756 | if (__glibc_likely (! pd->user_stack)) |
757 | (void) queue_stack (pd); |
758 | else |
759 | /* Free the memory associated with the ELF TLS. */ |
760 | _dl_deallocate_tls (TLS_TPADJ (pd), false); |
761 | |
762 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
763 | } |
764 | |
765 | |
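/* Make all thread stacks executable: first the main thread's stack via
   _dl_make_stack_executable, then every stack on the in-use and cache
   lists.  Returns 0 on success or an errno value.  */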
766 | int |
767 | internal_function |
768 | __make_stacks_executable (void **stack_endp) |
769 | { |
770 | /* First the main thread's stack. */ |
771 | int err = _dl_make_stack_executable (stack_endp); |
772 | if (err != 0) |
773 | return err; |
774 | |
775 | #ifdef NEED_SEPARATE_REGISTER_STACK |
776 | const size_t pagemask = ~(__getpagesize () - 1); |
777 | #endif |
778 | |
779 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
780 | |
781 | list_t *runp; |
782 | list_for_each (runp, &stack_used) |
783 | { |
784 | err = change_stack_perm (list_entry (runp, struct pthread, list) |
785 | #ifdef NEED_SEPARATE_REGISTER_STACK |
786 | , pagemask |
787 | #endif |
788 | ); |
789 | if (err != 0) |
790 | break; |
791 | } |
792 | |
793 | /* Also change the permission for the currently unused stacks. This |
794 | might be wasted time but better spend it here than adding a check |
795 | in the fast path. */ |
796 | if (err == 0) |
797 | list_for_each (runp, &stack_cache) |
798 | { |
799 | err = change_stack_perm (list_entry (runp, struct pthread, list) |
800 | #ifdef NEED_SEPARATE_REGISTER_STACK |
801 | , pagemask |
802 | #endif |
803 | ); |
804 | if (err != 0) |
805 | break; |
806 | } |
807 | |
808 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
809 | |
810 | return err; |
811 | } |
812 | |
813 | |
/* After a fork() call the memory layout of the child is the same as
   that of the parent, but only one thread is running.  All stacks
   except that of the running thread are not used anymore.  We have to
   recycle them.  */
818 | void |
819 | __reclaim_stacks (void) |
820 | { |
821 | struct pthread *self = (struct pthread *) THREAD_SELF; |
822 | |
  /* No locking is necessary.  The calling thread is the only thread
     running.  But we have to be aware that we might have interrupted
     a list operation.  */
826 | |
827 | if (in_flight_stack != 0) |
828 | { |
829 | bool add_p = in_flight_stack & 1; |
830 | list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1); |
831 | |
832 | if (add_p) |
833 | { |
          /* We always add at the beginning of the list.  So in this
             case we only need to check the beginning of these lists to
             see whether the pointers at the head of either list are
             inconsistent.  */
837 | list_t *l = NULL; |
838 | |
839 | if (stack_used.next->prev != &stack_used) |
840 | l = &stack_used; |
841 | else if (stack_cache.next->prev != &stack_cache) |
842 | l = &stack_cache; |
843 | |
844 | if (l != NULL) |
845 | { |
846 | assert (l->next->prev == elem); |
847 | elem->next = l->next; |
848 | elem->prev = l; |
849 | l->next = elem; |
850 | } |
851 | } |
852 | else |
853 | { |
854 | /* We can simply always replay the delete operation. */ |
855 | elem->next->prev = elem->prev; |
856 | elem->prev->next = elem->next; |
857 | } |
858 | } |
859 | |
  /* Mark all stacks, except that of the still-running thread, as free.  */
861 | list_t *runp; |
862 | list_for_each (runp, &stack_used) |
863 | { |
864 | struct pthread *curp = list_entry (runp, struct pthread, list); |
865 | if (curp != self) |
866 | { |
867 | /* This marks the stack as free. */ |
868 | curp->tid = 0; |
869 | |
870 | /* Account for the size of the stack. */ |
871 | stack_cache_actsize += curp->stackblock_size; |
872 | |
873 | if (curp->specific_used) |
874 | { |
875 | /* Clear the thread-specific data. */ |
876 | memset (curp->specific_1stblock, '\0', |
877 | sizeof (curp->specific_1stblock)); |
878 | |
879 | curp->specific_used = false; |
880 | |
881 | for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt) |
882 | if (curp->specific[cnt] != NULL) |
883 | { |
884 | memset (curp->specific[cnt], '\0', |
885 | sizeof (curp->specific_1stblock)); |
886 | |
                    /* We allocated this block and do not free it here,
                       so set the flag again.  */
889 | curp->specific_used = true; |
890 | } |
891 | } |
892 | } |
893 | } |
894 | |
  /* Move the stacks of all formerly running threads to the cache; the
     current thread's entry is removed again below.  */
896 | list_splice (&stack_used, &stack_cache); |
897 | |
  /* Remove the entry for the current thread from the cache list and
     add it back to the appropriate list of stacks in use.  Which of
     the two lists it goes on is decided by the user_stack flag.  */
901 | stack_list_del (&self->list); |
902 | |
903 | /* Re-initialize the lists for all the threads. */ |
904 | INIT_LIST_HEAD (&stack_used); |
905 | INIT_LIST_HEAD (&__stack_user); |
906 | |
907 | if (__glibc_unlikely (THREAD_GETMEM (self, user_stack))) |
908 | list_add (&self->list, &__stack_user); |
909 | else |
910 | list_add (&self->list, &stack_used); |
911 | |
912 | /* There is one thread running. */ |
913 | __nptl_nthreads = 1; |
914 | |
915 | in_flight_stack = 0; |
916 | |
917 | /* Initialize locks. */ |
918 | stack_cache_lock = LLL_LOCK_INITIALIZER; |
919 | __default_pthread_attr_lock = LLL_LOCK_INITIALIZER; |
920 | } |
921 | |
922 | |
923 | #if HP_TIMING_AVAIL |
924 | # undef __find_thread_by_id |
925 | /* Find a thread given the thread ID. */ |
926 | attribute_hidden |
927 | struct pthread * |
928 | __find_thread_by_id (pid_t tid) |
929 | { |
930 | struct pthread *result = NULL; |
931 | |
932 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
933 | |
934 | /* Iterate over the list with system-allocated threads first. */ |
935 | list_t *runp; |
936 | list_for_each (runp, &stack_used) |
937 | { |
938 | struct pthread *curp; |
939 | |
940 | curp = list_entry (runp, struct pthread, list); |
941 | |
942 | if (curp->tid == tid) |
943 | { |
944 | result = curp; |
945 | goto out; |
946 | } |
947 | } |
948 | |
949 | /* Now the list with threads using user-allocated stacks. */ |
950 | list_for_each (runp, &__stack_user) |
951 | { |
952 | struct pthread *curp; |
953 | |
954 | curp = list_entry (runp, struct pthread, list); |
955 | |
956 | if (curp->tid == tid) |
957 | { |
958 | result = curp; |
959 | goto out; |
960 | } |
961 | } |
962 | |
963 | out: |
964 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
965 | |
966 | return result; |
967 | } |
968 | #endif |
969 | |
970 | |
971 | #ifdef SIGSETXID |
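/* Mark the thread T so that it does not exit before the pending set*id
   operation has been applied to it, waiting first for T to finish
   being cloned if necessary.  */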
972 | static void |
973 | internal_function |
974 | setxid_mark_thread (struct xid_command *cmdp, struct pthread *t) |
975 | { |
976 | int ch; |
977 | |
978 | /* Wait until this thread is cloned. */ |
979 | if (t->setxid_futex == -1 |
980 | && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1)) |
981 | do |
982 | futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE); |
983 | while (t->setxid_futex == -2); |
984 | |
985 | /* Don't let the thread exit before the setxid handler runs. */ |
986 | t->setxid_futex = 0; |
987 | |
988 | do |
989 | { |
990 | ch = t->cancelhandling; |
991 | |
992 | /* If the thread is exiting right now, ignore it. */ |
993 | if ((ch & EXITING_BITMASK) != 0) |
994 | { |
995 | /* Release the futex if there is no other setxid in |
996 | progress. */ |
997 | if ((ch & SETXID_BITMASK) == 0) |
998 | { |
999 | t->setxid_futex = 1; |
1000 | futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE); |
1001 | } |
1002 | return; |
1003 | } |
1004 | } |
1005 | while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling, |
1006 | ch | SETXID_BITMASK, ch)); |
1007 | } |
1008 | |
1009 | |
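/* Clear the SETXID flag in T's cancellation word, if set, and wake T
   in case it is blocked on its setxid futex.  */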
1010 | static void |
1011 | internal_function |
1012 | setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t) |
1013 | { |
1014 | int ch; |
1015 | |
1016 | do |
1017 | { |
1018 | ch = t->cancelhandling; |
1019 | if ((ch & SETXID_BITMASK) == 0) |
1020 | return; |
1021 | } |
1022 | while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling, |
1023 | ch & ~SETXID_BITMASK, ch)); |
1024 | |
1025 | /* Release the futex just in case. */ |
1026 | t->setxid_futex = 1; |
1027 | futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE); |
1028 | } |
1029 | |
1030 | |
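/* Send the SIGSETXID signal to T if it is still marked for the set*id
   operation.  Returns 1 if the signal was sent (and CMDP->cntr was
   incremented), 0 otherwise.  */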
1031 | static int |
1032 | internal_function |
1033 | setxid_signal_thread (struct xid_command *cmdp, struct pthread *t) |
1034 | { |
1035 | if ((t->cancelhandling & SETXID_BITMASK) == 0) |
1036 | return 0; |
1037 | |
1038 | int val; |
1039 | pid_t pid = __getpid (); |
1040 | INTERNAL_SYSCALL_DECL (err); |
1041 | val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, t->tid, SIGSETXID); |
1042 | |
  /* If this failed, the thread must not have started yet or must have
     already exited.  */
1044 | if (!INTERNAL_SYSCALL_ERROR_P (val, err)) |
1045 | { |
1046 | atomic_increment (&cmdp->cntr); |
1047 | return 1; |
1048 | } |
1049 | else |
1050 | return 0; |
1051 | } |
1052 | |
/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
1057 | void |
1058 | attribute_hidden |
1059 | __nptl_setxid_error (struct xid_command *cmdp, int error) |
1060 | { |
1061 | do |
1062 | { |
1063 | int olderror = cmdp->error; |
1064 | if (olderror == error) |
1065 | break; |
1066 | if (olderror != -1) |
1067 | /* Mismatch between current and previous results. */ |
1068 | abort (); |
1069 | } |
1070 | while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1)); |
1071 | } |
1072 | |
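/* Perform the set*id system call described by CMDP for the calling
   thread and, by sending SIGSETXID, for every other live thread as
   well.  Returns the result of the system call for the calling thread,
   or -1 with errno set on failure.  */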
1073 | int |
1074 | attribute_hidden |
1075 | __nptl_setxid (struct xid_command *cmdp) |
1076 | { |
1077 | int signalled; |
1078 | int result; |
1079 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
1080 | |
1081 | __xidcmd = cmdp; |
1082 | cmdp->cntr = 0; |
1083 | cmdp->error = -1; |
1084 | |
1085 | struct pthread *self = THREAD_SELF; |
1086 | |
1087 | /* Iterate over the list with system-allocated threads first. */ |
1088 | list_t *runp; |
1089 | list_for_each (runp, &stack_used) |
1090 | { |
1091 | struct pthread *t = list_entry (runp, struct pthread, list); |
1092 | if (t == self) |
1093 | continue; |
1094 | |
1095 | setxid_mark_thread (cmdp, t); |
1096 | } |
1097 | |
1098 | /* Now the list with threads using user-allocated stacks. */ |
1099 | list_for_each (runp, &__stack_user) |
1100 | { |
1101 | struct pthread *t = list_entry (runp, struct pthread, list); |
1102 | if (t == self) |
1103 | continue; |
1104 | |
1105 | setxid_mark_thread (cmdp, t); |
1106 | } |
1107 | |
1108 | /* Iterate until we don't succeed in signalling anyone. That means |
1109 | we have gotten all running threads, and their children will be |
1110 | automatically correct once started. */ |
1111 | do |
1112 | { |
1113 | signalled = 0; |
1114 | |
1115 | list_for_each (runp, &stack_used) |
1116 | { |
1117 | struct pthread *t = list_entry (runp, struct pthread, list); |
1118 | if (t == self) |
1119 | continue; |
1120 | |
1121 | signalled += setxid_signal_thread (cmdp, t); |
1122 | } |
1123 | |
1124 | list_for_each (runp, &__stack_user) |
1125 | { |
1126 | struct pthread *t = list_entry (runp, struct pthread, list); |
1127 | if (t == self) |
1128 | continue; |
1129 | |
1130 | signalled += setxid_signal_thread (cmdp, t); |
1131 | } |
1132 | |
1133 | int cur = cmdp->cntr; |
1134 | while (cur != 0) |
1135 | { |
1136 | futex_wait_simple ((unsigned int *) &cmdp->cntr, cur, |
1137 | FUTEX_PRIVATE); |
1138 | cur = cmdp->cntr; |
1139 | } |
1140 | } |
1141 | while (signalled != 0); |
1142 | |
1143 | /* Clean up flags, so that no thread blocks during exit waiting |
1144 | for a signal which will never come. */ |
1145 | list_for_each (runp, &stack_used) |
1146 | { |
1147 | struct pthread *t = list_entry (runp, struct pthread, list); |
1148 | if (t == self) |
1149 | continue; |
1150 | |
1151 | setxid_unmark_thread (cmdp, t); |
1152 | } |
1153 | |
1154 | list_for_each (runp, &__stack_user) |
1155 | { |
1156 | struct pthread *t = list_entry (runp, struct pthread, list); |
1157 | if (t == self) |
1158 | continue; |
1159 | |
1160 | setxid_unmark_thread (cmdp, t); |
1161 | } |
1162 | |
  /* This must be last, otherwise the current thread might not have
     permission to send the SIGSETXID signal to the other threads.  */
1165 | INTERNAL_SYSCALL_DECL (err); |
1166 | result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3, |
1167 | cmdp->id[0], cmdp->id[1], cmdp->id[2]); |
1168 | int error = 0; |
1169 | if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err))) |
1170 | { |
1171 | error = INTERNAL_SYSCALL_ERRNO (result, err); |
1172 | __set_errno (error); |
1173 | result = -1; |
1174 | } |
1175 | __nptl_setxid_error (cmdp, error); |
1176 | |
1177 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
1178 | return result; |
1179 | } |
1180 | #endif /* SIGSETXID. */ |
1181 | |
1182 | |
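/* Copy the TLS initialization image of MAP into the static TLS block
   of the thread described by CURP and clear the remainder of the
   block.  */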
1183 | static inline void __attribute__((always_inline)) |
1184 | init_one_static_tls (struct pthread *curp, struct link_map *map) |
1185 | { |
1186 | # if TLS_TCB_AT_TP |
1187 | void *dest = (char *) curp - map->l_tls_offset; |
1188 | # elif TLS_DTV_AT_TP |
1189 | void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE; |
1190 | # else |
1191 | # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" |
1192 | # endif |
1193 | |
1194 | /* Initialize the memory. */ |
1195 | memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size), |
1196 | '\0', map->l_tls_blocksize - map->l_tls_initimage_size); |
1197 | } |
1198 | |
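/* Initialize the static TLS block of MAP in every thread currently on
   the in-use and user-stack lists.  */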
1199 | void |
1200 | attribute_hidden |
1201 | __pthread_init_static_tls (struct link_map *map) |
1202 | { |
1203 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
1204 | |
1205 | /* Iterate over the list with system-allocated threads first. */ |
1206 | list_t *runp; |
1207 | list_for_each (runp, &stack_used) |
1208 | init_one_static_tls (list_entry (runp, struct pthread, list), map); |
1209 | |
1210 | /* Now the list with threads using user-allocated stacks. */ |
1211 | list_for_each (runp, &__stack_user) |
1212 | init_one_static_tls (list_entry (runp, struct pthread, list), map); |
1213 | |
1214 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
1215 | } |
1216 | |
1217 | |
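/* Wait until no other thread is inside a global scope (gscope)
   critical section anymore.  For each thread still inside one, set its
   flag to THREAD_GSCOPE_FLAG_WAIT and block on the flag's futex until
   that thread resets it.  */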
1218 | void |
1219 | attribute_hidden |
1220 | __wait_lookup_done (void) |
1221 | { |
1222 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
1223 | |
1224 | struct pthread *self = THREAD_SELF; |
1225 | |
1226 | /* Iterate over the list with system-allocated threads first. */ |
1227 | list_t *runp; |
1228 | list_for_each (runp, &stack_used) |
1229 | { |
1230 | struct pthread *t = list_entry (runp, struct pthread, list); |
1231 | if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED) |
1232 | continue; |
1233 | |
1234 | int *const gscope_flagp = &t->header.gscope_flag; |
1235 | |
1236 | /* We have to wait until this thread is done with the global |
1237 | scope. First tell the thread that we are waiting and |
1238 | possibly have to be woken. */ |
1239 | if (atomic_compare_and_exchange_bool_acq (gscope_flagp, |
1240 | THREAD_GSCOPE_FLAG_WAIT, |
1241 | THREAD_GSCOPE_FLAG_USED)) |
1242 | continue; |
1243 | |
1244 | do |
1245 | futex_wait_simple ((unsigned int *) gscope_flagp, |
1246 | THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE); |
1247 | while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT); |
1248 | } |
1249 | |
1250 | /* Now the list with threads using user-allocated stacks. */ |
1251 | list_for_each (runp, &__stack_user) |
1252 | { |
1253 | struct pthread *t = list_entry (runp, struct pthread, list); |
1254 | if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED) |
1255 | continue; |
1256 | |
1257 | int *const gscope_flagp = &t->header.gscope_flag; |
1258 | |
1259 | /* We have to wait until this thread is done with the global |
1260 | scope. First tell the thread that we are waiting and |
1261 | possibly have to be woken. */ |
1262 | if (atomic_compare_and_exchange_bool_acq (gscope_flagp, |
1263 | THREAD_GSCOPE_FLAG_WAIT, |
1264 | THREAD_GSCOPE_FLAG_USED)) |
1265 | continue; |
1266 | |
1267 | do |
1268 | futex_wait_simple ((unsigned int *) gscope_flagp, |
1269 | THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE); |
1270 | while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT); |
1271 | } |
1272 | |
1273 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
1274 | } |
1275 | |