/* Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <nptl-stack.h>
#include <libc-lock.h>

/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for the minimal stack size left over after allocating
   the thread descriptor and the guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif


/* Newer kernels have the MAP_STACK flag to indicate that a mapping is
   used for a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

/* Get a stack from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &GL (dl_stack_cache))
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (__nptl_stack_in_use (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not excessive.  If it is,
         do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  __nptl_stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  __nptl_stack_list_add (&result->list, &GL (dl_stack_used));

  /* And decrease the cache size.  */
  GL (dl_stack_cache_actsize) -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cancelstate = PTHREAD_CANCEL_ENABLE;
  result->canceltype = PTHREAD_CANCEL_DEFERRED;
  result->cleanup = NULL;
  result->setup_failed = 0;

  /* No pending event.  */
  result->nextevent = NULL;

  result->exiting = false;
  __libc_lock_init (result->exit_lock);
  result->tls_state = (struct tls_internal_t) { 0 };

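  /* The DTV is indexed from -1: dtv[-1].counter holds the number of
     slots, and each slot may still own dynamically allocated TLS
     memory from the previous user of this stack, which has to be
     freed before the descriptor is reused.  */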
  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result), true);

  return result;
}

/* Return the guard page position within the allocated stack.  */
static inline char *
__attribute ((always_inline))
guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
                size_t pagesize_m1)
{
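  /* Three layouts are possible: with a separate register stack (ia64)
     the guard sits in the middle of the allocation, between the two
     stacks; with a downward-growing stack it sits at the lowest
     addresses of the block; with an upward-growing stack it sits just
     below the thread descriptor.  */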
#ifdef NEED_SEPARATE_REGISTER_STACK
  return mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
  return mem;
#elif _STACK_GROWS_UP
  return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
}

/* Given a stack allocated with PROT_NONE, set up the required portions
   with 'prot' flags based on the guard page position.  */
static inline int
setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
                  const int prot)
{
  char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  /* As defined in guard_position, for architectures with a
     downward-growing stack the guard page is always at the start of
     the allocated area.  */
  if (__mprotect (guardend, size - guardsize, prot) != 0)
    return errno;
#else
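  /* The guard splits the block into two usable pieces: give the
     region below the guard and the region from the guard's end to the
     end of the block the requested protection.  */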
  size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
  if (__mprotect (mem, mprots1, prot) != 0)
    return errno;
  size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
  if (__mprotect (guardend, mprots2, prot) != 0)
    return errno;
#endif
  return 0;
}

/* Advise the kernel that the unused part of the stack can be
   reclaimed.  Everything is freed except for the space currently in
   use and the space used for the TCB itself.  */
static __always_inline void
advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize)
{
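  /* CURRENT_STACK_FRAME approximates the stack pointer of the running
     thread; only memory beyond the live frames, keeping
     PTHREAD_STACK_MIN of headroom, is handed back to the kernel with
     MADV_DONTNEED.  */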
  uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME;
  size_t pagesize_m1 = __getpagesize () - 1;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  size_t freesize = (sp - (uintptr_t) mem) & ~pagesize_m1;
  assert (freesize < size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
#else
  /* Page-aligned start of memory to free (higher than or equal
     to current sp plus the minimum stack size).  */
  uintptr_t freeblock = (sp + PTHREAD_STACK_MIN + pagesize_m1) & ~pagesize_m1;
  uintptr_t free_end = (pd - guardsize) & ~pagesize_m1;
  if (free_end > freeblock)
    {
      size_t freesize = free_end - freeblock;
      assert (freesize < size);
      __madvise ((void*) freeblock, freesize, MADV_DONTNEED);
    }
#endif
}

/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                void **stack, size_t *stacksize)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;
  size_t tls_static_size_for_stack = __nptl_tls_static_size_for_stack ();
  size_t tls_static_align_m1 = GLRO (dl_tls_static_align) - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.internal.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }

  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (tls_static_size_for_stack
                                + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & tls_static_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - tls_static_size_for_stack)
            & tls_static_align_m1;
      assert (size > adj);
#endif
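      /* ADJ now gives the number of bytes by which the top of the
         user-supplied block must be moved down so that the TCB (or
         the static TLS block) ends up aligned to the static TLS
         alignment.  */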

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - tls_static_size_for_stack - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user-provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
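      /* Setting multiple_threads switches libc from its
         single-threaded fast paths to locked operation; if the TCB
         cannot hold the flag, a global variable is used instead.  */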
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __libc_multiple_threads = 1;
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }


      /* Prepare to modify global data.  */
      lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &GL (dl_stack_user));

      lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reported_guardsize;
      size_t reqsize;
      void *mem;
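      /* PF_X is set in dl_stack_flags when the executable or one of
         the loaded objects requires an executable stack
         (PT_GNU_STACK); in that case new stacks get PROT_EXEC too.  */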
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

      /* Adjust the stack size for alignment.  */
      size &= ~tls_static_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         also for the thread descriptor.  On some targets there is
         a minimum guard size requirement, ARCH_MIN_GUARD_SIZE, so
         internally enforce it (unless the guard was disabled), but
         report the original guard size for backward compatibility:
         before POSIX 2008 the guardsize was specified to be one page
         by default, which is observable via pthread_attr_getguardsize
         and pthread_getattr_np.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      reported_guardsize = guardsize;
      if (guardsize > 0 && guardsize < ARCH_MIN_GUARD_SIZE)
        guardsize = ARCH_MIN_GUARD_SIZE;
      if (guardsize < attr->guardsize || size + guardsize < guardsize)
        /* Arithmetic overflow.  */
        return EINVAL;
      size += guardsize;
      if (__builtin_expect (size < ((guardsize + tls_static_size_for_stack
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

      /* Try to get a stack from the cache.  */
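      /* Remember the size actually requested; a cached stack may be
         larger, and the difference matters when the guard area is
         resized below.  */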
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* If a guard page is required, avoid committing memory by
             first allocating with PROT_NONE and then setting the
             required permissions on everything but the guard area.  */
          mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size)
                                    - TLS_TCB_SIZE)
                                   & ~tls_static_align_m1);
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size
                                     - tls_static_size_for_stack)
                                    & ~tls_static_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif
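          /* With TLS_TCB_AT_TP the TCB sits at the very top of the
             block; with TLS_DTV_AT_TP the descriptor sits below the
             aligned static TLS block.  Either way PD ends up suitably
             aligned near the top of the allocation.  */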

          /* Now mprotect the required region excluding the guard area.  */
          if (__glibc_likely (guardsize > 0))
            {
              char *guard = guard_position (mem, size, guardsize, pd,
                                            pagesize_m1);
              if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0)
                {
                  __munmap (mem, size);
                  return errno;
                }
            }

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;
          /* Record the guard size of the newly allocated stack so the
             guard-resize code below can skip a redundant mprotect.  */
          pd->guardsize = guardsize;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __libc_multiple_threads = 1;
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) __munmap (mem, size);

              return errno;
            }


          /* Prepare to modify global data.  */
          lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          __nptl_stack_list_add (&pd->list, &GL (dl_stack_used));

          lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);


          /* There might have been a race: another thread might have
             caused the stacks to gain exec permission while this new
             stack was being prepared.  Detect whether this was
             possible and change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = __nptl_change_stack_perm (pd);
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) __munmap (mem, size);

                  return err;
                }
            }


          /* Note that all of the stack and the thread descriptor are
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field, which is always set back to zero once the
             stack is no longer used, and for the 'guardsize' field,
             which is read next.  */
        }

      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
          char *guard = guard_position (mem, size, guardsize, pd,
                                        pagesize_m1);
          if (__mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

              /* Remove the thread from the list.  */
              __nptl_stack_list_del (&pd->list);

              lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we had better not use it
                 anymore.  Possible errors are ignored; there is
                 nothing we could do.  */
              (void) __munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

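          /* The guard sits in the middle of the block; re-enable
             access to the parts of the old, larger guard that the new
             one no longer covers: the region below the new guard and
             the region above its end.  */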
          if (oldguard < guard
              && __mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (__mprotect (guard + guardsize,
                          oldguard + pd->guardsize - guard - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
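          /* With a downward-growing stack the guard is at the bottom;
             re-enable access to the pages between the end of the new,
             smaller guard and the end of the old one.  */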
          if (__mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          char *new_guard = (char *)(((uintptr_t) pd - guardsize)
                                     & ~pagesize_m1);
          char *old_guard = (char *)(((uintptr_t) pd - pd->guardsize)
                                     & ~pagesize_m1);
          /* The guard size difference might be > 0, but once rounded
             to the nearest page the size difference might be zero.  */
          if (new_guard > old_guard
              && __mprotect (old_guard, new_guard - old_guard, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* pthread_getattr_np must be able to report the guard size
         requested in the attribute, regardless of how large the
         actually used guard size is.  */
      pd->reported_guardsize = reported_guardsize;
    }

  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#if __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
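  /* An empty robust list is the head pointing to itself.  At thread
     exit the kernel walks this list, using futex_offset to locate each
     mutex's futex word, and wakes waiters on any robust mutexes the
     dead thread still held.  */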

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

  void *stacktop;

#if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - tls_static_size_for_stack);
#elif TLS_DTV_AT_TP
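  /* The usable stack ends just below the thread descriptor.  */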
  stacktop = (char *) (pd - 1);
#endif

  *stacksize = stacktop - pd->stackblock;
  *stack = pd->stackblock;

  return 0;
}