/* Stack cache management for NPTL.
   Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <nptl-stack.h>
#include <ldsodefs.h>
#include <pthreadP.h>

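/* Upper bound on the combined size of cached, currently unused thread
   stacks, in bytes.  Normally set from the glibc.pthread.stack_cache_size
   tunable.  */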
size_t __nptl_stack_cache_maxsize = 40 * 1024 * 1024;
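
/* When zero, newly allocated thread stacks are advised with
   MADV_NOHUGEPAGE; controlled by the glibc.pthread.stack_hugetlb
   tunable.  */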
int32_t __nptl_stack_hugetlb = 1;

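/* The two list operations below publish the element being moved in
   GL (dl_in_flight_stack) before touching the list and clear it again
   afterwards, with write barriers around the actual update; the low
   bit of the stored pointer distinguishes an insertion from a
   removal.  This lets the stack-list recovery code that runs in the
   child after fork detect an operation another thread had in flight
   and complete or undo it, keeping the stack lists consistent.  */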
void
__nptl_stack_list_del (list_t *elem)
{
  GL (dl_in_flight_stack) = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_del)

void
__nptl_stack_list_add (list_t *elem, list_t *list)
{
  GL (dl_in_flight_stack) = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_add)

void
__nptl_free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
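      /* A stack can only be unmapped once its thread has fully exited
         and the kernel has cleared its TID; entries whose threads are
         still winding down stay in the cache for a later pass.  */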
      if (__nptl_stack_in_use (curr))
        {
          /* Unlink the block.  */
          __nptl_stack_list_del (entry);

          /* Account for the freed memory.  */
          GL (dl_stack_cache_actsize) -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does,
             something is really wrong.  */
          if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (GL (dl_stack_cache_actsize) <= limit)
            break;
        }
    }
}

/* Add a stack which is not used anymore to the stack cache, from
   where it can be reused for a later thread.  Must be called with
   the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  __nptl_stack_list_add (&stack->list, &GL (dl_stack_cache));

  GL (dl_stack_cache_actsize) += stack->stackblock_size;
  if (__glibc_unlikely (GL (dl_stack_cache_actsize)
                        > __nptl_stack_cache_maxsize))
    __nptl_free_stacks (__nptl_stack_cache_maxsize);
}

void
__nptl_deallocate_stack (struct pthread *pd)
{
  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  __nptl_stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel: the thread is created with CLONE_CHILD_CLEARTID, so
     the kernel clears the field once the thread has exited.  If no
     thread has been created yet this field is still zero.  */
  if (__glibc_likely (! pd->user_stack))
    queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
}
libc_hidden_def (__nptl_deallocate_stack)

/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol, or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return (GLRO(dl_pagesize) + __nptl_tls_static_size_for_stack ()
          + PTHREAD_STACK_MIN);
}
libc_hidden_def (__pthread_get_minstack)
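
/* Illustrative sketch of a hypothetical caller (not part of glibc):
   a runtime that wants its threads to receive the full stack size the
   application asked for can look this symbol up at run time and pad
   the attribute by the reported overhead beyond PTHREAD_STACK_MIN:

     size_t (*get_minstack) (const pthread_attr_t *)
       = (size_t (*) (const pthread_attr_t *))
         dlsym (RTLD_DEFAULT, "__pthread_get_minstack");
     size_t overhead = (get_minstack != NULL
                        ? get_minstack (&attr) - PTHREAD_STACK_MIN : 0);
     pthread_attr_setstacksize (&attr, requested + overhead);

   Here "attr" and "requested" stand for the caller's attribute object
   and desired usable stack size (<dlfcn.h> provides dlsym and
   RTLD_DEFAULT); whether to subtract PTHREAD_STACK_MIN is caller
   policy, not something this function mandates.  */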