| 1 | /* Stack cache management for NPTL. |
| 2 | Copyright (C) 2002-2023 Free Software Foundation, Inc. |
| 3 | This file is part of the GNU C Library. |
| 4 | |
| 5 | The GNU C Library is free software; you can redistribute it and/or |
| 6 | modify it under the terms of the GNU Lesser General Public |
| 7 | License as published by the Free Software Foundation; either |
| 8 | version 2.1 of the License, or (at your option) any later version. |
| 9 | |
| 10 | The GNU C Library is distributed in the hope that it will be useful, |
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | Lesser General Public License for more details. |
| 14 | |
| 15 | You should have received a copy of the GNU Lesser General Public |
| 16 | License along with the GNU C Library; if not, see |
| 17 | <https://www.gnu.org/licenses/>. */ |
| 18 | |
| 19 | #include <nptl-stack.h> |
| 20 | #include <ldsodefs.h> |
| 21 | #include <pthreadP.h> |
| 22 | |
/* Byte limit on the total amount of memory kept in the thread-stack
   cache (compared against GL (dl_stack_cache_actsize) below); 40 MiB
   by default.  */
size_t __nptl_stack_cache_maxsize = 40 * 1024 * 1024;
| 24 | |
void
__nptl_stack_list_del (list_t *elem)
{
  /* Unlink ELEM from the stack list it is on.  The element is first
     published in GL (dl_in_flight_stack) so the modification is
     observable as "in flight" while the list pointers are in an
     intermediate state.  NOTE(review): presumably this lets the
     post-fork reclamation code repair a list a thread in the parent
     was modifying at fork time — confirm against the consumer of
     dl_in_flight_stack.  A deletion is marked with the low bit clear
     (contrast __nptl_stack_list_add, which sets bit 0).  */
  GL (dl_in_flight_stack) = (uintptr_t) elem;

  /* Ensure the in-flight marker is globally visible before the list
     pointers are modified.  */
  atomic_write_barrier ();

  list_del (elem);

  /* Ensure the unlink is complete before the marker is cleared.  */
  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_del)
| 39 | |
void
__nptl_stack_list_add (list_t *elem, list_t *list)
{
  /* Insert ELEM at the head of LIST.  As in __nptl_stack_list_del,
     the element is published in GL (dl_in_flight_stack) around the
     actual pointer updates; the low bit is set to distinguish an
     in-flight addition from an in-flight deletion.  NOTE(review):
     this relies on list_t elements being at least 2-byte aligned so
     bit 0 is free for tagging — confirm.  */
  GL (dl_in_flight_stack) = (uintptr_t) elem | 1;

  /* Ensure the in-flight marker is globally visible before the list
     pointers are modified.  */
  atomic_write_barrier ();

  list_add (elem, list);

  /* Ensure the insertion is complete before the marker is cleared.  */
  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_add)
| 54 | |
/* Shrink the stack cache: unmap cached stacks, walking from the tail
   of GL (dl_stack_cache), until the accounted size drops to LIMIT
   bytes or the list is exhausted.  NOTE(review): callers are assumed
   to hold GL (dl_stack_cache_lock) — the only caller visible here
   (queue_stack) is documented as running with the cache lock held;
   confirm for any other call sites.  */
void
__nptl_free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  The "_safe" variant keeps PREV
     so iteration survives unlinking ENTRY.  */
  list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      /* NOTE(review): despite its name, __nptl_stack_in_use appears
	 to select stacks whose owning thread has exited (tid cleared
	 by the kernel) and which are therefore safe to unmap —
	 confirm against its definition in nptl-stack.h.  */
      if (__nptl_stack_in_use (curr))
	{
	  /* Unlink the block.  */
	  __nptl_stack_list_del (entry);

	  /* Account for the freed memory.  */
	  GL (dl_stack_cache_actsize) -= curr->stackblock_size;

	  /* Free the memory associated with the ELF TLS.  */
	  _dl_deallocate_tls (TLS_TPADJ (curr), false);

	  /* Remove this block.  This should never fail.  If it does
	     something is really wrong.  */
	  if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
	    abort ();

	  /* Maybe we have freed enough.  */
	  if (GL (dl_stack_cache_actsize) <= limit)
	    break;
	}
    }
}
| 91 | |
/* Add the stack of the (no longer running) thread descriptor STACK to
   the reuse cache.  Must be called with GL (dl_stack_cache_lock)
   held.  If the addition pushes the accounted cache size past
   __nptl_stack_cache_maxsize, older entries are unmapped to bring it
   back under the limit.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  __nptl_stack_list_add (&stack->list, &GL (dl_stack_cache));

  GL (dl_stack_cache_actsize) += stack->stackblock_size;
  if (__glibc_unlikely (GL (dl_stack_cache_actsize)
			> __nptl_stack_cache_maxsize))
    __nptl_free_stacks (__nptl_stack_cache_maxsize);
}
| 108 | |
| 109 | void |
| 110 | __nptl_deallocate_stack (struct pthread *pd) |
| 111 | { |
| 112 | lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE); |
| 113 | |
| 114 | /* Remove the thread from the list of threads with user defined |
| 115 | stacks. */ |
| 116 | __nptl_stack_list_del (&pd->list); |
| 117 | |
| 118 | /* Not much to do. Just free the mmap()ed memory. Note that we do |
| 119 | not reset the 'used' flag in the 'tid' field. This is done by |
| 120 | the kernel. If no thread has been created yet this field is |
| 121 | still zero. */ |
| 122 | if (__glibc_likely (! pd->user_stack)) |
| 123 | (void) queue_stack (pd); |
| 124 | else |
| 125 | /* Free the memory associated with the ELF TLS. */ |
| 126 | _dl_deallocate_tls (TLS_TPADJ (pd), false); |
| 127 | |
| 128 | lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE); |
| 129 | } |
| 130 | libc_hidden_def (__nptl_deallocate_stack) |
| 131 | |
| 132 | /* This function is internal (it has a GLIBC_PRIVATE) version, but it |
| 133 | is widely used (either via weak symbol, or dlsym) to obtain the |
| 134 | __static_tls_size value. This value is then used to adjust the |
| 135 | value of the stack size attribute, so that applications receive the |
| 136 | full requested stack size, not diminished by the TCB and static TLS |
| 137 | allocation on the stack. Once the TCB is separately allocated, |
| 138 | this function should be removed or renamed (if it is still |
| 139 | necessary at that point). */ |
| 140 | size_t |
| 141 | __pthread_get_minstack (const pthread_attr_t *attr) |
| 142 | { |
| 143 | return (GLRO(dl_pagesize) + __nptl_tls_static_size_for_stack () |
| 144 | + PTHREAD_STACK_MIN); |
| 145 | } |
| 146 | libc_hidden_def (__pthread_get_minstack) |
| 147 | |