1 | /* Malloc implementation for multiple threads without lock contention. |
2 | Copyright (C) 2001-2022 Free Software Foundation, Inc. |
3 | This file is part of the GNU C Library. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public License as |
7 | published by the Free Software Foundation; either version 2.1 of the |
8 | License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; see the file COPYING.LIB. If |
17 | not, see <https://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <stdbool.h> |
20 | |
21 | #if HAVE_TUNABLES |
22 | # define TUNABLE_NAMESPACE malloc |
23 | #endif |
24 | #include <elf/dl-tunables.h> |
25 | |
26 | /* Compile-time constants. */ |
27 | |
28 | #define HEAP_MIN_SIZE (32 * 1024) |
29 | #ifndef HEAP_MAX_SIZE |
30 | # ifdef DEFAULT_MMAP_THRESHOLD_MAX |
31 | # define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX) |
32 | # else |
33 | # define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */ |
34 | # endif |
35 | #endif |
36 | |
37 | /* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps |
38 | that are dynamically created for multi-threaded programs. The |
39 | maximum size must be a power of two, for fast determination of |
40 | which heap belongs to a chunk. It should be much larger than the |
41 | mmap threshold, so that requests with a size just below that |
42 | threshold can be fulfilled without creating too many heaps. */ |
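
/* Illustrative sketch (not part of the implementation): with
   HEAP_MAX_SIZE == 1 MiB (0x100000), the heap_info of any chunk living
   in a non-main arena can be recovered by clearing the low 20 bits of
   the chunk address:

     heap_info *h = (heap_info *) ((uintptr_t) ptr
                                   & ~((uintptr_t) HEAP_MAX_SIZE - 1));

   so a chunk at 0x7f0000123456 maps to the heap at 0x7f0000100000.
   The real computation is done by heap_for_ptr below, which uses the
   runtime heap_max_size ().  */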
43 | |
/* When huge pages are used to create new arenas, the minimum and maximum
   sizes are based on the runtime-defined huge page size.  */
46 | |
47 | static inline size_t |
48 | heap_min_size (void) |
49 | { |
50 | #if HAVE_TUNABLES |
51 | return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize; |
52 | #else |
53 | return HEAP_MIN_SIZE; |
54 | #endif |
55 | } |
56 | |
57 | static inline size_t |
58 | heap_max_size (void) |
59 | { |
60 | #if HAVE_TUNABLES |
61 | return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4; |
62 | #else |
63 | return HEAP_MAX_SIZE; |
64 | #endif |
65 | } |
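
/* For example (a sketch, assuming the common 2 MiB huge page size on
   x86-64): with mp_.hp_pagesize == 2 MiB, heap_min_size () returns
   2 MiB and heap_max_size () returns 8 MiB.  With huge pages unused
   (mp_.hp_pagesize == 0), they fall back to HEAP_MIN_SIZE (32 KiB)
   and HEAP_MAX_SIZE.  */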
66 | |
67 | /***************************************************************************/ |
68 | |
69 | #define top(ar_ptr) ((ar_ptr)->top) |
70 | |
71 | /* A heap is a single contiguous memory region holding (coalesceable) |
72 | malloc_chunks. It is allocated with mmap() and always starts at an |
73 | address aligned to HEAP_MAX_SIZE. */ |
74 | |
75 | typedef struct _heap_info |
76 | { |
77 | mstate ar_ptr; /* Arena for this heap. */ |
78 | struct _heap_info *prev; /* Previous heap. */ |
79 | size_t size; /* Current size in bytes. */ |
80 | size_t mprotect_size; /* Size in bytes that has been mprotected |
81 | PROT_READ|PROT_WRITE. */ |
82 | size_t pagesize; /* Page size used when allocating the arena. */ |
83 | /* Make sure the following data is properly aligned, particularly |
84 | that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of |
85 | MALLOC_ALIGNMENT. */ |
86 | char pad[-3 * SIZE_SZ & MALLOC_ALIGN_MASK]; |
87 | } heap_info; |
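
/* Worked example for the pad computation above, assuming a typical
   64-bit configuration where SIZE_SZ == 8, MALLOC_ALIGNMENT == 16,
   MALLOC_ALIGN_MASK == 15 and pointers are the same size as size_t:
   the five members before pad occupy 5 * 8 == 40 bytes, and
   (-3 * 8) & 15 == 8, so sizeof (heap_info) == 48 and
   48 + 2 * SIZE_SZ == 64, a multiple of MALLOC_ALIGNMENT.  */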
88 | |
89 | /* Get a compile-time error if the heap_info padding is not correct |
90 | to make alignment work as expected in sYSMALLOc. */ |
91 | extern int sanity_check_heap_info_alignment[(sizeof (heap_info) |
92 | + 2 * SIZE_SZ) % MALLOC_ALIGNMENT |
93 | ? -1 : 1]; |
94 | |
95 | /* Thread specific data. */ |
96 | |
97 | static __thread mstate thread_arena attribute_tls_model_ie; |
98 | |
99 | /* Arena free list. free_list_lock synchronizes access to the |
100 | free_list variable below, and the next_free and attached_threads |
101 | members of struct malloc_state objects. No other locks must be |
102 | acquired after free_list_lock has been acquired. */ |
103 | |
104 | __libc_lock_define_initialized (static, free_list_lock); |
105 | #if IS_IN (libc) |
106 | static size_t narenas = 1; |
107 | #endif |
108 | static mstate free_list; |
109 | |
110 | /* list_lock prevents concurrent writes to the next member of struct |
111 | malloc_state objects. |
112 | |
113 | Read access to the next member is supposed to synchronize with the |
114 | atomic_write_barrier and the write to the next member in |
115 | _int_new_arena. This suffers from data races; see the FIXME |
116 | comments in _int_new_arena and reused_arena. |
117 | |
118 | list_lock also prevents concurrent forks. At the time list_lock is |
119 | acquired, no arena lock must have been acquired, but it is |
120 | permitted to acquire arena locks subsequently, while list_lock is |
121 | acquired. */ |
122 | __libc_lock_define_initialized (static, list_lock); |
123 | |
124 | /* Already initialized? */ |
125 | static bool __malloc_initialized = false; |
126 | |
127 | /**************************************************************************/ |
128 | |
129 | |
130 | /* arena_get() acquires an arena and locks the corresponding mutex. |
131 | First, try the one last locked successfully by this thread. (This |
132 | is the common case and handled with a macro for speed.) Then, loop |
133 | once over the circularly linked list of arenas. If no arena is |
134 | readily available, create a new one. In this latter case, `size' |
135 | is just a hint as to how much memory will be required immediately |
136 | in the new arena. */ |
137 | |
138 | #define arena_get(ptr, size) do { \ |
139 | ptr = thread_arena; \ |
140 | arena_lock (ptr, size); \ |
141 | } while (0) |
142 | |
143 | #define arena_lock(ptr, size) do { \ |
144 | if (ptr) \ |
145 | __libc_lock_lock (ptr->mutex); \ |
146 | else \ |
147 | ptr = arena_get2 ((size), NULL); \ |
148 | } while (0) |
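
/* A sketch of the typical caller pattern (simplified for illustration
   from the allocation entry points in malloc.c):

     mstate ar_ptr;
     void *victim;

     arena_get (ar_ptr, bytes);          // lock an arena, creating one
                                         // if necessary
     victim = _int_malloc (ar_ptr, bytes);
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);

   On failure, callers typically retry once via arena_get_retry
   (defined below).  */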
149 | |
150 | /* find the heap and corresponding arena for a given ptr */ |
151 | |
152 | static inline heap_info * |
153 | heap_for_ptr (void *ptr) |
154 | { |
155 | size_t max_size = heap_max_size (); |
156 | return PTR_ALIGN_DOWN (ptr, max_size); |
157 | } |
158 | |
159 | static inline struct malloc_state * |
160 | arena_for_chunk (mchunkptr ptr) |
161 | { |
162 | return chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr; |
163 | } |
164 | |
165 | |
166 | /**************************************************************************/ |
167 | |
168 | /* atfork support. */ |
169 | |
170 | /* The following three functions are called around fork from a |
171 | multi-threaded process. We do not use the general fork handler |
172 | mechanism to make sure that our handlers are the last ones being |
173 | called, so that other fork handlers can use the malloc |
174 | subsystem. */ |
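
/* A sketch of the intended call sequence around fork (the actual
   wiring lives in the fork implementation, not in this file):

     __malloc_fork_lock_parent ();       // lock list_lock and all arenas
     pid = fork ();
     if (pid != 0)
       __malloc_fork_unlock_parent ();   // parent: release the same locks
     else
       __malloc_fork_unlock_child ();    // child: reinitialize all locks

   Taking every lock before fork and reinitializing them in the child
   ensures the child never inherits a lock held by a thread that no
   longer exists after fork.  */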
175 | |
176 | void |
177 | __malloc_fork_lock_parent (void) |
178 | { |
179 | if (!__malloc_initialized) |
180 | return; |
181 | |
182 | /* We do not acquire free_list_lock here because we completely |
183 | reconstruct free_list in __malloc_fork_unlock_child. */ |
184 | |
185 | __libc_lock_lock (list_lock); |
186 | |
187 | for (mstate ar_ptr = &main_arena;; ) |
188 | { |
189 | __libc_lock_lock (ar_ptr->mutex); |
190 | ar_ptr = ar_ptr->next; |
191 | if (ar_ptr == &main_arena) |
192 | break; |
193 | } |
194 | } |
195 | |
196 | void |
197 | __malloc_fork_unlock_parent (void) |
198 | { |
199 | if (!__malloc_initialized) |
200 | return; |
201 | |
202 | for (mstate ar_ptr = &main_arena;; ) |
203 | { |
204 | __libc_lock_unlock (ar_ptr->mutex); |
205 | ar_ptr = ar_ptr->next; |
206 | if (ar_ptr == &main_arena) |
207 | break; |
208 | } |
209 | __libc_lock_unlock (list_lock); |
210 | } |
211 | |
212 | void |
213 | __malloc_fork_unlock_child (void) |
214 | { |
215 | if (!__malloc_initialized) |
216 | return; |
217 | |
218 | /* Push all arenas to the free list, except thread_arena, which is |
219 | attached to the current thread. */ |
220 | __libc_lock_init (free_list_lock); |
221 | if (thread_arena != NULL) |
222 | thread_arena->attached_threads = 1; |
223 | free_list = NULL; |
224 | for (mstate ar_ptr = &main_arena;; ) |
225 | { |
226 | __libc_lock_init (ar_ptr->mutex); |
227 | if (ar_ptr != thread_arena) |
228 | { |
229 | /* This arena is no longer attached to any thread. */ |
230 | ar_ptr->attached_threads = 0; |
231 | ar_ptr->next_free = free_list; |
232 | free_list = ar_ptr; |
233 | } |
234 | ar_ptr = ar_ptr->next; |
235 | if (ar_ptr == &main_arena) |
236 | break; |
237 | } |
238 | |
239 | __libc_lock_init (list_lock); |
240 | } |
241 | |
242 | #if HAVE_TUNABLES |
243 | # define TUNABLE_CALLBACK_FNDECL(__name, __type) \ |
244 | static inline int do_ ## __name (__type value); \ |
245 | static void \ |
246 | TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \ |
247 | { \ |
248 | __type value = (__type) (valp)->numval; \ |
249 | do_ ## __name (value); \ |
250 | } |
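
/* For reference, a sketch of what one expansion of the macro above
   produces (shown for set_mmap_threshold; the exact callback name
   depends on how TUNABLE_CALLBACK mangles it):

     static inline int do_set_mmap_threshold (size_t value);
     static void
     _dl_tunable_set_mmap_threshold (tunable_val_t *valp)
     {
       size_t value = (size_t) (valp)->numval;
       do_set_mmap_threshold (value);
     }

   The do_* setters themselves are defined in malloc.c.  */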
251 | |
252 | TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t) |
253 | TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t) |
254 | TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t) |
255 | TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t) |
256 | TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t) |
257 | TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t) |
258 | TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t) |
259 | #if USE_TCACHE |
260 | TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t) |
261 | TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t) |
262 | TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t) |
263 | #endif |
264 | TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t) |
265 | TUNABLE_CALLBACK_FNDECL (set_hugetlb, size_t) |
266 | #else |
267 | /* Initialization routine. */ |
268 | #include <string.h> |
269 | extern char **_environ; |
270 | |
271 | static char * |
272 | next_env_entry (char ***position) |
273 | { |
274 | char **current = *position; |
275 | char *result = NULL; |
276 | |
277 | while (*current != NULL) |
278 | { |
279 | if (__builtin_expect ((*current)[0] == 'M', 0) |
280 | && (*current)[1] == 'A' |
281 | && (*current)[2] == 'L' |
282 | && (*current)[3] == 'L' |
283 | && (*current)[4] == 'O' |
284 | && (*current)[5] == 'C' |
285 | && (*current)[6] == '_') |
286 | { |
287 | result = &(*current)[7]; |
288 | |
289 | /* Save current position for next visit. */ |
290 | *position = ++current; |
291 | |
292 | break; |
293 | } |
294 | |
295 | ++current; |
296 | } |
297 | |
298 | return result; |
299 | } |
300 | #endif |
301 | |
302 | |
303 | #ifdef SHARED |
304 | extern struct dl_open_hook *_dl_open_hook; |
305 | libc_hidden_proto (_dl_open_hook); |
306 | #endif |
307 | |
308 | #if USE_TCACHE |
309 | static void tcache_key_initialize (void); |
310 | #endif |
311 | |
312 | static void |
313 | ptmalloc_init (void) |
314 | { |
315 | if (__malloc_initialized) |
316 | return; |
317 | |
318 | __malloc_initialized = true; |
319 | |
320 | #if USE_TCACHE |
321 | tcache_key_initialize (); |
322 | #endif |
323 | |
324 | #ifdef USE_MTAG |
325 | if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0) |
326 | { |
327 | /* If the tunable says that we should be using tagged memory |
328 | and that morecore does not support tagged regions, then |
329 | disable it. */ |
330 | if (__MTAG_SBRK_UNTAGGED) |
331 | __always_fail_morecore = true; |
332 | |
333 | mtag_enabled = true; |
334 | mtag_mmap_flags = __MTAG_MMAP_FLAGS; |
335 | } |
336 | #endif |
337 | |
338 | #if defined SHARED && IS_IN (libc) |
  /* In case this libc copy is in a non-default namespace, never use
     brk.  Likewise if dlopened from a statically linked program.  The
     generic sbrk implementation also enforces this, but it is not
     used on Hurd.  */
343 | if (!__libc_initial) |
344 | __always_fail_morecore = true; |
345 | #endif |
346 | |
347 | thread_arena = &main_arena; |
348 | |
349 | malloc_init_state (&main_arena); |
350 | |
351 | #if HAVE_TUNABLES |
352 | TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad)); |
353 | TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte)); |
354 | TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold)); |
355 | TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold)); |
356 | TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max)); |
357 | TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max)); |
358 | TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test)); |
359 | # if USE_TCACHE |
360 | TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max)); |
361 | TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count)); |
362 | TUNABLE_GET (tcache_unsorted_limit, size_t, |
363 | TUNABLE_CALLBACK (set_tcache_unsorted_limit)); |
364 | # endif |
365 | TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast)); |
366 | TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb)); |
367 | if (mp_.hp_pagesize > 0) |
368 | /* Force mmap for main arena instead of sbrk, so hugepages are explicitly |
369 | used. */ |
370 | __always_fail_morecore = true; |
371 | #else |
372 | if (__glibc_likely (_environ != NULL)) |
373 | { |
374 | char **runp = _environ; |
375 | char *envline; |
376 | |
377 | while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, |
378 | 0)) |
379 | { |
          size_t len = strcspn (envline, "=");
381 | |
382 | if (envline[len] != '=') |
383 | /* This is a "MALLOC_" variable at the end of the string |
384 | without a '=' character. Ignore it since otherwise we |
385 | will access invalid memory below. */ |
386 | continue; |
387 | |
388 | switch (len) |
389 | { |
390 | case 8: |
391 | if (!__builtin_expect (__libc_enable_secure, 0)) |
392 | { |
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
397 | } |
398 | break; |
399 | case 9: |
400 | if (!__builtin_expect (__libc_enable_secure, 0)) |
401 | { |
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
406 | } |
407 | break; |
408 | case 10: |
409 | if (!__builtin_expect (__libc_enable_secure, 0)) |
410 | { |
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
413 | } |
414 | break; |
415 | case 15: |
416 | if (!__builtin_expect (__libc_enable_secure, 0)) |
417 | { |
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
422 | } |
423 | break; |
424 | default: |
425 | break; |
426 | } |
427 | } |
428 | } |
429 | #endif |
430 | } |
431 | |
432 | /* Managing heaps and arenas (for concurrent threads) */ |
433 | |
434 | #if MALLOC_DEBUG > 1 |
435 | |
436 | /* Print the complete contents of a single heap to stderr. */ |
437 | |
438 | static void |
439 | dump_heap (heap_info *heap) |
440 | { |
441 | char *ptr; |
442 | mchunkptr p; |
443 | |
444 | fprintf (stderr, "Heap %p, size %10lx:\n" , heap, (long) heap->size); |
445 | ptr = (heap->ar_ptr != (mstate) (heap + 1)) ? |
446 | (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state); |
447 | p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) & |
448 | ~MALLOC_ALIGN_MASK); |
449 | for (;; ) |
450 | { |
451 | fprintf (stderr, "chunk %p size %10lx" , p, (long) chunksize_nomask(p)); |
452 | if (p == top (heap->ar_ptr)) |
453 | { |
454 | fprintf (stderr, " (top)\n" ); |
455 | break; |
456 | } |
457 | else if (chunksize_nomask(p) == (0 | PREV_INUSE)) |
458 | { |
459 | fprintf (stderr, " (fence)\n" ); |
460 | break; |
461 | } |
462 | fprintf (stderr, "\n" ); |
463 | p = next_chunk (p); |
464 | } |
465 | } |
466 | #endif /* MALLOC_DEBUG > 1 */ |
467 | |
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses rather than increasing ones, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us: in the worst
   case we'll call mmap (addr, HEAP_MAX_SIZE, ...) with the same addr in
   multiple threads, but only one will succeed.  */
476 | static char *aligned_heap_area; |
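
/* Worked example of the alignment dance in alloc_new_heap below
   (illustrative addresses, max_size == 1 MiB == 0x100000):

     p1 = mmap (0, 0x200000, ...)    ->  0x7f1234567000   (not aligned)
     p2 = (p1 + 0xfffff) & ~0xfffff  ->  0x7f1234600000
     munmap (p1, 0x99000);               trim the unaligned front
     munmap (p2 + 0x100000, 0x67000);    trim the tail

   leaving exactly [p2, p2 + max_size) aligned to max_size.  If p1 is
   already aligned, the second half of the double-sized mapping is
   unmapped but remembered in aligned_heap_area for the next call.  */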
477 | |
478 | /* Create a new heap. size is automatically rounded up to a multiple |
479 | of the page size. */ |
480 | |
481 | static heap_info * |
482 | alloc_new_heap (size_t size, size_t top_pad, size_t pagesize, |
483 | int mmap_flags) |
484 | { |
485 | char *p1, *p2; |
486 | unsigned long ul; |
487 | heap_info *h; |
488 | size_t min_size = heap_min_size (); |
489 | size_t max_size = heap_max_size (); |
490 | |
491 | if (size + top_pad < min_size) |
492 | size = min_size; |
493 | else if (size + top_pad <= max_size) |
494 | size += top_pad; |
495 | else if (size > max_size) |
496 | return 0; |
497 | else |
498 | size = max_size; |
499 | size = ALIGN_UP (size, pagesize); |
500 | |
501 | /* A memory region aligned to a multiple of max_size is needed. |
502 | No swap space needs to be reserved for the following large |
503 | mapping (on Linux, this is the case for all non-writable mappings |
504 | anyway). */ |
505 | p2 = MAP_FAILED; |
506 | if (aligned_heap_area) |
507 | { |
508 | p2 = (char *) MMAP (aligned_heap_area, max_size, PROT_NONE, mmap_flags); |
509 | aligned_heap_area = NULL; |
510 | if (p2 != MAP_FAILED && ((unsigned long) p2 & (max_size - 1))) |
511 | { |
512 | __munmap (p2, max_size); |
513 | p2 = MAP_FAILED; |
514 | } |
515 | } |
516 | if (p2 == MAP_FAILED) |
517 | { |
518 | p1 = (char *) MMAP (0, max_size << 1, PROT_NONE, mmap_flags); |
519 | if (p1 != MAP_FAILED) |
520 | { |
521 | p2 = (char *) (((unsigned long) p1 + (max_size - 1)) |
522 | & ~(max_size - 1)); |
523 | ul = p2 - p1; |
524 | if (ul) |
525 | __munmap (p1, ul); |
526 | else |
527 | aligned_heap_area = p2 + max_size; |
528 | __munmap (p2 + max_size, max_size - ul); |
529 | } |
530 | else |
531 | { |
532 | /* Try to take the chance that an allocation of only max_size |
533 | is already aligned. */ |
534 | p2 = (char *) MMAP (0, max_size, PROT_NONE, mmap_flags); |
535 | if (p2 == MAP_FAILED) |
536 | return 0; |
537 | |
538 | if ((unsigned long) p2 & (max_size - 1)) |
539 | { |
540 | __munmap (p2, max_size); |
541 | return 0; |
542 | } |
543 | } |
544 | } |
545 | if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0) |
546 | { |
547 | __munmap (p2, max_size); |
548 | return 0; |
549 | } |
550 | |
551 | madvise_thp (p2, size); |
552 | |
553 | h = (heap_info *) p2; |
554 | h->size = size; |
555 | h->mprotect_size = size; |
556 | h->pagesize = pagesize; |
557 | LIBC_PROBE (memory_heap_new, 2, h, h->size); |
558 | return h; |
559 | } |
560 | |
561 | static heap_info * |
562 | new_heap (size_t size, size_t top_pad) |
563 | { |
564 | #if HAVE_TUNABLES |
565 | if (__glibc_unlikely (mp_.hp_pagesize != 0)) |
566 | { |
      /* MAP_NORESERVE is not used for huge pages because the kernel may
         not reserve the mmap region, and a subsequent access could then
         trigger a SIGBUS if there are no free pages left in the pool.  */
570 | heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize, |
571 | mp_.hp_flags); |
572 | if (h != NULL) |
573 | return h; |
574 | } |
575 | #endif |
576 | return alloc_new_heap (size, top_pad, GLRO (dl_pagesize), MAP_NORESERVE); |
577 | } |
578 | |
579 | /* Grow a heap. size is automatically rounded up to a |
580 | multiple of the page size. */ |
581 | |
582 | static int |
583 | grow_heap (heap_info *h, long diff) |
584 | { |
585 | size_t pagesize = h->pagesize; |
586 | size_t max_size = heap_max_size (); |
587 | long new_size; |
588 | |
589 | diff = ALIGN_UP (diff, pagesize); |
590 | new_size = (long) h->size + diff; |
591 | if ((unsigned long) new_size > (unsigned long) max_size) |
592 | return -1; |
593 | |
594 | if ((unsigned long) new_size > h->mprotect_size) |
595 | { |
596 | if (__mprotect ((char *) h + h->mprotect_size, |
597 | (unsigned long) new_size - h->mprotect_size, |
598 | mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0) |
599 | return -2; |
600 | |
601 | h->mprotect_size = new_size; |
602 | } |
603 | |
604 | h->size = new_size; |
605 | LIBC_PROBE (memory_heap_more, 2, h, h->size); |
606 | return 0; |
607 | } |
608 | |
609 | /* Shrink a heap. */ |
610 | |
611 | static int |
612 | shrink_heap (heap_info *h, long diff) |
613 | { |
614 | long new_size; |
615 | |
616 | new_size = (long) h->size - diff; |
617 | if (new_size < (long) sizeof (*h)) |
618 | return -1; |
619 | |
620 | /* Try to re-map the extra heap space freshly to save memory, and make it |
621 | inaccessible. See malloc-sysdep.h to know when this is true. */ |
622 | if (__glibc_unlikely (check_may_shrink_heap ())) |
623 | { |
624 | if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE, |
625 | MAP_FIXED) == (char *) MAP_FAILED) |
626 | return -2; |
627 | |
628 | h->mprotect_size = new_size; |
629 | } |
630 | else |
631 | __madvise ((char *) h + new_size, diff, MADV_DONTNEED); |
632 | /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/ |
633 | |
634 | h->size = new_size; |
635 | LIBC_PROBE (memory_heap_less, 2, h, h->size); |
636 | return 0; |
637 | } |
638 | |
639 | /* Delete a heap. */ |
640 | |
641 | static int |
642 | heap_trim (heap_info *heap, size_t pad) |
643 | { |
644 | mstate ar_ptr = heap->ar_ptr; |
645 | mchunkptr top_chunk = top (ar_ptr), p; |
646 | heap_info *prev_heap; |
  long new_size, top_size, top_area, extra, prev_size, misalign;
648 | size_t max_size = heap_max_size (); |
649 | |
650 | /* Can this heap go away completely? */ |
651 | while (top_chunk == chunk_at_offset (heap, sizeof (*heap))) |
652 | { |
653 | prev_heap = heap->prev; |
654 | prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ); |
655 | p = chunk_at_offset (prev_heap, prev_size); |
656 | /* fencepost must be properly aligned. */ |
657 | misalign = ((long) p) & MALLOC_ALIGN_MASK; |
658 | p = chunk_at_offset (prev_heap, prev_size - misalign); |
659 | assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */ |
660 | p = prev_chunk (p); |
661 | new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign; |
662 | assert (new_size > 0 && new_size < (long) (2 * MINSIZE)); |
663 | if (!prev_inuse (p)) |
664 | new_size += prev_size (p); |
665 | assert (new_size > 0 && new_size < max_size); |
666 | if (new_size + (max_size - prev_heap->size) < pad + MINSIZE |
667 | + heap->pagesize) |
668 | break; |
669 | ar_ptr->system_mem -= heap->size; |
670 | LIBC_PROBE (memory_heap_free, 2, heap, heap->size); |
671 | if ((char *) heap + max_size == aligned_heap_area) |
672 | aligned_heap_area = NULL; |
673 | __munmap (heap, max_size); |
674 | heap = prev_heap; |
675 | if (!prev_inuse (p)) /* consolidate backward */ |
676 | { |
677 | p = prev_chunk (p); |
678 | unlink_chunk (ar_ptr, p); |
679 | } |
680 | assert (((unsigned long) ((char *) p + new_size) & (heap->pagesize - 1)) |
681 | == 0); |
682 | assert (((char *) p + new_size) == ((char *) heap + heap->size)); |
683 | top (ar_ptr) = top_chunk = p; |
684 | set_head (top_chunk, new_size | PREV_INUSE); |
685 | /*check_chunk(ar_ptr, top_chunk);*/ |
686 | } |
687 | |
  /* Use logic similar to that applied to the main arena by systrim and
     _int_free: preserve the top pad and round the amount released down
     to the nearest page.  */
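  /* Numeric sketch (illustrative, assuming MINSIZE == 32 on a 64-bit
     build): with a 4 KiB pagesize, top_size == 300 KiB, pad == 128 KiB
     and mp_.trim_threshold == 128 KiB, top_area == 307167 and
     extra == ALIGN_DOWN (307167 - 131072, 4096) == 168 KiB, so the
     heap shrinks by 168 KiB while the pad (plus sub-page slack)
     stays mapped.  */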
691 | top_size = chunksize (top_chunk); |
692 | if ((unsigned long)(top_size) < |
693 | (unsigned long)(mp_.trim_threshold)) |
694 | return 0; |
695 | |
696 | top_area = top_size - MINSIZE - 1; |
697 | if (top_area < 0 || (size_t) top_area <= pad) |
698 | return 0; |
699 | |
700 | /* Release in pagesize units and round down to the nearest page. */ |
701 | extra = ALIGN_DOWN(top_area - pad, heap->pagesize); |
702 | if (extra == 0) |
703 | return 0; |
704 | |
705 | /* Try to shrink. */ |
706 | if (shrink_heap (heap, extra) != 0) |
707 | return 0; |
708 | |
709 | ar_ptr->system_mem -= extra; |
710 | |
711 | /* Success. Adjust top accordingly. */ |
712 | set_head (top_chunk, (top_size - extra) | PREV_INUSE); |
713 | /*check_chunk(ar_ptr, top_chunk);*/ |
714 | return 1; |
715 | } |
716 | |
717 | /* Create a new arena with initial size "size". */ |
718 | |
719 | #if IS_IN (libc) |
720 | /* If REPLACED_ARENA is not NULL, detach it from this thread. Must be |
721 | called while free_list_lock is held. */ |
722 | static void |
723 | detach_arena (mstate replaced_arena) |
724 | { |
725 | if (replaced_arena != NULL) |
726 | { |
727 | assert (replaced_arena->attached_threads > 0); |
728 | /* The current implementation only detaches from main_arena in |
729 | case of allocation failure. This means that it is likely not |
730 | beneficial to put the arena on free_list even if the |
731 | reference count reaches zero. */ |
732 | --replaced_arena->attached_threads; |
733 | } |
734 | } |
735 | |
736 | static mstate |
737 | _int_new_arena (size_t size) |
738 | { |
739 | mstate a; |
740 | heap_info *h; |
741 | char *ptr; |
742 | unsigned long misalign; |
743 | |
744 | h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT), |
745 | mp_.top_pad); |
746 | if (!h) |
747 | { |
748 | /* Maybe size is too large to fit in a single heap. So, just try |
749 | to create a minimally-sized arena and let _int_malloc() attempt |
750 | to deal with the large request via mmap_chunk(). */ |
751 | h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad); |
752 | if (!h) |
753 | return 0; |
754 | } |
755 | a = h->ar_ptr = (mstate) (h + 1); |
756 | malloc_init_state (a); |
757 | a->attached_threads = 1; |
758 | /*a->next = NULL;*/ |
759 | a->system_mem = a->max_system_mem = h->size; |
760 | |
761 | /* Set up the top chunk, with proper alignment. */ |
762 | ptr = (char *) (a + 1); |
763 | misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK; |
764 | if (misalign > 0) |
765 | ptr += MALLOC_ALIGNMENT - misalign; |
766 | top (a) = (mchunkptr) ptr; |
767 | set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE); |
768 | |
769 | LIBC_PROBE (memory_arena_new, 2, a, size); |
770 | mstate replaced_arena = thread_arena; |
771 | thread_arena = a; |
772 | __libc_lock_init (a->mutex); |
773 | |
774 | __libc_lock_lock (list_lock); |
775 | |
776 | /* Add the new arena to the global list. */ |
777 | a->next = main_arena.next; |
778 | /* FIXME: The barrier is an attempt to synchronize with read access |
779 | in reused_arena, which does not acquire list_lock while |
780 | traversing the list. */ |
781 | atomic_write_barrier (); |
782 | main_arena.next = a; |
783 | |
784 | __libc_lock_unlock (list_lock); |
785 | |
786 | __libc_lock_lock (free_list_lock); |
787 | detach_arena (replaced_arena); |
788 | __libc_lock_unlock (free_list_lock); |
789 | |
790 | /* Lock this arena. NB: Another thread may have been attached to |
791 | this arena because the arena is now accessible from the |
792 | main_arena.next list and could have been picked by reused_arena. |
793 | This can only happen for the last arena created (before the arena |
794 | limit is reached). At this point, some arena has to be attached |
795 | to two threads. We could acquire the arena lock before list_lock |
796 | to make it less likely that reused_arena picks this new arena, |
797 | but this could result in a deadlock with |
798 | __malloc_fork_lock_parent. */ |
799 | |
800 | __libc_lock_lock (a->mutex); |
801 | |
802 | return a; |
803 | } |
804 | |
805 | |
806 | /* Remove an arena from free_list. */ |
807 | static mstate |
808 | get_free_list (void) |
809 | { |
810 | mstate replaced_arena = thread_arena; |
811 | mstate result = free_list; |
812 | if (result != NULL) |
813 | { |
814 | __libc_lock_lock (free_list_lock); |
815 | result = free_list; |
816 | if (result != NULL) |
817 | { |
818 | free_list = result->next_free; |
819 | |
820 | /* The arena will be attached to this thread. */ |
821 | assert (result->attached_threads == 0); |
822 | result->attached_threads = 1; |
823 | |
824 | detach_arena (replaced_arena); |
825 | } |
826 | __libc_lock_unlock (free_list_lock); |
827 | |
828 | if (result != NULL) |
829 | { |
830 | LIBC_PROBE (memory_arena_reuse_free_list, 1, result); |
831 | __libc_lock_lock (result->mutex); |
832 | thread_arena = result; |
833 | } |
834 | } |
835 | |
836 | return result; |
837 | } |
838 | |
839 | /* Remove the arena from the free list (if it is present). |
840 | free_list_lock must have been acquired by the caller. */ |
841 | static void |
842 | remove_from_free_list (mstate arena) |
843 | { |
844 | mstate *previous = &free_list; |
845 | for (mstate p = free_list; p != NULL; p = p->next_free) |
846 | { |
847 | assert (p->attached_threads == 0); |
848 | if (p == arena) |
849 | { |
850 | /* Remove the requested arena from the list. */ |
851 | *previous = p->next_free; |
852 | break; |
853 | } |
854 | else |
855 | previous = &p->next_free; |
856 | } |
857 | } |
858 | |
859 | /* Lock and return an arena that can be reused for memory allocation. |
860 | Avoid AVOID_ARENA as we have already failed to allocate memory in |
861 | it and it is currently locked. */ |
862 | static mstate |
863 | reused_arena (mstate avoid_arena) |
864 | { |
865 | mstate result; |
866 | /* FIXME: Access to next_to_use suffers from data races. */ |
867 | static mstate next_to_use; |
868 | if (next_to_use == NULL) |
869 | next_to_use = &main_arena; |
870 | |
871 | /* Iterate over all arenas (including those linked from |
872 | free_list). */ |
873 | result = next_to_use; |
874 | do |
875 | { |
876 | if (!__libc_lock_trylock (result->mutex)) |
877 | goto out; |
878 | |
879 | /* FIXME: This is a data race, see _int_new_arena. */ |
880 | result = result->next; |
881 | } |
882 | while (result != next_to_use); |
883 | |
884 | /* Avoid AVOID_ARENA as we have already failed to allocate memory |
885 | in that arena and it is currently locked. */ |
886 | if (result == avoid_arena) |
887 | result = result->next; |
888 | |
889 | /* No arena available without contention. Wait for the next in line. */ |
890 | LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena); |
891 | __libc_lock_lock (result->mutex); |
892 | |
893 | out: |
894 | /* Attach the arena to the current thread. */ |
895 | { |
896 | /* Update the arena thread attachment counters. */ |
897 | mstate replaced_arena = thread_arena; |
898 | __libc_lock_lock (free_list_lock); |
899 | detach_arena (replaced_arena); |
900 | |
901 | /* We may have picked up an arena on the free list. We need to |
902 | preserve the invariant that no arena on the free list has a |
903 | positive attached_threads counter (otherwise, |
904 | arena_thread_freeres cannot use the counter to determine if the |
905 | arena needs to be put on the free list). We unconditionally |
906 | remove the selected arena from the free list. The caller of |
907 | reused_arena checked the free list and observed it to be empty, |
908 | so the list is very short. */ |
909 | remove_from_free_list (result); |
910 | |
911 | ++result->attached_threads; |
912 | |
913 | __libc_lock_unlock (free_list_lock); |
914 | } |
915 | |
916 | LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena); |
917 | thread_arena = result; |
918 | next_to_use = result->next; |
919 | |
920 | return result; |
921 | } |
922 | |
923 | static mstate |
924 | arena_get2 (size_t size, mstate avoid_arena) |
925 | { |
926 | mstate a; |
927 | |
928 | static size_t narenas_limit; |
929 | |
930 | a = get_free_list (); |
931 | if (a == NULL) |
932 | { |
933 | /* Nothing immediately available, so generate a new arena. */ |
934 | if (narenas_limit == 0) |
935 | { |
936 | if (mp_.arena_max != 0) |
937 | narenas_limit = mp_.arena_max; |
938 | else if (narenas > mp_.arena_test) |
939 | { |
940 | int n = __get_nprocs_sched (); |
941 | |
942 | if (n >= 1) |
943 | narenas_limit = NARENAS_FROM_NCORES (n); |
944 | else |
945 | /* We have no information about the system. Assume two |
946 | cores. */ |
947 | narenas_limit = NARENAS_FROM_NCORES (2); |
948 | } |
949 | } |
950 | repeat:; |
951 | size_t n = narenas; |
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set, the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet greater than or equal to
         arena_test, narenas_limit is 0.  There is no possibility for
         narenas to be too big for the test to always fail, since there
         is not enough address space to create that many arenas.  */
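      /* Concretely: while narenas_limit is still 0, narenas_limit - 1
         wraps to SIZE_MAX, so the test below always succeeds and a new
         arena is created.  Once narenas_limit is set, say to 8
         (e.g. NARENAS_FROM_NCORES (1) on a 64-bit machine), new arenas
         are created only while narenas < 8; after that reused_arena
         picks an existing one.  */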
959 | if (__glibc_unlikely (n <= narenas_limit - 1)) |
960 | { |
961 | if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n)) |
962 | goto repeat; |
963 | a = _int_new_arena (size); |
964 | if (__glibc_unlikely (a == NULL)) |
965 | catomic_decrement (&narenas); |
966 | } |
967 | else |
968 | a = reused_arena (avoid_arena); |
969 | } |
970 | return a; |
971 | } |
972 | |
973 | /* If we don't have the main arena, then maybe the failure is due to running |
974 | out of mmapped areas, so we can try allocating on the main arena. |
975 | Otherwise, it is likely that sbrk() has failed and there is still a chance |
976 | to mmap(), so try one of the other arenas. */ |
977 | static mstate |
978 | arena_get_retry (mstate ar_ptr, size_t bytes) |
979 | { |
980 | LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr); |
981 | if (ar_ptr != &main_arena) |
982 | { |
983 | __libc_lock_unlock (ar_ptr->mutex); |
984 | ar_ptr = &main_arena; |
985 | __libc_lock_lock (ar_ptr->mutex); |
986 | } |
987 | else |
988 | { |
989 | __libc_lock_unlock (ar_ptr->mutex); |
990 | ar_ptr = arena_get2 (bytes, ar_ptr); |
991 | } |
992 | |
993 | return ar_ptr; |
994 | } |
995 | #endif |
996 | |
997 | void |
998 | __malloc_arena_thread_freeres (void) |
999 | { |
1000 | /* Shut down the thread cache first. This could deallocate data for |
1001 | the thread arena, so do this before we put the arena on the free |
1002 | list. */ |
1003 | tcache_thread_shutdown (); |
1004 | |
1005 | mstate a = thread_arena; |
1006 | thread_arena = NULL; |
1007 | |
1008 | if (a != NULL) |
1009 | { |
1010 | __libc_lock_lock (free_list_lock); |
1011 | /* If this was the last attached thread for this arena, put the |
1012 | arena on the free list. */ |
1013 | assert (a->attached_threads > 0); |
1014 | if (--a->attached_threads == 0) |
1015 | { |
1016 | a->next_free = free_list; |
1017 | free_list = a; |
1018 | } |
1019 | __libc_lock_unlock (free_list_lock); |
1020 | } |
1021 | } |
1022 | |
1023 | /* |
1024 | * Local variables: |
1025 | * c-basic-offset: 2 |
1026 | * End: |
1027 | */ |
1028 | |