/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#include <stdbool.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
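
/* For example, on typical 64-bit builds DEFAULT_MMAP_THRESHOLD_MAX is
   32 MiB, making HEAP_MAX_SIZE 64 MiB (1 << 26); heap_for_ptr below
   can then recover the heap owning a non-main-arena chunk simply by
   clearing the low 26 bits of the chunk's address, with no per-chunk
   back pointer needed.  */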

/* When huge pages are used to create new arenas, the maximum and minimum
   size are based on the runtime defined huge page size.  */

static inline size_t
heap_min_size (void)
{
#if HAVE_TUNABLES
  return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
#else
  return HEAP_MIN_SIZE;
#endif
}

static inline size_t
heap_max_size (void)
{
#if HAVE_TUNABLES
  return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4;
#else
  return HEAP_MAX_SIZE;
#endif
}

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap.  */
  struct _heap_info *prev;  /* Previous heap.  */
  size_t size;              /* Current size in bytes.  */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  size_t pagesize;          /* Page size used when allocating the arena.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT.  */
  char pad[-3 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
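
/* A note on the pad expression: assuming pointers and INTERNAL_SIZE_T
   have the same width, the five members above occupy 5 * SIZE_SZ
   bytes, so the unpadded sizeof (heap_info) + 2 * SIZE_SZ comes to
   7 * SIZE_SZ; -3 * SIZE_SZ is congruent to -7 * SIZE_SZ modulo the
   MALLOC_ALIGNMENT values in use, so the pad rounds the total up to
   the next multiple of MALLOC_ALIGNMENT.  */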

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
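
/* The declaration above is a pre-C11 substitute for _Static_assert:
   if the alignment invariant is violated, the array size expression
   evaluates to -1 and the declaration fails to compile; otherwise it
   is 1 and the unused extern is harmless.  */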

/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
#if IS_IN (libc)
static size_t narenas = 1;
#endif
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
static bool __malloc_initialized = false;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do { \
  ptr = thread_arena; \
  arena_lock (ptr, size); \
} while (0)

#define arena_lock(ptr, size) do { \
  if (ptr) \
    __libc_lock_lock (ptr->mutex); \
  else \
    ptr = arena_get2 ((size), NULL); \
} while (0)
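
/* A sketch of typical use from the allocation paths (simplified from
   the real callers in malloc.c, which also retry via arena_get_retry
   on failure):

     mstate ar_ptr;
     void *victim = NULL;
     arena_get (ar_ptr, bytes);
     if (ar_ptr != NULL)
       {
         victim = _int_malloc (ar_ptr, bytes);
         __libc_lock_unlock (ar_ptr->mutex);
       }
*/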

/* find the heap and corresponding arena for a given ptr */

static inline heap_info *
heap_for_ptr (void *ptr)
{
  size_t max_size = heap_max_size ();
  return PTR_ALIGN_DOWN (ptr, max_size);
}

static inline struct malloc_state *
arena_for_chunk (mchunkptr ptr)
{
  return chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr;
}


/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
__malloc_fork_lock_parent (void)
{
  if (!__malloc_initialized)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
__malloc_fork_unlock_parent (void)
{
  if (!__malloc_initialized)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}

void
__malloc_fork_unlock_child (void)
{
  if (!__malloc_initialized)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}

#if HAVE_TUNABLES
# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}
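
/* E.g., TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t) below
   declares do_set_mmap_threshold and defines the tunable callback
   wrapper that unpacks the tunable's numeric value and forwards it
   there.  */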

TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
TUNABLE_CALLBACK_FNDECL (set_hugetlb, size_t)
#else
/* Initialization routine.  */
#include <string.h>
extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif


#if USE_TCACHE
static void tcache_key_initialize (void);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized)
    return;

  __malloc_initialized = true;

#if USE_TCACHE
  tcache_key_initialize ();
#endif

#ifdef USE_MTAG
  if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
    {
      /* If the tunable says that we should be using tagged memory
         and that morecore does not support tagged regions, then
         disable it.  */
      if (__MTAG_SBRK_UNTAGGED)
        __always_fail_morecore = true;

      mtag_enabled = true;
      mtag_mmap_flags = __MTAG_MMAP_FLAGS;
    }
#endif

#if defined SHARED && IS_IN (libc)
  /* In case this libc copy is in a non-default namespace, never use
     brk.  Likewise if dlopened from a statically linked program.  The
     generic sbrk implementation also enforces this, but it is not
     used on Hurd.  */
  if (!__libc_initial)
    __always_fail_morecore = true;
#endif

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);

#if HAVE_TUNABLES
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
  TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb));
  if (mp_.hp_pagesize > 0)
    /* Force mmap for main arena instead of sbrk, so hugepages are explicitly
       used.  */
    __always_fail_morecore = true;
#else
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, strtol (&envline[9], NULL, 10));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, strtol (&envline[9], NULL, 10));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, strtol (&envline[10],
                                                        NULL, 10));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, strtol (&envline[10],
                                                         NULL, 10));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, strtol (&envline[11],
                                                          NULL, 10));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, strtol (&envline[16],
                                                              NULL, 10));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, strtol (&envline[16],
                                                              NULL, 10));
                }
              break;
            default:
              break;
            }
        }
    }
#endif
}

/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr.  */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((uintptr_t) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask (p));
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (chunksize_nomask (p) == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing ones, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size.  */

static heap_info *
alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
                int mmap_flags)
{
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;
  size_t min_size = heap_min_size ();
  size_t max_size = heap_max_size ();

  if (size + top_pad < min_size)
    size = min_size;
  else if (size + top_pad <= max_size)
    size += top_pad;
  else if (size > max_size)
    return 0;
  else
    size = max_size;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of max_size is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway).  */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, max_size, PROT_NONE, mmap_flags);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (max_size - 1)))
        {
          __munmap (p2, max_size);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
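      /* Map twice the maximum size: the doubled region necessarily
         contains a max_size-aligned span of max_size bytes.  The
         unaligned head is unmapped right away, and the tail just
         after.  */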
      p1 = (char *) MMAP (0, max_size << 1, PROT_NONE, mmap_flags);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((uintptr_t) p1 + (max_size - 1))
                         & ~(max_size - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + max_size;
          __munmap (p2 + max_size, max_size - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only max_size
             is already aligned.  */
          p2 = (char *) MMAP (0, max_size, PROT_NONE, mmap_flags);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (max_size - 1))
            {
              __munmap (p2, max_size);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, max_size);
      return 0;
    }

  madvise_thp (p2, size);

  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  h->pagesize = pagesize;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

static heap_info *
new_heap (size_t size, size_t top_pad)
{
#if HAVE_TUNABLES
  if (__glibc_unlikely (mp_.hp_pagesize != 0))
    {
      heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize,
                                     mp_.hp_flags);
      if (h != NULL)
        return h;
    }
#endif
  return alloc_new_heap (size, top_pad, GLRO (dl_pagesize), 0);
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size.  */
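
/* The whole aligned region was reserved PROT_NONE by alloc_new_heap
   and only its first h->size bytes were made readable and writable,
   so growing a heap just extends the committed prefix tracked in
   mprotect_size; no new mapping is needed.  */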

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = h->pagesize;
  size_t max_size = heap_max_size ();
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) max_size)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap.  */

static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;
  size_t max_size = heap_max_size ();

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
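      /* The top chunk is the only thing left in this heap.  Find the
         fencepost that terminates the previous heap, step back from it
         to the last real chunk there, and compute the size the new top
         chunk will have once this heap has been unmapped.  */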
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < max_size);
      if (new_size + (max_size - prev_heap->size) < pad + MINSIZE
                                                    + heap->pagesize)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      if ((char *) heap + max_size == aligned_heap_area)
        aligned_heap_area = NULL;
      __munmap (heap, max_size);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (heap->pagesize - 1))
              == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, heap->pagesize);
  if (extra == 0)
    return 0;

  /* Try to shrink.  */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success.  Adjust top accordingly.  */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

#if IS_IN (libc)
/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment.  */
  ptr = (char *) (a + 1);
  misalign = (uintptr_t) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}


/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
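  /* The read above is an unlocked fast path; free_list is re-read
     below once free_list_lock has been acquired, and only that second
     read is acted upon.  */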
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs_sched ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set, the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger than or equal to
         arena_test, narenas_limit is 0.  In that case narenas can never
         become large enough to make the test below fail, since there is
         not enough address space to create that many arenas.  */
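      /* Concretely: with narenas_limit == 0, narenas_limit - 1 wraps
         around to SIZE_MAX, so the test below always passes and a new
         arena may be created.  */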
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
#endif

void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */