/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work.  */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* Activate a standard set of debugging hooks.  */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
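
/* __malloc_check_init is normally reached from ptmalloc_init when
   malloc checking is requested at startup (historically through the
   MALLOC_CHECK_ environment variable).  From that point on, every
   malloc/free/realloc/memalign call in the process is routed through
   the *_check wrappers defined below.  */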

/* When memory is tagged, the checking data is stored in the user part
   of the chunk.  We can't rely on the user not having modified the
   tags, so fetch the tag at each location before dereferencing
   it.  */
#define SAFE_CHAR_OFFSET(p,offset) \
  ((unsigned char *) TAG_AT (((unsigned char *) p) + offset))
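
/* For example, the scan loops below read a length byte with
   c = *SAFE_CHAR_OFFSET (p, size), which behaves like
   ((unsigned char *) p)[size] except that the pointer is first
   combined with the tag currently stored at that address, so the
   access stays valid when memory tagging is in use.  */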

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code.  */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
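
/* Worked example: for a hypothetical chunk at p = 0x602010,
   (0x602010 >> 3) = 0xc0402 and (0x602010 >> 11) = 0xc04; their XOR
   is 0xc0806, whose low byte gives magic = 0x06.  Deriving the byte
   from the chunk address means checking data copied to a different
   address will usually no longer match.  */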

/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = CHUNK_AVAILABLE_SIZE (p) - 1;
       (c = *SAFE_CHAR_OFFSET (p, size)) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + CHUNK_HDR_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - CHUNK_HDR_SZ;
}
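
/* Illustration (assuming a 64-bit build where CHUNK_HDR_SZ is 16 and
   CHUNK_AVAILABLE_SIZE (p) is 616, for a 10-byte request): the scan
   starts at chunk offset 615 and finds length byte 255, hops to
   offset 360 (255 again), then to offset 105 (length 79), then to
   offset 26, where it finds the magic byte and returns 26 - 16 = 10.
   These bytes are laid down by mem2mem_check below.  */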

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ;

  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      *SAFE_CHAR_OFFSET (m_ptr, i) = block_sz;
    }
  *SAFE_CHAR_OFFSET (m_ptr, req_sz) = magic;
  return (void *) m_ptr;
}
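
/* Continuing the example above (req_sz = 10, max_sz = 600, and a
   magic byte other than 0xff): the loop writes 255 at user offset
   599, 255 at offset 344 and 79 at offset 89, then stores the magic
   byte at offset 10, exactly where the requested region ends.  Any
   overrun that clobbers one of these bytes breaks the chain and is
   caught by the scans in malloc_check_get_size and
   mem2chunk_check.  */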

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory.  */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
           sz -= c)
        {
          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first.  */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
           sz -= c)
        {
          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
    }

  unsigned char *safe_p = SAFE_CHAR_OFFSET (p, sz);
  *safe_p ^= 0xFF;
  if (magic_p)
    *magic_p = safe_p;
  return p;
}
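
/* Note that a successful mem2chunk_check flips the magic byte
   (*safe_p ^= 0xFF).  A second free of the same pointer therefore
   fails the scan above and is reported as invalid, which is how this
   scheme catches most double frees.  realloc_check undoes the flip
   through *magic_p when the old chunk remains in use.  */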

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}
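
/* In other words: the untouched initial top chunk is trivially fine;
   otherwise top must not look mmapped, must be at least MINSIZE with
   the prev_inuse bit set, and in a contiguous arena it must end
   exactly at mp_.sbrk_base + main_arena.system_mem.  */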

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;
  size_t nb;

  if (__builtin_add_overflow (sz, 1, &nb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, nb);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (TAG_NEW_USABLE (victim), sz);
}
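
/* The request is enlarged by one byte (sz + 1) so that the trailing
   magic byte written by mem2mem_check always fits, even when the
   caller uses every byte it asked for; the overflow check turns a
   SIZE_MAX request into ENOMEM instead of wrapping around to 0.  */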

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  int err = errno;

#ifdef USE_MTAG
  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  *(volatile char *)mem;
#endif

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
    }
  else
    {
      /* Mark the chunk as belonging to the library again.  */
      (void)TAG_REGION (chunk2rawmem (p),
                        CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
      _int_free (&main_arena, p, 1);
      __libc_lock_unlock (main_arena.mutex);
    }
  __set_errno (err);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T chnb;
  void *newmem = 0;
  unsigned char *magic_p;
  size_t rb;

  if (__builtin_add_overflow (bytes, 1, &rb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }

#ifdef USE_MTAG
  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  *(volatile char *)oldmem;
#endif

  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  if (!checked_request2size (rb, &chnb))
    goto invert;

  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, chnb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead.  */
        if (oldsize - SIZE_SZ >= chnb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free.  */
            top_check ();
            newmem = _int_malloc (&main_arena, rb);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, chnb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
invert:
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (TAG_NEW_USABLE (newmem), bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (TAG_NEW_USABLE (mem), bytes);
}
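
/* Example of the power-of-two rounding above (assuming
   MALLOC_ALIGNMENT is 16): a request for alignment 100 starts with
   a = 32, doubles to 64 and then 128, so the request is served with
   128-byte alignment, the next power of two at or above 100.  */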

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
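
/* This layout must match what malloc_get_state wrote into Emacs dumps
   produced with glibc 2.0 through 2.25, which is why fields that
   current malloc no longer uses (e.g. check_action, max_total_mem)
   are still present.  */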

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */