/* Repeating a memory blob, with alias mapping optimization.
   Copyright (C) 2018-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <support/blob_repeat.h>
#include <support/check.h>
#include <support/test-driver.h>
#include <support/support.h>
#include <support/xunistd.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wchar.h>

/* Small allocations should use malloc directly instead of the mmap
   optimization because mappings carry a lot of overhead.  */
static const size_t maximum_small_size = 4 * 1024 * 1024;

/* Internal helper for fill.  */
static void
fill0 (char *target, const char *element, size_t element_size,
       size_t count)
{
  while (count > 0)
    {
      memcpy (target, element, element_size);
      target += element_size;
      --count;
    }
}

/* Fill the buffer at TARGET with COUNT copies of the ELEMENT_SIZE
   bytes starting at ELEMENT.  */
static void
fill (char *target, const char *element, size_t element_size,
      size_t count)
{
  if (element_size == 0 || count == 0)
    return;
  else if (element_size == 1)
    memset (target, element[0], count);
  else if (element_size == sizeof (wchar_t))
    {
      wchar_t wc;
      memcpy (&wc, element, sizeof (wc));
      wmemset ((wchar_t *) target, wc, count);
    }
  else if (element_size < 1024 && count > 4096)
    {
      /* Use larger copies for really small element sizes.  */
      char buffer[8192];
      size_t buffer_count = sizeof (buffer) / element_size;
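      /* For example, a 5-byte element gives 1638 complete copies per
         8192-byte buffer (8190 bytes), leaving 2 bytes unused.  */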
      fill0 (buffer, element, element_size, buffer_count);
      while (count > 0)
        {
          size_t copy_count = buffer_count;
          if (copy_count > count)
            copy_count = count;
          size_t copy_bytes = copy_count * element_size;
          memcpy (target, buffer, copy_bytes);
          target += copy_bytes;
          count -= copy_count;
        }
    }
  else
    fill0 (target, element, element_size, count);
}

/* Use malloc instead of mmap for small allocations and unusual size
   combinations.  */
static struct support_blob_repeat
allocate_malloc (size_t total_size, const void *element, size_t element_size,
                 size_t count)
{
  void *buffer = malloc (total_size);
  if (buffer == NULL)
    return (struct support_blob_repeat) { 0 };
  fill (buffer, element, element_size, count);
  return (struct support_blob_repeat)
    {
      .start = buffer,
      .size = total_size,
      .use_malloc = true
    };
}

/* Return the least common multiple of PAGE_SIZE and ELEMENT_SIZE,
   avoiding overflow.  This assumes that PAGE_SIZE is a power of
   two.  */
static size_t
minimum_stride_size (size_t page_size, size_t element_size)
{
  TEST_VERIFY_EXIT (page_size > 0);
  TEST_VERIFY_EXIT (element_size > 0);

  /* Compute the number of trailing zeros common to both sizes.  */
  unsigned int common_zeros = __builtin_ctzll (page_size | element_size);

  /* In the product, this power of two appears twice, but in the least
     common multiple, it appears only once.  Therefore, shift one
     factor.  */
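  /* For example, with a page size of 4096 and an element size of 24,
     both sizes share three trailing zero bits (a common factor of 8),
     so the result is (4096 >> 3) * 24 == 12288, the least common
     multiple of 4096 and 24.  */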
  size_t multiple;
  if (__builtin_mul_overflow (page_size >> common_zeros, element_size,
                              &multiple))
    return 0;
  return multiple;
}

/* Allocations larger than maximum_small_size potentially use mmap
   with alias mappings: one stride of file-backed data is mapped
   repeatedly over a larger reservation, so the kernel can share the
   underlying pages among all copies.  */
static struct support_blob_repeat
allocate_big (size_t total_size, const void *element, size_t element_size,
              size_t count)
{
  unsigned long page_size = xsysconf (_SC_PAGESIZE);
  size_t stride_size = minimum_stride_size (page_size, element_size);
  if (stride_size == 0)
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }

  /* Ensure that the stride size is at least maximum_small_size.  This
     is necessary to reduce the number of distinct mappings.  */
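  /* For example, a 12288-byte stride (page size 4096, element size
     24) is rounded up to 342 * 12288 == 4202496 bytes, the smallest
     multiple of the stride that is at least 4 MiB.  */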
  if (stride_size < maximum_small_size)
    stride_size
      = ((maximum_small_size + stride_size - 1) / stride_size) * stride_size;

  if (stride_size > total_size)
    /* The mmap optimization would not save anything.  */
    return allocate_malloc (total_size, element, element_size, count);

  /* Reserve the memory region with an inaccessible mapping, claiming
     the entire address range up front.  If we cannot create the
     mapping, there is no reason to set up the backing file.  */
  void *target = mmap (NULL, total_size, PROT_NONE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (target == MAP_FAILED)
    return (struct support_blob_repeat) { 0 };

  /* Create the backing file for the repeated mapping.  Call mkstemp
     directly and unlink the file immediately, so that the backing
     resources are released as soon as support_blob_repeat_free
     removes the mappings.  Using create_temp_file instead would
     result in a warning during post-test cleanup.  */
  int fd;
  {
    char *temppath = xasprintf ("%s/support_blob_repeat-XXXXXX", test_dir);
    fd = mkstemp (temppath);
    if (fd < 0)
      FAIL_EXIT1 ("mkstemp (\"%s\"): %m", temppath);
    xunlink (temppath);
    free (temppath);
  }

  /* Make sure that there is backing storage, so that the fill
     operation will not fault.  */
  if (posix_fallocate (fd, 0, stride_size) != 0)
    FAIL_EXIT1 ("posix_fallocate (%zu): %m", stride_size);

  /* The stride size must still be a multiple of the page size and
     element size.  */
  TEST_VERIFY_EXIT ((stride_size % page_size) == 0);
  TEST_VERIFY_EXIT ((stride_size % element_size) == 0);

  /* Fill the backing store.  */
  {
    void *ptr = mmap (target, stride_size, PROT_READ | PROT_WRITE,
                      MAP_FIXED | MAP_FILE | MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED)
      {
        int saved_errno = errno;
        xmunmap (target, total_size);
        xclose (fd);
        errno = saved_errno;
        return (struct support_blob_repeat) { 0 };
      }
    if (ptr != target)
      FAIL_EXIT1 ("mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);

    /* Write the repeating data.  */
    fill (target, element, element_size, stride_size / element_size);

    /* Return to a PROT_NONE mapping, just to be on the safe side: no
       writable shared mapping of the backing file should remain once
       the data has been written.  */
    ptr = mmap (target, stride_size, PROT_NONE,
                MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED)
      FAIL_EXIT1 ("Failed to reinstate PROT_NONE mapping: %m");
    if (ptr != target)
      FAIL_EXIT1 ("PROT_NONE mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);
  }

  /* Create the alias mappings.  Each MAP_PRIVATE mapping of the same
     file region shares the underlying pages copy-on-write, so the
     repeated data initially occupies only about one stride of
     physical memory.  */
  {
    size_t remaining_size = total_size;
    char *current = target;
    int flags = MAP_FIXED | MAP_FILE | MAP_PRIVATE;
#ifdef MAP_NORESERVE
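    /* Skipping swap reservation avoids failures under strict
       overcommit accounting for very large reservations (this
       assumes callers write to only a small part of the aliases,
       if any).  */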
    flags |= MAP_NORESERVE;
#endif
    while (remaining_size > 0)
      {
        size_t to_map = stride_size;
        if (to_map > remaining_size)
          to_map = remaining_size;
        void *ptr = mmap (current, to_map, PROT_READ | PROT_WRITE,
                          flags, fd, 0);
        if (ptr == MAP_FAILED)
          {
            int saved_errno = errno;
            xmunmap (target, total_size);
            xclose (fd);
            errno = saved_errno;
            return (struct support_blob_repeat) { 0 };
          }
        if (ptr != current)
          FAIL_EXIT1 ("MAP_PRIVATE mapping of %zu bytes moved from %p to %p",
                      to_map, current, ptr);
        remaining_size -= to_map;
        current += to_map;
      }
  }

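  /* Closing the descriptor does not invalidate the mappings.  Since
     the temporary file has already been unlinked, the kernel releases
     its storage once the last alias mapping is unmapped.  */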
  xclose (fd);

  return (struct support_blob_repeat)
    {
      .start = target,
      .size = total_size,
      .use_malloc = false
    };
}

struct support_blob_repeat
support_blob_repeat_allocate (const void *element, size_t element_size,
                              size_t count)
{
  size_t total_size;
  if (__builtin_mul_overflow (element_size, count, &total_size))
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }
  if (total_size <= maximum_small_size)
    return allocate_malloc (total_size, element, element_size, count);
  else
    return allocate_big (total_size, element, element_size, count);
}

void
support_blob_repeat_free (struct support_blob_repeat *blob)
{
  if (blob->size > 0)
    {
      int saved_errno = errno;
      if (blob->use_malloc)
        free (blob->start);
      else
        xmunmap (blob->start, blob->size);
      errno = saved_errno;
    }
  *blob = (struct support_blob_repeat) { 0 };
}
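
/* Example use (a sketch; the pattern and count below are
   illustrative only):

     static const char pattern[16] = "0123456789abcdef";
     struct support_blob_repeat repeated
       = support_blob_repeat_allocate (pattern, sizeof (pattern),
                                       64 * 1024 * 1024);
     if (repeated.start == NULL)
       FAIL_EXIT1 ("blob allocation failed: %m");
     ...
     support_blob_repeat_free (&repeated);

   The total size in this case is 1 GiB, well above
   maximum_small_size, so the allocation takes the alias mapping path
   and touches only about one stride of physical memory.  */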