/* Map in a shared object's segments.  Generic version.
   Copyright (C) 1995-2023 Free Software Foundation, Inc.
   Copyright The GNU Toolchain Authors.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <dl-load.h>

/* Map a segment and align it properly.  */

static __always_inline ElfW(Addr)
_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
                 const size_t maplength, int fd)
{
  if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
                                MAP_COPY|MAP_FILE, fd, c->mapoff);

  /* If the segment alignment > the page size, allocate enough space to
     ensure that the segment can be properly aligned.  */
  ElfW(Addr) maplen = (maplength >= c->mapalign
                       ? (maplength + c->mapalign)
                       : (2 * c->mapalign));
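  /* For example (purely illustrative numbers): with 4 KiB pages and a
     segment that requires 2 MiB alignment, a 3 MiB maplength reserves
     3 MiB + 2 MiB = 5 MiB of address space, so some 2 MiB-aligned start
     with room for the whole mapping is guaranteed to exist inside the
     reservation; a maplength smaller than the alignment reserves
     2 * 2 MiB for the same reason.  */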
  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
                                              PROT_NONE,
                                              MAP_ANONYMOUS|MAP_PRIVATE,
                                              -1, 0);
  if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
    return map_start;

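  /* The first suitably aligned address inside the PROT_NONE reservation
     is over-mapped with the file contents; MAP_FIXED is safe here because
     it only ever replaces pages belonging to the reservation made above.  */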
  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
                                           maplength, c->prot,
                                           MAP_COPY|MAP_FILE|MAP_FIXED,
                                           fd, c->mapoff);
  if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
    __munmap ((void *) map_start, maplen);
  else
    {
      /* Unmap the unused regions.  */
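      /* For instance (illustrative addresses only): if the reservation
         starts at 0x7f0000100000 and the 2 MiB-aligned file mapping was
         placed at 0x7f0000200000, the unused head
         [0x7f0000100000, 0x7f0000200000) and the unused tail past the
         page-aligned end of the file mapping are both returned to the
         system, leaving only the aligned segment mapped.  */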
      ElfW(Addr) delta = map_start_aligned - map_start;
      if (delta)
        __munmap ((void *) map_start, delta);
      ElfW(Addr) map_end = map_start_aligned + maplength;
      map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
      delta = map_start + maplen - map_end;
      if (delta)
        __munmap ((void *) map_end, delta);
    }

  return map_start_aligned;
}

/* This implementation assumes (as does the corresponding implementation
   of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
   are always laid out with all segments contiguous (or with gaps
   between them small enough that it's preferable to reserve all whole
   pages inside the gaps with PROT_NONE mappings rather than permitting
   other use of those parts of the address space).  */
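
/* Note: _dl_map_segments returns NULL on success and otherwise an error
   string (one of the DL_MAP_SEGMENTS_ERROR_* messages or the N_() string
   below) that the caller turns into the final ld.so/dlopen error report.
   The l_contiguous flag set below records whether the whole
   [l_map_start, l_map_end) range ends up mapped with no unmapped gaps.  */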

static __always_inline const char *
_dl_map_segments (struct link_map *l, int fd,
                  const ElfW(Ehdr) *header, int type,
                  const struct loadcmd loadcmds[], size_t nloadcmds,
                  const size_t maplength, bool has_holes,
                  struct link_map *loader)
{
  const struct loadcmd *c = loadcmds;

  if (__glibc_likely (type == ET_DYN))
    {
      /* This is a position-independent shared object.  We can let the
         kernel map it anywhere it likes, but we must have space for all
         the segments in their specified positions relative to the first.
         So we map the first segment without MAP_FIXED, but with its
         extent increased to cover all the segments.  Then we remove
         access from the excess portion, and there is known to be
         sufficient space there to remap the later segments.

         As a refinement, sometimes we have an address that we would
         prefer to map such objects at; but this is only a preference,
         the OS can do whatever it likes.  */
      ElfW(Addr) mappref
        = (ELF_PREFERRED_ADDRESS (loader, maplength,
                                  c->mapstart & GLRO(dl_use_load_bias))
           - MAP_BASE_ADDR (l));
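      /* With the generic definitions of these macros the preferred address
         is just the (possibly masked) c->mapstart and MAP_BASE_ADDR is 0,
         so mappref is normally 0 for a shared object and the kernel is
         free to choose the placement; even a nonzero value is only a
         hint, since _dl_map_segment maps without MAP_FIXED.  */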

      /* Remember which part of the address space this object uses.  */
      l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
      if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

      l->l_map_end = l->l_map_start + maplength;
      l->l_addr = l->l_map_start - c->mapstart;
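      /* l_addr is the load bias: the difference between where the object
         was actually mapped and the addresses recorded in its program
         headers.  For a typical ET_DYN object whose first PT_LOAD has a
         zero p_vaddr, c->mapstart is 0 and l_addr equals l_map_start.  */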

      if (has_holes)
        {
          /* Change protection on the excess portion to disallow all access;
             the portions we do not remap later will be inaccessible as if
             unallocated.  Then jump into the normal segment-mapping loop to
             handle the portion of the segment past the end of the file
             mapping.  */
          if (__glibc_unlikely (loadcmds[nloadcmds - 1].mapstart <
                                c->mapend))
            return N_("ELF load command address/offset not page-aligned");
          if (__glibc_unlikely
              (__mprotect ((caddr_t) (l->l_addr + c->mapend),
                           loadcmds[nloadcmds - 1].mapstart - c->mapend,
                           PROT_NONE) < 0))
            return DL_MAP_SEGMENTS_ERROR_MPROTECT;
        }
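
      /* At this point the entire [l_map_start, l_map_end) range is mapped:
         by the file mapping made in _dl_map_segment, with any excess past
         the first segment turned into PROT_NONE above when there are
         holes.  The loop below (entered via the goto) remaps each later
         segment over its portion of the range with MAP_FIXED, so any pages
         left PROT_NONE are exactly the gaps between segments; that is why
         l_contiguous can be set to 1 here.  */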
      l->l_contiguous = 1;

      goto postmap;
    }

  /* Remember which part of the address space this object uses.  */
  l->l_map_start = c->mapstart + l->l_addr;
  l->l_map_end = l->l_map_start + maplength;
  l->l_contiguous = !has_holes;
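
  /* Here the object is being loaded at fixed addresses (e.g. ET_EXEC):
     l_addr was already chosen by the caller (normally 0), so each segment
     is simply mapped at its link-time address with MAP_FIXED in the loop
     below, and contiguity depends on whether the program headers leave
     holes between the segments.  */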

  while (c < &loadcmds[nloadcmds])
    {
      if (c->mapend > c->mapstart
          /* Map the segment contents from the file.  */
          && (__mmap ((void *) (l->l_addr + c->mapstart),
                      c->mapend - c->mapstart, c->prot,
                      MAP_FIXED|MAP_COPY|MAP_FILE,
                      fd, c->mapoff)
              == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

    postmap:
      _dl_postprocess_loadcmd (l, header, c);

      if (c->allocend > c->dataend)
        {
          /* Extra zero pages should appear at the end of this segment,
             after the data mapped from the file.  */
          ElfW(Addr) zero, zeroend, zeropage;

          zero = l->l_addr + c->dataend;
          zeroend = l->l_addr + c->allocend;
          zeropage = ((zero + GLRO(dl_pagesize) - 1)
                      & ~(GLRO(dl_pagesize) - 1));
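          /* Illustration with 4 KiB pages (offsets relative to l_addr):
             if dataend is 0x5432 and allocend is 0x9000, then zero is
             0x5432, zeropage is 0x6000 and zeroend is 0x9000; the tail of
             the partially used page, [0x5432, 0x6000), is cleared with
             memset below and the remaining whole pages, [0x6000, 0x9000),
             are mapped anonymously.  */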

          if (zeroend < zeropage)
            /* All the extra data is in the last page of the segment.
               We can just zero it.  */
            zeropage = zeroend;

          if (zeropage > zero)
            {
              /* Zero the final part of the last page of the segment.  */
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                {
                  /* Dag nab it.  */
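                  /* The last file-backed page of this segment belongs to a
                     mapping made without PROT_WRITE, so it has to be made
                     writable temporarily for the memset below; the original
                     protection is restored immediately afterwards.  */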
                  if (__mprotect ((caddr_t) (zero
                                             & ~(GLRO(dl_pagesize) - 1)),
                                  GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
                    return DL_MAP_SEGMENTS_ERROR_MPROTECT;
                }
              memset ((void *) zero, '\0', zeropage - zero);
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
                            GLRO(dl_pagesize), c->prot);
            }

          if (zeroend > zeropage)
            {
              /* Map the remaining zero pages in from the zero fill FD.  */
              caddr_t mapat;
              mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                              c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                              -1, 0);
              if (__glibc_unlikely (mapat == MAP_FAILED))
                return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
            }
        }

      ++c;
    }

  /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
     fixed.  */
  ELF_FIXED_ADDRESS (loader, c->mapstart);

  return NULL;
}