1 | /* Relocate a shared object and resolve its references to other loaded objects. |
2 | Copyright (C) 1995-2021 Free Software Foundation, Inc. |
3 | This file is part of the GNU C Library. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <https://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <errno.h> |
20 | #include <libintl.h> |
21 | #include <stdlib.h> |
22 | #include <unistd.h> |
23 | #include <ldsodefs.h> |
24 | #include <sys/mman.h> |
25 | #include <sys/param.h> |
26 | #include <sys/types.h> |
27 | #include <_itoa.h> |
28 | #include <libc-pointer-arith.h> |
29 | #include "dynamic-link.h" |
30 | |
/* Count relocations served from the per-object lookup cache; the total
   is reported by LD_DEBUG=statistics.  Only maintained in the shared
   build.  */
32 | #ifdef SHARED |
33 | # define bump_num_cache_relocations() ++GL(dl_num_cache_relocations) |
34 | #else |
35 | # define bump_num_cache_relocations() ((void) 0) |
36 | #endif |
37 | |
38 | |
39 | /* We are trying to perform a static TLS relocation in MAP, but it was |
40 | dynamically loaded. This can only work if there is enough surplus in |
41 | the static TLS area already allocated for each running thread. If this |
42 | object's TLS segment is too big to fit, we fail with -1. If it fits, |
43 | we set MAP->l_tls_offset and return 0. |
44 | A portion of the surplus static TLS can be optionally used to optimize |
45 | dynamic TLS access (with TLSDESC or powerpc TLS optimizations). |
46 | If OPTIONAL is true then TLS is allocated for such optimization and |
47 | the caller must have a fallback in case the optional portion of surplus |
48 | TLS runs out. If OPTIONAL is false then the entire surplus TLS area is |
49 | considered and the allocation only fails if that runs out. */ |
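/* Reached from the relocation code through CHECK_STATIC_TLS (via
   _dl_allocate_static_tls below, with OPTIONAL == false) and through
   TRY_STATIC_TLS (OPTIONAL == true, where the caller falls back to
   dynamic TLS access); both macros are defined in dynamic-link.h.  */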
50 | int |
51 | _dl_try_allocate_static_tls (struct link_map *map, bool optional) |
52 | { |
53 | /* If we've already used the variable with dynamic access, or if the |
54 | alignment requirements are too high, fail. */ |
55 | if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET |
56 | || map->l_tls_align > GLRO (dl_tls_static_align)) |
57 | { |
58 | fail: |
59 | return -1; |
60 | } |
61 | |
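  /* The two TLS layouts differ in where static TLS lives relative to
     the thread pointer: with TLS_TCB_AT_TP (e.g. x86_64) the TCB sits
     at the thread pointer and TLS blocks are placed below it, so
     l_tls_offset is subtracted from the thread pointer; with
     TLS_DTV_AT_TP (e.g. AArch64, PowerPC) the blocks are placed above
     it and the offset is added.  */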
62 | #if TLS_TCB_AT_TP |
63 | size_t freebytes = GLRO (dl_tls_static_size) - GL(dl_tls_static_used); |
64 | if (freebytes < TLS_TCB_SIZE) |
65 | goto fail; |
66 | freebytes -= TLS_TCB_SIZE; |
67 | |
68 | size_t blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset; |
69 | if (freebytes < blsize) |
70 | goto fail; |
71 | |
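  /* N is the number of whole alignment units that remain free after
     the block is placed, so USE below is the number of bytes this
     allocation actually consumes, including alignment padding.  */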
72 | size_t n = (freebytes - blsize) / map->l_tls_align; |
73 | |
74 | /* Account optional static TLS surplus usage. */ |
75 | size_t use = freebytes - n * map->l_tls_align - map->l_tls_firstbyte_offset; |
76 | if (optional && use > GL(dl_tls_static_optional)) |
77 | goto fail; |
78 | else if (optional) |
79 | GL(dl_tls_static_optional) -= use; |
80 | |
81 | size_t offset = GL(dl_tls_static_used) + use; |
82 | |
83 | map->l_tls_offset = GL(dl_tls_static_used) = offset; |
84 | #elif TLS_DTV_AT_TP |
85 | /* dl_tls_static_used includes the TCB at the beginning. */ |
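  /* Choose the lowest offset at or above the current top whose value is
     congruent to l_tls_firstbyte_offset modulo the alignment, so the
     block keeps the alignment its p_vaddr had in the file.  */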
86 | size_t offset = (ALIGN_UP(GL(dl_tls_static_used) |
87 | - map->l_tls_firstbyte_offset, |
88 | map->l_tls_align) |
89 | + map->l_tls_firstbyte_offset); |
90 | size_t used = offset + map->l_tls_blocksize; |
91 | |
92 | if (used > GLRO (dl_tls_static_size)) |
93 | goto fail; |
94 | |
95 | /* Account optional static TLS surplus usage. */ |
96 | size_t use = used - GL(dl_tls_static_used); |
97 | if (optional && use > GL(dl_tls_static_optional)) |
98 | goto fail; |
99 | else if (optional) |
100 | GL(dl_tls_static_optional) -= use; |
101 | |
102 | map->l_tls_offset = offset; |
103 | map->l_tls_firstbyte_offset = GL(dl_tls_static_used); |
104 | GL(dl_tls_static_used) = used; |
105 | #else |
106 | # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" |
107 | #endif |
108 | |
109 | /* If the object is not yet relocated we cannot initialize the |
110 | static TLS region. Delay it. */ |
111 | if (map->l_real->l_relocated) |
112 | { |
113 | #ifdef SHARED |
114 | if (__builtin_expect (THREAD_DTV()[0].counter != GL(dl_tls_generation), |
115 | 0)) |
116 | /* Update the slot information data for at least the generation of |
117 | the DSO we are allocating data for. */ |
118 | (void) _dl_update_slotinfo (map->l_tls_modid); |
119 | #endif |
120 | |
121 | dl_init_static_tls (map); |
122 | } |
123 | else |
124 | map->l_need_tls_init = 1; |
125 | |
126 | return 0; |
127 | } |
128 | |
/* This function intentionally does not return any value but signals
   errors directly, as static TLS should be rare and the code handling
   it should be kept out of line as much as possible.  */
132 | void |
133 | __attribute_noinline__ |
134 | _dl_allocate_static_tls (struct link_map *map) |
135 | { |
136 | if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET |
137 | || _dl_try_allocate_static_tls (map, false)) |
138 | { |
139 | _dl_signal_error (0, map->l_name, NULL, N_("\ |
cannot allocate memory in static TLS block"));
141 | } |
142 | } |
143 | |
144 | #if !THREAD_GSCOPE_IN_TCB |
145 | /* Initialize static TLS area and DTV for current (only) thread. |
146 | libpthread implementations should provide their own hook |
147 | to handle all threads. */ |
148 | void |
149 | _dl_nothread_init_static_tls (struct link_map *map) |
150 | { |
151 | #if TLS_TCB_AT_TP |
152 | void *dest = (char *) THREAD_SELF - map->l_tls_offset; |
153 | #elif TLS_DTV_AT_TP |
154 | void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE; |
155 | #else |
156 | # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" |
157 | #endif |
158 | |
  /* Initialize the memory: copy the TLS initialization image, then
     clear the rest of the block (the uninitialized .tbss part).  */
160 | memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size), |
161 | '\0', map->l_tls_blocksize - map->l_tls_initimage_size); |
162 | } |
163 | #endif /* !THREAD_GSCOPE_IN_TCB */ |
164 | |
165 | void |
166 | _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[], |
167 | int reloc_mode, int consider_profiling) |
168 | { |
169 | struct textrels |
170 | { |
171 | caddr_t start; |
172 | size_t len; |
173 | int prot; |
174 | struct textrels *next; |
175 | } *textrels = NULL; |
176 | /* Initialize it to make the compiler happy. */ |
177 | const char *errstring = NULL; |
178 | int lazy = reloc_mode & RTLD_LAZY; |
179 | int skip_ifunc = reloc_mode & __RTLD_NOIFUNC; |
180 | |
181 | #ifdef SHARED |
182 | /* If we are auditing, install the same handlers we need for profiling. */ |
183 | if ((reloc_mode & __RTLD_AUDIT) == 0) |
184 | consider_profiling |= GLRO(dl_audit) != NULL; |
185 | #elif defined PROF |
186 | /* Never use dynamic linker profiling for gprof profiling code. */ |
187 | # define consider_profiling 0 |
188 | #endif |
189 | |
190 | if (l->l_relocated) |
191 | return; |
192 | |
  /* If DT_BIND_NOW is set, relocate all references in this object.  We
     do not do this if we are profiling, of course.  */
195 | // XXX Correct for auditing? |
196 | if (!consider_profiling |
197 | && __builtin_expect (l->l_info[DT_BIND_NOW] != NULL, 0)) |
198 | lazy = 0; |
199 | |
200 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_RELOC)) |
    _dl_debug_printf ("\nrelocation processing: %s%s\n",
		      DSO_FILENAME (l->l_name), lazy ? " (lazy)" : "");
203 | |
  /* DT_TEXTREL is now in level 2 and might be phased out at some point.
     But we rewrite the DT_FLAGS entry into a DT_TEXTREL entry to make
     testing easier, and therefore it will be available at all times.  */
207 | if (__glibc_unlikely (l->l_info[DT_TEXTREL] != NULL)) |
208 | { |
209 | /* Bletch. We must make read-only segments writable |
210 | long enough to relocate them. */ |
211 | const ElfW(Phdr) *ph; |
212 | for (ph = l->l_phdr; ph < &l->l_phdr[l->l_phnum]; ++ph) |
213 | if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0) |
214 | { |
215 | struct textrels *newp; |
216 | |
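	  /* Allocated with alloca: the chain is only needed until the
	     protections are restored at the end of this function.  */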
217 | newp = (struct textrels *) alloca (sizeof (*newp)); |
218 | newp->len = ALIGN_UP (ph->p_vaddr + ph->p_memsz, GLRO(dl_pagesize)) |
219 | - ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize)); |
220 | newp->start = PTR_ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize)) |
221 | + (caddr_t) l->l_addr; |
222 | |
223 | newp->prot = 0; |
224 | if (ph->p_flags & PF_R) |
225 | newp->prot |= PROT_READ; |
226 | if (ph->p_flags & PF_W) |
227 | newp->prot |= PROT_WRITE; |
228 | if (ph->p_flags & PF_X) |
229 | newp->prot |= PROT_EXEC; |
230 | |
231 | if (__mprotect (newp->start, newp->len, newp->prot|PROT_WRITE) < 0) |
232 | { |
233 | errstring = N_("cannot make segment writable for relocation" ); |
234 | call_error: |
235 | _dl_signal_error (errno, l->l_name, NULL, errstring); |
236 | } |
237 | |
238 | newp->next = textrels; |
239 | textrels = newp; |
240 | } |
241 | } |
242 | |
243 | { |
244 | /* Do the actual relocation of the object's GOT and other data. */ |
245 | |
    /* String table with the object's symbol names.  */
247 | const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]); |
248 | |
249 | /* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code. */ |
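/* Resolution happens in three steps: symbols that bind locally
   (STB_LOCAL or protected visibility) resolve to this object itself; a
   reference identical to the previous one (same symbol, same type
   class) is served from the per-map lookup cache; anything else goes
   through a full _dl_lookup_symbol_x search of SCOPE, and the result
   is cached for the next relocation.  */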
250 | #define RESOLVE_MAP(ref, version, r_type) \ |
251 | ((ELFW(ST_BIND) ((*ref)->st_info) != STB_LOCAL \ |
252 | && __glibc_likely (!dl_symbol_visibility_binds_local_p (*ref))) \ |
253 | ? ((__builtin_expect ((*ref) == l->l_lookup_cache.sym, 0) \ |
254 | && elf_machine_type_class (r_type) == l->l_lookup_cache.type_class) \ |
255 | ? (bump_num_cache_relocations (), \ |
256 | (*ref) = l->l_lookup_cache.ret, \ |
257 | l->l_lookup_cache.value) \ |
258 | : ({ lookup_t _lr; \ |
259 | int _tc = elf_machine_type_class (r_type); \ |
260 | l->l_lookup_cache.type_class = _tc; \ |
261 | l->l_lookup_cache.sym = (*ref); \ |
262 | const struct r_found_version *v = NULL; \ |
263 | if ((version) != NULL && (version)->hash != 0) \ |
264 | v = (version); \ |
265 | _lr = _dl_lookup_symbol_x (strtab + (*ref)->st_name, l, (ref), \ |
266 | scope, v, _tc, \ |
267 | DL_LOOKUP_ADD_DEPENDENCY \ |
268 | | DL_LOOKUP_FOR_RELOCATE, NULL); \ |
269 | l->l_lookup_cache.ret = (*ref); \ |
270 | l->l_lookup_cache.value = _lr; })) \ |
271 | : l) |
272 | |
273 | #include "dynamic-link.h" |
274 | |
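    /* Apply the object's DT_REL/DT_RELA relocations and set up the PLT;
       with LAZY set, PLT relocations are left for the runtime resolver
       instead of being processed now.  */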
275 | ELF_DYNAMIC_RELOCATE (l, lazy, consider_profiling, skip_ifunc); |
276 | |
277 | #ifndef PROF |
278 | if (__glibc_unlikely (consider_profiling) |
279 | && l->l_info[DT_PLTRELSZ] != NULL) |
280 | { |
	  /* Allocate the array which will contain the already found
	     relocations.  If the shared object lacks a PLT (for example
	     if it only contains leaf functions) l_info[DT_PLTRELSZ]
	     will be NULL.  */
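	  /* The array is indexed by PLT relocation slot and is filled
	     in lazily by _dl_profile_fixup as calls are resolved.  */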
285 | size_t sizeofrel = l->l_info[DT_PLTREL]->d_un.d_val == DT_RELA |
286 | ? sizeof (ElfW(Rela)) |
287 | : sizeof (ElfW(Rel)); |
288 | size_t relcount = l->l_info[DT_PLTRELSZ]->d_un.d_val / sizeofrel; |
289 | l->l_reloc_result = calloc (sizeof (l->l_reloc_result[0]), relcount); |
290 | |
291 | if (l->l_reloc_result == NULL) |
292 | { |
293 | errstring = N_("\ |
%s: out of memory to store relocation results for %s\n");
295 | _dl_fatal_printf (errstring, RTLD_PROGNAME, l->l_name); |
296 | } |
297 | } |
298 | #endif |
299 | } |
300 | |
301 | /* Mark the object so we know this work has been done. */ |
302 | l->l_relocated = 1; |
303 | |
304 | /* Undo the segment protection changes. */ |
305 | while (__builtin_expect (textrels != NULL, 0)) |
306 | { |
307 | if (__mprotect (textrels->start, textrels->len, textrels->prot) < 0) |
308 | { |
309 | errstring = N_("cannot restore segment prot after reloc" ); |
310 | goto call_error; |
311 | } |
312 | |
313 | #ifdef CLEAR_CACHE |
314 | CLEAR_CACHE (textrels->start, textrels->start + textrels->len); |
315 | #endif |
316 | |
317 | textrels = textrels->next; |
318 | } |
319 | |
320 | /* In case we can protect the data now that the relocations are |
321 | done, do it. */ |
322 | if (l->l_relro_size != 0) |
323 | _dl_protect_relro (l); |
324 | } |
325 | |
326 | |
327 | void |
328 | _dl_protect_relro (struct link_map *l) |
329 | { |
330 | ElfW(Addr) start = ALIGN_DOWN((l->l_addr |
331 | + l->l_relro_addr), |
332 | GLRO(dl_pagesize)); |
333 | ElfW(Addr) end = ALIGN_DOWN((l->l_addr |
334 | + l->l_relro_addr |
335 | + l->l_relro_size), |
336 | GLRO(dl_pagesize)); |
337 | if (start != end |
338 | && __mprotect ((void *) start, end - start, PROT_READ) < 0) |
339 | { |
340 | static const char errstring[] = N_("\ |
cannot apply additional memory protection after relocation");
342 | _dl_signal_error (errno, l->l_name, NULL, errstring); |
343 | } |
344 | } |
345 | |
346 | void |
347 | __attribute_noinline__ |
348 | _dl_reloc_bad_type (struct link_map *map, unsigned int type, int plt) |
349 | { |
#define DIGIT(b) _itoa_lower_digits[(b) & 0xf]
351 | |
352 | /* XXX We cannot translate these messages. */ |
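  /* The buffer must hold the longer PLT message, the hex digits, and
     the terminating NUL; 64-bit targets can print up to eight digits
     instead of two, hence the extra six bytes.  */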
353 | static const char msg[2][32 |
354 | #if __ELF_NATIVE_CLASS == 64 |
355 | + 6 |
356 | #endif |
357 | ] = { "unexpected reloc type 0x" , |
358 | "unexpected PLT reloc type 0x" }; |
359 | char msgbuf[sizeof (msg[0])]; |
360 | char *cp; |
361 | |
362 | cp = __stpcpy (msgbuf, msg[plt]); |
363 | #if __ELF_NATIVE_CLASS == 64 |
364 | if (__builtin_expect(type > 0xff, 0)) |
365 | { |
366 | *cp++ = DIGIT (type >> 28); |
367 | *cp++ = DIGIT (type >> 24); |
368 | *cp++ = DIGIT (type >> 20); |
369 | *cp++ = DIGIT (type >> 16); |
370 | *cp++ = DIGIT (type >> 12); |
371 | *cp++ = DIGIT (type >> 8); |
372 | } |
373 | #endif |
374 | *cp++ = DIGIT (type >> 4); |
375 | *cp++ = DIGIT (type); |
376 | *cp = '\0'; |
377 | |
378 | _dl_signal_error (0, map->l_name, NULL, msgbuf); |
379 | } |
380 | |