/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>
#include <dl-find_object.h>

#include <dl-dst.h>
#include <dl-prop.h>


/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};

/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                    N_ ("cannot extend global scope"));
}

/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses was allocated by the malloc() stub in
     ld.so.  By the time we get here those functions are no longer
     used; instead the malloc() implementation of libc is used.  This
     means the block from the main map cannot be passed to realloc().
     Therefore we allocate a completely new array the first time we
     have to add something to the global scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
                              &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0;      /* 0 means no new allocation.  */
  void *old_global = NULL;        /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
                              ns->_ns_global_scope_pending_adds,
                              &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
        add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
        add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
         malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
                                  &allocation_size))
        add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
        add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
              ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}

/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;

          /* The array has been resized by add_to_global_resize.  */
          assert (new_nlist < ns->_ns_global_scope_alloc);

          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

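  /* Publish the new list entries before the updated element count, so
     that concurrent readers which observe the larger r_nlist also see
     the entries stored above.  */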
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);

/* Return true if NEW is found in the scope for MAP.  */
static size_t
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}

/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The l_scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < array_length (imap->l_scope_mem))
                {
                  /* If the current l_scope memory is not pointing to
                     the static memory in the structure, but the
                     static memory in the structure is large enough to
                     use for cnt + 1 scope entries, then switch to
                     using the static memory.  */
                  new_size = array_length (imap->l_scope_mem);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy the array and the terminating NULL.  */
              memcpy (newp, imap->l_scope,
                      (cnt + 1) * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }
        }
    }
}

/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          /* Assert that resize_scopes has sufficiently enlarged the
             array.  */
          assert (cnt + 1 < imap->l_scope_max);

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          from_scope = cnt;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }
}

/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        {
          _dl_add_to_slotinfo (imap, false);
          any_tls = true;
        }
    }
  return any_tls;
}

/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        {
          _dl_add_to_slotinfo (imap, true);

          if (imap->l_need_tls_init
              && first_static_tls == new->l_searchlist.r_nlist)
            first_static_tls = i;
        }
    }

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_relaxed (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static TLS data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */

          /* FIXME: This can terminate the process on memory
             allocation failure.  It is not possible to raise
             exceptions from this context; to fix this bug,
             _dl_update_slotinfo would have to be split into two
             operations, similar to resize_scopes and update_scopes
             above.  This is related to bug 16134.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          dl_init_static_tls (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }
}

/* Mark the objects as NODELETE if required.  This is delayed until
   after dlopen failure is not possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
        if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
          _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
                            l->l_name, l->l_ns);

        /* The flag can already be true at this point, e.g. a signal
           handler may have triggered lazy binding and set NODELETE
           status immediately.  */
        l->l_nodelete_active = true;

        /* This is just a debugging aid, to indicate that
           activate_nodelete has run for this map.  */
        l->l_nodelete_pending = false;
      }
}

/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  struct link_map *new;
  int argc;
  char **argv;
  char **env;
};

static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}

static void
dl_open_worker_begin (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
         namespace but it is not so far, prepare to add it now.  This
         can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
         was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
        {
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
              && !new->l_nodelete_active)
            _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
                              new->l_name, new->l_ns);
          new->l_nodelete_active = true;
        }

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_update (new);

      const int r_state __attribute__ ((unused))
        = _dl_debug_update (args->nsid)->r_state;
      assert (r_state == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
        struct link_map *map = new->l_searchlist.r_list[i]->l_real;
        _dl_check_map_versions (map, 0, 0);
#ifndef SHARED
        /* During static dlopen, check if ld.so has been loaded.
           Perform partial initialization in this case.  This must
           come after the symbol versioning initialization in
           _dl_check_map_versions.  */
        if (map->l_info[DT_SONAME] != NULL
            && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
                        + map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
          __rtld_static_init (map);
#endif
      }

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  _dl_audit_activity_nsid (new->l_ns, LA_ACT_CONSISTENT);
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_update (args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work, and it also means copy
     relocations of dependencies are overwritten if necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
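  /* Find the range [first, last) of objects in l_initfini that still
     need to be relocated.  */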
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
        {
          if (first == UINT_MAX)
            first = j;
          last = j + 1;
        }
      l = new->l_initfini[++j];
    }
  while (l != NULL);

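  /* Becomes nonzero once the debugger has been told (via the
     reloc_start probe) that relocation is in progress, so that
     reloc_complete is only reported if reloc_start was.  */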
  int relocation_in_progress = 0;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  for (unsigned int i = last; i-- > first; )
    {
      l = new->l_initfini[i];

      if (l->l_real->l_relocated)
        continue;

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this is the shared object we want to profile, make
             sure profiling is started.  We can find out whether this
             is necessary or not by observing the `_dl_profile_map'
             variable.  If it was NULL but is not NULL afterwards we
             must start the profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_nodelete_active = true;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  if (!_dl_find_object_update (new))
    _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                      N_ ("cannot allocate address lookup data"));

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  args->worker_continue = true;
}

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;

  args->worker_continue = false;

  {
    /* Protects global and module specific TLS state.  */
    __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

    struct dl_exception ex;
    int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);

    __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

    if (__glibc_unlikely (ex.errstring != NULL))
      /* Reraise the error.  */
      _dl_signal_exception (err, &ex, NULL);
  }

  if (!args->worker_continue)
    return;

  int mode = args->mode;
  struct link_map *new = args->map;

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
        .new = new,
        .argc = args->argc,
        .argv = args->argv,
        .env = args->env
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces are available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      GL(dl_ns)[nsid].libc_map = NULL;
      _dl_debug_update (nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
         map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
        GL(dl_ns)[args.nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          _dl_close_worker (args.map, true);

          /* All l_nodelete_pending objects should have been deleted
             at this point, which is why it is not necessary to reset
             the flag here.  */
        }

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

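  /* On the success path, the namespace must be back in a consistent
     state before the load lock is released.  */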
  const int r_state __attribute__ ((unused))
    = _dl_debug_update (args.nsid)->r_state;
  assert (r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}


void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}