/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>   /* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>

#include <dl-dst.h>
#include <dl-prop.h>

/* We must be careful not to leave the dynamic loader in an
   inconsistent state.  Thus we catch any error and re-raise it after
   cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
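
/* Illustrative sketch (not part of the implementation): a plain call
   such as

     void *handle = dlopen ("libfoo.so", RTLD_NOW | RTLD_GLOBAL);

   reaches _dl_open below with file set to "libfoo.so", mode
   containing RTLD_NOW | RTLD_GLOBAL, caller_dlopen set to the
   caller's return address, and nsid equal to __LM_ID_CALLER.  The
   name "libfoo.so" is hypothetical and used only for illustration.  */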

/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                    N_("cannot extend global scope"));
}

/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before the libc
     is loaded) the memory it uses is allocated by the malloc() stub
     in the ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of the libc is
     used.  But this means the block from the main map cannot be used
     in a realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the local
     scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
                              &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0; /* 0 means no new allocation.  */
  void *old_global = NULL; /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
                              ns->_ns_global_scope_pending_adds,
                              &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
        add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
        add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
         malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
                                  &allocation_size))
        add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
        add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
              ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
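
/* Worked example of the growth policy above (illustrative numbers):
   with r_nlist = 4 in the main searchlist, to_add = 3, and no prior
   allocation, required_new_size is 7 and the first array is sized to
   7 + 8 = 15 entries; once that allocation is exceeded, the new size
   is required_new_size * 2 instead, giving amortized constant-time
   growth.  */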

/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;

          /* The array has been resized by add_to_global_resize.  */
          assert (new_nlist < ns->_ns_global_scope_alloc);

          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
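
/* A minimal usage sketch, mirroring what dl_open_worker does below:
   the caller's return address identifies the DSO that issued the
   call, e.g.

     struct link_map *l
       = _dl_find_dso_for_object ((ElfW(Addr))
                                  __builtin_return_address (0));

   where a NULL result means the address does not fall inside any
   object loaded in any namespace.  */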

/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  for (size_t cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt = 0;
  while (map->l_scope[cnt] != NULL)
    ++cnt;
  return cnt;
}

/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The l_scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < array_length (imap->l_scope_mem))
                {
                  /* If the current l_scope memory is not pointing to
                     the static memory in the structure, but the
                     static memory in the structure is large enough to
                     use for cnt + 1 scope entries, then switch to
                     using the static memory.  */
                  new_size = array_length (imap->l_scope_mem);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy the array and the terminating NULL.  */
              memcpy (newp, imap->l_scope,
                      (cnt + 1) * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }
        }
    }
}

/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          /* Assert that resize_scopes has sufficiently enlarged the
             array.  */
          assert (cnt + 1 < imap->l_scope_max);

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset CNT + 1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          from_scope = cnt;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }
}
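
/* The resize_scopes/update_scopes pair above is an instance of the
   allocate-then-commit idiom used throughout this file.  A sketch of
   the pattern, where resize_X/update_X stand for any such pair:

     resize_X (new);    -- may allocate memory, may raise an exception
     ...demarcation point: no recoverable errors allowed past here...
     update_X (new);    -- publishes the new state, cannot fail

   so a malloc failure during the first stage leaves the old state
   fully intact.  */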

/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        {
          _dl_add_to_slotinfo (imap, false);
          any_tls = true;
        }
    }
  return any_tls;
}

/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        {
          _dl_add_to_slotinfo (imap, true);

          if (imap->l_need_tls_init
              && first_static_tls == new->l_searchlist.r_nlist)
            first_static_tls = i;
        }
    }

  if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));

  /* We need a second pass for static TLS data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */

          /* FIXME: This can terminate the process on memory
             allocation failure.  It is not possible to raise
             exceptions from this context; to fix this bug,
             _dl_update_slotinfo would have to be split into two
             operations, similar to resize_scopes and update_scopes
             above.  This is related to bug 16134.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          GL(dl_init_static_tls) (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }
}
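
/* Illustrative trigger for the TLS code paths above (hypothetical
   example): dlopening a DSO that defines a thread-local variable,
   e.g.

     __thread int counter;

   gives that object a nonzero l_tls_blocksize, so resize_tls_slotinfo
   and update_tls_slotinfo run for it and the TLS generation counter
   is bumped.  */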

/* Mark the objects as NODELETE if required.  This is delayed until
   dlopen failure is no longer possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
        if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
          _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
                            l->l_name, l->l_ns);

        /* The flag can already be true at this point, e.g. a signal
           handler may have triggered lazy binding and set NODELETE
           status immediately.  */
        l->l_nodelete_active = true;

        /* This is just a debugging aid, to indicate that
           activate_nodelete has run for this map.  */
        l->l_nodelete_pending = false;
      }
}
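
/* Sketch of how NODELETE status is typically requested
   (illustrative; "libplugin.so" is a hypothetical name):

     void *handle = dlopen ("libplugin.so", RTLD_NOW | RTLD_NODELETE);

   The flag is recorded as l_nodelete_pending while failure is still
   possible, and activate_nodelete above commits it to
   l_nodelete_active once the dlopen can no longer fail.  */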

/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  struct link_map *new;
  int argc;
  char **argv;
  char **env;
};

static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}
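
/* Illustrative use, as in dl_open_worker below: passing a NULL
   exception pointer runs the callback with exception handling
   disabled, so an error raised from an ELF constructor terminates
   the process instead of unwinding through dlopen:

     _dl_catch_exception (NULL, call_dl_init, &init_args);  */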

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
         namespace but it is not so far, prepare to add it now.  This
         can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
         was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
        {
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
              && !new->l_nodelete_active)
            _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
                              new->l_name, new->l_ns);
          new->l_nodelete_active = true;
        }

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_update (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
                                     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
        {
          if (first == UINT_MAX)
            first = j;
          last = j + 1;
        }
      l = new->l_initfini[++j];
    }
  while (l != NULL);

  int relocation_in_progress = 0;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  for (unsigned int i = last; i-- > first; )
    {
      l = new->l_initfini[i];

      if (l->l_real->l_relocated)
        continue;

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this is the shared object we want to profile, make
             sure the profiling is started.  We can find out whether
             this is necessary or not by observing the `_dl_profile_map'
             variable.  If it was NULL but is not NULL afterwards we must
             start the profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_nodelete_active = true;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
#ifdef SHARED
      bool initial = libc_map->l_ns == LM_ID_BASE;
#else
      /* In the static case, there is only one namespace, but it
         contains a secondary libc (the primary libc is statically
         linked).  */
      bool initial = false;
#endif
      _dl_call_libc_early_init (libc_map, initial);
    }

#ifndef SHARED
  DL_STATIC_INIT (new);
#endif

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
        .new = new,
        .argc = args->argc,
        .argv = args->argv,
        .env = args->env
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
         map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
        GL(dl_ns)[nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          /* Maybe some of the modules which were loaded use TLS.
             Since it will be removed in the following _dl_close call
             we have to mark the dtv array as having gaps to fill the
             holes.  This is a pessimistic assumption which won't hurt
             if not true.  There is no need to do this when we are
             loading the auditing DSOs since TLS has not yet been set
             up.  */
          if ((mode & __RTLD_AUDIT) == 0)
            GL(dl_tls_dtv_gaps) = true;

          _dl_close_worker (args.map, true);

          /* All l_nodelete_pending objects should have been deleted
             at this point, which is why it is not necessary to reset
             the flag here.  */
        }

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
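
/* Illustrative entry points (not part of this file): dlopen and
   dlmopen both funnel into _dl_open above.  For example,

     void *h = dlmopen (LM_ID_NEWLM, "libbar.so", RTLD_NOW);

   requests a fresh namespace via LM_ID_NEWLM; "libbar.so" is a
   hypothetical name used only for illustration.  */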

void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}
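
/* Hypothetical example of the output produced above with scope
   debugging enabled (actual names and formatting depend on the
   process and glibc version):

     $ LD_DEBUG=scopes ./main
     object=./libdep.so [0]
      scope 0: ./main libdep.so libc.so.6 ld-linux-x86-64.so.2

   One "scope N:" line is printed per search list in l_scope.  */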
954 | |