/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>   /* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>

#include <dl-dst.h>
#include <dl-prop.h>


/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
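
/* Illustrative sketch: a user-level call such as

     void *handle = dlopen ("libfoo.so.1", RTLD_NOW | RTLD_GLOBAL);

   (file name hypothetical) reaches _dl_open below with FILE =
   "libfoo.so.1", MODE carrying the binding and scope flags,
   CALLER_DLOPEN set to the dlopen caller's return address, and NSID =
   __LM_ID_CALLER; _dl_open and dl_open_worker then fill in the
   remaining fields of this structure.  */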

/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                    N_ ("cannot extend global scope"));
}

/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc
     was loaded) the memory it uses was allocated by the malloc stub
     in ld.so.  By the time we get here those functions are no longer
     used; instead the malloc implementation of libc is used.  But
     this means the block from the main map cannot be used in a
     realloc call.  Therefore we allocate a completely new array the
     first time we have to add something to the global scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
                              &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0;      /* 0 means no new allocation.  */
  void *old_global = NULL;        /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
                              ns->_ns_global_scope_pending_adds,
                              &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
        add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
        add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
         malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
                                  &allocation_size))
        add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
        add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
              ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}

/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;

          /* The array has been resized by add_to_global_resize.  */
          assert (new_nlist < ns->_ns_global_scope_alloc);

          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
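
/* Taken together, add_to_global_resize and add_to_global_update form
   a two-phase commit: the first phase performs every fallible memory
   allocation, the second phase publishes the new entries and cannot
   fail.  dl_open_worker below relies on this split, roughly:

     add_to_global_resize (new);   (may raise an exception)
       ... demarcation point: no recoverable errors past this ...
     add_to_global_update (new);   (cannot fail)

   The same pattern recurs in resize_scopes/update_scopes and
   resize_tls_slotinfo/update_tls_slotinfo below.  */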

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the link map whose object contains ADDR.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
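
/* For example, dl_open_worker below uses this function to map the
   return address of the dlopen caller back to the object that issued
   the call, roughly:

     struct link_map *l
       = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

   so that DST expansion and RUNPATH/RPATH searches can be performed
   relative to the calling object.  */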

/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}

/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object was
         not loaded by this dlopen call.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The l_scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < array_length (imap->l_scope_mem))
                {
                  /* If the current l_scope memory is not pointing to
                     the static memory in the structure, but the
                     static memory in the structure is large enough to
                     use for cnt + 1 scope entries, then switch to
                     using the static memory.  */
                  new_size = array_length (imap->l_scope_mem);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy the array and the terminating NULL.  */
              memcpy (newp, imap->l_scope,
                      (cnt + 1) * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }
        }
    }
}

/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          /* Assert that resize_scopes has sufficiently enlarged the
             array.  */
          assert (cnt + 1 < imap->l_scope_max);

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset CNT + 1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          from_scope = cnt;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }
}

/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        {
          _dl_add_to_slotinfo (imap, false);
          any_tls = true;
        }
    }
  return any_tls;
}

/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        {
          _dl_add_to_slotinfo (imap, true);

          if (imap->l_need_tls_init
              && first_static_tls == new->l_searchlist.r_nlist)
            first_static_tls = i;
        }
    }

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_relaxed (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static TLS data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */

          /* FIXME: This can terminate the process on memory
             allocation failure.  It is not possible to raise
             exceptions from this context; to fix this bug,
             _dl_update_slotinfo would have to be split into two
             operations, similar to resize_scopes and update_scopes
             above.  This is related to bug 16134.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          dl_init_static_tls (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }
}

/* Mark the objects as NODELETE if required.  This is delayed until
   dlopen failure is no longer possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
        if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
          _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
                            l->l_name, l->l_ns);

        /* The flag can already be true at this point, e.g. a signal
           handler may have triggered lazy binding and set NODELETE
           status immediately.  */
        l->l_nodelete_active = true;

        /* This is just a debugging aid, to indicate that
           activate_nodelete has run for this map.  */
        l->l_nodelete_pending = false;
      }
}
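
/* Sketch of the NODELETE life cycle as implemented below: a call
   such as

     dlopen ("libplugin.so", RTLD_NOW | RTLD_NODELETE);

   (file name hypothetical) first sets l_nodelete_pending on the
   directly loaded map.  Only after the demarcation point in
   dl_open_worker does activate_nodelete promote this to
   l_nodelete_active, so a failing dlopen can still unload the object
   via _dl_close_worker.  */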

/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  struct link_map *new;
  int argc;
  char **argv;
  char **env;
};

static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
         namespace but it is not so far, prepare to add it now.  This
         can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
         was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
        {
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
              && !new->l_nodelete_active)
            _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
                              new->l_name, new->l_ns);
          new->l_nodelete_active = true;
        }

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_update (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
        struct link_map *map = new->l_searchlist.r_list[i]->l_real;
        _dl_check_map_versions (map, 0, 0);
#ifndef SHARED
        /* During static dlopen, check if ld.so has been loaded.
           Perform partial initialization in this case.  This must
           come after the symbol versioning initialization in
           _dl_check_map_versions.  */
        if (map->l_info[DT_SONAME] != NULL
            && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
                        + map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
          __rtld_static_init (map);
#endif
      }

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
        {
          if (first == UINT_MAX)
            first = j;
          last = j + 1;
        }
      l = new->l_initfini[++j];
    }
  while (l != NULL);

  int relocation_in_progress = 0;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  for (unsigned int i = last; i-- > first; )
    {
      l = new->l_initfini[i];

      if (l->l_real->l_relocated)
        continue;

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this is the shared object we want to profile, make
             sure profiling is started.  We can find out whether this
             is necessary by observing the `_dl_profile_map' variable.
             If it was NULL before but is not NULL afterwards, we must
             start profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_nodelete_active = true;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
        .new = new,
        .argc = args->argc,
        .argv = args->argv,
        .env = args->env
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
         map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
        GL(dl_ns)[nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          _dl_close_worker (args.map, true);

          /* All l_nodelete_pending objects should have been deleted
             at this point, which is why it is not necessary to reset
             the flag here.  */
        }

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
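
/* Illustrative sketch: dlmopen (LM_ID_NEWLM, "libbar.so", RTLD_NOW)
   (file name hypothetical) enters _dl_open with NSID = LM_ID_NEWLM,
   which allocates a fresh namespace above, whereas plain dlopen
   passes NSID = __LM_ID_CALLER and dl_open_worker resolves it to the
   caller's namespace.  */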


void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}