/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

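  /* If the closed module provided the highest dtv index in use, scan
     backwards for the next-highest slot that is still in use so that
     GL(dl_tls_max_dtv_idx) can be lowered.  In the first list element
     never look below the statically allocated TLS modules (indices up
     to GL(dl_tls_static_nelem)).  */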
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));

      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}

void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
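  /* Parallel arrays indexed like MAPS below: USED[i] is set when MAPS[i]
     must be kept; DONE[i] is set once MAPS[i] and its dependencies have
     been marked in the loop below.  */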
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
                 used + (nsid == LM_ID_BASE), true);
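  /* After sorting, an object is placed before the objects it depends on,
     so the fini calls in the loop below run an object's destructors
     before the destructors of its dependencies.  */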

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    {
                      struct auditstate *state
                        = link_map_audit_state (imap, cnt);
                      /* Return value is ignored.  */
                      (void) afct->objclose (&state->cookie);
                    }

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

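              /* _dl_map_object_deps keeps a second copy of the dependency
                 list, in symbol search order, right after the terminating
                 NULL of l_initfini; that copy can serve as the search
                 list here.  */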
              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             If we add the local search list, count it as well.  Always
             add one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries always being set either
               to the map's own l_symbolic_searchlist address, or to some
               map's l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_DELETE);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

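  /* Other threads might still be walking scope arrays that we have just
     modified or queued for freeing.  Wait until they have left their
     GSCOPE critical sections before the old memory is freed or reused.  */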
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

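  /* Bounds of a contiguous chunk of static TLS space freed by the unmapped
     objects; if the chunk ends at GL(dl_tls_static_used), that limit can be
     lowered to return the space.  */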
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_relaxed (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* If head is NULL, the namespace has become empty, and the
         audit interface does not give us a way to signal
         LA_ACT_CONSISTENT for it because the first loaded module is
         used to identify the namespace.

         Furthermore, do not notify auditors of the cleanup of a
         failed audit module loading attempt.  */
      if (head != NULL && head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif

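  /* If the highest-numbered namespace has become empty, shrink GL(dl_nns)
     so trailing empty namespaces are no longer considered.  */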
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
877