/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


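/* Editorial note (an illustrative sketch, not from the original
   sources): the dtv slotinfo entries live in a chained list of
   growing arrays, roughly

     struct dtv_slotinfo_list
     {
       size_t len;                      // entries in this chunk
       struct dtv_slotinfo_list *next;  // next chunk, or NULL
       struct dtv_slotinfo slotinfo[];  // per-module { gen, map } pairs
     };

   as evidenced by the listp->len, listp->next, and
   listp->slotinfo[...].gen/.map accesses below.  remove_slotinfo
   recurses to the chunk holding module index IDX, clears that entry,
   and then walks backwards to find the new highest used index.  The
   authoritative definition lives in the ld.so headers, not here.  */
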
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }
  /* No non-empty entry in this list element.  */
  return false;
}

/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));

      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}
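
/* Editorial note (illustrative, hypothetical user code): the
   DT_FINI_ARRAY entries invoked above typically originate from
   functions such as

     __attribute__ ((destructor))
     static void cleanup (void)
     {
       ...
     }

   which the link editor collects into .fini_array.  The loop above
   walks the array backwards, so termination functions run in the
   reverse of initialization order, as the ELF gABI requires.  */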

void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;
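  /* Editorial note (illustrative, hypothetical scenario): a destructor
     of an object being unloaded may itself call dlclose, e.g.

       __attribute__ ((destructor))
       static void fini (void)
       {
         dlclose (other_handle);
       }

     The nested call only sets dl_close_state = rerun and returns; the
     outermost invocation then repeats its garbage-collection pass via
     the "retry" label below.  */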

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);
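
  /* Editorial note (a summary of the code below, not from the original
     sources): what follows is effectively a mark-and-sweep pass.  The
     mark phase flags every map that must stay loaded (l_map_used) by
     following l_initfini and l_reldeps edges, rewinding done_index
     whenever an already-processed map becomes newly reachable; the
     sweep further down runs destructors and unmaps whatever remains
     unmarked.  */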

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);

  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_update (nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Protects global and module specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report "
                          "as described in " REPORT_BUGS_TO ".\n");
      /* Can be read concurrently.  */
      atomic_store_relaxed (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


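/* Editorial note (illustrative, hypothetical application code): a
   typical path into _dl_close below is

     void *h = dlopen ("libfoo.so", RTLD_NOW);   // l_direct_opencount 1
     ...
     dlclose (h);                                // ends up in _dl_close

   Serialization against concurrent dlopen/dlclose calls happens
   through GL(dl_load_lock), taken at the top of _dl_close.  */
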
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}