/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>

/* Type of the destructor functions (DT_FINI and DT_FINI_ARRAY entries).  */
typedef void (*fini_t) (void);


/* Special l_idx value used to mark objects that are still in use and
   therefore remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }
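
  /* Walk backwards from IDX looking for the new highest module ID that
     is still in use.  In the first list element (disp == 0) the scan
     must stop above the modules in the static TLS block
     (1 + GL(dl_tls_static_nelem)); in later elements it may go all the
     way down to the element's first slot.  */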
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));
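
      /* The ELF gABI requires DT_FINI_ARRAY entries to run in the
         reverse of the order in which they appear in the array, hence
         the downward loop.  */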
      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}

void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));
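
  /* What follows is in effect a mark pass over the namespace: every
     map that is still referenced (directly opened, marked nodelete, or
     with pending TLS destructors) is marked used, and the mark is
     propagated through dependency and relocation-dependency edges.
     Whenever a mark reaches an object whose index was already
     processed, DONE_INDEX is moved back so that object's own
     dependencies are scanned again.  */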

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
                 used + (nsid == LM_ID_BASE), true);
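
  /* After sorting, dependencies appear later in MAPS than the objects
     that depend on them, so the loop below runs an object's destructors
     before the destructors of anything it still needs.  */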

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    {
                      struct auditstate *state
                        = link_map_audit_state (imap, cnt);
                      /* Return value is ignored.  */
                      (void) afct->objclose (&state->cookie);
                    }

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;
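
              /* This relies on the layout produced when the dependency
                 list was built: a second, search-ordered copy of the
                 list is stored right after the NULL terminator of
                 l_initfini, so it can serve as the r_list of a search
                 list without a separate allocation.  */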
              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
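          /* Each l_scope entry that is not this map's own symbolic
             search list points at the l_searchlist member embedded in
             some link map, so the owning map can be recovered with a
             container_of-style offsetof computation, as done below.  */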
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_DELETE);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
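
  /* TLS_FREE_START/END track a single contiguous range of static TLS
     space belonging to unloaded modules.  It can only be handed back
     (by lowering GL(dl_tls_static_used)) if it ends exactly at the
     currently used boundary; per the comments below, a range that
     cannot be freed right away is leaked.  */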

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
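                  /* With TLS_TCB_AT_TP, l_tls_offset is a distance below
                     the thread pointer and a module's block occupies
                     [l_tls_offset - l_tls_blocksize, l_tls_offset); with
                     TLS_DTV_AT_TP the block lies above it, starting at
                     l_tls_firstbyte_offset and ending at l_tls_offset
                     + l_tls_blocksize.  That is why the two branches
                     below extend the chunk in opposite directions.  */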
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report "
                          "as described in " REPORT_BUGS_TO ".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* If head is NULL, the namespace has become empty, and the
         audit interface does not give us a way to signal
         LA_ACT_CONSISTENT for it because the first loaded module is
         used to identify the namespace.

         Furthermore, do not notify auditors of the cleanup of a
         failed audit module loading attempt.  */
      if (head != NULL && head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}
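

/* Entry point reached from the public dlclose: serialize against
   concurrent dlopen/dlclose calls via GL(dl_load_lock), perform a few
   sanity checks on MAP, and delegate the actual unloading to
   _dl_close_worker.  */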
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}