/* Hash table for TLS descriptors.
   Copyright (C) 2005-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Alexandre Oliva <aoliva@redhat.com>

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef TLSDESCHTAB_H
# define TLSDESCHTAB_H 1

#include <atomic.h>

# ifdef SHARED

#  include <inline-hashtab.h>

inline static int
hash_tlsdesc (void *p)
{
  struct tlsdesc_dynamic_arg *td = p;

  /* We know all entries are for the same module, so ti_offset is the
     only distinguishing field.  */
  return td->tlsinfo.ti_offset;
}

inline static int
eq_tlsdesc (void *p, void *q)
{
  struct tlsdesc_dynamic_arg *tdp = p, *tdq = q;

  return tdp->tlsinfo.ti_offset == tdq->tlsinfo.ti_offset;
}

inline static size_t
map_generation (struct link_map *map)
{
  size_t idx = map->l_tls_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  /* Find the place in the dtv slotinfo list.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        {
          /* We should never get here for a module in static TLS, so
             we can assume that, if the generation count is zero, we
             still haven't determined the generation count for this
             module.  */
          if (listp->slotinfo[idx].map == map && listp->slotinfo[idx].gen)
            return listp->slotinfo[idx].gen;
          else
            break;
        }
      idx -= listp->len;
      listp = listp->next;
    }
  while (listp != NULL);

  /* If we get to this point, the module still hasn't been assigned an
     entry in the dtv slotinfo data structures; it will be assigned one
     once relocation processing completes.  At that point, the module
     will get a generation number that is one past the current
     generation, so return exactly that.  */
  return GL(dl_tls_generation) + 1;
}

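/* Return the address of the tlsdesc_dynamic_arg structure describing
   MAP and TI_OFFSET, creating one and entering it into MAP's hash
   table if it is not there yet.  Returns 0 (NULL) if memory
   allocation fails.  */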
void *
_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
{
  struct hashtab *ht;
  void **entry;
  struct tlsdesc_dynamic_arg *td, test;

  /* FIXME: We could use a per-map lock here, but is it worth it?  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  ht = map->l_mach.tlsdesc_table;
  if (! ht)
    {
      ht = htab_create ();
      if (! ht)
        {
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
          return 0;
        }
      map->l_mach.tlsdesc_table = ht;
    }

  test.tlsinfo.ti_module = map->l_tls_modid;
  test.tlsinfo.ti_offset = ti_offset;
  entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc);
  if (! entry)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return 0;
    }

  if (*entry)
    {
      td = *entry;
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return td;
    }

  td = malloc (sizeof (struct tlsdesc_dynamic_arg));
  if (! td)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return 0;
    }
  *entry = td;
  /* This may be higher than the map's generation, but it doesn't
     matter much.  Worst case, we'll have one extra DTV update per
     thread.  */
  td->gen_count = map_generation (map);
  td->tlsinfo = test.tlsinfo;

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
  return td;
}
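
/* A sketch (assumption: names and relocation handling are modeled on
   the x86_64 port; other ports differ in detail) of how
   machine-dependent relocation code typically consumes the value
   returned above when a TLS descriptor cannot be resolved to static
   TLS:

     struct tlsdesc volatile *td = (struct tlsdesc volatile *) reloc_addr;

     td->arg = _dl_make_tlsdesc_dynamic (sym_map,
                                         sym->st_value + reloc->r_addend);
     td->entry = _dl_tlsdesc_dynamic;

   The dynamic trampoline later reaches the hash-table entry created
   here through td->arg, and uses td->tlsinfo and td->gen_count to
   decide whether the thread's DTV is current or __tls_get_addr must
   be called.  */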

# endif /* SHARED */

/* The idea of the following two functions is to stop multiple threads
   from attempting to resolve the same TLS descriptor without busy
   waiting.  Ideally, we should be able to release the lock right
   after changing td->entry, and then use, say, a condition variable
   or a futex wake to wake up any waiting threads, but let's try to
   avoid introducing such dependencies.  */

static int
__attribute__ ((unused))
_dl_tlsdesc_resolve_early_return_p (struct tlsdesc volatile *td, void *caller)
{
  /* If td->entry no longer matches CALLER, some other thread has
     already started (or even finished) resolving this descriptor.  */
  if (caller != atomic_load_relaxed (&td->entry))
    return 1;

  __rtld_lock_lock_recursive (GL(dl_load_lock));
  /* Re-check with the lock held: the descriptor may have been
     resolved while we were waiting for the lock.  */
  if (caller != atomic_load_relaxed (&td->entry))
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return 1;
    }

  /* Keep holding the lock and divert other threads that hit this
     descriptor to _dl_tlsdesc_resolve_hold until resolution is
     complete.  */
  atomic_store_relaxed (&td->entry, _dl_tlsdesc_resolve_hold);

  return 0;
}

static void
__attribute__ ((unused))
_dl_tlsdesc_wake_up_held_fixups (void)
{
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
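
/* Illustrative pairing of the two helpers above; the function name is
   hypothetical, the real callers live in the architecture-specific
   tlsdesc.c files.  CALLER is the value of td->entry that the assembly
   trampoline observed before branching into the fixup code:

     void
     _dl_tlsdesc_example_fixup (struct tlsdesc volatile *td, void *caller)
     {
       if (_dl_tlsdesc_resolve_early_return_p (td, caller))
         return;

       ... look up the symbol, then store the final td->arg and the
       final td->entry ...

       _dl_tlsdesc_wake_up_held_fixups ();
     }
   */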

#endif /* TLSDESCHTAB_H */