/* pthread_cond_common -- shared code for condition variables.
   Copyright (C) 2016-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

#include <atomic.h>
#include <stdint.h>
#include <pthread.h>

/* The 3 least-significant bits of __wrefs are needed for flags, so only the
   remaining 29 bits are available for counting waiters; this bounds the
   maximum size of a group. */
#define __PTHREAD_COND_MAX_GROUP_SIZE ((unsigned) 1 << 29)

#if __HAVE_64B_ATOMICS == 1

static uint64_t __attribute__ ((unused))
__condvar_load_wseq_relaxed (pthread_cond_t *cond)
{
  return atomic_load_relaxed (&cond->__data.__wseq);
}

static uint64_t __attribute__ ((unused))
__condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val)
{
  return atomic_fetch_add_acquire (&cond->__data.__wseq, val);
}

static uint64_t __attribute__ ((unused))
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
{
  return atomic_fetch_xor_release (&cond->__data.__wseq, val);
}

static uint64_t __attribute__ ((unused))
__condvar_load_g1_start_relaxed (pthread_cond_t *cond)
{
  return atomic_load_relaxed (&cond->__data.__g1_start);
}

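/* A note on why a plain load followed by a store (rather than an atomic
   read-modify-write) is sufficient below: __g1_start is a single-writer
   counter. It is only ever modified by a signaler that holds the
   condvar-internal lock (see __condvar_quiesce_and_switch_g1), so there is
   no concurrent writer to race with; concurrent readers only require that
   the individual load and store are atomic. */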
static void __attribute__ ((unused))
__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val)
{
  atomic_store_relaxed (&cond->__data.__g1_start,
      atomic_load_relaxed (&cond->__data.__g1_start) + val);
}

#else

/* We use two 64b counters: __wseq and __g1_start. They are monotonically
   increasing and single-writer-multiple-readers counters, so we can implement
   load, fetch-and-add, and fetch-and-xor operations even when we just have
   32b atomics. Values we add or xor are less than or equal to 1<<31 (*),
   so we only have to make overflow-and-addition atomic wrt. concurrent load
   and xor operations. To do that, we split each counter into two 32b values
   of which we reserve the MSB of each to represent an overflow from the
   lower-order half to the higher-order half.

   In the common case, the state is (higher-order / lower-order half, and . is
   basically concatenation of the bits):
   0.h     / 0.l  = h.l

   When we add a value of x that overflows (i.e., 0.l + x == 1.L), we run the
   following steps S1-S4 (the values these represent are on the right-hand
   side):
   S1:  0.h     / 1.L == (h+1).L
   S2:  1.(h+1) / 1.L == (h+1).L
   S3:  1.(h+1) / 0.L == (h+1).L
   S4:  0.(h+1) / 0.L == (h+1).L
   If the MSB of the higher-order half is set, readers will ignore the
   overflow bit in the lower-order half.

   To get an atomic snapshot in load operations, we exploit that the
   higher-order half is monotonically increasing; if we load a value V from
   it, then read the lower-order half, and then read the higher-order half
   again and see the same value V, we know that both halves have existed in
   the sequence of values the full counter had. This is similar to the
   validated reads in the time-based STMs in GCC's libitm (e.g.,
   method_ml_wt).

   The xor operation needs to be an atomic read-modify-write. The write
   itself is not an issue as it affects just the lower-order half but not bits
   used in the add operation. To make the full fetch-and-xor atomic, we
   exploit that concurrently, the value can increase by at most 1<<31 (*): The
   xor operation is only called while having acquired the lock, so not more
   than __PTHREAD_COND_MAX_GROUP_SIZE waiters can enter concurrently and thus
   increment __wseq. Therefore, if the xor operation observes a value of
   __wseq, then the value it applies the modification to later on can be
   derived (see below).

   One benefit of this scheme is that it makes load operations
   obstruction-free: unlike with a scheme that simply locks the counter,
   readers can almost always interpret a snapshot of the two halves. Readers
   can be forced to read a new snapshot only when the read is concurrent with
   an overflow. However, overflows will happen infrequently, so load
   operations are practically lock-free.

   (*) The highest value we add is __PTHREAD_COND_MAX_GROUP_SIZE << 2 to
   __g1_start (the two extra bits are for the lock in the two LSBs of
   __g1_start). */

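/* To make the overflow steps concrete, here is an illustrative trace with
   made-up numbers (a sketch, not part of the implementation). Assume the
   counter currently represents h.l with h == 5 and l == 2^31 - 2, and we add
   x == 3, which overflows the 31-bit lower-order half (the new low data bits
   are L == 1, and the represented value becomes 6.1):
   S1:  low  = 1.1 (overflow bit set), high = 0.5  -- reads as (5+1).1
   S2:  high = 1.6 (overflow bit set), low  = 1.1  -- reads as 6.1
   S3:  low  = 0.1,                    high = 1.6  -- reads as 6.1
   S4:  high = 0.6,                    low  = 0.1  -- reads as 6.1
   At every step, a reader that obtains a consistent snapshot (see
   __condvar_load_64_relaxed below) decodes the same value 6.1. */
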
typedef struct
{
  unsigned int low;
  unsigned int high;
} _condvar_lohi;

static uint64_t
__condvar_fetch_add_64_relaxed (_condvar_lohi *lh, unsigned int op)
{
  /* S1. Note that this is an atomic read-modify-write so it extends the
     release sequence of release MO store at S3. */
  unsigned int l = atomic_fetch_add_relaxed (&lh->low, op);
  unsigned int h = atomic_load_relaxed (&lh->high);
  uint64_t result = ((uint64_t) h << 31) | l;
  l += op;
  if ((l >> 31) > 0)
    {
      /* Overflow. Need to increment higher-order half. Note that all
         add operations are ordered in happens-before. */
      h++;
      /* S2. Release MO to synchronize with the loads of the higher-order half
         in the load operation. See __condvar_load_64_relaxed. */
      atomic_store_release (&lh->high, h | ((unsigned int) 1 << 31));
      l ^= (unsigned int) 1 << 31;
      /* S3. See __condvar_load_64_relaxed. */
      atomic_store_release (&lh->low, l);
      /* S4. Likewise. */
      atomic_store_release (&lh->high, h);
    }
  return result;
}

static uint64_t
__condvar_load_64_relaxed (_condvar_lohi *lh)
{
  unsigned int h, l, h2;
  do
    {
      /* This load and the second one below to the same location read from the
         stores in the overflow handling of the add operation or the
         initializing stores (which is a simple special case because
         initialization always completely happens before further use).
         Because no two stores to the higher-order half write the same value,
         the loop ensures that if we continue to use the snapshot, this load
         and the second one read from the same store operation. All candidate
         store operations have release MO.
         If we read from S2 in the first load, then we will see the value of
         S1 on the next load (because we synchronize with S2), or a value
         later in modification order. We correctly ignore the lower-half's
         overflow bit in this case. If we read from S4, then we will see the
         value of S3 in the next load (or a later value), which does not have
         the overflow bit set anymore.  */
      h = atomic_load_acquire (&lh->high);
      /* This will read from the release sequence of S3 (i.e., either the S3
         store or the read-modify-writes at S1 following S3 in modification
         order). Thus, the read synchronizes with S3, and the following load
         of the higher-order half will read from the matching S2 (or a later
         value).
         Thus, if we read a lower-half value here that already overflowed and
         belongs to an increased higher-order half value, we will see the
         latter and h and h2 will not be equal. */
      l = atomic_load_acquire (&lh->low);
      /* See above. */
      h2 = atomic_load_relaxed (&lh->high);
    }
  while (h != h2);
  if (((l >> 31) > 0) && ((h >> 31) > 0))
    l ^= (unsigned int) 1 << 31;
  return ((uint64_t) (h & ~((unsigned int) 1 << 31)) << 31) + l;
}

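/* Continuing the illustrative trace given above (hypothetical numbers): a
   reader that loads the higher-order half and sees 1.6 (the S2 store), then
   loads the lower-order half, and then sees 0.6 (the S4 store) in the second
   load of the higher-order half will retry because the two higher-order
   values differ. If it instead sees 1.6 twice, the snapshot is consistent:
   with l == 1.1 both overflow bits are set, so the lower-half overflow bit
   is cleared and the result is (6 << 31) + 1; with l == 0.1 the result is
   the same. Either way the reader decodes 6.1. */
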
static uint64_t __attribute__ ((unused))
__condvar_load_wseq_relaxed (pthread_cond_t *cond)
{
  return __condvar_load_64_relaxed ((_condvar_lohi *) &cond->__data.__wseq32);
}

static uint64_t __attribute__ ((unused))
__condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val)
{
  uint64_t r = __condvar_fetch_add_64_relaxed
      ((_condvar_lohi *) &cond->__data.__wseq32, val);
  atomic_thread_fence_acquire ();
  return r;
}

static uint64_t __attribute__ ((unused))
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
{
  _condvar_lohi *lh = (_condvar_lohi *) &cond->__data.__wseq32;
  /* First, get the current value. See __condvar_load_64_relaxed. */
  unsigned int h, l, h2;
  do
    {
      h = atomic_load_acquire (&lh->high);
      l = atomic_load_acquire (&lh->low);
      h2 = atomic_load_relaxed (&lh->high);
    }
  while (h != h2);
  if (((l >> 31) > 0) && ((h >> 31) == 0))
    h++;
  h &= ~((unsigned int) 1 << 31);
  l &= ~((unsigned int) 1 << 31);

  /* Now modify. Due to the coherence rules, the prior load will read a value
     earlier in modification order than the following fetch-xor.
     This uses release MO to make the full operation have release semantics
     (all other operations access the lower-order half). */
  unsigned int l2 = atomic_fetch_xor_release (&lh->low, val)
      & ~((unsigned int) 1 << 31);
  if (l2 < l)
    /* The lower-order half overflowed in the meantime. This happened exactly
       once due to the limit on concurrent waiters (see above). */
    h++;
  return ((uint64_t) h << 31) + l2;
}

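/* An illustrative instance of the overflow check above (hypothetical
   numbers): suppose the snapshot loop observed h == 4 and l == 2^31 - 2
   (no overflow bits set). Before our fetch-xor executes, concurrent waiters
   increment __wseq past the wrap of the lower-order half, so the fetch-xor
   observes a lower-order value whose masked data bits are, say, l2 == 6.
   Since l2 < l, exactly one wrap must have happened (more is impossible
   because we hold the lock and at most __PTHREAD_COND_MAX_GROUP_SIZE waiters
   can arrive concurrently), so h is incremented to 5 and the function
   returns (5 << 31) + 6. */
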
static uint64_t __attribute__ ((unused))
__condvar_load_g1_start_relaxed (pthread_cond_t *cond)
{
  return __condvar_load_64_relaxed
      ((_condvar_lohi *) &cond->__data.__g1_start32);
}

static void __attribute__ ((unused))
__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val)
{
  ignore_value (__condvar_fetch_add_64_relaxed
      ((_condvar_lohi *) &cond->__data.__g1_start32, val));
}

#endif /* !__HAVE_64B_ATOMICS */


/* The lock that signalers use. See pthread_cond_wait_common for uses.
   The lock is our normal three-state lock: not acquired (0) / acquired (1) /
   acquired-with-futex_wake-request (2). However, we need to preserve the
   other bits in the unsigned int used for the lock, and therefore it is a
   little more complex. */
static void __attribute__ ((unused))
__condvar_acquire_lock (pthread_cond_t *cond, int private)
{
  unsigned int s = atomic_load_relaxed (&cond->__data.__g1_orig_size);
  while ((s & 3) == 0)
    {
      if (atomic_compare_exchange_weak_acquire (&cond->__data.__g1_orig_size,
          &s, s | 1))
        return;
      /* TODO Spinning and back-off. */
    }
  /* We could not change the state from not acquired to acquired directly, so
     we have to block. Try to change the state to
     acquired-with-futex_wake-request (which also acquires the lock if it
     happens to be free at that point), and do a futex wait whenever the lock
     remains held by another thread. */
  while (1)
    {
      while ((s & 3) != 2)
        {
          if (atomic_compare_exchange_weak_acquire
              (&cond->__data.__g1_orig_size, &s, (s & ~(unsigned int) 3) | 2))
            {
              if ((s & 3) == 0)
                return;
              break;
            }
          /* TODO Back off. */
        }
      futex_wait_simple (&cond->__data.__g1_orig_size,
          (s & ~(unsigned int) 3) | 2, private);
      /* Reload so we see a recent value. */
      s = atomic_load_relaxed (&cond->__data.__g1_orig_size);
    }
}

/* See __condvar_acquire_lock. */
static void __attribute__ ((unused))
__condvar_release_lock (pthread_cond_t *cond, int private)
{
  if ((atomic_fetch_and_release (&cond->__data.__g1_orig_size,
                                 ~(unsigned int) 3) & 3)
      == 2)
    futex_wake (&cond->__data.__g1_orig_size, 1, private);
}

/* Only use this when having acquired the lock. */
static unsigned int __attribute__ ((unused))
__condvar_get_orig_size (pthread_cond_t *cond)
{
  return atomic_load_relaxed (&cond->__data.__g1_orig_size) >> 2;
}

/* Only use this when having acquired the lock. */
static void __attribute__ ((unused))
__condvar_set_orig_size (pthread_cond_t *cond, unsigned int size)
{
  /* We have acquired the lock, but might get one concurrent update due to a
     lock state change from acquired to acquired-with-futex_wake-request.
     The store with relaxed MO is fine because there will be no further
     changes to the lock bits nor the size, and we will subsequently release
     the lock with release MO. */
  unsigned int s;
  s = (atomic_load_relaxed (&cond->__data.__g1_orig_size) & 3)
      | (size << 2);
  if ((atomic_exchange_relaxed (&cond->__data.__g1_orig_size, s) & 3)
      != (s & 3))
    atomic_store_relaxed (&cond->__data.__g1_orig_size, (size << 2) | 2);
}

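/* For illustration (hypothetical value): __g1_orig_size == (5 << 2) | 1
   encodes that G1 originally consisted of 5 waiters (bits 2 and up) and that
   the internal lock is currently acquired without a pending futex_wake
   request (the two least-significant lock bits are 01). */
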
/* Returns FUTEX_SHARED or FUTEX_PRIVATE based on the provided __wrefs
   value. */
static int __attribute__ ((unused))
__condvar_get_private (int flags)
{
  if ((flags & __PTHREAD_COND_SHARED_MASK) == 0)
    return FUTEX_PRIVATE;
  else
    return FUTEX_SHARED;
}

/* This closes G1 (whose index is in G1INDEX), waits for all futex waiters to
   leave G1, converts G1 into a fresh G2, and then switches group roles so that
   the former G2 becomes the new G1 ending at the current __wseq value when we
   eventually make the switch (WSEQ is just an observation of __wseq by the
   signaler).
   If G2 is empty, it will not switch groups because then it would create an
   empty G1 which would require switching groups again on the next signal.
   Returns false iff the caller does not need to provide a signal: either
   groups were not switched because G2 was empty, or the new G1 turned out to
   contain only cancelled waiters. */
static bool __attribute__ ((unused))
__condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
                                 unsigned int *g1index, int private)
{
  const unsigned int maxspin = 0;
  unsigned int g1 = *g1index;

  /* If there is no waiter in G2, we don't do anything. The expression may
     look odd but remember that __g_size might hold a negative value, so
     putting the expression this way avoids relying on implementation-defined
     behavior.
     Note that this works correctly for a zero-initialized condvar too. */
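  /* As an illustration with hypothetical numbers: if seven waiters have
     arrived so far (wseq == 7), the current G1 started at position 3
     (old_g1_start == 3) with old_orig_size == 2, then positions 5 and 6
     belong to G2, i.e. wseq - old_g1_start - old_orig_size == 2 waiters by
     sequence. If one of them has since been cancelled, __g_size[g1 ^ 1]
     holds the unsigned representation of -1 and the sum below is 1; if both
     were cancelled, the sum wraps around to 0 and we return false. */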
  unsigned int old_orig_size = __condvar_get_orig_size (cond);
  uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
  if (((unsigned) (wseq - old_g1_start - old_orig_size)
       + cond->__data.__g_size[g1 ^ 1]) == 0)
    return false;

  /* Now try to close and quiesce G1. We have to consider the following kinds
     of waiters:
     * Waiters from less recent groups than G1 are not affected because
       nothing will change for them apart from __g1_start getting larger.
     * New waiters arriving concurrently with the group switching will all go
       into G2 until we atomically make the switch. Waiters existing in G2
       are not affected.
     * Waiters in G1 will be closed out immediately by setting a flag in
       __g_signals, which will prevent waiters from blocking using a futex on
       __g_signals and also notifies them that the group is closed. As a
       result, they will eventually remove their group reference, allowing us
       to finish closing G1 and switch group roles. */

  /* First, set the closed flag on __g_signals. This tells waiters that are
     about to wait that they shouldn't do that anymore. This basically
     serves as an advance notification of the upcoming change to __g1_start;
     waiters interpret it as if __g1_start was larger than their waiter
     sequence position. This allows us to change __g1_start after waiting
     for all existing waiters with group references to leave, which in turn
     makes recovery after stealing a signal simpler because it then can be
     skipped if __g1_start indicates that the group is closed (otherwise,
     we would always have to recover because waiters don't know how big their
     groups are). Relaxed MO is fine. */
  atomic_fetch_or_relaxed (cond->__data.__g_signals + g1, 1);

  /* Wait until there are no group references anymore. The fetch-or operation
     injects us into the modification order of __g_refs; release MO ensures
     that waiters incrementing __g_refs after our fetch-or see the previous
     changes to __g_signals and to __g1_start that had to happen before we can
     switch this G1 and alias with an older group (we have two groups, so
     aliasing requires switching group roles twice). Note that nobody else
     can have set the wake-request flag, so we do not have to act upon it.

     Also note that it is harmless if older waiters or waiters from this G1
     get a group reference after we have quiesced the group because it will
     remain closed for them either because of the closed flag in __g_signals
     or the later update to __g1_start. New waiters will never arrive here
     but instead continue to go into the still current G2. */
  unsigned r = atomic_fetch_or_release (cond->__data.__g_refs + g1, 0);
  while ((r >> 1) > 0)
    {
      for (unsigned int spin = maxspin; ((r >> 1) > 0) && (spin > 0); spin--)
        {
          /* TODO Back off. */
          r = atomic_load_relaxed (cond->__data.__g_refs + g1);
        }
      if ((r >> 1) > 0)
        {
          /* There is still a waiter after spinning. Set the wake-request
             flag and block. Relaxed MO is fine because this is just about
             this futex word. */
          r = atomic_fetch_or_relaxed (cond->__data.__g_refs + g1, 1);

          if ((r >> 1) > 0)
            futex_wait_simple (cond->__data.__g_refs + g1, r, private);
          /* Reload here so we eventually see the most recent value even if we
             do not spin. */
          r = atomic_load_relaxed (cond->__data.__g_refs + g1);
        }
    }
  /* Acquire MO so that we synchronize with the release operation that waiters
     use to decrement __g_refs and thus happen after the waiters we waited
     for. */
  atomic_thread_fence_acquire ();

  /* Update __g1_start, which finishes closing this group. The value we add
     will never be negative because old_orig_size can only be zero when we
     switch groups the first time after a condvar was initialized, in which
     case G1 will be at index 1 and we will add a value of 1. See above for
     why this takes place after waiting for quiescence of the group.
     Relaxed MO is fine because the change comes with no additional
     constraints that others would have to observe. */
  __condvar_add_g1_start_relaxed (cond,
      (old_orig_size << 1) + (g1 == 1 ? 1 : - 1));

  /* Now reopen the group, thus enabling waiters to again block using the
     futex controlled by __g_signals. Release MO so that observers that see
     no signals (and thus can block) also see the write to __g1_start and
     thus that this is now a new group (see __pthread_cond_wait_common for
     the matching acquire MO loads). */
  atomic_store_release (cond->__data.__g_signals + g1, 0);

  /* At this point, the old G1 is now a valid new G2 (but not in use yet).
     No old waiter can grab a signal or acquire a reference without noticing
     that __g1_start is larger.
     We can now publish the group switch by flipping the G2 index in __wseq.
     Release MO so that this synchronizes with the acquire MO operation
     waiters use to obtain a position in the waiter sequence. */
  wseq = __condvar_fetch_xor_wseq_release (cond, 1) >> 1;
  g1 ^= 1;
  *g1index ^= 1;

  /* These values are just observed by signalers, and thus protected by the
     lock. */
  unsigned int orig_size = wseq - (old_g1_start + old_orig_size);
  __condvar_set_orig_size (cond, orig_size);
  /* Use an addition so that we do not lose track of cancellations in what
     was previously G2. */
  cond->__data.__g_size[g1] += orig_size;

  /* The new G1's size may be zero because of cancellations during its time
     as G2. If this happens, there are no waiters that have to receive a
     signal, so we do not need to add any and return false. */
  if (cond->__data.__g_size[g1] == 0)
    return false;

  return true;
}
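
/* For reference, a rough sketch of how the signal path drives the helpers in
   this file (simplified and abridged; see pthread_cond_signal.c for the real
   code and the precise memory-order reasoning):

     __condvar_acquire_lock (cond, private);
     uint64_t wseq = __condvar_load_wseq_relaxed (cond);
     unsigned int g1 = (wseq & 1) ^ 1;
     wseq >>= 1;
     bool do_futex_wake = false;
     if ((cond->__data.__g_size[g1] != 0)
         || __condvar_quiesce_and_switch_g1 (cond, wseq, &g1, private))
       {
         atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, 2);
         cond->__data.__g_size[g1]--;
         do_futex_wake = true;
       }
     __condvar_release_lock (cond, private);
     if (do_futex_wake)
       futex_wake (cond->__data.__g_signals + g1, 1, private);

   That is, a signal goes to the current G1 if it still has unsignaled
   waiters; otherwise the signaler first tries to turn the former G2 into a
   new nonempty G1, and drops the signal if there is no waiter to wake. */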