/* elision-lock.c: Elided pthread mutex lock.
   Copyright (C) 2011-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <pthread.h>
#include "pthreadP.h"
#include "lowlevellock.h"
#include "hle.h"
#include <elision-conf.h>

#if !defined(LLL_LOCK) && !defined(EXTRAARG)
/* Make sure the configuration code is always linked in for static
   libraries.  */
#include "elision-conf.c"
#endif

#ifndef EXTRAARG
#define EXTRAARG
#endif
#ifndef LLL_LOCK
#define LLL_LOCK(a,b) lll_lock(a,b), 0
#endif
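
/* Notes on the defaults above: EXTRAARG expands to nothing in the
   plain lock case; other build variants (such as the timed lock) are
   expected to re-include this file with EXTRAARG and LLL_LOCK
   redefined so that extra parameters can be threaded through.  The
   trailing ", 0" in the LLL_LOCK default uses the comma operator so
   that "return LLL_LOCK (...)" evaluates to 0 (success), because
   plain lll_lock does not produce a usable return value.  */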

#define aconf __elision_aconf

/* Adaptive lock using transactions.
   By default the lock region is run as a transaction, and when it
   aborts or the lock is busy the lock adapts itself.  */
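
/* A rough usage sketch (illustrative only; the macro and field names
   below are assumptions and may differ between glibc versions): the
   pthread_mutex_lock fast path dispatches here for elidable mutexes,
   approximately as

     lll_lock_elision (mutex->__data.__lock, mutex->__data.__elision,
                       PTHREAD_MUTEX_PSHARED (mutex));

   which expands to a call to __lll_lock_elision with the addresses of
   the lock word and the adaptation counter.  */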

int
__lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
{
  /* adapt_count can be accessed concurrently; these accesses can be both
     inside of transactions (if critical sections are nested and the outer
     critical section uses lock elision) and outside of transactions.  Thus,
     we need to use atomic accesses to avoid data races.  However, the
     value of adapt_count is just a hint, so relaxed MO accesses are
     sufficient.  */
  if (atomic_load_relaxed (adapt_count) <= 0)
    {
      unsigned status;
      int try_xbegin;

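      /* Try to run the critical section as a transaction, retrying
         up to aconf.retry_try_xbegin times as long as the hardware
         indicates that a retry might succeed.  */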
      for (try_xbegin = aconf.retry_try_xbegin;
           try_xbegin > 0;
           try_xbegin--)
        {
          if ((status = _xbegin()) == _XBEGIN_STARTED)
            {
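              /* If the lock word is free, return with the transaction
                 still active: the critical section then runs
                 transactionally, and the matching unlock path commits
                 it with _xend instead of releasing a real lock.  */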
              if (*futex == 0)
                return 0;

              /* The lock was busy.  Fall back to normal locking.
                 We could also _xend here, but an explicit xabort with
                 the 0xff code is more visible in profilers.  */
              _xabort (_ABORT_LOCK_BUSY);
            }

          if (!(status & _XABORT_RETRY))
            {
              if ((status & _XABORT_EXPLICIT)
                  && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
                {
                  /* For now we simply skip elision for a while.
                     Better would be to wait a bit and retry, which
                     likely needs some spinning.  See above for why
                     relaxed MO is sufficient.  */
                  if (atomic_load_relaxed (adapt_count)
                      != aconf.skip_lock_busy)
                    atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
                }
              /* Internal abort.  There is no chance a retry would
                 succeed.  Take the normal lock now and skip elision
                 for the next acquisitions.  Be careful to avoid
                 writing to the lock word itself.  See above for why
                 relaxed MO is sufficient.  */
              else if (atomic_load_relaxed (adapt_count)
                       != aconf.skip_lock_internal_abort)
                atomic_store_relaxed (adapt_count,
                                      aconf.skip_lock_internal_abort);
              break;
            }
        }
    }
  else
    {
      /* Use a normal lock until the threshold counter runs out.
         Lost updates are possible but harmless; adapt_count is only
         a hint.  */
      atomic_store_relaxed (adapt_count,
                            atomic_load_relaxed (adapt_count) - 1);
    }

  /* Elision failed or was skipped: take the normal lock.  */
  return LLL_LOCK ((*futex), private);
}
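
/* For context: the matching unlock path lives in elision-unlock.c.
   If the lock word is still zero there, the thread is still inside a
   transaction started above and the unlock commits it with _xend;
   otherwise the real lock was taken and is released normally.  A
   simplified sketch, not the verbatim implementation:

     int
     __lll_unlock_elision (int *lock, int private)
     {
       if (*lock == 0)
         _xend ();
       else
         lll_unlock (*lock, private);
       return 0;
     }
*/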