/* Low-level lock implementation, x86 version.
   Copyright (C) 2019-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _X86_64_LOWLEVELLOCK_H
#define _X86_64_LOWLEVELLOCK_H 1

#ifndef __ASSEMBLER__
#include <sysdeps/nptl/lowlevellock.h>
#include <single-thread.h>
/* The lll_trylock, lll_lock, and lll_unlock macros implement a
   single-thread optimization using the cmpxchgl instruction.  They check
   whether the process is single-threaded and, if so, avoid the more
   expensive lock-prefixed atomic instruction.  */

/* The single-thread optimization only works for libc itself; we need
   atomicity for libpthread in case of shared futexes.  */
#if !IS_IN (libc)
# define is_single_thread 0
#else
# define is_single_thread SINGLE_THREAD_P
#endif
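
/* If the process is single-threaded, no other thread can modify the futex
   word concurrently, so the plain (non lock-prefixed) cmpxchgl used by
   __lll_cas_lock below is sufficient; SINGLE_THREAD_P itself is just a
   cheap read of the thread descriptor.  */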

/* In __lll_cas_lock we simply return the value in %eax after the cmpxchg
   instruction.  If the operation succeeded, this value is zero.  If the
   operation failed, the cmpxchg instruction has loaded the current value
   of the memory word, which is guaranteed to be nonzero.  */
static inline int
__attribute__ ((always_inline))
__lll_cas_lock (int *futex)
{
  int ret;
  asm volatile ("cmpxchgl %2, %1"
                : "=a" (ret), "=m" (*futex)
                : "r" (1), "m" (*futex), "0" (0)
                : "memory");
  return ret;
}
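
/* For reference, __lll_cas_lock behaves roughly like the following C
   (illustration only; correctness relies on the process being
   single-threaded, so nothing else can write *futex in between):

     int old = *futex;
     if (old == 0)
       *futex = 1;
     return old;  */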

#undef lll_trylock
#define lll_trylock(lock) \
  ({ \
    int __ret; \
    if (is_single_thread) \
      __ret = __lll_cas_lock (&(lock)); \
    else \
      __ret = __lll_trylock (&(lock)); \
    __ret; \
  })
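
/* Usage sketch (illustration only): lll_trylock evaluates to zero when the
   lock was acquired and to a nonzero value when it was already held, e.g.

     int lock = LLL_LOCK_INITIALIZER;
     if (lll_trylock (lock) == 0)
       {
         ... critical section ...
         lll_unlock (lock, LLL_PRIVATE);
       }  */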

#undef lll_lock
#define lll_lock(lock, private) \
  ((void) \
   ({ \
     if (is_single_thread) \
       __lll_cas_lock (&(lock)); \
     else \
       __lll_lock (&(lock), private); \
   }))

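/* When the process is single-threaded there can be no waiters, so no futex
   wake-up is needed and a plain non-atomic decrement is enough to release
   the lock.  */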
#undef lll_unlock
#define lll_unlock(lock, private) \
  ((void) \
   ({ \
     if (is_single_thread) \
       (lock)--; \
     else \
       __lll_unlock (&(lock), private); \
   }))

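/* Lock elision (hardware transactional memory) entry points; the
   definitions live in the x86-specific elision code.  */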
extern int __lll_clocklock_elision (int *futex, short *adapt_count,
                                    clockid_t clockid,
                                    const struct __timespec64 *timeout,
                                    int private) attribute_hidden;

#define lll_clocklock_elision(futex, adapt_count, clockid, timeout, private) \
  __lll_clocklock_elision (&(futex), &(adapt_count), clockid, timeout, private)

extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
  attribute_hidden;

extern int __lll_unlock_elision (int *lock, int private)
  attribute_hidden;

extern int __lll_trylock_elision (int *lock, short *adapt_count)
  attribute_hidden;

#define lll_lock_elision(futex, adapt_count, private) \
  __lll_lock_elision (&(futex), &(adapt_count), private)
#define lll_unlock_elision(futex, adapt_count, private) \
  __lll_unlock_elision (&(futex), private)
#define lll_trylock_elision(futex, adapt_count) \
  __lll_trylock_elision (&(futex), &(adapt_count))

#endif /* !__ASSEMBLER__ */

#endif /* lowlevellock.h */