1 | /* |
 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #ifndef _I386_LOCKS_I386_INLINES_H_ |
30 | #define _I386_LOCKS_I386_INLINES_H_ |
31 | |
32 | #include <kern/locks.h> |
33 | /* |
34 | * We need only enough declarations from the BSD-side to be able to |
35 | * test if our probe is active, and to call __dtrace_probe(). Setting |
36 | * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in. |
37 | */ |
38 | #if CONFIG_DTRACE |
39 | #define NEED_DTRACE_DEFS |
40 | #include <../bsd/sys/lockstat.h> |
41 | #endif |
42 | |
43 | // Enforce program order of loads and stores. |
#define ordered_load(target) _Generic( (target),\
		uint32_t* : __c11_atomic_load((_Atomic uint32_t* )(target), memory_order_relaxed), \
		uintptr_t*: __c11_atomic_load((_Atomic uintptr_t*)(target), memory_order_relaxed) )
#define ordered_store_release(target, value) _Generic( (target),\
		uint32_t* : __c11_atomic_store((_Atomic uint32_t* )(target), (value), memory_order_release_smp), \
		uintptr_t*: __c11_atomic_store((_Atomic uintptr_t*)(target), (value), memory_order_release_smp) )
#define ordered_store_volatile(target, value) _Generic( (target),\
		volatile uint32_t* : __c11_atomic_store((_Atomic volatile uint32_t* )(target), (value), memory_order_relaxed), \
		volatile uintptr_t*: __c11_atomic_store((_Atomic volatile uintptr_t*)(target), (value), memory_order_relaxed) )
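
/*
 * Illustrative usage (a sketch, not part of the original interface): the
 * ordered_* macros are meant to wrap plain uint32_t/uintptr_t lock words.
 * The relaxed load gives an atomic, program-ordered read; the release store
 * makes earlier stores visible before the lock word changes. The helper
 * name and the 'lock_word'/'flag' parameters below are hypothetical.
 */
#if 0	/* usage sketch only; not compiled */
static inline void
example_clear_flag(uint32_t *lock_word, uint32_t flag)
{
	uint32_t state = ordered_load(lock_word);		/* relaxed atomic read */
	ordered_store_release(lock_word, state & ~flag);	/* publish with release semantics */
}
#endif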
53 | |
/*
 * Ordered accessors for the lck_mtx state word and owner field; every access
 * goes through the atomic ordered_* primitives above so program order of
 * loads and stores is preserved.
 */
55 | #define ordered_load_mtx_state(lock) ordered_load(&(lock)->lck_mtx_state) |
56 | #define ordered_store_mtx_state_release(lock, value) ordered_store_release(&(lock)->lck_mtx_state, (value)) |
57 | #define ordered_store_mtx_owner(lock, value) ordered_store_volatile(&(lock)->lck_mtx_owner, (value)) |
58 | |
59 | #if DEVELOPMENT | DEBUG |
60 | void lck_mtx_owner_check_panic(lck_mtx_t *mutex); |
61 | #endif |
62 | |
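/*
 * Release the mutex interlock: clear LCK_MTX_ILOCKED_MSK from the supplied
 * state with a release store, so stores made while holding the interlock are
 * visible before it drops, then re-enable preemption.
 */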
63 | __attribute__((always_inline)) |
64 | static inline void |
65 | lck_mtx_ilk_unlock_inline( |
66 | lck_mtx_t *mutex, |
67 | uint32_t state) |
68 | { |
69 | state &= ~LCK_MTX_ILOCKED_MSK; |
70 | ordered_store_mtx_state_release(mutex, state); |
71 | |
72 | enable_preemption(); |
73 | } |
74 | |
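/*
 * Finish a mutex lock: the caller must still hold the interlock (asserted
 * below). Release it, re-enable preemption, and record the DTrace lockstat
 * acquire event; 'indirect' selects the extended (EXT) probe variant.
 */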
75 | __attribute__((always_inline)) |
76 | static inline void |
77 | lck_mtx_lock_finish_inline( |
78 | lck_mtx_t *mutex, |
79 | uint32_t state, |
80 | boolean_t indirect) |
81 | { |
82 | assert(state & LCK_MTX_ILOCKED_MSK); |
83 | |
84 | /* release the interlock and re-enable preemption */ |
85 | lck_mtx_ilk_unlock_inline(mutex, state); |
86 | |
87 | #if CONFIG_DTRACE |
88 | if (indirect) { |
89 | LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0); |
90 | } else { |
91 | LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0); |
92 | } |
93 | #endif |
94 | } |
95 | |
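/*
 * Finish a successful mutex try-lock: release the interlock, re-enable
 * preemption, and record the DTrace lockstat try-lock acquire event.
 */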
96 | __attribute__((always_inline)) |
97 | static inline void |
98 | lck_mtx_try_lock_finish_inline( |
99 | lck_mtx_t *mutex, |
100 | uint32_t state) |
101 | { |
102 | /* release the interlock and re-enable preemption */ |
103 | lck_mtx_ilk_unlock_inline(mutex, state); |
104 | |
105 | #if CONFIG_DTRACE |
106 | LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, mutex, 0); |
107 | #endif |
108 | } |
109 | |
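/*
 * Finish converting a spin-mode mutex into a held mutex: clear the interlock
 * and spin bits, set the mutex-locked bit in a single release store, then
 * re-enable preemption.
 */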
110 | __attribute__((always_inline)) |
111 | static inline void |
112 | lck_mtx_convert_spin_finish_inline( |
113 | lck_mtx_t *mutex, |
114 | uint32_t state) |
115 | { |
116 | /* release the interlock and acquire it as mutex */ |
117 | state &= ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK); |
118 | state |= LCK_MTX_MLOCKED_MSK; |
119 | |
120 | ordered_store_mtx_state_release(mutex, state); |
121 | enable_preemption(); |
122 | } |
123 | |
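/*
 * Finish a mutex unlock: the lock word has already been updated by the
 * caller, so just re-enable preemption and record the DTrace lockstat
 * release event; 'indirect' selects the extended (EXT) probe variant.
 */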
124 | __attribute__((always_inline)) |
125 | static inline void |
126 | lck_mtx_unlock_finish_inline( |
127 | lck_mtx_t *mutex, |
128 | boolean_t indirect) |
129 | { |
130 | enable_preemption(); |
131 | |
132 | #if CONFIG_DTRACE |
133 | if (indirect) { |
134 | LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, mutex, 0); |
135 | } else { |
136 | LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, mutex, 0); |
137 | } |
138 | #endif // CONFIG_DTRACE |
139 | } |
140 | |
141 | #endif /* _I386_LOCKS_I386_INLINES_H_ */ |
142 | |
143 | |