/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define ATOMIC_PRIVATE 1
#define LOCK_PRIVATE 1

#include <mach_ldebug.h>

#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#include <string.h>

#include <i386/machine_routines.h> /* machine_timeout_suspended() */
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <i386/mp.h>
#include <sys/kdebug.h>
#include <i386/locks_i386_inlines.h>

/*
 * Fast path routines for the lck_mtx locking and unlocking functions.
 * Each fast path attempts a single compare-and-swap to acquire or release
 * the lock and interlock together, and falls back to the slow path if the
 * CAS fails.
 *
 * These functions were previously implemented in x86 assembly, and this C
 * code contains several optimizations so that the compiled code is as
 * performant and compact as the assembly version.
 *
 * To keep these functions from being inlined, which would grow the kernel
 * text size, they are all marked __attribute__((noinline)).
 *
 * The code is structured so that no call returns into the context of the
 * caller, i.e. every function called is either a tail call or an inline
 * function. The tail-called functions take fewer than six arguments, so the
 * arguments are passed in registers and never pushed onto the stack, which
 * allows the compiler to avoid creating a stack frame for these functions.
 *
 * The file is compiled with -momit-leaf-frame-pointer and -O2.
 */
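
/*
 * For reference, a minimal caller-side sketch of how these fast paths are
 * typically exercised. This is purely illustrative: it assumes the standard
 * lck_grp/lck_attr allocation interfaces declared in <kern/locks.h>
 * (lck_grp_alloc_init, lck_mtx_alloc_init and the *_ATTR_NULL defaults),
 * and my_init()/my_update() are hypothetical callers, not part of this file.
 *
 *	static lck_grp_t	*my_grp;
 *	static lck_mtx_t	*my_mtx;
 *
 *	static void
 *	my_init(void)
 *	{
 *		my_grp = lck_grp_alloc_init("my.subsystem", LCK_GRP_ATTR_NULL);
 *		my_mtx = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 *	}
 *
 *	static void
 *	my_update(void)
 *	{
 *		lck_mtx_lock(my_mtx);	// fast path: one CAS on lck_mtx_state
 *		// ... critical section, may block ...
 *		lck_mtx_unlock(my_mtx);	// fast path: one CAS plus a release store
 *	}
 */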

#if DEVELOPMENT || DEBUG

/*
 * If the thread currently holds one or more simple locks, an attempt to
 * acquire a mutex will make this check fail: a mutex acquisition may block
 * and context switch, which must not happen while a simple lock is held
 * (holding a simple lock keeps the preemption level raised).
 */
void __inline__
lck_mtx_check_preemption(void)
{
	if (get_preemption_level() == 0)
		return;
	if (LckDisablePreemptCheck)
		return;
	if (current_cpu_datap()->cpu_hibernate)
		return;

	panic("preemption_level(%d) != 0\n", get_preemption_level());
}

#else /* DEVELOPMENT || DEBUG */

void __inline__
lck_mtx_check_preemption(void)
{
	return;
}

#endif /* DEVELOPMENT || DEBUG */
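
/*
 * Illustrative misuse that the DEVELOPMENT/DEBUG check above is meant to
 * catch (a sketch, not code from this file; my_mtx is a hypothetical mutex):
 * acquiring a blocking mutex while the preemption level is raised, e.g.
 * while a simple lock is held or after an explicit disable_preemption().
 *
 *	disable_preemption();		// or: a simple lock is held
 *	lck_mtx_lock(my_mtx);		// may block; on DEVELOPMENT/DEBUG
 *					// kernels the check above panics
 *	enable_preemption();
 */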

/*
 * Routine:	lck_mtx_lock
 *
 * Locks a mutex for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already
 * be held by the current thread.
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock(
	lck_mtx_t	*lock)
{
	uint32_t prev, state;

	lck_mtx_check_preemption();
	state = ordered_load_mtx_state(lock);

	/*
	 * Take the fast path only if the mutex is not held, the
	 * interlock is not contended and there are no waiters.
	 * Indirect mutexes, as well as destroyed mutexes, fall
	 * through to the slow path.
	 *
	 * prev is the expected state (interlock, mutex and waiters
	 * bits all clear); state is the desired state, with the
	 * interlock and mutex bits set so a single CAS acquires both.
	 */

	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_lock_finish_inline(lock, state, FALSE);
}
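
/*
 * For clarity, a minimal user-space analogue of the fast-path technique used
 * above, written against C11 <stdatomic.h>. This is a sketch under stated
 * assumptions, not kernel code: the bit values and the fastpath_lock() name
 * are made up for illustration and do not correspond to the LCK_MTX_*_MSK
 * definitions or to atomic_compare_exchange32().
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define EX_ILOCKED	0x1u	// interlock bit (illustrative)
 *	#define EX_MLOCKED	0x2u	// mutex-held bit (illustrative)
 *	#define EX_WAITERS	0x4u	// waiters bit (illustrative)
 *
 *	static bool
 *	fastpath_lock(_Atomic uint32_t *state_p)
 *	{
 *		uint32_t state = atomic_load_explicit(state_p, memory_order_relaxed);
 *		// Expected value: none of the fast-path bits set.
 *		uint32_t prev = state & ~(EX_ILOCKED | EX_MLOCKED | EX_WAITERS);
 *		// Desired value: interlock and mutex acquired by one CAS.
 *		uint32_t next = prev | EX_ILOCKED | EX_MLOCKED;
 *
 *		// Acquire ordering on success, matching the release on unlock.
 *		return atomic_compare_exchange_strong_explicit(state_p, &prev,
 *		    next, memory_order_acquire, memory_order_relaxed);
 *	}
 *
 * When fastpath_lock() returns false, the kernel code above takes the slow
 * path instead of retrying the CAS.
 */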

/*
 * Routine:	lck_mtx_try_lock
 *
 * Tries to lock a mutex for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already
 * be held by the current thread.
 *
 * If the mutex is already held (either in spin or mutex
 * mode) the function fails; otherwise it acquires the mutex.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock(
	lck_mtx_t	*lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Take the fast path only if the mutex is not held, the
	 * interlock is not contended and there are no waiters.
	 * Indirect mutexes, as well as destroyed mutexes, fall
	 * through to the slow path.
	 */

	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_try_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_try_lock_finish_inline(lock, state);

	return TRUE;
}
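
/*
 * A hedged caller-side sketch of the typical try-lock pattern; my_mtx and
 * my_fallback_path() are hypothetical names used only for illustration.
 *
 *	if (lck_mtx_try_lock(my_mtx)) {
 *		// ... critical section ...
 *		lck_mtx_unlock(my_mtx);
 *	} else {
 *		// lock is held in spin or mutex mode: do not block here
 *		my_fallback_path();
 *	}
 */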

/*
 * Routine:	lck_mtx_lock_spin_always
 *
 * Tries to lock a mutex in spin mode for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already
 * be held by the current thread.
 *
 * If another thread holds the mutex in mutex mode, this
 * function switches behavior and acquires the lock as a mutex.
 *
 * If the mutex is held in spin mode it spins, contending
 * for it.
 *
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock_spin_always(
	lck_mtx_t	*lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Take the fast path only if the mutex is held neither in
	 * mutex nor in spin mode and the interlock is not contended.
	 * Indirect mutexes, as well as destroyed mutexes, fall
	 * through to the slow path.
	 */

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0);
#endif
	/* return with the interlock held and preemption disabled */
	return;
}

/*
 * Routine:	lck_mtx_lock_spin
 *
 * Tries to lock a mutex in spin mode for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already
 * be held by the current thread.
 *
 * If another thread holds the mutex in mutex mode, this
 * function switches behavior and acquires the lock as a mutex.
 *
 * If the mutex is held in spin mode it spins, contending
 * for it.
 *
 * In case of contention it might sleep.
 */
void
lck_mtx_lock_spin(
	lck_mtx_t	*lock)
{
	lck_mtx_check_preemption();
	lck_mtx_lock_spin_always(lock);
}
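
/*
 * A hedged usage sketch for the spin-mode entry points above. It assumes
 * lck_mtx_convert_spin() from <kern/locks.h> for the upgrade case; my_mtx is
 * hypothetical.
 *
 *	lck_mtx_lock_spin(my_mtx);	// returns with the interlock held and
 *					// preemption disabled
 *	// ... short, non-blocking critical section ...
 *	lck_mtx_unlock(my_mtx);		// spin case is handled by the slow path
 *
 *	// If the critical section may block, upgrade to a full mutex first:
 *	lck_mtx_lock_spin(my_mtx);
 *	lck_mtx_convert_spin(my_mtx);	// assumed interface, see <kern/locks.h>
 *	// ... may block now ...
 *	lck_mtx_unlock(my_mtx);
 */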

/*
 * Routine:	lck_mtx_try_lock_spin_always
 *
 * Tries to lock a mutex in spin mode for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already
 * be held by the current thread.
 *
 * If the mutex is already held (either in spin or mutex mode)
 * the function fails; otherwise it acquires the mutex in
 * spin mode.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock_spin_always(
	lck_mtx_t	*lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Take the fast path only if the mutex is held neither in
	 * mutex nor in spin mode and the interlock is not contended.
	 * Indirect mutexes, as well as destroyed mutexes, fall
	 * through to the slow path.
	 */

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_try_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0);
#endif

	/* return with the interlock held and preemption disabled */
	return TRUE;
}

/*
 * Routine:	lck_mtx_try_lock_spin
 *
 * Tries to lock a mutex in spin mode for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already
 * be held by the current thread.
 *
 * If the mutex is already held (either in spin or mutex mode)
 * the function fails; otherwise it acquires the mutex in
 * spin mode.
 */
boolean_t
lck_mtx_try_lock_spin(
	lck_mtx_t	*lock)
{
	return lck_mtx_try_lock_spin_always(lock);
}
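
/*
 * A hedged sketch of the try-spin pattern; my_mtx is hypothetical. On
 * success the lock is held in spin mode, so the interlock is held and
 * preemption is disabled: keep the critical section short and non-blocking.
 *
 *	if (lck_mtx_try_lock_spin(my_mtx)) {
 *		// ... short, non-blocking critical section ...
 *		lck_mtx_unlock(my_mtx);
 *	}
 */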

/*
 * Routine:	lck_mtx_unlock
 *
 * Unlocks a mutex held by the current thread.
 * It tries the fast path first, and falls back
 * to the slow path if waiters need to be woken
 * up or promotions need to be dropped.
 *
 * The interlock may be held on entry; in that
 * case the slow path performs the unlock.
 */
__attribute__((noinline))
void
lck_mtx_unlock(
	lck_mtx_t	*lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	if (state & LCK_MTX_SPIN_MSK)
		return lck_mtx_unlock_slow(lock);

	/*
	 * Only a full mutex goes through the fast path
	 * (a lock acquired in spin mode falls through
	 * to the slow path above).
	 * If there are waiters or promotions, or if the
	 * mutex is indirect, it falls through to the
	 * slow path as well.
	 */

	/*
	 * Fast path expected state:
	 * interlock not held, no waiters, no promotion and mutex held.
	 * The CAS below swaps the mutex bit for the interlock bit.
	 */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_WAITERS_MSK | LCK_MTX_PROMOTED_MSK);
	prev |= LCK_MTX_MLOCKED_MSK;

	state = prev | LCK_MTX_ILOCKED_MSK;
	state &= ~LCK_MTX_MLOCKED_MSK;

	disable_preemption();

	/* the memory order needs to be acquire because it is acquiring the interlock */
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_unlock_slow(lock);
	}

	/* mutex released, interlock acquired and preemption disabled */

#if DEVELOPMENT || DEBUG
	thread_t owner = (thread_t)lock->lck_mtx_owner;
	if (__improbable(owner != current_thread()))
		return lck_mtx_owner_check_panic(lock);
#endif

	/* clear owner */
	ordered_store_mtx_owner(lock, 0);
	/* release interlock */
	state &= ~LCK_MTX_ILOCKED_MSK;
	ordered_store_mtx_state_release(lock, state);

#if MACH_LDEBUG
	thread_t thread = current_thread();
	if (thread)
		thread->mutex_count--;
#endif /* MACH_LDEBUG */

	/* re-enable preemption */
	lck_mtx_unlock_finish_inline(lock, FALSE);
}
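
/*
 * Continuing the user-space analogue sketched after lck_mtx_lock() above:
 * the unlock fast path drops the mutex bit and takes the interlock with one
 * CAS, then clears the interlock with a release store. The names and bit
 * values (fastpath_unlock, EX_*) are illustrative assumptions only; the
 * promotion bit is omitted for brevity.
 *
 *	static void
 *	fastpath_unlock(_Atomic uint32_t *state_p)
 *	{
 *		uint32_t state = atomic_load_explicit(state_p, memory_order_relaxed);
 *		// Expected: mutex held, interlock free, no waiters.
 *		uint32_t prev = (state & ~(EX_ILOCKED | EX_WAITERS)) | EX_MLOCKED;
 *		// Desired: interlock taken and mutex bit dropped in one CAS.
 *		uint32_t next = (prev | EX_ILOCKED) & ~EX_MLOCKED;
 *
 *		if (!atomic_compare_exchange_strong_explicit(state_p, &prev,
 *		    next, memory_order_acquire, memory_order_relaxed)) {
 *			// kernel slow path: wake waiters, drop promotions
 *			return;
 *		}
 *		// Publish the critical section and release the interlock.
 *		atomic_store_explicit(state_p, next & ~EX_ILOCKED,
 *		    memory_order_release);
 *	}
 */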