/*
 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_LOCKS_H_
#define _I386_LOCKS_H_

#include <sys/appleapiopts.h>
#include <kern/kern_types.h>
#include <kern/assert.h>

#ifdef MACH_KERNEL_PRIVATE

#include <i386/hw_lock_types.h>

extern unsigned int LcksOpts;
#if DEVELOPMENT || DEBUG
extern unsigned int LckDisablePreemptCheck;
#endif

#define enaLkDeb 0x00000001 /* Request debug in default attribute */
#define enaLkStat 0x00000002 /* Request statistic in default attribute */
#define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */

#endif /* MACH_KERNEL_PRIVATE */

#if defined(MACH_KERNEL_PRIVATE)
typedef struct {
	volatile uintptr_t interlock;
#if MACH_LDEBUG
	unsigned long lck_spin_pad[9]; /* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;

#define LCK_SPIN_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */

#else /* MACH_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long opaque[10];
} lck_spin_t;
#else /* KERNEL_PRIVATE */
typedef struct __lck_spin_t__ lck_spin_t;
#endif
#endif

#ifdef MACH_KERNEL_PRIVATE
/* The definition of this structure, including the layout of the
 * state bitfield, is tailored to the asm implementation in i386_lock.s
 */
typedef struct _lck_mtx_ {
	union {
		struct {
			volatile uintptr_t lck_mtx_owner;
			union {
				struct {
					volatile uint32_t
						lck_mtx_waiters:16,
						lck_mtx_pri:8,
						lck_mtx_ilocked:1,
						lck_mtx_mlocked:1,
						lck_mtx_promoted:1,
						lck_mtx_spin:1,
						lck_mtx_is_ext:1,
						lck_mtx_pad3:3;
				};
				uint32_t lck_mtx_state;
			};
			/* Pad field used as a canary, initialized to ~0 */
			uint32_t lck_mtx_pad32;
		};
		struct {
			struct _lck_mtx_ext_ *lck_mtx_ptr;
			uint32_t lck_mtx_tag;
			uint32_t lck_mtx_pad32_2;
		};
	};
} lck_mtx_t;

#define LCK_MTX_WAITERS_MSK 0x0000ffff
#define LCK_MTX_WAITER 0x00000001
#define LCK_MTX_PRIORITY_MSK 0x00ff0000
#define LCK_MTX_ILOCKED_MSK 0x01000000
#define LCK_MTX_MLOCKED_MSK 0x02000000
#define LCK_MTX_PROMOTED_MSK 0x04000000
#define LCK_MTX_SPIN_MSK 0x08000000

/* This pattern must subsume the interlocked, mlocked and spin bits */
#define LCK_MTX_TAG_INDIRECT 0x07ff1007 /* lock marked as Indirect */
#define LCK_MTX_TAG_DESTROYED 0x07fe2007 /* lock marked as Destroyed */
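
/*
 * Illustrative sketch (not a declaration from this header): a full-size
 * mutex is recognized by comparing the tag word against
 * LCK_MTX_TAG_INDIRECT, after which the complete state lives in the
 * extended block reached through lck_mtx_ptr. Variable names below are
 * hypothetical.
 *
 *	lck_mtx_t *state = mutex;
 *	if (mutex->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
 *		state = &mutex->lck_mtx_ptr->lck_mtx;
 */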

/* Adaptive spin before blocking */
extern uint64_t MutexSpin;

typedef enum lck_mtx_spinwait_ret_type {
	LCK_MTX_SPINWAIT_ACQUIRED = 0,
	LCK_MTX_SPINWAIT_SPUN = 1,
	LCK_MTX_SPINWAIT_NO_SPIN = 2,
} lck_mtx_spinwait_ret_type_t;
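
/*
 * Illustrative sketch (an assumption about the caller, not defined here):
 * the contended-lock slow path is expected to block only when the adaptive
 * spin phase did not acquire the lock.
 *
 *	if (lck_mtx_lock_spinwait_x86(mutex) != LCK_MTX_SPINWAIT_ACQUIRED)
 *		lck_mtx_lock_wait_x86(mutex);	// hypothetical call sequence
 */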

extern lck_mtx_spinwait_ret_type_t lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex);
extern void lck_mtx_lock_wait_x86(lck_mtx_t *mutex);
extern void lck_mtx_lock_acquire_x86(lck_mtx_t *mutex);

extern void lck_mtx_lock_slow(lck_mtx_t *lock);
extern boolean_t lck_mtx_try_lock_slow(lck_mtx_t *lock);
extern void lck_mtx_unlock_slow(lck_mtx_t *lock);
extern void lck_mtx_lock_spin_slow(lck_mtx_t *lock);
extern boolean_t lck_mtx_try_lock_spin_slow(lck_mtx_t *lock);
extern void hw_lock_byte_init(volatile uint8_t *lock_byte);
extern void hw_lock_byte_lock(volatile uint8_t *lock_byte);
extern void hw_lock_byte_unlock(volatile uint8_t *lock_byte);
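
/*
 * Illustrative usage sketch (the lock byte below is hypothetical): the
 * hw_lock_byte routines spin on a single byte-sized lock, which keeps the
 * embedding structure compact.
 *
 *	volatile uint8_t ilk;
 *	hw_lock_byte_init(&ilk);
 *	hw_lock_byte_lock(&ilk);
 *	... critical section ...
 *	hw_lock_byte_unlock(&ilk);
 */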

typedef struct {
	unsigned int type;
	unsigned int pad4;
	vm_offset_t pc;
	vm_offset_t thread;
} lck_mtx_deb_t;

#define MUTEX_TAG 0x4d4d

typedef struct {
	unsigned int lck_mtx_stat_data;
} lck_mtx_stat_t;

typedef struct _lck_mtx_ext_ {
	lck_mtx_t lck_mtx;
	struct _lck_grp_ *lck_mtx_grp;
	unsigned int lck_mtx_attr;
	unsigned int lck_mtx_pad1;
	lck_mtx_deb_t lck_mtx_deb;
	uint64_t lck_mtx_stat;
	unsigned int lck_mtx_pad2[2];
} lck_mtx_ext_t;

#define LCK_MTX_ATTR_DEBUG 0x1
#define LCK_MTX_ATTR_DEBUGb 0
#define LCK_MTX_ATTR_STAT 0x2
#define LCK_MTX_ATTR_STATb 1

#define LCK_MTX_EVENT(lck) ((event_t)(((unsigned int*)(lck))+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))
#define LCK_EVENT_TO_MUTEX(event) ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))))
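
/*
 * Illustrative sketch (not a declaration from this header): LCK_MTX_EVENT
 * derives the wait event from the last 32-bit word of the mutex, and
 * LCK_EVENT_TO_MUTEX reverses that arithmetic, so the two round-trip:
 *
 *	lck_mtx_t *m = ...;
 *	assert(LCK_EVENT_TO_MUTEX(LCK_MTX_EVENT(m)) == m);
 */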

#else /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
typedef struct {
	unsigned long opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long opaque[10];
} lck_mtx_ext_t;
#else
#ifdef KERNEL_PRIVATE
typedef struct {
	unsigned long opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long opaque[10];
} lck_mtx_ext_t;

#else
typedef struct __lck_mtx_t__ lck_mtx_t;
typedef struct __lck_mtx_ext_t__ lck_mtx_ext_t;
#endif
#endif
#endif

#ifdef MACH_KERNEL_PRIVATE
typedef union _lck_rw_t_internal_ {
	struct {
		volatile uint16_t lck_rw_shared_count; /* No. of accepted readers */
		volatile uint8_t lck_rw_interlock; /* Interlock byte */
		volatile uint8_t
			lck_rw_priv_excl:1, /* Writers prioritized if set */
			lck_rw_want_upgrade:1, /* Read-to-write upgrade waiting */
			lck_rw_want_write:1, /* Writer waiting or locked for write */
			lck_r_waiting:1, /* Reader is sleeping on lock */
			lck_w_waiting:1, /* Writer is sleeping on lock */
			lck_rw_can_sleep:1, /* Can attempts to lock go to sleep? */
			lck_rw_padb6:2; /* padding */
		uint32_t lck_rw_tag; /* This can be obsoleted when stats are in */
		thread_t lck_rw_owner; /* Unused */
	};
	struct {
		uint32_t data; /* Single word for count, ilk, and bitfields */
		uint32_t lck_rw_pad4;
		uint32_t lck_rw_pad8;
		uint32_t lck_rw_pad12;
	};
} lck_rw_t;
#define LCK_RW_T_SIZE 16

static_assert(sizeof(lck_rw_t) == LCK_RW_T_SIZE);

#define LCK_RW_SHARED_SHIFT 0
#define LCK_RW_INTERLOCK_BIT 16
#define LCK_RW_PRIV_EXCL_BIT 24
#define LCK_RW_WANT_UPGRADE_BIT 25
#define LCK_RW_WANT_EXCL_BIT 26
#define LCK_RW_R_WAITING_BIT 27
#define LCK_RW_W_WAITING_BIT 28
#define LCK_RW_CAN_SLEEP_BIT 29

#define LCK_RW_INTERLOCK (1 << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_WANT_UPGRADE (1 << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL (1 << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_R_WAITING (1 << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING (1 << LCK_RW_W_WAITING_BIT)
#define LCK_RW_PRIV_EXCL (1 << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_TAG_VALID (1 << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_SHARED_MASK (0xffff << LCK_RW_SHARED_SHIFT)
#define LCK_RW_SHARED_READER (1 << LCK_RW_SHARED_SHIFT)

#define LCK_RW_WANT_WRITE LCK_RW_WANT_EXCL
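
/*
 * Illustrative sketch (an assumption about the fast path, not declared
 * here): the bit and shift definitions above describe the single 32-bit
 * "data" word of the union, so a shared (read) acquisition can be attempted
 * with one compare-and-swap when no exclusive or interlock bits are set.
 * OSCompareAndSwap merely stands in for whatever atomic primitive the real
 * implementation uses.
 *
 *	uint32_t state = lock->data;
 *	if ((state & (LCK_RW_INTERLOCK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) == 0)
 *		acquired = OSCompareAndSwap(state, state + LCK_RW_SHARED_READER,
 *		    (volatile UInt32 *)&lock->data);
 */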


#define LCK_RW_ATTR_DEBUG 0x1
#define LCK_RW_ATTR_DEBUGb 0
#define LCK_RW_ATTR_STAT 0x2
#define LCK_RW_ATTR_STATb 1
#define LCK_RW_ATTR_READ_PRI 0x3
#define LCK_RW_ATTR_READ_PRIb 2
#define LCK_RW_ATTR_DIS_THREAD 0x40000000
#define LCK_RW_ATTR_DIS_THREADb 30
#define LCK_RW_ATTR_DIS_MYLOCK 0x10000000
#define LCK_RW_ATTR_DIS_MYLOCKb 28

#define LCK_RW_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */

#define RW_LOCK_READER_EVENT(x) ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_tag))))
#define RW_LOCK_WRITER_EVENT(x) ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_pad8))))
#define READ_EVENT_TO_RWLOCK(x) ((lck_rw_t *)(((unsigned char*)(x) - (offsetof(lck_rw_t, lck_rw_tag)))))
#define WRITE_EVENT_TO_RWLOCK(x) ((lck_rw_t *)(((unsigned char*)(x) - (offsetof(lck_rw_t, lck_rw_pad8)))))
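
/*
 * Illustrative sketch (not a declaration from this header): readers and
 * writers sleep on two distinct addresses inside the lock (the lck_rw_tag
 * and lck_rw_pad8 words), and the *_EVENT_TO_RWLOCK macros undo that
 * offset, so each pair round-trips:
 *
 *	lck_rw_t *l = ...;
 *	assert(READ_EVENT_TO_RWLOCK(RW_LOCK_READER_EVENT(l)) == l);
 *	assert(WRITE_EVENT_TO_RWLOCK(RW_LOCK_WRITER_EVENT(l)) == l);
 */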

#if LOCK_PRIVATE

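/*
 * The two macros below reach the current CPU's cpu_data_t through the %gs
 * segment base (GS_RELATIVE), so the thread argument is ignored; they bump
 * or test cpu_preemption_level for the processor the caller is running on.
 */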
#define disable_preemption_for_thread(t) ((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level++
#define preemption_disabled_for_thread(t) (((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level > 0)

#define LCK_MTX_THREAD_TO_STATE(t) ((uintptr_t)t)
#define PLATFORM_LCK_ILOCK 0

#define LOCK_SNOOP_SPINS 1000
#define LOCK_PRETEST 1

/* Spinlock panic deadline, in mach_absolute_time units (ns on i386) */
#define LOCK_PANIC_TIMEOUT 0xf000000 /* 250 ms (huge) */

#endif // LOCK_PRIVATE

#else
#ifdef KERNEL_PRIVATE
#pragma pack(1)
typedef struct {
	uint32_t opaque[3];
	uint32_t opaque4;
} lck_rw_t;
#pragma pack()
#else
typedef struct __lck_rw_t__ lck_rw_t;
#endif
#endif

#ifdef MACH_KERNEL_PRIVATE

extern void kernel_preempt_check (void);

#endif /* MACH_KERNEL_PRIVATE */
#endif /* _I386_LOCKS_H_ */