/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/* CMU_ENDHIST */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Processor registers for i386 and i486.
 */
#ifndef _I386_PROC_REG_H_
#define _I386_PROC_REG_H_

/*
 * Model Specific Registers
 */
#define MSR_P5_TSC              0x10    /* Time Stamp Register */
#define MSR_P5_CESR             0x11    /* Control and Event Select Register */
#define MSR_P5_CTR0             0x12    /* Counter #0 */
#define MSR_P5_CTR1             0x13    /* Counter #1 */

#define MSR_P5_CESR_PC          0x0200  /* Pin Control */
#define MSR_P5_CESR_CC          0x01C0  /* Counter Control mask */
#define MSR_P5_CESR_ES          0x003F  /* Event Select mask */

#define MSR_P5_CESR_SHIFT       16      /* Shift to get Counter 1 */
#define MSR_P5_CESR_MASK        (MSR_P5_CESR_PC|\
                                 MSR_P5_CESR_CC|\
                                 MSR_P5_CESR_ES) /* Mask Counter */

#define MSR_P5_CESR_CC_CLOCK    0x0100  /* Clock Counting (otherwise Event) */
#define MSR_P5_CESR_CC_DISABLE  0x0000  /* Disable counter */
#define MSR_P5_CESR_CC_CPL012   0x0040  /* Count if the CPL == 0, 1, 2 */
#define MSR_P5_CESR_CC_CPL3     0x0080  /* Count if the CPL == 3 */
#define MSR_P5_CESR_CC_CPL      0x00C0  /* Count regardless of the CPL */

/*
 * Event select codes: 6-bit values for the ES field (must fit under the
 * 0x003F mask above). The original listing wrote these binary codes with
 * a misleading 0x prefix; they are given here as plain hex.
 */
#define MSR_P5_CESR_ES_DATA_READ        0x00  /* Data Read */
#define MSR_P5_CESR_ES_DATA_WRITE       0x01  /* Data Write */
#define MSR_P5_CESR_ES_DATA_RW          0x28  /* Data Read or Write */
#define MSR_P5_CESR_ES_DATA_TLB_MISS    0x02  /* Data TLB Miss */
#define MSR_P5_CESR_ES_DATA_READ_MISS   0x03  /* Data Read Miss */
#define MSR_P5_CESR_ES_DATA_WRITE_MISS  0x04  /* Data Write Miss */
#define MSR_P5_CESR_ES_DATA_RW_MISS     0x29  /* Data Read or Write Miss */
#define MSR_P5_CESR_ES_HIT_EM           0x05  /* Write (hit) to M|E state */
#define MSR_P5_CESR_ES_DATA_CACHE_WB    0x06  /* Cache lines written back */
#define MSR_P5_CESR_ES_EXTERNAL_SNOOP   0x07  /* External Snoop */
#define MSR_P5_CESR_ES_CACHE_SNOOP_HIT  0x08  /* Data cache snoop hits */
#define MSR_P5_CESR_ES_MEM_ACCESS_PIPE  0x09  /* Mem. access in both pipes */
#define MSR_P5_CESR_ES_BANK_CONFLICTS   0x0A  /* Bank conflicts */
#define MSR_P5_CESR_ES_MISALIGNED       0x0B  /* Misaligned Memory or I/O */
#define MSR_P5_CESR_ES_CODE_READ        0x0C  /* Code Read */
#define MSR_P5_CESR_ES_CODE_TLB_MISS    0x0D  /* Code TLB miss */
#define MSR_P5_CESR_ES_CODE_CACHE_MISS  0x0E  /* Code Cache miss */
#define MSR_P5_CESR_ES_SEGMENT_LOADED   0x0F  /* Any segment reg. loaded */
#define MSR_P5_CESR_ES_BRANCHE          0x12  /* Branches */
#define MSR_P5_CESR_ES_BTB_HIT          0x13  /* BTB Hits */
#define MSR_P5_CESR_ES_BRANCHE_BTB      0x14  /* Taken branch or BTB Hit */
#define MSR_P5_CESR_ES_PIPELINE_FLUSH   0x15  /* Pipeline Flushes */
#define MSR_P5_CESR_ES_INSTRUCTION      0x16  /* Instructions executed */
#define MSR_P5_CESR_ES_INSTRUCTION_V    0x17  /* Inst. executed (v-pipe) */
#define MSR_P5_CESR_ES_BUS_CYCLE        0x18  /* Clocks while bus cycle */
#define MSR_P5_CESR_ES_FULL_WRITE_BUF   0x19  /* Clocks while full wrt buf. */
#define MSR_P5_CESR_ES_DATA_MEM_READ    0x1A  /* Pipeline waiting for read */
#define MSR_P5_CESR_ES_WRITE_EM         0x1B  /* Stall on write E|M state */
#define MSR_P5_CESR_ES_LOCKED_CYCLE     0x1C  /* Locked bus cycles */
#define MSR_P5_CESR_ES_IO_CYCLE         0x1D  /* I/O Read or Write cycles */
#define MSR_P5_CESR_ES_NON_CACHEABLE    0x1E  /* Non-cacheable Mem. read */
#define MSR_P5_CESR_ES_AGI              0x1F  /* Stall because of AGI */
#define MSR_P5_CESR_ES_FLOP             0x22  /* Floating Point operations */
#define MSR_P5_CESR_ES_BREAK_DR0        0x23  /* Breakpoint matches on DR0 */
#define MSR_P5_CESR_ES_BREAK_DR1        0x24  /* Breakpoint matches on DR1 */
#define MSR_P5_CESR_ES_BREAK_DR2        0x25  /* Breakpoint matches on DR2 */
#define MSR_P5_CESR_ES_BREAK_DR3        0x26  /* Breakpoint matches on DR3 */
#define MSR_P5_CESR_ES_HARDWARE_IT      0x27  /* Hardware interrupts */
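
/*
 * Illustrative sketch (not part of the original header): programming CTR0
 * to count instructions executed at any privilege level, using the
 * wrmsr()/rdmsr() macros defined later in this file. Counter 1 takes the
 * same bits shifted left by MSR_P5_CESR_SHIFT.
 *
 *	uint32_t cesr = MSR_P5_CESR_ES_INSTRUCTION | MSR_P5_CESR_CC_CPL;
 *	uint32_t lo, hi;
 *	wrmsr(MSR_P5_CESR, cesr, 0);	// program counter 0
 *	wrmsr(MSR_P5_CTR0, 0, 0);	// zero the 40-bit counter
 *	...
 *	rdmsr(MSR_P5_CTR0, lo, hi);	// hi:lo = instructions counted
 */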

/*
 * CR0
 */
#define CR0_PG        0x80000000  /* Enable paging */
#define CR0_CD        0x40000000  /* i486: Cache disable */
#define CR0_NW        0x20000000  /* i486: No write-through */
#define CR0_AM        0x00040000  /* i486: Alignment check mask */
#define CR0_WP        0x00010000  /* i486: Write-protect kernel access */
#define CR0_NE        0x00000020  /* i486: Handle numeric exceptions */
#define CR0_ET        0x00000010  /* Extension type is 80387 (not official) */
#define CR0_TS        0x00000008  /* Task switch */
#define CR0_EM        0x00000004  /* Emulate coprocessor */
#define CR0_MP        0x00000002  /* Monitor coprocessor */
#define CR0_PE        0x00000001  /* Enable protected mode */

/*
 * CR4
 */
#define CR4_SEE       0x00008000  /* Secure Enclave Enable XXX */
#define CR4_SMAP      0x00200000  /* Supervisor-Mode Access Protect */
#define CR4_SMEP      0x00100000  /* Supervisor-Mode Execute Protect */
#define CR4_OSXSAVE   0x00040000  /* OS supports XSAVE */
#define CR4_PCIDE     0x00020000  /* PCID Enable */
#define CR4_RDWRFSGS  0x00010000  /* RDWRFSGS Enable */
#define CR4_SMXE      0x00004000  /* Enable SMX operation */
#define CR4_VMXE      0x00002000  /* Enable VMX operation */
#define CR4_OSXMM     0x00000400  /* SSE/SSE2 exception support in OS */
#define CR4_OSFXS     0x00000200  /* SSE/SSE2 OS supports FXSave */
#define CR4_PCE       0x00000100  /* Performance-Monitor Count Enable */
#define CR4_PGE       0x00000080  /* Page Global Enable */
#define CR4_MCE       0x00000040  /* Machine Check Exceptions */
#define CR4_PAE       0x00000020  /* Physical Address Extensions */
#define CR4_PSE       0x00000010  /* Page Size Extensions */
#define CR4_DE        0x00000008  /* Debugging Extensions */
#define CR4_TSD       0x00000004  /* Time Stamp Disable */
#define CR4_PVI       0x00000002  /* Protected-mode Virtual Interrupts */
#define CR4_VME       0x00000001  /* Virtual-8086 Mode Extensions */
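
/*
 * Illustrative sketch (not part of the original header): CR4 bits are
 * toggled read-modify-write through get_cr4()/set_cr4(), defined below;
 * e.g. to enable global pages:
 *
 *	set_cr4(get_cr4() | CR4_PGE);
 */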

/*
 * XCR0 - XFEATURE_ENABLED_MASK (a.k.a. XFEM) register
 */
#define XCR0_X87        (1ULL << 0)  /* x87, FPU/MMX (always set) */
#define XCR0_SSE        (1ULL << 1)  /* SSE supported by XSAVE/XRESTORE */
#define XCR0_YMM        (1ULL << 2)  /* YMM state available */
#define XCR0_BNDREGS    (1ULL << 3)  /* MPX Bounds register state */
#define XCR0_BNDCSR     (1ULL << 4)  /* MPX Bounds configuration/state */
#if !defined(RC_HIDE_XNU_J137)
#define XCR0_OPMASK     (1ULL << 5)  /* Opmask register state */
#define XCR0_ZMM_HI256  (1ULL << 6)  /* ZMM upper 256-bit state */
#define XCR0_HI16_ZMM   (1ULL << 7)  /* ZMM16..ZMM31 512-bit state */
#endif /* not RC_HIDE_XNU_J137 */
#define XFEM_X87        XCR0_X87
#define XFEM_SSE        XCR0_SSE
#define XFEM_YMM        XCR0_YMM
#define XFEM_BNDREGS    XCR0_BNDREGS
#define XFEM_BNDCSR     XCR0_BNDCSR
#if !defined(RC_HIDE_XNU_J137)
#define XFEM_OPMASK     XCR0_OPMASK
#define XFEM_ZMM_HI256  XCR0_ZMM_HI256
#define XFEM_HI16_ZMM   XCR0_HI16_ZMM
#define XFEM_ZMM        (XFEM_ZMM_HI256 | XFEM_HI16_ZMM | XFEM_OPMASK)
#endif /* not RC_HIDE_XNU_J137 */
#define XCR0            (0)          /* register number for xgetbv/xsetbv */
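
/*
 * Illustrative sketch (not part of the original header): XCR0 is read with
 * the xgetbv instruction, which selects the extended control register via
 * %ecx and returns its value in %edx:%eax.
 *
 *	static inline uint64_t xgetbv_sketch(uint32_t xcr)
 *	{
 *		uint32_t lo, hi;
 *		__asm__ volatile("xgetbv" : "=a" (lo), "=d" (hi) : "c" (xcr));
 *		return (((uint64_t)hi) << 32) | lo;
 *	}
 *
 *	uint64_t xfem = xgetbv_sketch(XCR0);	// e.g. test (xfem & XFEM_YMM)
 */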

#define PMAP_PCID_PRESERVE  (1ULL << 63)
#define PMAP_PCID_MASK      (0xFFF)

/*
 * If thread groups are needed for x86, set this to 1
 */
#define CONFIG_THREAD_GROUPS 0

#ifndef ASSEMBLER

#include <sys/cdefs.h>
#include <stdint.h>

__BEGIN_DECLS

#define set_ts() set_cr0(get_cr0() | CR0_TS)

static inline uint16_t get_es(void)
{
	uint16_t es;
	__asm__ volatile("mov %%es, %0" : "=r" (es));
	return es;
}

static inline void set_es(uint16_t es)
{
	__asm__ volatile("mov %0, %%es" : : "r" (es));
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;
	__asm__ volatile("mov %%ds, %0" : "=r" (ds));
	return ds;
}

static inline void set_ds(uint16_t ds)
{
	__asm__ volatile("mov %0, %%ds" : : "r" (ds));
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;
	__asm__ volatile("mov %%fs, %0" : "=r" (fs));
	return fs;
}

static inline void set_fs(uint16_t fs)
{
	__asm__ volatile("mov %0, %%fs" : : "r" (fs));
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;
	__asm__ volatile("mov %%gs, %0" : "=r" (gs));
	return gs;
}

static inline void set_gs(uint16_t gs)
{
	__asm__ volatile("mov %0, %%gs" : : "r" (gs));
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;
	__asm__ volatile("mov %%ss, %0" : "=r" (ss));
	return ss;
}

static inline void set_ss(uint16_t ss)
{
	__asm__ volatile("mov %0, %%ss" : : "r" (ss));
}

static inline uintptr_t get_cr0(void)
{
	uintptr_t cr0;
	__asm__ volatile("mov %%cr0, %0" : "=r" (cr0));
	return cr0;
}

static inline void set_cr0(uintptr_t value)
{
	__asm__ volatile("mov %0, %%cr0" : : "r" (value));
}

static inline uintptr_t get_cr2(void)
{
	uintptr_t cr2;
	__asm__ volatile("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline uintptr_t get_cr3_raw(void)
{
	uintptr_t cr3;
	__asm__ volatile("mov %%cr3, %0" : "=r" (cr3));
	return cr3;
}

static inline void set_cr3_raw(uintptr_t value)
{
	__asm__ volatile("mov %0, %%cr3" : : "r" (value));
}

static inline uintptr_t get_cr3_base(void)
{
	uintptr_t cr3;
	__asm__ volatile("mov %%cr3, %0" : "=r" (cr3));
	return cr3 & ~(0xFFFULL);	/* mask off the PCID/flag bits */
}

static inline void set_cr3_composed(uintptr_t base, uint16_t pcid, uint64_t preserve)
{
	__asm__ volatile("mov %0, %%cr3" : : "r" (base | pcid | ((preserve) << 63)));
}
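
/*
 * Illustrative sketch (not part of the original header): with PCIDs
 * enabled, bit 63 of the value written to %cr3 requests that the TLB
 * entries tagged with the PCID be preserved ("no-flush"), so preserve is
 * expected to be 0 or 1:
 *
 *	set_cr3_composed(pml4_phys,		// page-table base (4K-aligned)
 *	                 pcid & PMAP_PCID_MASK,	// 12-bit PCID
 *	                 1);			// sets PMAP_PCID_PRESERVE (bit 63)
 */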

static inline uintptr_t get_cr4(void)
{
	uintptr_t cr4;
	__asm__ volatile("mov %%cr4, %0" : "=r" (cr4));
	return cr4;
}

static inline void set_cr4(uintptr_t value)
{
	__asm__ volatile("mov %0, %%cr4" : : "r" (value));
}

static inline uintptr_t x86_get_flags(void)
{
	uintptr_t erflags;
	__asm__ volatile("pushf; pop %0" : "=r" (erflags));
	return erflags;
}

static inline void clear_ts(void)
{
	__asm__ volatile("clts");
}

static inline unsigned short get_tr(void)
{
	unsigned short seg;
	__asm__ volatile("str %0" : "=rm" (seg));
	return seg;
}

static inline void set_tr(unsigned int seg)
{
	__asm__ volatile("ltr %0" : : "rm" ((unsigned short)(seg)));
}

static inline unsigned short sldt(void)
{
	unsigned short seg;
	__asm__ volatile("sldt %0" : "=rm" (seg));
	return seg;
}

static inline void lldt(unsigned int seg)
{
	__asm__ volatile("lldt %0" : : "rm" ((unsigned short)(seg)));
}

static inline void lgdt(uintptr_t *desc)
{
	__asm__ volatile("lgdt %0" : : "m" (*desc));
}

static inline void lidt(uintptr_t *desc)
{
	__asm__ volatile("lidt %0" : : "m" (*desc));
}

static inline void swapgs(void)
{
	__asm__ volatile("swapgs");
}

static inline void hlt(void)
{
	__asm__ volatile("hlt");
}

#ifdef MACH_KERNEL_PRIVATE

static inline void flush_tlb_raw(void)
{
	/*
	 * Toggling CR4.PGE flushes the entire TLB, including global entries;
	 * without PGE, rewriting CR3 flushes the non-global entries.
	 */
	uintptr_t cr4 = get_cr4();
	if (cr4 & CR4_PGE) {
		set_cr4(cr4 & ~CR4_PGE);
		set_cr4(cr4 | CR4_PGE);
	} else {
		set_cr3_raw(get_cr3_raw());
	}
}
extern int rdmsr64_carefully(uint32_t msr, uint64_t *val);
extern int wrmsr64_carefully(uint32_t msr, uint64_t val);
#endif /* MACH_KERNEL_PRIVATE */

static inline void wbinvd(void)
{
	__asm__ volatile("wbinvd");
}

static inline void invlpg(uintptr_t addr)
{
	__asm__ volatile("invlpg (%0)" :: "r" (addr) : "memory");
}

static inline void clac(void)
{
	__asm__ volatile("clac");
}

static inline void stac(void)
{
	__asm__ volatile("stac");
}

/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better.
 */

#define rdmsr(msr,lo,hi) \
	__asm__ volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr))

#define wrmsr(msr,lo,hi) \
	__asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi))

#define rdtsc(lo,hi) \
	__asm__ volatile("lfence; rdtsc; lfence" : "=a" (lo), "=d" (hi))

#define rdtsc_nofence(lo,hi) \
	__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi))

#define write_tsc(lo,hi)  wrmsr(0x10, lo, hi)	/* 0x10 is the TSC MSR (MSR_P5_TSC above) */

#define rdpmc(counter,lo,hi) \
	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter))

#ifdef XNU_KERNEL_PRIVATE
extern void do_mfence(void);
#define mfence() do_mfence()
#endif

#ifdef __LP64__
static inline uint64_t rdpmc64(uint32_t pmc)
{
	uint32_t lo = 0, hi = 0;
	rdpmc(pmc, lo, hi);
	return (((uint64_t)hi) << 32) | ((uint64_t)lo);
}

static inline uint64_t rdmsr64(uint32_t msr)
{
	uint32_t lo = 0, hi = 0;
	rdmsr(msr, lo, hi);
	return (((uint64_t)hi) << 32) | ((uint64_t)lo);
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	wrmsr(msr, (val & 0xFFFFFFFFUL), ((val >> 32) & 0xFFFFFFFFUL));
}

static inline uint64_t rdtsc64(void)
{
	uint64_t lo, hi;
	rdtsc(lo, hi);
	return (hi << 32) | lo;
}

static inline uint64_t rdtscp64(uint32_t *aux)
{
	uint64_t lo, hi;
	__asm__ volatile("rdtscp; mov %%ecx, %1"
	                 : "=a" (lo), "=d" (hi), "=m" (*aux)
	                 :
	                 : "ecx");
	return (hi << 32) | lo;
}
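
/*
 * Illustrative sketch (not part of the original header): rdtscp also
 * returns the IA32_TSC_AUX MSR (MSR_IA32_TSC_AUX below) in the aux
 * out-parameter, which a kernel can program per-CPU (e.g. with the CPU
 * number) to detect migration across the timestamp read:
 *
 *	uint32_t aux;
 *	uint64_t t = rdtscp64(&aux);	// t = TSC, aux = this CPU's TSC_AUX
 */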
#endif /* __LP64__ */

/*
 * rdmsr_carefully() returns 0 when the MSR has been read successfully,
 * or non-zero (1) if the MSR does not exist.
 * The implementation is in locore.s.
 */
extern int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi);
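
/*
 * Illustrative sketch (not part of the original header): probing for an
 * MSR that may not exist on the running CPU, without faulting:
 *
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(MSR_PLATFORM_INFO, &lo, &hi) == 0) {
 *		// the MSR exists; hi:lo hold its value
 *	}
 */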
__END_DECLS

#endif /* ASSEMBLER */

#define MSR_IA32_P5_MC_ADDR          0
#define MSR_IA32_P5_MC_TYPE          1
#define MSR_IA32_PLATFORM_ID         0x17
#define MSR_IA32_EBL_CR_POWERON      0x2a

#define MSR_IA32_APIC_BASE           0x1b
#define MSR_IA32_APIC_BASE_BSP       (1<<8)
#define MSR_IA32_APIC_BASE_EXTENDED  (1<<10)
#define MSR_IA32_APIC_BASE_ENABLE    (1<<11)
#define MSR_IA32_APIC_BASE_BASE      (0xfffff<<12)
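
/*
 * Illustrative sketch (not part of the original header): extracting the
 * local APIC physical base and checking for the bootstrap processor:
 *
 *	uint64_t  apic  = rdmsr64(MSR_IA32_APIC_BASE);
 *	uintptr_t base  = (uintptr_t)(apic & MSR_IA32_APIC_BASE_BASE);
 *	int       is_bsp = (apic & MSR_IA32_APIC_BASE_BSP) != 0;
 */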

#define MSR_CORE_THREAD_COUNT        0x35

#define MSR_IA32_FEATURE_CONTROL     0x3a
#define MSR_IA32_FEATCTL_LOCK        (1<<0)
#define MSR_IA32_FEATCTL_VMXON_SMX   (1<<1)
#define MSR_IA32_FEATCTL_VMXON       (1<<2)
#define MSR_IA32_FEATCTL_CSTATE_SMI  (1<<16)

#define MSR_IA32_UPDT_TRIG           0x79
#define MSR_IA32_BIOS_SIGN_ID        0x8b
#define MSR_IA32_UCODE_WRITE         MSR_IA32_UPDT_TRIG
#define MSR_IA32_UCODE_REV           MSR_IA32_BIOS_SIGN_ID

#define MSR_IA32_PERFCTR0            0xc1
#define MSR_IA32_PERFCTR1            0xc2
#define MSR_IA32_PERFCTR3            0xc3
#define MSR_IA32_PERFCTR4            0xc4

#define MSR_PLATFORM_INFO            0xce

#define MSR_IA32_MPERF               0xE7
#define MSR_IA32_APERF               0xE8

#define MSR_IA32_BBL_CR_CTL          0x119

#define MSR_IA32_SYSENTER_CS         0x174
#define MSR_IA32_SYSENTER_ESP        0x175
#define MSR_IA32_SYSENTER_EIP        0x176

#define MSR_IA32_MCG_CAP             0x179
#define MSR_IA32_MCG_STATUS          0x17a
#define MSR_IA32_MCG_CTL             0x17b

#define MSR_IA32_EVNTSEL0            0x186
#define MSR_IA32_EVNTSEL1            0x187
#define MSR_IA32_EVNTSEL2            0x188
#define MSR_IA32_EVNTSEL3            0x189

#define MSR_FLEX_RATIO               0x194
#define MSR_IA32_PERF_STS            0x198
#define MSR_IA32_PERF_CTL            0x199
#define MSR_IA32_CLOCK_MODULATION    0x19a

#define MSR_IA32_MISC_ENABLE         0x1a0

#define MSR_IA32_PACKAGE_THERM_STATUS     0x1b1
#define MSR_IA32_PACKAGE_THERM_INTERRUPT  0x1b2

#define MSR_IA32_DEBUGCTLMSR         0x1d9
#define MSR_IA32_LASTBRANCHFROMIP    0x1db
#define MSR_IA32_LASTBRANCHTOIP      0x1dc
#define MSR_IA32_LASTINTFROMIP       0x1dd
#define MSR_IA32_LASTINTTOIP         0x1de

#define MSR_IA32_CR_PAT              0x277

#define MSR_IA32_MTRRCAP             0xfe
#define MSR_IA32_MTRR_DEF_TYPE       0x2ff
#define MSR_IA32_MTRR_PHYSBASE(n)    (0x200 + 2*(n))
#define MSR_IA32_MTRR_PHYSMASK(n)    (0x200 + 2*(n) + 1)
#define MSR_IA32_MTRR_FIX64K_00000   0x250
#define MSR_IA32_MTRR_FIX16K_80000   0x258
#define MSR_IA32_MTRR_FIX16K_A0000   0x259
#define MSR_IA32_MTRR_FIX4K_C0000    0x268
#define MSR_IA32_MTRR_FIX4K_C8000    0x269
#define MSR_IA32_MTRR_FIX4K_D0000    0x26a
#define MSR_IA32_MTRR_FIX4K_D8000    0x26b
#define MSR_IA32_MTRR_FIX4K_E0000    0x26c
#define MSR_IA32_MTRR_FIX4K_E8000    0x26d
#define MSR_IA32_MTRR_FIX4K_F0000    0x26e
#define MSR_IA32_MTRR_FIX4K_F8000    0x26f
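
/*
 * Illustrative sketch (not part of the original header): the variable-range
 * MTRRs are consecutive base/mask pairs, so pair n lives at 0x200 + 2n and
 * 0x200 + 2n + 1:
 *
 *	uint64_t base0 = rdmsr64(MSR_IA32_MTRR_PHYSBASE(0));	// MSR 0x200
 *	uint64_t mask0 = rdmsr64(MSR_IA32_MTRR_PHYSMASK(0));	// MSR 0x201
 */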

#define MSR_IA32_PERF_FIXED_CTR0       0x309

#define MSR_IA32_PERF_FIXED_CTR_CTRL   0x38D
#define MSR_IA32_PERF_GLOBAL_STATUS    0x38E
#define MSR_IA32_PERF_GLOBAL_CTRL      0x38F
#define MSR_IA32_PERF_GLOBAL_OVF_CTRL  0x390

#define MSR_IA32_PKG_C3_RESIDENCY      0x3F8
#define MSR_IA32_PKG_C6_RESIDENCY      0x3F9
#define MSR_IA32_PKG_C7_RESIDENCY      0x3FA

#define MSR_IA32_CORE_C3_RESIDENCY     0x3FC
#define MSR_IA32_CORE_C6_RESIDENCY     0x3FD
#define MSR_IA32_CORE_C7_RESIDENCY     0x3FE

#define MSR_IA32_MC0_CTL               0x400
#define MSR_IA32_MC0_STATUS            0x401
#define MSR_IA32_MC0_ADDR              0x402
#define MSR_IA32_MC0_MISC              0x403

#define MSR_IA32_VMX_BASE                   0x480
#define MSR_IA32_VMX_BASIC                  MSR_IA32_VMX_BASE
#define MSR_IA32_VMX_PINBASED_CTLS          (MSR_IA32_VMX_BASE+1)
#define MSR_IA32_VMX_PROCBASED_CTLS         (MSR_IA32_VMX_BASE+2)
#define MSR_IA32_VMX_EXIT_CTLS              (MSR_IA32_VMX_BASE+3)
#define MSR_IA32_VMX_ENTRY_CTLS             (MSR_IA32_VMX_BASE+4)
#define MSR_IA32_VMX_MISC                   (MSR_IA32_VMX_BASE+5)
#define MSR_IA32_VMX_CR0_FIXED0             (MSR_IA32_VMX_BASE+6)
#define MSR_IA32_VMX_CR0_FIXED1             (MSR_IA32_VMX_BASE+7)
#define MSR_IA32_VMX_CR4_FIXED0             (MSR_IA32_VMX_BASE+8)
#define MSR_IA32_VMX_CR4_FIXED1             (MSR_IA32_VMX_BASE+9)
#define MSR_IA32_VMX_VMCS_ENUM              (MSR_IA32_VMX_BASE+10)
#define MSR_IA32_VMX_PROCBASED_CTLS2        (MSR_IA32_VMX_BASE+11)
#define MSR_IA32_VMX_EPT_VPID_CAP           (MSR_IA32_VMX_BASE+12)
#define MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT  21
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS     (MSR_IA32_VMX_BASE+13)
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS    (MSR_IA32_VMX_BASE+14)
#define MSR_IA32_VMX_TRUE_VMEXIT_CTLS       (MSR_IA32_VMX_BASE+15)
#define MSR_IA32_VMX_TRUE_VMENTRY_CTLS      (MSR_IA32_VMX_BASE+16)
#define MSR_IA32_VMX_VMFUNC                 (MSR_IA32_VMX_BASE+17)
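
/*
 * Illustrative sketch (not part of the original header): discovering VMX
 * capabilities. Bit 21 of IA32_VMX_EPT_VPID_CAP (the AD_SHIFT above)
 * reports support for EPT accessed/dirty flags:
 *
 *	uint64_t cap    = rdmsr64(MSR_IA32_VMX_EPT_VPID_CAP);
 *	int      ept_ad = (cap >> MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT) & 1;
 */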

#define MSR_IA32_DS_AREA                      0x600

#define MSR_IA32_PKG_POWER_SKU_UNIT           0x606
#define MSR_IA32_PKG_C2_RESIDENCY             0x60D
#define MSR_IA32_PKG_ENERGY_STATUS            0x611
#define MSR_IA32_DDR_ENERGY_STATUS            0x619
#define MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER  0x61D
#define MSR_IA32_RING_PERF_STATUS             0x621

#define MSR_IA32_PKG_C8_RESIDENCY             0x630
#define MSR_IA32_PKG_C9_RESIDENCY             0x631
#define MSR_IA32_PKG_C10_RESIDENCY            0x632

#define MSR_IA32_PP0_ENERGY_STATUS            0x639
#define MSR_IA32_PP1_ENERGY_STATUS            0x641
#define MSR_IA32_IA_PERF_LIMIT_REASONS_SKL    0x64F

#define MSR_IA32_IA_PERF_LIMIT_REASONS        0x690
#define MSR_IA32_GT_PERF_LIMIT_REASONS        0x6B0

#define MSR_IA32_TSC_DEADLINE                 0x6e0

#define MSR_IA32_EFER                         0xC0000080
#define MSR_IA32_EFER_SCE                     0x00000001
#define MSR_IA32_EFER_LME                     0x00000100
#define MSR_IA32_EFER_LMA                     0x00000400
#define MSR_IA32_EFER_NXE                     0x00000800

#define MSR_IA32_STAR                         0xC0000081
#define MSR_IA32_LSTAR                        0xC0000082
#define MSR_IA32_CSTAR                        0xC0000083
#define MSR_IA32_FMASK                        0xC0000084

#define MSR_IA32_FS_BASE                      0xC0000100
#define MSR_IA32_GS_BASE                      0xC0000101
#define MSR_IA32_KERNEL_GS_BASE               0xC0000102
#define MSR_IA32_TSC_AUX                      0xC0000103

#define HV_VMX_EPTP_MEMORY_TYPE_UC   0x0
#define HV_VMX_EPTP_MEMORY_TYPE_WB   0x6
#define HV_VMX_EPTP_WALK_LENGTH(wl)  (0ULL | ((((wl) - 1) & 0x7) << 3))
#define HV_VMX_EPTP_ENABLE_AD_FLAGS  (1ULL << 6)
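
/*
 * Illustrative sketch (not part of the original header): composing an EPT
 * pointer for a 4-level paging structure with write-back memory type and
 * accessed/dirty tracking:
 *
 *	uint64_t eptp = ept_pml4_phys
 *	              | HV_VMX_EPTP_MEMORY_TYPE_WB
 *	              | HV_VMX_EPTP_WALK_LENGTH(4)	// walk-length field = 3
 *	              | HV_VMX_EPTP_ENABLE_AD_FLAGS;
 */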

#endif /* _I386_PROC_REG_H_ */