/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/locks.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_pageout.h>

#if defined(__x86_64__) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#include <kern/hv_support.h>

int hv_support_available = 0;

/* callbacks for tasks/threads with associated hv objects */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL,		/* thread is being dispatched for execution */
	.preempt = NULL,		/* thread is being preempted */
	.suspend = NULL,		/* system is being suspended */
	.thread_destroy = NULL,		/* thread is being destroyed */
	.task_destroy = NULL,		/* task is being destroyed */
	.volatile_state = NULL,		/* thread state is becoming volatile */
	.memory_pressure = NULL		/* memory pressure notification */
};

/* trap tables for hv_*_trap syscalls */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

static int hv_callbacks_enabled = 0;
static lck_grp_t *hv_support_lck_grp = NULL;
static lck_mtx_t *hv_support_lck_mtx = NULL;

/* hv_support boot initialization */
void
hv_support_init(void) {
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif

	hv_support_lck_grp = lck_grp_alloc_init("hv_support", LCK_GRP_ATTR_NULL);
	assert(hv_support_lck_grp);

	hv_support_lck_mtx = lck_mtx_alloc_init(hv_support_lck_grp, LCK_ATTR_NULL);
	assert(hv_support_lck_mtx);
}

/* returns true if hv_support is available on this machine */
int
hv_get_support(void) {
	return hv_support_available;
}

/* associate an hv object with the current task */
void
hv_set_task_target(void *target) {
	current_task()->hv_task_target = target;
}

/* associate an hv object with the current thread */
void
hv_set_thread_target(void *target) {
	current_thread()->hv_thread_target = target;
}

/* get hv object associated with the current task */
void*
hv_get_task_target(void) {
	return current_task()->hv_task_target;
}

/* get hv object associated with the current thread */
void*
hv_get_thread_target(void) {
	return current_thread()->hv_thread_target;
}
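
/*
 * Usage sketch (illustrative, not part of this file): a hypervisor
 * client would typically allocate a per-vcpu object, bind it to the
 * current thread, and recover it on later traps. hv_vcpu_create() is
 * a hypothetical allocator.
 *
 *	void *vcpu = hv_vcpu_create();		// hypothetical
 *	hv_set_thread_target(vcpu);		// bind to current thread
 *	...
 *	assert(hv_get_thread_target() == vcpu);	// same thread, same object
 */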

/* test whether a given class of thread state may change ("become
   volatile") between a thread's dispatch and its preemption */
int
hv_get_volatile_state(hv_volatile_state_t state) {
	int is_volatile = 0;

#if defined(__x86_64__)
	if (state == HV_DEBUG_STATE) {
		is_volatile = (current_thread()->machine.ids != NULL);
	}
#endif

	return is_volatile;
}
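
/*
 * Illustrative sketch: a dispatch callback might consult this to decide
 * whether cached state must be refreshed on every guest entry. The
 * names my_dispatch and my_vcpu_t are hypothetical.
 *
 *	static void my_dispatch(void *target) {
 *		my_vcpu_t *vcpu = (my_vcpu_t *)target;
 *		if (hv_get_volatile_state(HV_DEBUG_STATE)) {
 *			vcpu->reload_debug_state = TRUE;
 *		}
 *	}
 */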

/* register a list of trap handlers for the hv_*_trap syscalls */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
	unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {
		trap_table->traps = traps;
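		/* publish the handler array before exposing a nonzero
		   count, so lock-free readers in HV_TRAP_DISPATCH never
		   see a count without a table */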
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}
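
/*
 * Usage sketch, assuming a handler table owned by the caller; the
 * my_vcpu_run and my_vcpu_read handlers are hypothetical. Registration
 * succeeds only while no table is installed for that trap type.
 *
 *	static const hv_trap_t my_thread_traps[] = {
 *		my_vcpu_run,	// index 0
 *		my_vcpu_read	// index 1
 *	};
 *
 *	kern_return_t kr = hv_set_traps(HV_THREAD_TRAP, my_thread_traps,
 *		sizeof(my_thread_traps) / sizeof(my_thread_traps[0]));
 */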

/* release hv_*_trap traps */
void
hv_release_traps(hv_trap_type_t trap_type) {
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	lck_mtx_lock(hv_support_lck_mtx);
	trap_table->trap_count = 0;
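	/* retire the count before clearing the table, so concurrent
	   dispatches fail the bounds check instead of calling through
	   a NULL pointer */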
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(hv_support_lck_mtx);
}

/* register callbacks for certain task/thread events for tasks/threads with
   associated hv objects */
kern_return_t
hv_set_callbacks(hv_callbacks_t callbacks) {
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (hv_callbacks_enabled == 0) {
		hv_callbacks = callbacks;
		hv_callbacks_enabled = 1;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}
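
/*
 * Usage sketch: registration succeeds at most once until
 * hv_release_callbacks() is called. All my_* handlers are hypothetical.
 * Note that hv_suspend() below invokes .suspend unconditionally once
 * callbacks are enabled, so a real client should supply one.
 *
 *	hv_callbacks_t cb = {
 *		.dispatch = my_dispatch,
 *		.preempt = my_preempt,
 *		.suspend = my_suspend,
 *		.thread_destroy = my_thread_destroy,
 *		.task_destroy = my_task_destroy,
 *		.volatile_state = my_volatile_state,
 *		.memory_pressure = my_memory_pressure
 *	};
 *	kern_return_t kr = hv_set_callbacks(cb);
 */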

/* release callbacks for task/thread events */
void
hv_release_callbacks(void) {
	lck_mtx_lock(hv_support_lck_mtx);
	hv_callbacks = (hv_callbacks_t) {
		.dispatch = NULL,
		.preempt = NULL,
		.suspend = NULL,
		.thread_destroy = NULL,
		.task_destroy = NULL,
		.volatile_state = NULL,
		.memory_pressure = NULL
	};

	hv_callbacks_enabled = 0;
	lck_mtx_unlock(hv_support_lck_mtx);
}

/* system suspend notification */
void
hv_suspend(void) {
	if (hv_callbacks_enabled) {
		hv_callbacks.suspend();
	}
}

/* dispatch hv_task_trap/hv_thread_trap syscalls to the registered trap
   handlers; returns KERN_INVALID_ARGUMENT for an out-of-range index or
   when no handlers are registered (trap_count == 0). The trap handler
   itself is responsible for validating its target. */
#define HV_TRAP_DISPATCH(type, index, target, argument) \
	((__probable((index) < hv_trap_table[(type)].trap_count)) ? \
		hv_trap_table[(type)].traps[(index)]((target), (argument)) \
		: KERN_INVALID_ARGUMENT)

kern_return_t
hv_task_trap(uint64_t index, uint64_t arg) {
	return HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg);
}

kern_return_t
hv_thread_trap(uint64_t index, uint64_t arg) {
	return HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg);
}
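
/*
 * Illustrative handler shape (hypothetical): each entry in a registered
 * table receives the caller's hv object (task or thread target) plus the
 * user-supplied argument, and returns a kern_return_t.
 *
 *	static kern_return_t my_vcpu_run(void *target, uint64_t arg) {
 *		if (target == NULL)
 *			return KERN_INVALID_ARGUMENT;	// handler validates target
 *		...
 *		return KERN_SUCCESS;
 *	}
 *
 * A call of hv_thread_trap(0, arg) then dispatches to
 * my_vcpu_run(hv_get_thread_target(), arg).
 */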