/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/seg.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

#ifdef MACH_BSD
extern void mach_kauth_cred_uthread_update(void);
extern void throttle_lowpri_io(int);
#endif

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    __unused unsigned int count,
    mach_vm_offset_t *user_stack,
    int *customstack,
    __unused boolean_t is64bit
)
{
    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;

        if (state25->esp) {
            *user_stack = state25->esp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK32;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;

        if (state25->rsp) {
            *user_stack = state25->rsp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK64;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
    mach_vm_offset_t *default_user_stack,
    boolean_t is64bit)
{
    if (is64bit) {
        *default_user_stack = VM_USRSTACK64;
    } else {
        *default_user_stack = VM_USRSTACK32;
    }
    return (KERN_SUCCESS);
}

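/*
 * thread_entrypoint:
 *
 * Return the user entry point (initial PC) from the machine
 * dependent thread state info, defaulting to VM_MIN_ADDRESS.
 */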
kern_return_t
thread_entrypoint(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    __unused unsigned int count,
    mach_vm_offset_t *entry_point
)
{
    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;
        *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
        break;
    }
    }
    return (KERN_SUCCESS);
}

/*
 * thread_set_child:
 *
 * Set up the fork() return state in the child thread: the pid is
 * returned in eax/rax, edx/rdx is set to 1 to mark the child side
 * of the fork, and the carry flag is cleared to indicate success.
 */
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    pal_register_cache_state(child, DIRTY);

    if (thread_is_64bit_addr(child)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(child);

        iss64->rax = pid;
        iss64->rdx = 1;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(child);

        iss32->eax = pid;
        iss32->edx = 1;
        iss32->efl &= ~EFL_CF;
    }
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);

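/*
 * machdep_syscall:
 *
 * Handle a machine-dependent system call from a 32-bit user thread:
 * look up the handler in machdep_call_table, copy any arguments in
 * from the user stack, dispatch, and return to user mode via
 * thread_exception_return().
 */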
__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
    int args[machdep_call_count];
    int trapno;
    int nargs;
    const machdep_call_t *entry;
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    trapno = regs->eax;
#if DEBUG_TRACE
    kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs != 0) {
        if (copyin((user_addr_t) regs->uesp + sizeof (int),
            (char *) args, (nargs * sizeof (int)))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    switch (nargs) {
    case 0:
        regs->eax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->eax = (*entry->routine.args_1)(args[0]);
        break;
    case 2:
        regs->eax = (*entry->routine.args_2)(args[0], args[1]);
        break;
    case 3:
        if (!entry->bsd_style)
            regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
        else {
            int error;
            uint32_t rval;

            error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF; /* carry bit */
            } else {
                regs->eax = rval;
                regs->efl &= ~EFL_CF;
            }
        }
        break;
    case 4:
        regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
        break;

    default:
        panic("machdep_syscall: too many args");
    }

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

#if DEBUG || DEVELOPMENT
    kern_allocation_name_t
    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

    throttle_lowpri_io(1);

    thread_exception_return();
    /* NOTREACHED */
}

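/*
 * machdep_syscall64:
 *
 * Handle a machine-dependent system call from a 64-bit user thread.
 * Arguments (at most two) are taken directly from the saved
 * %rdi/%rsi registers.
 */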
__attribute__((noreturn))
void
machdep_syscall64(x86_saved_state_t *state)
{
    int trapno;
    const machdep_call_t *entry;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall64: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    case 2:
        regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

#if DEBUG || DEVELOPMENT
    kern_allocation_name_t
    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

    throttle_lowpri_io(1);

    thread_exception_return();
    /* NOTREACHED */
}

#endif /* MACH_BSD */

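/*
 * Generic signature used to dispatch any Mach trap with a pointer
 * to its argument block.
 */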
typedef kern_return_t (*mach_call_t)(void *);

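/* Argument block for a Mach trap; unused slots are left zeroed. */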
struct mach_call_args {
    syscall_arg_t arg1;
    syscall_arg_t arg2;
    syscall_arg_t arg3;
    syscall_arg_t arg4;
    syscall_arg_t arg5;
    syscall_arg_t arg6;
    syscall_arg_t arg7;
    syscall_arg_t arg8;
    syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);

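/*
 * mach_call_arg_munger32:
 *
 * Copy a 32-bit thread's trap arguments in from its user stack
 * (the word at the stack pointer itself is skipped) and munge
 * them into the argument block expected by the trap handler.
 */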
static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
{
    if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int)))
        return KERN_INVALID_ARGUMENT;
#if CONFIG_REQUIRES_U32_MUNGING
    trapp->mach_trap_arg_munge32(args);
#else
#error U32 mach traps on x86_64 kernel requires munging
#endif
    return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

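/*
 * mach_call_munger:
 *
 * Common entry for Mach traps from 32-bit user threads. The trap
 * number arrives in %eax as a negative value; arguments are copied
 * in from the user stack and munged before the trap is dispatched.
 */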
__attribute__((noreturn))
void
mach_call_munger(x86_saved_state_t *state)
{
    int argc;
    int call_number;
    mach_call_t mach_call;
    kern_return_t retval;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state32_t *regs;

    struct uthread *ut = get_bsdthread_info(current_thread());
    uthread_reset_proc_refcount(ut);

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    call_number = -(regs->eax);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
    kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        DEBUG_KPRINT_SYSCALL_MACH(
            "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }

    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
        if (retval != KERN_SUCCESS) {
            regs->eax = retval;

            DEBUG_KPRINT_SYSCALL_MACH(
                "mach_call_munger: retval=0x%x\n", retval);

            thread_exception_return();
            /* NOTREACHED */
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        args.arg1, args.arg2, args.arg3, args.arg4, 0);

    retval = mach_call(&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);

    regs->eax = retval;

#if DEBUG || DEVELOPMENT
    kern_allocation_name_t
    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

    throttle_lowpri_io(1);

#if PROC_REF_DEBUG
    if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
        panic("system call returned with uu_proc_refcount != 0");
    }
#endif

    thread_exception_return();
    /* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

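/*
 * mach_call_munger64:
 *
 * Common entry for Mach traps from 64-bit user threads. Up to six
 * arguments arrive in registers and are copied straight out of the
 * saved state; any remaining arguments (at most three) are copied
 * in from the user stack.
 */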
__attribute__((noreturn))
void
mach_call_munger64(x86_saved_state_t *state)
{
    int call_number;
    int argc;
    mach_call_t mach_call;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state64_t *regs;

    struct uthread *ut = get_bsdthread_info(current_thread());
    uthread_reset_proc_refcount(ut);

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger64: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        int args_in_regs = MIN(6, argc);

        memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));

        if (argc > 6) {
            int copyin_count;

            assert(argc <= 9);
            copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);

            if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
                regs->rax = KERN_INVALID_ARGUMENT;

                thread_exception_return();
                /* NOTREACHED */
            }
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    regs->rax = (uint64_t)mach_call((void *)&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", regs->rax);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        regs->rax, 0, 0, 0, 0);

#if DEBUG || DEVELOPMENT
    kern_allocation_name_t
    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

    throttle_lowpri_io(1);

#if PROC_REF_DEBUG
    if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
        panic("system call returned with uu_proc_refcount != 0");
    }
#endif

    thread_exception_return();
    /* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t thread,
    mach_vm_address_t user_stack)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit_addr(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp = (uint64_t)user_stack;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
    }
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info. Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t thread,
    int adjust)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit_addr(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp += adjust;

        return iss64->isf.rsp;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp += adjust;

        return CAST_USER_ADDR_T(iss32->uesp);
    }
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit_addr(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rip = (uint64_t)entry;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
    }
}

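/*
 * thread_setsinglestep:
 *
 * Enable or disable single-stepping for the thread by setting or
 * clearing the trace flag (EFL_TF) in its saved user flags.
 */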
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit_addr(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        if (on)
            iss64->isf.rflags |= EFL_TF;
        else
            iss64->isf.rflags &= ~EFL_TF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        if (on) {
            iss32->efl |= EFL_TF;
            /* Ensure IRET */
            if (iss32->cs == SYSENTER_CS)
                iss32->cs = SYSENTER_TF_CS;
        } else
            iss32->efl &= ~EFL_TF;
    }

    return (KERN_SUCCESS);
}

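/*
 * get_user_regs:
 *
 * Return a pointer to the thread's saved user register state,
 * first marking the PAL register cache state dirty.
 */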
void *
get_user_regs(thread_t th)
{
    pal_register_cache_state(th, DIRTY);
    return (USER_STATE(th));
}

void *
find_user_regs(thread_t thread)
{
    return get_user_regs(thread);
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 */
x86_saved_state_t *find_kern_regs(thread_t);

x86_saved_state_t *
find_kern_regs(thread_t thread)
{
    if (thread == current_thread() &&
        NULL != current_cpu_datap()->cpu_int_state &&
        !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
          current_cpu_datap()->cpu_interrupt_level == 1)) {

        return current_cpu_datap()->cpu_int_state;
    } else {
        return NULL;
    }
}

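/* Return the top of the current CPU's interrupt stack, for use by DTrace. */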
vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
    return current_cpu_datap()->cpu_int_stack_top;
}
#endif /* CONFIG_DTRACE */