1/*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57#include <mach_debug.h>
58#include <mach_ldebug.h>
59
60#include <sys/kdebug.h>
61
62#include <mach/kern_return.h>
63#include <mach/thread_status.h>
64#include <mach/vm_param.h>
65
66#include <kern/counters.h>
67#include <kern/kalloc.h>
68#include <kern/mach_param.h>
69#include <kern/processor.h>
70#include <kern/cpu_data.h>
71#include <kern/cpu_number.h>
72#include <kern/task.h>
73#include <kern/thread.h>
74#include <kern/sched_prim.h>
75#include <kern/misc_protos.h>
76#include <kern/assert.h>
77#include <kern/spl.h>
78#include <kern/machine.h>
79#include <kern/kpc.h>
80#include <ipc/ipc_port.h>
81#include <vm/vm_kern.h>
82#include <vm/vm_map.h>
83#include <vm/pmap.h>
84#include <vm/vm_protos.h>
85
86#include <i386/cpu_data.h>
87#include <i386/cpu_number.h>
88#include <i386/eflags.h>
89#include <i386/proc_reg.h>
90#include <i386/fpu.h>
91#include <i386/misc_protos.h>
92#include <i386/mp_desc.h>
93#include <i386/thread.h>
94#include <i386/machine_routines.h>
95#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
96
97#if HYPERVISOR
98#include <kern/hv_support.h>
99#endif
100
101/*
102 * Maps state flavor to number of words in the state:
103 */
104unsigned int _MachineStateCount[] = {
105 [x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT,
106 [x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT,
107 [x86_THREAD_STATE] = x86_THREAD_STATE_COUNT,
108 [x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT,
109 [x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT,
110 [x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT,
111 [x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT,
112 [x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT,
113 [x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT,
114 [x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT,
115 [x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT,
116 [x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT,
117 [x86_AVX_STATE32] = x86_AVX_STATE32_COUNT,
118 [x86_AVX_STATE64] = x86_AVX_STATE64_COUNT,
119 [x86_AVX_STATE] = x86_AVX_STATE_COUNT,
120#if !defined(RC_HIDE_XNU_J137)
121 [x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT,
122 [x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT,
123 [x86_AVX512_STATE] = x86_AVX512_STATE_COUNT,
124#endif /* not RC_HIDE_XNU_J137 */
125};
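
/*
 * These counts are expressed in 32-bit words (natural_t); the *_COUNT
 * constants in <mach/i386/thread_status.h> are defined (roughly) as
 *
 *	x86_THREAD_STATE64_COUNT ==
 *	    (mach_msg_type_number_t)(sizeof (x86_thread_state64_t) / sizeof (int))
 *
 * so a caller supplying or receiving one of these flavors sizes its buffer
 * as count * sizeof (natural_t) bytes.
 */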
126
127zone_t iss_zone; /* zone for saved_state area */
128zone_t ids_zone; /* zone for debug_state area */
129
130/* Forward */
131
132extern void Thread_continue(void);
133extern void Load_context(
134 thread_t thread) __attribute__((noreturn));
135
136static void
137get_exception_state32(thread_t thread, x86_exception_state32_t *es);
138
139static void
140get_exception_state64(thread_t thread, x86_exception_state64_t *es);
141
142static void
143get_thread_state32(thread_t thread, x86_thread_state32_t *ts);
144
145static void
146get_thread_state64(thread_t thread, x86_thread_state64_t *ts);
147
148static int
149set_thread_state32(thread_t thread, x86_thread_state32_t *ts);
150
151static int
152set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
153
154#if HYPERVISOR
155static inline void
156ml_hv_cswitch(thread_t old, thread_t new)
157{
158 if (old->hv_thread_target)
159 hv_callbacks.preempt(old->hv_thread_target);
160
161 if (new->hv_thread_target)
162 hv_callbacks.dispatch(new->hv_thread_target);
163}
164#endif
165
166/*
167 * Don't let an illegal value for the lower 32-bits of dr7 get set.
 * Specifically, check for undefined settings. Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
171 */
172static boolean_t
173dr7d_is_valid(uint32_t *dr7d)
174{
175 int i;
176 uint32_t mask1, mask2;
177
178 /*
	 * If the DE bit is set in CR4, R/W0-3 can be the pattern
	 * "10B" to indicate I/O reads and writes
181 */
182 if (!(get_cr4() & CR4_DE))
183 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
184 i++, mask1 <<= 4, mask2 <<= 4)
185 if ((*dr7d & mask1) == mask2)
186 return (FALSE);
187
188 /*
189 * if we are doing an instruction execution break (indicated
190 * by r/w[x] being "00B"), then the len[x] must also be set
191 * to "00B"
192 */
193 for (i = 0; i < 4; i++)
194 if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) &&
195 ((((*dr7d >> (18 + i*4))) & 0x3) != 0))
196 return (FALSE);
197
198 /*
199 * Intel docs have these bits fixed.
200 */
201 *dr7d |= 0x1 << 10; /* set bit 10 to 1 */
202 *dr7d &= ~(0x1 << 11); /* set bit 11 to 0 */
203 *dr7d &= ~(0x1 << 12); /* set bit 12 to 0 */
204 *dr7d &= ~(0x1 << 14); /* set bit 14 to 0 */
205 *dr7d &= ~(0x1 << 15); /* set bit 15 to 0 */
206
207 /*
208 * We don't allow anything to set the global breakpoints.
209 */
210
211 if (*dr7d & 0x2)
212 return (FALSE);
213
214 if (*dr7d & (0x2<<2))
215 return (FALSE);
216
217 if (*dr7d & (0x2<<4))
218 return (FALSE);
219
220 if (*dr7d & (0x2<<6))
221 return (FALSE);
222
223 return (TRUE);
224}
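
/*
 * For reference, the DR7 layout the checks above depend on (per the Intel
 * SDM; this is an illustrative summary, not a definition used by the code):
 *
 *	bits  0-7		L0,G0 .. L3,G3 local/global enables
 *				(the global enables are the bits rejected above)
 *	bit   10		reserved, must be 1
 *	bits  11,12,14,15	reserved, must be 0
 *	bits  16+4i,17+4i	R/W[i]: 00 instruction fetch, 01 data write,
 *				10 I/O (only with CR4.DE set), 11 data read/write
 *	bits  18+4i,19+4i	LEN[i]: 00 1 byte, 01 2 bytes, 11 4 bytes,
 *				10 8 bytes (64-bit only)
 */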
225
226extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
227
228boolean_t
229debug_state_is_valid32(x86_debug_state32_t *ds)
230{
231 if (!dr7d_is_valid(&ds->dr7))
232 return FALSE;
233
234 return TRUE;
235}
236
237boolean_t
238debug_state_is_valid64(x86_debug_state64_t *ds)
239{
240 if (!dr7d_is_valid((uint32_t *)&ds->dr7))
241 return FALSE;
242
243 /*
244 * Don't allow the user to set debug addresses above their max
245 * value
246 */
247 if (ds->dr7 & 0x1)
248 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
249 return FALSE;
250
251 if (ds->dr7 & (0x1<<2))
252 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
253 return FALSE;
254
255 if (ds->dr7 & (0x1<<4))
256 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
257 return FALSE;
258
259 if (ds->dr7 & (0x1<<6))
260 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
261 return FALSE;
262
263 /* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
264 ds->dr7 &= 0xffffffffULL;
265
266 return TRUE;
267}
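
/*
 * As an illustration of the state validated above: a debugger arming a
 * 4-byte write watchpoint on `addr' in a 64-bit task could issue, from user
 * space, something like the following (a minimal sketch; `target_thread' is
 * assumed to be a thread port obtained elsewhere, e.g. from task_threads()):
 *
 *	x86_debug_state64_t dbg = { 0 };
 *	dbg.dr0 = addr;			// must be below VM_MAX_PAGE_ADDRESS
 *	dbg.dr7 = 0x1			// L0: locally enable breakpoint 0
 *	    | (0x1ULL << 16)		// R/W0 = 01: break on data writes
 *	    | (0x3ULL << 18);		// LEN0 = 11: 4-byte range
 *	kern_return_t kr = thread_set_state(target_thread, x86_DEBUG_STATE64,
 *	    (thread_state_t)&dbg, x86_DEBUG_STATE64_COUNT);
 *
 * which reaches set_debug_state64() below via machine_thread_set_state().
 */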
268
269
270static kern_return_t
271set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
272{
273 x86_debug_state32_t *new_ids;
274 pcb_t pcb;
275
276 pcb = THREAD_TO_PCB(thread);
277
278 if (debug_state_is_valid32(ds) != TRUE) {
279 return KERN_INVALID_ARGUMENT;
280 }
281
282 if (pcb->ids == NULL) {
283 new_ids = zalloc(ids_zone);
284 bzero(new_ids, sizeof *new_ids);
285
286 simple_lock(&pcb->lock);
287 /* make sure it wasn't already alloc()'d elsewhere */
288 if (pcb->ids == NULL) {
289 pcb->ids = new_ids;
290 simple_unlock(&pcb->lock);
291 } else {
292 simple_unlock(&pcb->lock);
293 zfree(ids_zone, new_ids);
294 }
295 }
296
297
298 copy_debug_state32(ds, pcb->ids, FALSE);
299
300 return (KERN_SUCCESS);
301}
302
303static kern_return_t
304set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
305{
306 x86_debug_state64_t *new_ids;
307 pcb_t pcb;
308
309 pcb = THREAD_TO_PCB(thread);
310
311 if (debug_state_is_valid64(ds) != TRUE) {
312 return KERN_INVALID_ARGUMENT;
313 }
314
315 if (pcb->ids == NULL) {
316 new_ids = zalloc(ids_zone);
317 bzero(new_ids, sizeof *new_ids);
318
319#if HYPERVISOR
320 if (thread->hv_thread_target) {
321 hv_callbacks.volatile_state(thread->hv_thread_target,
322 HV_DEBUG_STATE);
323 }
324#endif
325
326 simple_lock(&pcb->lock);
327 /* make sure it wasn't already alloc()'d elsewhere */
328 if (pcb->ids == NULL) {
329 pcb->ids = new_ids;
330 simple_unlock(&pcb->lock);
331 } else {
332 simple_unlock(&pcb->lock);
333 zfree(ids_zone, new_ids);
334 }
335 }
336
337 copy_debug_state64(ds, pcb->ids, FALSE);
338
339 return (KERN_SUCCESS);
340}
341
342static void
343get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
344{
345 x86_debug_state32_t *saved_state;
346
347 saved_state = thread->machine.ids;
348
349 if (saved_state) {
350 copy_debug_state32(saved_state, ds, TRUE);
351 } else
352 bzero(ds, sizeof *ds);
353}
354
355static void
356get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
357{
358 x86_debug_state64_t *saved_state;
359
360 saved_state = (x86_debug_state64_t *)thread->machine.ids;
361
362 if (saved_state) {
363 copy_debug_state64(saved_state, ds, TRUE);
364 } else
365 bzero(ds, sizeof *ds);
366}
367
368/*
369 * consider_machine_collect:
370 *
371 * Try to collect machine-dependent pages
372 */
373void
374consider_machine_collect(void)
375{
376}
377
378void
379consider_machine_adjust(void)
380{
381}
382
383/*
384 * Switch to the first thread on a CPU.
385 */
386void
387machine_load_context(
388 thread_t new)
389{
390 new->machine.specFlags |= OnProc;
391 act_machine_switch_pcb(NULL, new);
392 Load_context(new);
393}
394
395static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) {
396 pmap_assert(ml_get_interrupts_enabled() == FALSE);
397 vm_map_t nmap = nt->map, omap = ot->map;
398 if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
399 PMAP_DEACTIVATE_MAP(omap, ot, cnum);
400 PMAP_ACTIVATE_MAP(nmap, nt, cnum);
401 }
402}
403
404/*
405 * Switch to a new thread.
406 * Save the old thread`s kernel state or continuation,
407 * and return it.
408 */
409thread_t
410machine_switch_context(
411 thread_t old,
412 thread_continue_t continuation,
413 thread_t new)
414{
415 assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
416
417#if KPC
418 kpc_off_cpu(old);
419#endif /* KPC */
420
421 /*
422 * Save FP registers if in use.
423 */
424 fpu_switch_context(old, new);
425
426 old->machine.specFlags &= ~OnProc;
427 new->machine.specFlags |= OnProc;
428
429 /*
430 * Monitor the stack depth and report new max,
431 * not worrying about races.
432 */
433 vm_offset_t depth = current_stack_depth();
434 if (depth > kernel_stack_depth_max) {
435 kernel_stack_depth_max = depth;
436 KERNEL_DEBUG_CONSTANT(
437 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
438 (long) depth, 0, 0, 0, 0);
439 }
440
441 /*
442 * Switch address maps if need be, even if not switching tasks.
443 * (A server activation may be "borrowing" a client map.)
444 */
445 pmap_switch_context(old, new, cpu_number());
446
447 /*
448 * Load the rest of the user state for the new thread
449 */
450 act_machine_switch_pcb(old, new);
451
452#if HYPERVISOR
453 ml_hv_cswitch(old, new);
454#endif
455
456 return(Switch_context(old, continuation, new));
457}
458
459thread_t
460machine_processor_shutdown(
461 thread_t thread,
462 void (*doshutdown)(processor_t),
463 processor_t processor)
464{
465#if CONFIG_VMX
466 vmx_suspend();
467#endif
468 fpu_switch_context(thread, NULL);
469 pmap_switch_context(thread, processor->idle_thread, cpu_number());
470 return(Shutdown_context(thread, doshutdown, processor));
471}
472
473
474/*
475 * This is where registers that are not normally specified by the mach-o
476 * file on an execve would be nullified, perhaps to avoid a covert channel.
477 */
478kern_return_t
479machine_thread_state_initialize(
480 thread_t thread)
481{
482 /*
483 * If there's an fpu save area, free it.
484 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
486 */
487 if (thread->machine.ifps) {
488 (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);
489
490 if (thread == current_thread())
491 clear_fpu();
492 }
493
494 if (thread->machine.ids) {
495 zfree(ids_zone, thread->machine.ids);
496 thread->machine.ids = NULL;
497 }
498
499 return KERN_SUCCESS;
500}
501
502uint32_t
503get_eflags_exportmask(void)
504{
505 return EFL_USER_SET;
506}
507
508/*
509 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
510 * for 32bit tasks only
511 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
512 * for 64bit tasks only
513 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
514 * for 32bit tasks only
515 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
516 * for 64bit tasks only
517 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
518 * for either 32bit or 64bit tasks
519 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
520 * for 32bit tasks only
521 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
522 * for 64bit tasks only
523 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
524 * for either 32bit or 64bit tasks
525 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
526 * for 32bit tasks only
527 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
528 * for 64bit tasks only
 * x86_EXCEPTION_STATE	- external get exception state on 32/64 bit processors
530 * for either 32bit or 64bit tasks
531 */
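
/*
 * For example, a user-space caller that wants the general-purpose registers
 * without knowing the target's address width can pass the generic flavor and
 * dispatch on the header that comes back (a minimal sketch, not part of this
 * file; `target_thread' is assumed to be a thread port for the target):
 *
 *	x86_thread_state_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *	uint64_t pc = 0;
 *	kern_return_t kr = thread_get_state(target_thread, x86_THREAD_STATE,
 *	    (thread_state_t)&ts, &count);
 *	if (kr == KERN_SUCCESS && ts.tsh.flavor == x86_THREAD_STATE64)
 *		pc = ts.uts.ts64.rip;
 *	else if (kr == KERN_SUCCESS && ts.tsh.flavor == x86_THREAD_STATE32)
 *		pc = ts.uts.ts32.eip;
 */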
532
533
534static void
535get_exception_state64(thread_t thread, x86_exception_state64_t *es)
536{
537 x86_saved_state64_t *saved_state;
538
539 saved_state = USER_REGS64(thread);
540
541 es->trapno = saved_state->isf.trapno;
542 es->cpu = saved_state->isf.cpu;
543 es->err = (typeof(es->err))saved_state->isf.err;
544 es->faultvaddr = saved_state->cr2;
545}
546
547static void
548get_exception_state32(thread_t thread, x86_exception_state32_t *es)
549{
550 x86_saved_state32_t *saved_state;
551
552 saved_state = USER_REGS32(thread);
553
554 es->trapno = saved_state->trapno;
555 es->cpu = saved_state->cpu;
556 es->err = saved_state->err;
557 es->faultvaddr = saved_state->cr2;
558}
559
560
561static int
562set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
563{
564 x86_saved_state32_t *saved_state;
565
566 pal_register_cache_state(thread, DIRTY);
567
568 saved_state = USER_REGS32(thread);
569
570 /*
571 * Scrub segment selector values:
572 */
573 ts->cs = USER_CS;
574 /*
575 * On a 64 bit kernel, we always override the data segments,
576 * as the actual selector numbers have changed. This also
577 * means that we don't support setting the data segments
578 * manually any more.
579 */
580 ts->ss = USER_DS;
581 ts->ds = USER_DS;
582 ts->es = USER_DS;
583
	/* Set GS to CTHREAD only if it's been established */
585 ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;
586
587 /* Check segment selectors are safe */
588 if (!valid_user_segment_selectors(ts->cs,
589 ts->ss,
590 ts->ds,
591 ts->es,
592 ts->fs,
593 ts->gs))
594 return(KERN_INVALID_ARGUMENT);
595
596 saved_state->eax = ts->eax;
597 saved_state->ebx = ts->ebx;
598 saved_state->ecx = ts->ecx;
599 saved_state->edx = ts->edx;
600 saved_state->edi = ts->edi;
601 saved_state->esi = ts->esi;
602 saved_state->ebp = ts->ebp;
603 saved_state->uesp = ts->esp;
604 saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
605 saved_state->eip = ts->eip;
606 saved_state->cs = ts->cs;
607 saved_state->ss = ts->ss;
608 saved_state->ds = ts->ds;
609 saved_state->es = ts->es;
610 saved_state->fs = ts->fs;
611 saved_state->gs = ts->gs;
612
613 /*
614 * If the trace trap bit is being set,
615 * ensure that the user returns via iret
616 * - which is signaled thusly:
617 */
618 if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
619 saved_state->cs = SYSENTER_TF_CS;
620
621 return(KERN_SUCCESS);
622}
623
624static int
625set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
626{
627 x86_saved_state64_t *saved_state;
628
629 pal_register_cache_state(thread, DIRTY);
630
631 saved_state = USER_REGS64(thread);
632
633 if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
634 !IS_USERADDR64_CANONICAL(ts->rip))
635 return(KERN_INVALID_ARGUMENT);
636
637 saved_state->r8 = ts->r8;
638 saved_state->r9 = ts->r9;
639 saved_state->r10 = ts->r10;
640 saved_state->r11 = ts->r11;
641 saved_state->r12 = ts->r12;
642 saved_state->r13 = ts->r13;
643 saved_state->r14 = ts->r14;
644 saved_state->r15 = ts->r15;
645 saved_state->rax = ts->rax;
646 saved_state->rbx = ts->rbx;
647 saved_state->rcx = ts->rcx;
648 saved_state->rdx = ts->rdx;
649 saved_state->rdi = ts->rdi;
650 saved_state->rsi = ts->rsi;
651 saved_state->rbp = ts->rbp;
652 saved_state->isf.rsp = ts->rsp;
653 saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
654 saved_state->isf.rip = ts->rip;
655 saved_state->isf.cs = USER64_CS;
656 saved_state->fs = (uint32_t)ts->fs;
657 saved_state->gs = (uint32_t)ts->gs;
658
659 return(KERN_SUCCESS);
660}
661
662
663
664static void
665get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
666{
667 x86_saved_state32_t *saved_state;
668
669 pal_register_cache_state(thread, VALID);
670
671 saved_state = USER_REGS32(thread);
672
673 ts->eax = saved_state->eax;
674 ts->ebx = saved_state->ebx;
675 ts->ecx = saved_state->ecx;
676 ts->edx = saved_state->edx;
677 ts->edi = saved_state->edi;
678 ts->esi = saved_state->esi;
679 ts->ebp = saved_state->ebp;
680 ts->esp = saved_state->uesp;
681 ts->eflags = saved_state->efl;
682 ts->eip = saved_state->eip;
683 ts->cs = saved_state->cs;
684 ts->ss = saved_state->ss;
685 ts->ds = saved_state->ds;
686 ts->es = saved_state->es;
687 ts->fs = saved_state->fs;
688 ts->gs = saved_state->gs;
689}
690
691
692static void
693get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
694{
695 x86_saved_state64_t *saved_state;
696
697 pal_register_cache_state(thread, VALID);
698
699 saved_state = USER_REGS64(thread);
700
701 ts->r8 = saved_state->r8;
702 ts->r9 = saved_state->r9;
703 ts->r10 = saved_state->r10;
704 ts->r11 = saved_state->r11;
705 ts->r12 = saved_state->r12;
706 ts->r13 = saved_state->r13;
707 ts->r14 = saved_state->r14;
708 ts->r15 = saved_state->r15;
709 ts->rax = saved_state->rax;
710 ts->rbx = saved_state->rbx;
711 ts->rcx = saved_state->rcx;
712 ts->rdx = saved_state->rdx;
713 ts->rdi = saved_state->rdi;
714 ts->rsi = saved_state->rsi;
715 ts->rbp = saved_state->rbp;
716 ts->rsp = saved_state->isf.rsp;
717 ts->rflags = saved_state->isf.rflags;
718 ts->rip = saved_state->isf.rip;
719 ts->cs = saved_state->isf.cs;
720 ts->fs = saved_state->fs;
721 ts->gs = saved_state->gs;
722}
723
724kern_return_t
725machine_thread_state_convert_to_user(
726 __unused thread_t thread,
727 __unused thread_flavor_t flavor,
728 __unused thread_state_t tstate,
729 __unused mach_msg_type_number_t *count)
730{
731 // No conversion to userspace representation on this platform
732 return KERN_SUCCESS;
733}
734
735kern_return_t
736machine_thread_state_convert_from_user(
737 __unused thread_t thread,
738 __unused thread_flavor_t flavor,
739 __unused thread_state_t tstate,
740 __unused mach_msg_type_number_t count)
741{
742 // No conversion from userspace representation on this platform
743 return KERN_SUCCESS;
744}
745
746kern_return_t
747machine_thread_siguctx_pointer_convert_to_user(
748 __unused thread_t thread,
749 __unused user_addr_t *uctxp)
750{
751 // No conversion to userspace representation on this platform
752 return KERN_SUCCESS;
753}
754
755kern_return_t
756machine_thread_function_pointers_convert_from_user(
757 __unused thread_t thread,
758 __unused user_addr_t *fptrs,
759 __unused uint32_t count)
760{
761 // No conversion from userspace representation on this platform
762 return KERN_SUCCESS;
763}
764
765/*
766 * act_machine_set_state:
767 *
768 * Set the status of the specified thread.
769 */
770
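/*
 * A common external use of this path is single-stepping: a debugger sets the
 * trace flag in the target's register state and resumes it.  A minimal
 * user-space sketch (illustrative only; `target_thread' is an assumed thread
 * port, and EFL_TF (0x100) is assumed to survive the EFL_USER_CLEAR /
 * EFL_USER_SET scrub applied below):
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	thread_get_state(target_thread, x86_THREAD_STATE64,
 *	    (thread_state_t)&ts, &count);
 *	ts.rflags |= 0x100;		// EFL_TF: trap after one instruction
 *	thread_set_state(target_thread, x86_THREAD_STATE64,
 *	    (thread_state_t)&ts, x86_THREAD_STATE64_COUNT);
 */
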
771kern_return_t
772machine_thread_set_state(
773 thread_t thr_act,
774 thread_flavor_t flavor,
775 thread_state_t tstate,
776 mach_msg_type_number_t count)
777{
778 switch (flavor) {
779 case x86_SAVED_STATE32:
780 {
781 x86_saved_state32_t *state;
782 x86_saved_state32_t *saved_state;
783
784 if (count < x86_SAVED_STATE32_COUNT)
785 return(KERN_INVALID_ARGUMENT);
786
787 if (thread_is_64bit_addr(thr_act))
788 return(KERN_INVALID_ARGUMENT);
789
790 state = (x86_saved_state32_t *) tstate;
791
792 /* Check segment selectors are safe */
793 if (!valid_user_segment_selectors(state->cs,
794 state->ss,
795 state->ds,
796 state->es,
797 state->fs,
798 state->gs))
799 return KERN_INVALID_ARGUMENT;
800
801 pal_register_cache_state(thr_act, DIRTY);
802
803 saved_state = USER_REGS32(thr_act);
804
805 /*
806 * General registers
807 */
808 saved_state->edi = state->edi;
809 saved_state->esi = state->esi;
810 saved_state->ebp = state->ebp;
811 saved_state->uesp = state->uesp;
812 saved_state->ebx = state->ebx;
813 saved_state->edx = state->edx;
814 saved_state->ecx = state->ecx;
815 saved_state->eax = state->eax;
816 saved_state->eip = state->eip;
817
818 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
819
820 /*
821 * If the trace trap bit is being set,
822 * ensure that the user returns via iret
823 * - which is signaled thusly:
824 */
825 if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
826 state->cs = SYSENTER_TF_CS;
827
828 /*
829 * User setting segment registers.
830 * Code and stack selectors have already been
831 * checked. Others will be reset by 'iret'
832 * if they are not valid.
833 */
834 saved_state->cs = state->cs;
835 saved_state->ss = state->ss;
836 saved_state->ds = state->ds;
837 saved_state->es = state->es;
838 saved_state->fs = state->fs;
839 saved_state->gs = state->gs;
840
841 break;
842 }
843
844 case x86_SAVED_STATE64:
845 {
846 x86_saved_state64_t *state;
847 x86_saved_state64_t *saved_state;
848
849 if (count < x86_SAVED_STATE64_COUNT)
850 return(KERN_INVALID_ARGUMENT);
851
852 if (!thread_is_64bit_addr(thr_act))
853 return(KERN_INVALID_ARGUMENT);
854
855 state = (x86_saved_state64_t *) tstate;
856
857 /* Verify that the supplied code segment selector is
858 * valid. In 64-bit mode, the FS and GS segment overrides
859 * use the FS.base and GS.base MSRs to calculate
860 * base addresses, and the trampolines don't directly
861 * restore the segment registers--hence they are no
862 * longer relevant for validation.
863 */
864 if (!valid_user_code_selector(state->isf.cs))
865 return KERN_INVALID_ARGUMENT;
866
867 /* Check pc and stack are canonical addresses */
868 if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
869 !IS_USERADDR64_CANONICAL(state->isf.rip))
870 return KERN_INVALID_ARGUMENT;
871
872 pal_register_cache_state(thr_act, DIRTY);
873
874 saved_state = USER_REGS64(thr_act);
875
876 /*
877 * General registers
878 */
879 saved_state->r8 = state->r8;
880 saved_state->r9 = state->r9;
881 saved_state->r10 = state->r10;
882 saved_state->r11 = state->r11;
883 saved_state->r12 = state->r12;
884 saved_state->r13 = state->r13;
885 saved_state->r14 = state->r14;
886 saved_state->r15 = state->r15;
887 saved_state->rdi = state->rdi;
888 saved_state->rsi = state->rsi;
889 saved_state->rbp = state->rbp;
890 saved_state->rbx = state->rbx;
891 saved_state->rdx = state->rdx;
892 saved_state->rcx = state->rcx;
893 saved_state->rax = state->rax;
894 saved_state->isf.rsp = state->isf.rsp;
895 saved_state->isf.rip = state->isf.rip;
896
897 saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
898
899 /*
900 * User setting segment registers.
901 * Code and stack selectors have already been
902 * checked. Others will be reset by 'sys'
903 * if they are not valid.
904 */
905 saved_state->isf.cs = state->isf.cs;
906 saved_state->isf.ss = state->isf.ss;
907 saved_state->fs = state->fs;
908 saved_state->gs = state->gs;
909
910 break;
911 }
912
913 case x86_FLOAT_STATE32:
914 case x86_AVX_STATE32:
915#if !defined(RC_HIDE_XNU_J137)
916 case x86_AVX512_STATE32:
917#endif /* not RC_HIDE_XNU_J137 */
918 {
919 if (count != _MachineStateCount[flavor])
920 return(KERN_INVALID_ARGUMENT);
921
922 if (thread_is_64bit_addr(thr_act))
923 return(KERN_INVALID_ARGUMENT);
924
925 return fpu_set_fxstate(thr_act, tstate, flavor);
926 }
927
928 case x86_FLOAT_STATE64:
929 case x86_AVX_STATE64:
930#if !defined(RC_HIDE_XNU_J137)
931 case x86_AVX512_STATE64:
932#endif /* not RC_HIDE_XNU_J137 */
933 {
934 if (count != _MachineStateCount[flavor])
935 return(KERN_INVALID_ARGUMENT);
936
937 if (!thread_is_64bit_addr(thr_act))
938 return(KERN_INVALID_ARGUMENT);
939
940 return fpu_set_fxstate(thr_act, tstate, flavor);
941 }
942
943 case x86_FLOAT_STATE:
944 {
945 x86_float_state_t *state;
946
947 if (count != x86_FLOAT_STATE_COUNT)
948 return(KERN_INVALID_ARGUMENT);
949
950 state = (x86_float_state_t *)tstate;
951 if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
952 thread_is_64bit_addr(thr_act)) {
953 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
954 }
955 if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
956 !thread_is_64bit_addr(thr_act)) {
957 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
958 }
959 return(KERN_INVALID_ARGUMENT);
960 }
961
962 case x86_AVX_STATE:
963#if !defined(RC_HIDE_XNU_J137)
964 case x86_AVX512_STATE:
965#endif
966 {
967 x86_avx_state_t *state;
968
969 if (count != _MachineStateCount[flavor])
970 return(KERN_INVALID_ARGUMENT);
971
972 state = (x86_avx_state_t *)tstate;
		/*
		 * Flavors are defined to have sequential values: 32-bit, 64-bit,
		 * non-specific (e.g. x86_AVX_STATE32, x86_AVX_STATE64, x86_AVX_STATE),
		 * so (flavor - 1) below selects the 64-bit variant and (flavor - 2)
		 * the 32-bit variant.
		 */
974 /* 64-bit flavor? */
975 if (state->ash.flavor == (flavor - 1) &&
976 state->ash.count == _MachineStateCount[flavor - 1] &&
977 thread_is_64bit_addr(thr_act)) {
978 return fpu_set_fxstate(thr_act,
979 (thread_state_t)&state->ufs.as64,
980 flavor - 1);
981 }
982 /* 32-bit flavor? */
983 if (state->ash.flavor == (flavor - 2) &&
984 state->ash.count == _MachineStateCount[flavor - 2] &&
985 !thread_is_64bit_addr(thr_act)) {
986 return fpu_set_fxstate(thr_act,
987 (thread_state_t)&state->ufs.as32,
988 flavor - 2);
989 }
990 return(KERN_INVALID_ARGUMENT);
991 }
992
993 case x86_THREAD_STATE32:
994 {
995 if (count != x86_THREAD_STATE32_COUNT)
996 return(KERN_INVALID_ARGUMENT);
997
998 if (thread_is_64bit_addr(thr_act))
999 return(KERN_INVALID_ARGUMENT);
1000
1001 return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1002 }
1003
1004 case x86_THREAD_STATE64:
1005 {
1006 if (count != x86_THREAD_STATE64_COUNT)
1007 return(KERN_INVALID_ARGUMENT);
1008
1009 if (!thread_is_64bit_addr(thr_act))
1010 return(KERN_INVALID_ARGUMENT);
1011
1012 return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
1013
1014 }
1015 case x86_THREAD_STATE:
1016 {
1017 x86_thread_state_t *state;
1018
1019 if (count != x86_THREAD_STATE_COUNT)
1020 return(KERN_INVALID_ARGUMENT);
1021
1022 state = (x86_thread_state_t *)tstate;
1023
1024 if (state->tsh.flavor == x86_THREAD_STATE64 &&
1025 state->tsh.count == x86_THREAD_STATE64_COUNT &&
1026 thread_is_64bit_addr(thr_act)) {
1027 return set_thread_state64(thr_act, &state->uts.ts64);
1028 } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
1029 state->tsh.count == x86_THREAD_STATE32_COUNT &&
1030 !thread_is_64bit_addr(thr_act)) {
1031 return set_thread_state32(thr_act, &state->uts.ts32);
1032 } else
1033 return(KERN_INVALID_ARGUMENT);
1034 }
1035 case x86_DEBUG_STATE32:
1036 {
1037 x86_debug_state32_t *state;
1038 kern_return_t ret;
1039
1040 if (thread_is_64bit_addr(thr_act))
1041 return(KERN_INVALID_ARGUMENT);
1042
1043 state = (x86_debug_state32_t *)tstate;
1044
1045 ret = set_debug_state32(thr_act, state);
1046
1047 return ret;
1048 }
1049 case x86_DEBUG_STATE64:
1050 {
1051 x86_debug_state64_t *state;
1052 kern_return_t ret;
1053
1054 if (!thread_is_64bit_addr(thr_act))
1055 return(KERN_INVALID_ARGUMENT);
1056
1057 state = (x86_debug_state64_t *)tstate;
1058
1059 ret = set_debug_state64(thr_act, state);
1060
1061 return ret;
1062 }
1063 case x86_DEBUG_STATE:
1064 {
1065 x86_debug_state_t *state;
1066 kern_return_t ret = KERN_INVALID_ARGUMENT;
1067
1068 if (count != x86_DEBUG_STATE_COUNT)
1069 return (KERN_INVALID_ARGUMENT);
1070
1071 state = (x86_debug_state_t *)tstate;
1072 if (state->dsh.flavor == x86_DEBUG_STATE64 &&
1073 state->dsh.count == x86_DEBUG_STATE64_COUNT &&
1074 thread_is_64bit_addr(thr_act)) {
1075 ret = set_debug_state64(thr_act, &state->uds.ds64);
1076 }
1077 else
1078 if (state->dsh.flavor == x86_DEBUG_STATE32 &&
1079 state->dsh.count == x86_DEBUG_STATE32_COUNT &&
1080 !thread_is_64bit_addr(thr_act)) {
1081 ret = set_debug_state32(thr_act, &state->uds.ds32);
1082 }
1083 return ret;
1084 }
1085 default:
1086 return(KERN_INVALID_ARGUMENT);
1087 }
1088
1089 return(KERN_SUCCESS);
1090}
1091
1092
1093
1094/*
1095 * thread_getstatus:
1096 *
1097 * Get the status of the specified thread.
1098 */
1099
1100kern_return_t
1101machine_thread_get_state(
1102 thread_t thr_act,
1103 thread_flavor_t flavor,
1104 thread_state_t tstate,
1105 mach_msg_type_number_t *count)
1106{
1107
1108 switch (flavor) {
1109
1110 case THREAD_STATE_FLAVOR_LIST:
1111 {
1112 if (*count < 3)
1113 return (KERN_INVALID_ARGUMENT);
1114
1115 tstate[0] = i386_THREAD_STATE;
1116 tstate[1] = i386_FLOAT_STATE;
1117 tstate[2] = i386_EXCEPTION_STATE;
1118
1119 *count = 3;
1120 break;
1121 }
1122
1123 case THREAD_STATE_FLAVOR_LIST_NEW:
1124 {
1125 if (*count < 4)
1126 return (KERN_INVALID_ARGUMENT);
1127
1128 tstate[0] = x86_THREAD_STATE;
1129 tstate[1] = x86_FLOAT_STATE;
1130 tstate[2] = x86_EXCEPTION_STATE;
1131 tstate[3] = x86_DEBUG_STATE;
1132
1133 *count = 4;
1134 break;
1135 }
1136
1137 case THREAD_STATE_FLAVOR_LIST_10_9:
1138 {
1139 if (*count < 5)
1140 return (KERN_INVALID_ARGUMENT);
1141
1142 tstate[0] = x86_THREAD_STATE;
1143 tstate[1] = x86_FLOAT_STATE;
1144 tstate[2] = x86_EXCEPTION_STATE;
1145 tstate[3] = x86_DEBUG_STATE;
1146 tstate[4] = x86_AVX_STATE;
1147
1148 *count = 5;
1149 break;
1150 }
1151
1152#if !defined(RC_HIDE_XNU_J137)
1153 case THREAD_STATE_FLAVOR_LIST_10_13:
1154 {
1155 if (*count < 6)
1156 return (KERN_INVALID_ARGUMENT);
1157
1158 tstate[0] = x86_THREAD_STATE;
1159 tstate[1] = x86_FLOAT_STATE;
1160 tstate[2] = x86_EXCEPTION_STATE;
1161 tstate[3] = x86_DEBUG_STATE;
1162 tstate[4] = x86_AVX_STATE;
1163 tstate[5] = x86_AVX512_STATE;
1164
1165 *count = 6;
1166 break;
1167 }
1168
1169#endif
1170 case x86_SAVED_STATE32:
1171 {
1172 x86_saved_state32_t *state;
1173 x86_saved_state32_t *saved_state;
1174
1175 if (*count < x86_SAVED_STATE32_COUNT)
1176 return(KERN_INVALID_ARGUMENT);
1177
1178 if (thread_is_64bit_addr(thr_act))
1179 return(KERN_INVALID_ARGUMENT);
1180
1181 state = (x86_saved_state32_t *) tstate;
1182 saved_state = USER_REGS32(thr_act);
1183
1184 /*
1185 * First, copy everything:
1186 */
1187 *state = *saved_state;
1188 state->ds = saved_state->ds & 0xffff;
1189 state->es = saved_state->es & 0xffff;
1190 state->fs = saved_state->fs & 0xffff;
1191 state->gs = saved_state->gs & 0xffff;
1192
1193 *count = x86_SAVED_STATE32_COUNT;
1194 break;
1195 }
1196
1197 case x86_SAVED_STATE64:
1198 {
1199 x86_saved_state64_t *state;
1200 x86_saved_state64_t *saved_state;
1201
1202 if (*count < x86_SAVED_STATE64_COUNT)
1203 return(KERN_INVALID_ARGUMENT);
1204
1205 if (!thread_is_64bit_addr(thr_act))
1206 return(KERN_INVALID_ARGUMENT);
1207
1208 state = (x86_saved_state64_t *)tstate;
1209 saved_state = USER_REGS64(thr_act);
1210
1211 /*
1212 * First, copy everything:
1213 */
1214 *state = *saved_state;
1215 state->fs = saved_state->fs & 0xffff;
1216 state->gs = saved_state->gs & 0xffff;
1217
1218 *count = x86_SAVED_STATE64_COUNT;
1219 break;
1220 }
1221
1222 case x86_FLOAT_STATE32:
1223 {
1224 if (*count < x86_FLOAT_STATE32_COUNT)
1225 return(KERN_INVALID_ARGUMENT);
1226
1227 if (thread_is_64bit_addr(thr_act))
1228 return(KERN_INVALID_ARGUMENT);
1229
1230 *count = x86_FLOAT_STATE32_COUNT;
1231
1232 return fpu_get_fxstate(thr_act, tstate, flavor);
1233 }
1234
1235 case x86_FLOAT_STATE64:
1236 {
1237 if (*count < x86_FLOAT_STATE64_COUNT)
1238 return(KERN_INVALID_ARGUMENT);
1239
1240 if ( !thread_is_64bit_addr(thr_act))
1241 return(KERN_INVALID_ARGUMENT);
1242
1243 *count = x86_FLOAT_STATE64_COUNT;
1244
1245 return fpu_get_fxstate(thr_act, tstate, flavor);
1246 }
1247
1248 case x86_FLOAT_STATE:
1249 {
1250 x86_float_state_t *state;
1251 kern_return_t kret;
1252
1253 if (*count < x86_FLOAT_STATE_COUNT)
1254 return(KERN_INVALID_ARGUMENT);
1255
1256 state = (x86_float_state_t *)tstate;
1257
1258 /*
1259 * no need to bzero... currently
1260 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
1261 */
1262 if (thread_is_64bit_addr(thr_act)) {
1263 state->fsh.flavor = x86_FLOAT_STATE64;
1264 state->fsh.count = x86_FLOAT_STATE64_COUNT;
1265
1266 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1267 } else {
1268 state->fsh.flavor = x86_FLOAT_STATE32;
1269 state->fsh.count = x86_FLOAT_STATE32_COUNT;
1270
1271 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1272 }
1273 *count = x86_FLOAT_STATE_COUNT;
1274
1275 return(kret);
1276 }
1277
1278 case x86_AVX_STATE32:
1279#if !defined(RC_HIDE_XNU_J137)
1280 case x86_AVX512_STATE32:
1281#endif
1282 {
1283 if (*count != _MachineStateCount[flavor])
1284 return(KERN_INVALID_ARGUMENT);
1285
1286 if (thread_is_64bit_addr(thr_act))
1287 return(KERN_INVALID_ARGUMENT);
1288
1289 *count = _MachineStateCount[flavor];
1290
1291 return fpu_get_fxstate(thr_act, tstate, flavor);
1292 }
1293
1294 case x86_AVX_STATE64:
1295#if !defined(RC_HIDE_XNU_J137)
1296 case x86_AVX512_STATE64:
1297#endif
1298 {
1299 if (*count != _MachineStateCount[flavor])
1300 return(KERN_INVALID_ARGUMENT);
1301
1302 if ( !thread_is_64bit_addr(thr_act))
1303 return(KERN_INVALID_ARGUMENT);
1304
1305 *count = _MachineStateCount[flavor];
1306
1307 return fpu_get_fxstate(thr_act, tstate, flavor);
1308 }
1309
1310 case x86_AVX_STATE:
1311#if !defined(RC_HIDE_XNU_J137)
1312 case x86_AVX512_STATE:
1313#endif
1314 {
1315 x86_avx_state_t *state;
1316 thread_state_t fstate;
1317
1318 if (*count < _MachineStateCount[flavor])
1319 return(KERN_INVALID_ARGUMENT);
1320
1321 *count = _MachineStateCount[flavor];
1322 state = (x86_avx_state_t *)tstate;
1323
1324 bzero((char *)state, *count * sizeof(int));
1325
1326 if (thread_is_64bit_addr(thr_act)) {
1327 flavor -= 1; /* 64-bit flavor */
1328 fstate = (thread_state_t) &state->ufs.as64;
1329 } else {
1330 flavor -= 2; /* 32-bit flavor */
1331 fstate = (thread_state_t) &state->ufs.as32;
1332 }
1333 state->ash.flavor = flavor;
1334 state->ash.count = _MachineStateCount[flavor];
1335
1336 return fpu_get_fxstate(thr_act, fstate, flavor);
1337 }
1338
1339 case x86_THREAD_STATE32:
1340 {
1341 if (*count < x86_THREAD_STATE32_COUNT)
1342 return(KERN_INVALID_ARGUMENT);
1343
1344 if (thread_is_64bit_addr(thr_act))
1345 return(KERN_INVALID_ARGUMENT);
1346
1347 *count = x86_THREAD_STATE32_COUNT;
1348
1349 get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1350 break;
1351 }
1352
1353 case x86_THREAD_STATE64:
1354 {
1355 if (*count < x86_THREAD_STATE64_COUNT)
1356 return(KERN_INVALID_ARGUMENT);
1357
1358 if ( !thread_is_64bit_addr(thr_act))
1359 return(KERN_INVALID_ARGUMENT);
1360
1361 *count = x86_THREAD_STATE64_COUNT;
1362
1363 get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
1364 break;
1365 }
1366
1367 case x86_THREAD_STATE:
1368 {
1369 x86_thread_state_t *state;
1370
1371 if (*count < x86_THREAD_STATE_COUNT)
1372 return(KERN_INVALID_ARGUMENT);
1373
1374 state = (x86_thread_state_t *)tstate;
1375
1376 bzero((char *)state, sizeof(x86_thread_state_t));
1377
1378 if (thread_is_64bit_addr(thr_act)) {
1379 state->tsh.flavor = x86_THREAD_STATE64;
1380 state->tsh.count = x86_THREAD_STATE64_COUNT;
1381
1382 get_thread_state64(thr_act, &state->uts.ts64);
1383 } else {
1384 state->tsh.flavor = x86_THREAD_STATE32;
1385 state->tsh.count = x86_THREAD_STATE32_COUNT;
1386
1387 get_thread_state32(thr_act, &state->uts.ts32);
1388 }
1389 *count = x86_THREAD_STATE_COUNT;
1390
1391 break;
1392 }
1393
1394
1395 case x86_EXCEPTION_STATE32:
1396 {
1397 if (*count < x86_EXCEPTION_STATE32_COUNT)
1398 return(KERN_INVALID_ARGUMENT);
1399
1400 if (thread_is_64bit_addr(thr_act))
1401 return(KERN_INVALID_ARGUMENT);
1402
1403 *count = x86_EXCEPTION_STATE32_COUNT;
1404
1405 get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
1406 /*
1407 * Suppress the cpu number for binary compatibility
1408 * of this deprecated state.
1409 */
1410 ((x86_exception_state32_t *)tstate)->cpu = 0;
1411 break;
1412 }
1413
1414 case x86_EXCEPTION_STATE64:
1415 {
1416 if (*count < x86_EXCEPTION_STATE64_COUNT)
1417 return(KERN_INVALID_ARGUMENT);
1418
1419 if ( !thread_is_64bit_addr(thr_act))
1420 return(KERN_INVALID_ARGUMENT);
1421
1422 *count = x86_EXCEPTION_STATE64_COUNT;
1423
1424 get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
1425 /*
1426 * Suppress the cpu number for binary compatibility
1427 * of this deprecated state.
1428 */
1429 ((x86_exception_state64_t *)tstate)->cpu = 0;
1430 break;
1431 }
1432
1433 case x86_EXCEPTION_STATE:
1434 {
1435 x86_exception_state_t *state;
1436
1437 if (*count < x86_EXCEPTION_STATE_COUNT)
1438 return(KERN_INVALID_ARGUMENT);
1439
1440 state = (x86_exception_state_t *)tstate;
1441
1442 bzero((char *)state, sizeof(x86_exception_state_t));
1443
1444 if (thread_is_64bit_addr(thr_act)) {
1445 state->esh.flavor = x86_EXCEPTION_STATE64;
1446 state->esh.count = x86_EXCEPTION_STATE64_COUNT;
1447
1448 get_exception_state64(thr_act, &state->ues.es64);
1449 } else {
1450 state->esh.flavor = x86_EXCEPTION_STATE32;
1451 state->esh.count = x86_EXCEPTION_STATE32_COUNT;
1452
1453 get_exception_state32(thr_act, &state->ues.es32);
1454 }
1455 *count = x86_EXCEPTION_STATE_COUNT;
1456
1457 break;
1458 }
1459 case x86_DEBUG_STATE32:
1460 {
1461 if (*count < x86_DEBUG_STATE32_COUNT)
1462 return(KERN_INVALID_ARGUMENT);
1463
1464 if (thread_is_64bit_addr(thr_act))
1465 return(KERN_INVALID_ARGUMENT);
1466
1467 get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);
1468
1469 *count = x86_DEBUG_STATE32_COUNT;
1470
1471 break;
1472 }
1473 case x86_DEBUG_STATE64:
1474 {
1475 if (*count < x86_DEBUG_STATE64_COUNT)
1476 return(KERN_INVALID_ARGUMENT);
1477
1478 if (!thread_is_64bit_addr(thr_act))
1479 return(KERN_INVALID_ARGUMENT);
1480
1481 get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);
1482
1483 *count = x86_DEBUG_STATE64_COUNT;
1484
1485 break;
1486 }
1487 case x86_DEBUG_STATE:
1488 {
1489 x86_debug_state_t *state;
1490
1491 if (*count < x86_DEBUG_STATE_COUNT)
1492 return(KERN_INVALID_ARGUMENT);
1493
1494 state = (x86_debug_state_t *)tstate;
1495
1496 bzero(state, sizeof *state);
1497
1498 if (thread_is_64bit_addr(thr_act)) {
1499 state->dsh.flavor = x86_DEBUG_STATE64;
1500 state->dsh.count = x86_DEBUG_STATE64_COUNT;
1501
1502 get_debug_state64(thr_act, &state->uds.ds64);
1503 } else {
1504 state->dsh.flavor = x86_DEBUG_STATE32;
1505 state->dsh.count = x86_DEBUG_STATE32_COUNT;
1506
1507 get_debug_state32(thr_act, &state->uds.ds32);
1508 }
1509 *count = x86_DEBUG_STATE_COUNT;
1510 break;
1511 }
1512 default:
1513 return(KERN_INVALID_ARGUMENT);
1514 }
1515
1516 return(KERN_SUCCESS);
1517}
1518
1519kern_return_t
1520machine_thread_get_kern_state(
1521 thread_t thread,
1522 thread_flavor_t flavor,
1523 thread_state_t tstate,
1524 mach_msg_type_number_t *count)
1525{
1526 x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;
1527
1528 /*
1529 * This works only for an interrupted kernel thread
1530 */
1531 if (thread != current_thread() || int_state == NULL)
1532 return KERN_FAILURE;
1533
1534 switch (flavor) {
1535 case x86_THREAD_STATE32: {
1536 x86_thread_state32_t *state;
1537 x86_saved_state32_t *saved_state;
1538
1539 if (!is_saved_state32(int_state) ||
1540 *count < x86_THREAD_STATE32_COUNT)
1541 return (KERN_INVALID_ARGUMENT);
1542
1543 state = (x86_thread_state32_t *) tstate;
1544
1545 saved_state = saved_state32(int_state);
1546 /*
1547 * General registers.
1548 */
1549 state->eax = saved_state->eax;
1550 state->ebx = saved_state->ebx;
1551 state->ecx = saved_state->ecx;
1552 state->edx = saved_state->edx;
1553 state->edi = saved_state->edi;
1554 state->esi = saved_state->esi;
1555 state->ebp = saved_state->ebp;
1556 state->esp = saved_state->uesp;
1557 state->eflags = saved_state->efl;
1558 state->eip = saved_state->eip;
1559 state->cs = saved_state->cs;
1560 state->ss = saved_state->ss;
1561 state->ds = saved_state->ds & 0xffff;
1562 state->es = saved_state->es & 0xffff;
1563 state->fs = saved_state->fs & 0xffff;
1564 state->gs = saved_state->gs & 0xffff;
1565
1566 *count = x86_THREAD_STATE32_COUNT;
1567
1568 return KERN_SUCCESS;
1569 }
1570
1571 case x86_THREAD_STATE64: {
1572 x86_thread_state64_t *state;
1573 x86_saved_state64_t *saved_state;
1574
1575 if (!is_saved_state64(int_state) ||
1576 *count < x86_THREAD_STATE64_COUNT)
1577 return (KERN_INVALID_ARGUMENT);
1578
1579 state = (x86_thread_state64_t *) tstate;
1580
1581 saved_state = saved_state64(int_state);
1582 /*
1583 * General registers.
1584 */
1585 state->rax = saved_state->rax;
1586 state->rbx = saved_state->rbx;
1587 state->rcx = saved_state->rcx;
1588 state->rdx = saved_state->rdx;
1589 state->rdi = saved_state->rdi;
1590 state->rsi = saved_state->rsi;
1591 state->rbp = saved_state->rbp;
1592 state->rsp = saved_state->isf.rsp;
1593 state->r8 = saved_state->r8;
1594 state->r9 = saved_state->r9;
1595 state->r10 = saved_state->r10;
1596 state->r11 = saved_state->r11;
1597 state->r12 = saved_state->r12;
1598 state->r13 = saved_state->r13;
1599 state->r14 = saved_state->r14;
1600 state->r15 = saved_state->r15;
1601
1602 state->rip = saved_state->isf.rip;
1603 state->rflags = saved_state->isf.rflags;
1604 state->cs = saved_state->isf.cs;
1605 state->fs = saved_state->fs & 0xffff;
1606 state->gs = saved_state->gs & 0xffff;
1607 *count = x86_THREAD_STATE64_COUNT;
1608
1609 return KERN_SUCCESS;
1610 }
1611
1612 case x86_THREAD_STATE: {
1613 x86_thread_state_t *state = NULL;
1614
1615 if (*count < x86_THREAD_STATE_COUNT)
1616 return (KERN_INVALID_ARGUMENT);
1617
1618 state = (x86_thread_state_t *) tstate;
1619
1620 if (is_saved_state32(int_state)) {
1621 x86_saved_state32_t *saved_state = saved_state32(int_state);
1622
1623 state->tsh.flavor = x86_THREAD_STATE32;
1624 state->tsh.count = x86_THREAD_STATE32_COUNT;
1625
1626 /*
1627 * General registers.
1628 */
1629 state->uts.ts32.eax = saved_state->eax;
1630 state->uts.ts32.ebx = saved_state->ebx;
1631 state->uts.ts32.ecx = saved_state->ecx;
1632 state->uts.ts32.edx = saved_state->edx;
1633 state->uts.ts32.edi = saved_state->edi;
1634 state->uts.ts32.esi = saved_state->esi;
1635 state->uts.ts32.ebp = saved_state->ebp;
1636 state->uts.ts32.esp = saved_state->uesp;
1637 state->uts.ts32.eflags = saved_state->efl;
1638 state->uts.ts32.eip = saved_state->eip;
1639 state->uts.ts32.cs = saved_state->cs;
1640 state->uts.ts32.ss = saved_state->ss;
1641 state->uts.ts32.ds = saved_state->ds & 0xffff;
1642 state->uts.ts32.es = saved_state->es & 0xffff;
1643 state->uts.ts32.fs = saved_state->fs & 0xffff;
1644 state->uts.ts32.gs = saved_state->gs & 0xffff;
1645 } else if (is_saved_state64(int_state)) {
1646 x86_saved_state64_t *saved_state = saved_state64(int_state);
1647
1648 state->tsh.flavor = x86_THREAD_STATE64;
1649 state->tsh.count = x86_THREAD_STATE64_COUNT;
1650
1651 /*
1652 * General registers.
1653 */
1654 state->uts.ts64.rax = saved_state->rax;
1655 state->uts.ts64.rbx = saved_state->rbx;
1656 state->uts.ts64.rcx = saved_state->rcx;
1657 state->uts.ts64.rdx = saved_state->rdx;
1658 state->uts.ts64.rdi = saved_state->rdi;
1659 state->uts.ts64.rsi = saved_state->rsi;
1660 state->uts.ts64.rbp = saved_state->rbp;
1661 state->uts.ts64.rsp = saved_state->isf.rsp;
1662 state->uts.ts64.r8 = saved_state->r8;
1663 state->uts.ts64.r9 = saved_state->r9;
1664 state->uts.ts64.r10 = saved_state->r10;
1665 state->uts.ts64.r11 = saved_state->r11;
1666 state->uts.ts64.r12 = saved_state->r12;
1667 state->uts.ts64.r13 = saved_state->r13;
1668 state->uts.ts64.r14 = saved_state->r14;
1669 state->uts.ts64.r15 = saved_state->r15;
1670
1671 state->uts.ts64.rip = saved_state->isf.rip;
1672 state->uts.ts64.rflags = saved_state->isf.rflags;
1673 state->uts.ts64.cs = saved_state->isf.cs;
1674 state->uts.ts64.fs = saved_state->fs & 0xffff;
1675 state->uts.ts64.gs = saved_state->gs & 0xffff;
1676 } else {
1677 panic("unknown thread state");
1678 }
1679
1680 *count = x86_THREAD_STATE_COUNT;
1681 return KERN_SUCCESS;
1682 }
1683 }
1684 return KERN_FAILURE;
1685}
1686
1687
1688void
1689machine_thread_switch_addrmode(thread_t thread)
1690{
1691 /*
1692 * We don't want to be preempted until we're done
1693 * - particularly if we're switching the current thread
1694 */
1695 disable_preemption();
1696
1697 /*
1698 * Reset the state saveareas. As we're resetting, we anticipate no
1699 * memory allocations in this path.
1700 */
1701 machine_thread_create(thread, thread->task);
1702
1703 /* Adjust FPU state */
1704 fpu_switch_addrmode(thread, task_has_64Bit_addr(thread->task));
1705
1706 /* If we're switching ourselves, reset the pcb addresses etc. */
1707 if (thread == current_thread()) {
1708 boolean_t istate = ml_set_interrupts_enabled(FALSE);
1709 act_machine_switch_pcb(NULL, thread);
1710 ml_set_interrupts_enabled(istate);
1711 }
1712 enable_preemption();
1713}
1714
1715
1716
1717/*
1718 * This is used to set the current thr_act/thread
1719 * when starting up a new processor
1720 */
1721void
1722machine_set_current_thread(thread_t thread)
1723{
1724 current_cpu_datap()->cpu_active_thread = thread;
1725}
1726
1727
1728/*
1729 * Perform machine-dependent per-thread initializations
1730 */
1731void
1732machine_thread_init(void)
1733{
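	/*
	 * zinit() arguments are, in order: element size, maximum zone size,
	 * allocation chunk size and zone name; so each zone below can hold
	 * up to thread_max elements and grows THREAD_CHUNK elements at a time.
	 */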
1734 iss_zone = zinit(sizeof(x86_saved_state_t),
1735 thread_max * sizeof(x86_saved_state_t),
1736 THREAD_CHUNK * sizeof(x86_saved_state_t),
1737 "x86_64 saved state");
1738
1739 ids_zone = zinit(sizeof(x86_debug_state64_t),
1740 thread_max * sizeof(x86_debug_state64_t),
1741 THREAD_CHUNK * sizeof(x86_debug_state64_t),
1742 "x86_64 debug state");
1743
1744 fpu_module_init();
1745}
1746
1747
1748
1749user_addr_t
1750get_useraddr(void)
1751{
1752 thread_t thr_act = current_thread();
1753
1754 if (thread_is_64bit_addr(thr_act)) {
1755 x86_saved_state64_t *iss64;
1756
1757 iss64 = USER_REGS64(thr_act);
1758
1759 return(iss64->isf.rip);
1760 } else {
1761 x86_saved_state32_t *iss32;
1762
1763 iss32 = USER_REGS32(thr_act);
1764
1765 return(iss32->eip);
1766 }
1767}
1768
1769/*
1770 * detach and return a kernel stack from a thread
1771 */
1772
1773vm_offset_t
1774machine_stack_detach(thread_t thread)
1775{
1776 vm_offset_t stack;
1777
1778 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
1779 (uintptr_t)thread_tid(thread), thread->priority,
1780 thread->sched_pri, 0,
1781 0);
1782
1783 stack = thread->kernel_stack;
1784 thread->kernel_stack = 0;
1785
1786 return (stack);
1787}
1788
1789/*
1790 * attach a kernel stack to a thread and initialize it
1791 */
1792
1793void
1794machine_stack_attach(
1795 thread_t thread,
1796 vm_offset_t stack)
1797{
1798 struct x86_kernel_state *statep;
1799
1800 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
1801 (uintptr_t)thread_tid(thread), thread->priority,
1802 thread->sched_pri, 0, 0);
1803
1804 assert(stack);
1805 thread->kernel_stack = stack;
1806 thread_initialize_kernel_state(thread);
1807
1808 statep = STACK_IKS(stack);
1809#if defined(__x86_64__)
1810 statep->k_rip = (unsigned long) Thread_continue;
1811 statep->k_rbx = (unsigned long) thread_continue;
1812 statep->k_rsp = (unsigned long) STACK_IKS(stack);
1813#else
1814 statep->k_eip = (unsigned long) Thread_continue;
1815 statep->k_ebx = (unsigned long) thread_continue;
1816 statep->k_esp = (unsigned long) STACK_IKS(stack);
1817#endif
1818
1819 return;
1820}
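
/*
 * (Given the frame set up above, the first switch onto this stack resumes in
 * the Thread_continue assembly stub with the address of the generic
 * thread_continue() dispatcher in k_rbx/k_ebx; presumably the stub transfers
 * control there once the switch completes.)
 */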
1821
1822/*
1823 * move a stack from old to new thread
1824 */
1825
1826void
1827machine_stack_handoff(thread_t old,
1828 thread_t new)
1829{
1830 vm_offset_t stack;
1831
1832 assert(new);
1833 assert(old);
1834
1835 kpc_off_cpu(old);
1836
1837 stack = old->kernel_stack;
1838 if (stack == old->reserved_stack) {
1839 assert(new->reserved_stack);
1840 old->reserved_stack = new->reserved_stack;
1841 new->reserved_stack = stack;
1842 }
1843 old->kernel_stack = 0;
1844 /*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
1847 */
1848 new->kernel_stack = stack;
1849
1850 fpu_switch_context(old, new);
1851
1852 old->machine.specFlags &= ~OnProc;
1853 new->machine.specFlags |= OnProc;
1854
1855 pmap_switch_context(old, new, cpu_number());
1856 act_machine_switch_pcb(old, new);
1857
1858#if HYPERVISOR
1859 ml_hv_cswitch(old, new);
1860#endif
1861
1862 machine_set_current_thread(new);
1863 thread_initialize_kernel_state(new);
1864
1865 return;
1866}
1867
1868
1869
1870
1871struct x86_act_context32 {
1872 x86_saved_state32_t ss;
1873 x86_float_state32_t fs;
1874 x86_debug_state32_t ds;
1875};
1876
1877struct x86_act_context64 {
1878 x86_saved_state64_t ss;
1879 x86_float_state64_t fs;
1880 x86_debug_state64_t ds;
1881};
1882
1883
1884
1885void *
1886act_thread_csave(void)
1887{
1888 kern_return_t kret;
1889 mach_msg_type_number_t val;
1890 thread_t thr_act = current_thread();
1891
1892 if (thread_is_64bit_addr(thr_act)) {
1893 struct x86_act_context64 *ic64;
1894
1895 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
1896
1897 if (ic64 == (struct x86_act_context64 *)NULL)
1898 return((void *)0);
1899
1900 val = x86_SAVED_STATE64_COUNT;
1901 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
1902 (thread_state_t) &ic64->ss, &val);
1903 if (kret != KERN_SUCCESS) {
1904 kfree(ic64, sizeof(struct x86_act_context64));
1905 return((void *)0);
1906 }
1907 val = x86_FLOAT_STATE64_COUNT;
1908 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
1909 (thread_state_t) &ic64->fs, &val);
1910 if (kret != KERN_SUCCESS) {
1911 kfree(ic64, sizeof(struct x86_act_context64));
1912 return((void *)0);
1913 }
1914
1915 val = x86_DEBUG_STATE64_COUNT;
1916 kret = machine_thread_get_state(thr_act,
1917 x86_DEBUG_STATE64,
1918 (thread_state_t)&ic64->ds,
1919 &val);
1920 if (kret != KERN_SUCCESS) {
1921 kfree(ic64, sizeof(struct x86_act_context64));
1922 return((void *)0);
1923 }
1924 return(ic64);
1925
1926 } else {
1927 struct x86_act_context32 *ic32;
1928
1929 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
1930
1931 if (ic32 == (struct x86_act_context32 *)NULL)
1932 return((void *)0);
1933
1934 val = x86_SAVED_STATE32_COUNT;
1935 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
1936 (thread_state_t) &ic32->ss, &val);
1937 if (kret != KERN_SUCCESS) {
1938 kfree(ic32, sizeof(struct x86_act_context32));
1939 return((void *)0);
1940 }
1941 val = x86_FLOAT_STATE32_COUNT;
1942 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
1943 (thread_state_t) &ic32->fs, &val);
1944 if (kret != KERN_SUCCESS) {
1945 kfree(ic32, sizeof(struct x86_act_context32));
1946 return((void *)0);
1947 }
1948
1949 val = x86_DEBUG_STATE32_COUNT;
1950 kret = machine_thread_get_state(thr_act,
1951 x86_DEBUG_STATE32,
1952 (thread_state_t)&ic32->ds,
1953 &val);
1954 if (kret != KERN_SUCCESS) {
1955 kfree(ic32, sizeof(struct x86_act_context32));
1956 return((void *)0);
1957 }
1958 return(ic32);
1959 }
1960}
1961
1962
1963void
1964act_thread_catt(void *ctx)
1965{
1966 thread_t thr_act = current_thread();
1967 kern_return_t kret;
1968
1969 if (ctx == (void *)NULL)
1970 return;
1971
1972 if (thread_is_64bit_addr(thr_act)) {
1973 struct x86_act_context64 *ic64;
1974
1975 ic64 = (struct x86_act_context64 *)ctx;
1976
1977 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
1978 (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
1979 if (kret == KERN_SUCCESS) {
1980 machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
1981 (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
1982 }
1983 kfree(ic64, sizeof(struct x86_act_context64));
1984 } else {
1985 struct x86_act_context32 *ic32;
1986
1987 ic32 = (struct x86_act_context32 *)ctx;
1988
1989 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
1990 (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
1991 if (kret == KERN_SUCCESS) {
1992 (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
1993 (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
1994 }
1995 kfree(ic32, sizeof(struct x86_act_context32));
1996 }
1997}
1998
1999
2000void act_thread_cfree(__unused void *ctx)
2001{
2002 /* XXX - Unused */
2003}
2004
2005/*
2006 * Duplicate one x86_debug_state32_t to another. "all" parameter
2007 * chooses whether dr4 and dr5 are copied (they are never meant
2008 * to be installed when we do machine_task_set_state() or
2009 * machine_thread_set_state()).
2010 */
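/*
 * (Per the Intel SDM, DR4 and DR5 are legacy aliases of DR6 and DR7 when
 * CR4.DE is clear and fault with #UD when it is set; they hold no independent
 * state, hence they are only propagated on readback and never installed.)
 */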
2011void
2012copy_debug_state32(
2013 x86_debug_state32_t *src,
2014 x86_debug_state32_t *target,
2015 boolean_t all)
2016{
2017 if (all) {
2018 target->dr4 = src->dr4;
2019 target->dr5 = src->dr5;
2020 }
2021
2022 target->dr0 = src->dr0;
2023 target->dr1 = src->dr1;
2024 target->dr2 = src->dr2;
2025 target->dr3 = src->dr3;
2026 target->dr6 = src->dr6;
2027 target->dr7 = src->dr7;
2028}
2029
2030/*
2031 * Duplicate one x86_debug_state64_t to another. "all" parameter
2032 * chooses whether dr4 and dr5 are copied (they are never meant
2033 * to be installed when we do machine_task_set_state() or
2034 * machine_thread_set_state()).
2035 */
2036void
2037copy_debug_state64(
2038 x86_debug_state64_t *src,
2039 x86_debug_state64_t *target,
2040 boolean_t all)
2041{
2042 if (all) {
2043 target->dr4 = src->dr4;
2044 target->dr5 = src->dr5;
2045 }
2046
2047 target->dr0 = src->dr0;
2048 target->dr1 = src->dr1;
2049 target->dr2 = src->dr2;
2050 target->dr3 = src->dr3;
2051 target->dr6 = src->dr6;
2052 target->dr7 = src->dr7;
2053}
2054