/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h> /* pmap_pde */
#include <i386/mp.h>
#include <i386/misc_protos.h>
#include <i386/pio.h>
#include <i386/proc_reg.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <mach/vm_map.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <machine/pal_routines.h>
#include <libkern/kernel_mach_header.h>

// #define KDP_VM_READ_DEBUG 1
// #define KDP_VM_WRITE_DEBUG 1

/*
 * A (potentially valid) physical address is one that is not a kernel
 * virtual address, i.e. it lies in the user address range.
 */
#define IS_PHYS_ADDR(addr) IS_USERADDR64_CANONICAL(addr)
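
/*
 * kdp_trans_off: when set, source addresses that satisfy IS_PHYS_ADDR() are
 * treated as raw physical addresses and serviced by kdp_machine_phys_read()
 * with no virtual-to-physical translation (see kdp_machine_vm_read()).
 * kdp_read_io: when set, the pmap_valid_page() checks are skipped, so
 * physical pages outside the managed memory range (e.g. device memory) may
 * be copied.
 */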

boolean_t kdp_read_io;
boolean_t kdp_trans_off;

pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va);

pmap_t kdp_pmap = 0;

pmap_paddr_t
kdp_vtophys(
	pmap_t pmap,
	vm_offset_t va)
{
	pmap_paddr_t pa;
	ppnum_t pp;

	pp = pmap_find_phys(pmap, va);
	if (!pp)
		return 0;

	pa = ((pmap_paddr_t)pp << PAGE_SHIFT) | (va & PAGE_MASK);

	return pa;
}

/*
 * Read len bytes from the virtual address src (translated with kdp_pmap if
 * one has been set, otherwise with kernel_pmap) into the kernel buffer dst.
 * Returns the number of bytes actually copied.
 */
mach_vm_size_t
kdp_machine_vm_read(mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
	addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
	pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	if (kdp_trans_off && IS_PHYS_ADDR(src)) {
		kdp_readphysmem64_req_t rq;
		mach_vm_size_t ret;

		rq.address = src;
		rq.nbytes = (uint32_t)len;
		ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
		return ret;
	}

	/* If a different pmap has been specified with kdp_pmap, use it to
	 * translate the source (cur_virt_src); otherwise, the source is
	 * translated using the kernel_pmap.
	 */
	if (kdp_pmap)
		src_pmap = kdp_pmap;

	while (resid != 0) {
		if (!(cur_phys_src = kdp_vtophys(src_pmap, cur_virt_src)))
			goto exit;

		/* Always translate the destination buffer using the kernel_pmap */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Validate physical page numbers unless kdp_read_io is set */
		if (kdp_read_io == FALSE)
			if (!pmap_valid_page(i386_btop(cur_phys_dst)) ||
			    !pmap_valid_page(i386_btop(cur_phys_src)))
				goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy */
		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt))
			goto exit;

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}
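
/*
 * Illustrative sketch only (not part of the KDP request paths): reading
 * memory mapped by a pmap other than kernel_pmap comes down to pointing
 * kdp_pmap at that pmap around the call to kdp_machine_vm_read().  The
 * helper below is hypothetical and exists purely to document the
 * convention; it is not called anywhere.
 */
static mach_vm_size_t kdp_example_read_with_pmap(pmap_t, mach_vm_address_t,
    caddr_t, mach_vm_size_t) __unused;

static mach_vm_size_t
kdp_example_read_with_pmap(pmap_t target_pmap, mach_vm_address_t vaddr,
    caddr_t kbuf, mach_vm_size_t len)
{
	mach_vm_size_t nread;

	kdp_pmap = target_pmap;		/* source VAs translated with target_pmap */
	nread = kdp_machine_vm_read(vaddr, kbuf, len);
	kdp_pmap = 0;			/* back to kernel_pmap translation */

	return nread;
}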

/*
 * Read len bytes starting at the physical address named in the request into
 * the kernel buffer dst.  If lcpu names another logical CPU, the request is
 * re-issued on that CPU via kdp_x86_xcpu_invoke(); otherwise it is handled
 * here.  Returns the number of bytes actually copied.
 */
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
    uint16_t lcpu)
{
	mach_vm_address_t src = rq->address;
	mach_vm_size_t len = rq->nbytes;

	addr64_t cur_virt_dst;
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
		       kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst);
	}

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	cur_virt_dst = (addr64_t)(intptr_t)dst;
	cur_phys_src = (addr64_t)src;

	while (resid != 0) {
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy; use ml_copy_phys() in the event this is
		 * a short read with potential side effects.
		 */
		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt))
			goto exit;

		cur_phys_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

/*
 * Copy len bytes from the kernel-virtual buffer src to the kernel-virtual
 * address dst, translating both sides through kernel_pmap one page at a time.
 */
mach_vm_size_t
kdp_machine_vm_write(caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len,
	    ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
	cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do the physical copy */
		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt))
			goto exit;

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

/*
 * Copy len bytes from the kernel-virtual buffer src to the physical address
 * named in the request.  If lcpu names another logical CPU, the request is
 * re-issued on that CPU via kdp_x86_xcpu_invoke().
 */
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
    uint16_t lcpu)
{
	mach_vm_address_t dst = rq->address;
	mach_vm_size_t len = rq->nbytes;
	addr64_t cur_virt_src;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
		       kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src);
	}

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len,
	    ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = (addr64_t)(intptr_t)src;
	cur_phys_dst = (addr64_t)dst;

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do the physical copy */
		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt))
			goto exit;

		cur_virt_src += cnt;
		cur_phys_dst += cnt;
		resid -= cnt;
	}

exit:
	return (len - resid);
}

int
kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data);
	}

	switch (size) {
	case 1:
		*((uint8_t *) data) = inb(addr);
		break;
	case 2:
		*((uint16_t *) data) = inw(addr);
		break;
	case 4:
		*((uint32_t *) data) = inl(addr);
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}
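
/*
 * Illustrative sketch only: a 4-byte port read issued through the handler
 * above on the current logical CPU.  Only the request fields that
 * kdp_machine_ioport_read() actually consumes are filled in; the helper
 * name is hypothetical and the function is not called anywhere.
 */
static int kdp_example_port_read32(uint16_t, uint32_t *) __unused;

static int
kdp_example_port_read32(uint16_t port, uint32_t *val)
{
	kdp_readioport_req_t rq;

	rq.address = port;
	rq.nbytes = 4;		/* selects the inl() case above */

	return kdp_machine_ioport_read(&rq, (caddr_t)val, KDP_CURRENT_LCPU);
}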

int
kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data);
	}

	switch (size) {
	case 1:
		outb(addr, *((uint8_t *) data));
		break;
	case 2:
		outw(addr, *((uint16_t *) data));
		break;
	case 4:
		outl(addr, *((uint32_t *) data));
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data);
	}

	*value = rdmsr64(msr);
	return KDPERR_NO_ERROR;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data);
	}

	wrmsr64(msr, *value);
	return KDPERR_NO_ERROR;
}

pt_entry_t *debugger_ptep;
vm_map_offset_t debugger_window_kva;

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */

void
kdp_machine_init(void)
{
	if (debug_boot_arg == 0)
		return;

	vm_map_entry_t e;
	kern_return_t kr;

	kr = vm_map_find_space(kernel_map,
	    &debugger_window_kva,
	    PAGE_SIZE, 0,
	    0,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_OSFMK,
	    &e);

	if (kr != KERN_SUCCESS) {
		panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr);
	}

	vm_map_unlock(kernel_map);

	debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

	if (debugger_ptep == NULL) {
		pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
		debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
	}
}
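
/*
 * Minimal sketch (illustration only, not the debugger's actual remap path):
 * once kdp_machine_init() has reserved the window, pointing it at an
 * arbitrary physical page amounts to storing a new PTE through
 * debugger_ptep and invalidating the stale translation, after which
 * debugger_window_kva aliases that page.  The helper name, the PTE bit
 * combination and the whole-TLB flush below are assumptions chosen for
 * simplicity; the function is not called anywhere.
 */
static void kdp_example_remap_debugger_window(pmap_paddr_t) __unused;

static void
kdp_example_remap_debugger_window(pmap_paddr_t pa)
{
	if (debugger_ptep == NULL)
		return;		/* window not set up (no debug boot-arg) */

	/* Install a valid, writable mapping of the target physical page. */
	pmap_store_pte(debugger_ptep,
	    (pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW);

	/* Discard any cached translation for the window address. */
	flush_tlb_raw();
}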