1 | /* |
2 | * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | |
30 | /* |
31 | * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol |
32 | * from this file (_dtrace_register_anon_DOF) always needs to be exported for |
33 | * an external kext to link against. |
34 | */ |
35 | |
36 | #if CONFIG_DTRACE |
37 | |
38 | #define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */ |
39 | #include <kern/thread.h> |
40 | #include <mach/thread_status.h> |
41 | |
42 | #include <stdarg.h> |
43 | #include <string.h> |
44 | #include <sys/malloc.h> |
45 | #include <sys/time.h> |
46 | #include <sys/proc.h> |
47 | #include <sys/proc_internal.h> |
48 | #include <sys/kauth.h> |
49 | #include <sys/user.h> |
50 | #include <sys/systm.h> |
51 | #include <sys/dtrace.h> |
52 | #include <sys/dtrace_impl.h> |
53 | #include <libkern/OSAtomic.h> |
54 | #include <libkern/OSKextLibPrivate.h> |
55 | #include <kern/kern_types.h> |
56 | #include <kern/timer_call.h> |
57 | #include <kern/thread_call.h> |
58 | #include <kern/task.h> |
59 | #include <kern/sched_prim.h> |
60 | #include <kern/queue.h> |
61 | #include <miscfs/devfs/devfs.h> |
62 | #include <kern/kalloc.h> |
63 | |
64 | #include <mach/vm_param.h> |
65 | #include <mach/mach_vm.h> |
66 | #include <mach/task.h> |
67 | #include <vm/pmap.h> |
68 | #include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */ |
69 | |
70 | /* |
71 | * pid/proc |
72 | */ |
73 | /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */ |
74 | #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */ |
75 | |
76 | void |
77 | dtrace_sprlock(proc_t *p) |
78 | { |
79 | lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED); |
80 | lck_mtx_lock(&p->p_dtrace_sprlock); |
81 | } |
82 | |
83 | void |
84 | dtrace_sprunlock(proc_t *p) |
85 | { |
86 | lck_mtx_unlock(&p->p_dtrace_sprlock); |
87 | |
88 | } |
89 | |
90 | /* Not called from probe context */ |
91 | proc_t * |
92 | sprlock(pid_t pid) |
93 | { |
94 | proc_t* p; |
95 | |
96 | if ((p = proc_find(pid)) == PROC_NULL) { |
97 | return PROC_NULL; |
98 | } |
99 | |
100 | task_suspend_internal(p->task); |
101 | |
102 | dtrace_sprlock(p); |
103 | |
104 | proc_lock(p); |
105 | |
106 | return p; |
107 | } |
108 | |
109 | /* Not called from probe context */ |
110 | void |
111 | sprunlock(proc_t *p) |
112 | { |
113 | if (p != PROC_NULL) { |
114 | proc_unlock(p); |
115 | |
116 | dtrace_sprunlock(p); |
117 | |
118 | task_resume_internal(p->task); |
119 | |
120 | proc_rele(p); |
121 | } |
122 | } |
123 | |
124 | /* |
125 | * uread/uwrite |
126 | */ |
127 | |
128 | // These are not exported from vm_map.h. |
129 | extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size); |
130 | extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size); |
131 | |
132 | /* Not called from probe context */ |
133 | int |
134 | uread(proc_t *p, void *buf, user_size_t len, user_addr_t a) |
135 | { |
136 | kern_return_t ret; |
137 | |
138 | ASSERT(p != PROC_NULL); |
139 | ASSERT(p->task != NULL); |
140 | |
141 | task_t task = p->task; |
142 | |
143 | /* |
144 | * Grab a reference to the task vm_map_t to make sure |
145 | * the map isn't pulled out from under us. |
146 | * |
147 | * Because the proc_lock is not held at all times on all code |
148 | * paths leading here, it is possible for the proc to have |
149 | * exited. If the map is null, fail. |
150 | */ |
151 | vm_map_t map = get_task_map_reference(task); |
152 | if (map) { |
153 | ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len); |
154 | vm_map_deallocate(map); |
155 | } else |
156 | ret = KERN_TERMINATED; |
157 | |
158 | return (int)ret; |
159 | } |
160 | |
161 | |
162 | /* Not called from probe context */ |
163 | int |
164 | uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a) |
165 | { |
166 | kern_return_t ret; |
167 | |
	ASSERT(p != PROC_NULL);
169 | ASSERT(p->task != NULL); |
170 | |
171 | task_t task = p->task; |
172 | |
173 | /* |
174 | * Grab a reference to the task vm_map_t to make sure |
175 | * the map isn't pulled out from under us. |
176 | * |
177 | * Because the proc_lock is not held at all times on all code |
178 | * paths leading here, it is possible for the proc to have |
179 | * exited. If the map is null, fail. |
180 | */ |
181 | vm_map_t map = get_task_map_reference(task); |
182 | if (map) { |
183 | /* Find the memory permissions. */ |
184 | uint32_t nestingDepth=999999; |
185 | vm_region_submap_short_info_data_64_t info; |
186 | mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; |
187 | mach_vm_address_t address = (mach_vm_address_t)a; |
188 | mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len; |
189 | |
190 | ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count); |
191 | if (ret != KERN_SUCCESS) |
192 | goto done; |
193 | |
194 | vm_prot_t reprotect; |
195 | |
196 | if (!(info.protection & VM_PROT_WRITE)) { |
197 | /* Save the original protection values for restoration later */ |
198 | reprotect = info.protection; |
199 | |
200 | if (info.max_protection & VM_PROT_WRITE) { |
201 | /* The memory is not currently writable, but can be made writable. */ |
202 | ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE); |
203 | } else { |
204 | /* |
205 | * The memory is not currently writable, and cannot be made writable. We need to COW this memory. |
206 | * |
207 | * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails. |
208 | */ |
209 | ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE); |
210 | } |
211 | |
212 | if (ret != KERN_SUCCESS) |
213 | goto done; |
214 | |
215 | } else { |
216 | /* The memory was already writable. */ |
217 | reprotect = VM_PROT_NONE; |
218 | } |
219 | |
220 | ret = vm_map_write_user( map, |
221 | buf, |
222 | (vm_map_address_t)a, |
223 | (vm_size_t)len); |
224 | |
225 | dtrace_flush_caches(); |
226 | |
227 | if (ret != KERN_SUCCESS) |
228 | goto done; |
229 | |
230 | if (reprotect != VM_PROT_NONE) { |
231 | ASSERT(reprotect & VM_PROT_EXECUTE); |
232 | ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect); |
233 | } |
234 | |
235 | done: |
236 | vm_map_deallocate(map); |
237 | } else |
238 | ret = KERN_TERMINATED; |
239 | |
240 | return (int)ret; |
241 | } |
242 | |
243 | /* |
244 | * cpuvar |
245 | */ |
246 | lck_mtx_t cpu_lock; |
247 | lck_mtx_t cyc_lock; |
248 | lck_mtx_t mod_lock; |
249 | |
250 | dtrace_cpu_t *cpu_list; |
251 | cpu_core_t *cpu_core; /* XXX TLB lockdown? */ |
252 | |
253 | /* |
254 | * cred_t |
255 | */ |
256 | |
257 | /* |
258 | * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since |
259 | * that function may try to resolve a lazy credential binding, which entails taking the proc_lock. |
260 | */ |
261 | cred_t * |
262 | dtrace_CRED(void) |
263 | { |
264 | struct uthread *uthread = get_bsdthread_info(current_thread()); |
265 | |
266 | if (uthread == NULL) |
267 | return NULL; |
268 | else |
269 | return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */ |
270 | } |
271 | |
272 | #define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr)) |
273 | #define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \ |
274 | HAS_ALLPRIVS(cr) : \ |
275 | PRIV_ISASSERT(&CR_OEPRIV(cr), pr)) |
276 | |
277 | int PRIV_POLICY_CHOICE(void* cred, int priv, int all) |
278 | { |
279 | #pragma unused(priv, all) |
280 | return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */ |
281 | } |
282 | |
283 | int |
284 | PRIV_POLICY_ONLY(void *cr, int priv, int boolean) |
285 | { |
286 | #pragma unused(priv, boolean) |
287 | return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */ |
288 | } |
289 | |
290 | uid_t |
crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
292 | |
293 | /* |
294 | * "cyclic" |
295 | */ |
296 | |
297 | typedef struct wrap_timer_call { |
298 | /* node attributes */ |
299 | cyc_handler_t hdlr; |
300 | cyc_time_t when; |
301 | uint64_t deadline; |
302 | int cpuid; |
303 | boolean_t suspended; |
304 | struct timer_call call; |
305 | |
306 | /* next item in the linked list */ |
307 | LIST_ENTRY(wrap_timer_call) entries; |
308 | } wrap_timer_call_t; |
309 | |
310 | #define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL |
311 | #define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL |
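/*
 * Sentinel values stuffed into cyt_interval: cyclic_remove() sets WAKEUP_REAPER so that
 * _cyclic_apply() wakes the remover once the thread call has re-armed, and NEARLY_FOREVER
 * parks a cyclic that could not be freed so that it effectively never fires again.
 */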
312 | |
313 | |
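/*
 * Omni cyclics keep one wrap_timer_call_t per CPU, indexed by cpu_number();
 * cyclic_add_omni() allocates NCPU entries for the flexible array member below.
 */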
314 | typedef struct cyc_list { |
315 | cyc_omni_handler_t cyl_omni; |
316 | wrap_timer_call_t cyl_wrap_by_cpus[]; |
317 | #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) |
318 | } __attribute__ ((aligned (8))) cyc_list_t; |
319 | #else |
320 | } cyc_list_t; |
321 | #endif |
322 | |
323 | /* CPU going online/offline notifications */ |
324 | void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL; |
325 | void dtrace_cpu_state_changed(int, boolean_t); |
326 | |
327 | void |
328 | dtrace_install_cpu_hooks(void) { |
329 | dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed; |
330 | } |
331 | |
332 | void |
333 | dtrace_cpu_state_changed(int cpuid, boolean_t is_running) { |
334 | #pragma unused(cpuid) |
335 | wrap_timer_call_t *wrapTC = NULL; |
336 | boolean_t suspend = (is_running ? FALSE : TRUE); |
337 | dtrace_icookie_t s; |
338 | |
339 | /* Ensure that we're not going to leave the CPU */ |
340 | s = dtrace_interrupt_disable(); |
341 | assert(cpuid == cpu_number()); |
342 | |
343 | LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) { |
344 | assert(wrapTC->cpuid == cpu_number()); |
345 | if (suspend) { |
346 | assert(!wrapTC->suspended); |
347 | /* If this fails, we'll panic anyway, so let's do this now. */ |
348 | if (!timer_call_cancel(&wrapTC->call)) |
				panic("dtrace_cpu_state_changed() failed to cancel a timer call");
350 | wrapTC->suspended = TRUE; |
351 | } else { |
352 | /* Rearm the timer, but ensure it was suspended first. */ |
353 | assert(wrapTC->suspended); |
354 | clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(), |
355 | &wrapTC->deadline); |
356 | timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline, |
357 | TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); |
358 | wrapTC->suspended = FALSE; |
359 | } |
360 | |
361 | } |
362 | |
363 | /* Restore the previous interrupt state. */ |
364 | dtrace_interrupt_enable(s); |
365 | } |
366 | |
367 | static void |
368 | _timer_call_apply_cyclic( void *ignore, void *vTChdl ) |
369 | { |
370 | #pragma unused(ignore) |
371 | wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl; |
372 | |
373 | (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg ); |
374 | |
375 | clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) ); |
376 | timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL ); |
377 | } |
378 | |
379 | static cyclic_id_t |
380 | timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when) |
381 | { |
382 | uint64_t now; |
383 | dtrace_icookie_t s; |
384 | |
385 | timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL ); |
386 | wrapTC->hdlr = *handler; |
387 | wrapTC->when = *when; |
388 | |
389 | nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval ); |
390 | |
391 | now = mach_absolute_time(); |
392 | wrapTC->deadline = now; |
393 | |
394 | clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) ); |
395 | |
	/* Insert the timer into the list of running timers on this CPU, and start it. */
397 | s = dtrace_interrupt_disable(); |
398 | wrapTC->cpuid = cpu_number(); |
399 | LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries); |
400 | timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline, |
401 | TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); |
402 | wrapTC->suspended = FALSE; |
403 | dtrace_interrupt_enable(s); |
404 | |
405 | return (cyclic_id_t)wrapTC; |
406 | } |
407 | |
408 | /* |
409 | * Executed on the CPU the timer is running on. |
410 | */ |
411 | static void |
412 | timer_call_remove_cyclic(wrap_timer_call_t *wrapTC) |
413 | { |
414 | assert(wrapTC); |
415 | assert(cpu_number() == wrapTC->cpuid); |
416 | |
417 | if (!timer_call_cancel(&wrapTC->call)) |
418 | panic("timer_call_remove_cyclic() failed to cancel a timer call" ); |
419 | |
420 | LIST_REMOVE(wrapTC, entries); |
421 | } |
422 | |
423 | static void * |
424 | timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC) |
425 | { |
426 | return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL); |
427 | } |
428 | |
429 | cyclic_id_t |
430 | cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when) |
431 | { |
432 | wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK); |
433 | if (NULL == wrapTC) |
434 | return CYCLIC_NONE; |
435 | else |
436 | return timer_call_add_cyclic( wrapTC, handler, when ); |
437 | } |
438 | |
439 | void |
440 | cyclic_timer_remove(cyclic_id_t cyclic) |
441 | { |
442 | ASSERT( cyclic != CYCLIC_NONE ); |
443 | |
444 | /* Removing a timer call must be done on the CPU the timer is running on. */ |
445 | wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic; |
446 | dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic); |
447 | |
448 | _FREE((void *)cyclic, M_TEMP); |
449 | } |
450 | |
451 | static void |
452 | _cyclic_add_omni(cyc_list_t *cyc_list) |
453 | { |
454 | cyc_time_t cT; |
455 | cyc_handler_t cH; |
456 | cyc_omni_handler_t *omni = &cyc_list->cyl_omni; |
457 | |
458 | (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT); |
459 | |
460 | wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]; |
461 | timer_call_add_cyclic(wrapTC, &cH, &cT); |
462 | } |
463 | |
464 | cyclic_id_list_t |
465 | cyclic_add_omni(cyc_omni_handler_t *omni) |
466 | { |
467 | cyc_list_t *cyc_list = |
468 | _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK); |
469 | |
470 | if (NULL == cyc_list) |
471 | return NULL; |
472 | |
473 | cyc_list->cyl_omni = *omni; |
474 | |
475 | dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list); |
476 | |
477 | return (cyclic_id_list_t)cyc_list; |
478 | } |
479 | |
480 | static void |
481 | _cyclic_remove_omni(cyc_list_t *cyc_list) |
482 | { |
483 | cyc_omni_handler_t *omni = &cyc_list->cyl_omni; |
484 | void *oarg; |
485 | wrap_timer_call_t *wrapTC; |
486 | |
487 | /* |
488 | * If the processor was offline when dtrace started, we did not allocate |
489 | * a cyclic timer for this CPU. |
490 | */ |
491 | if ((wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]) != NULL) { |
492 | oarg = timer_call_get_cyclic_arg(wrapTC); |
493 | timer_call_remove_cyclic(wrapTC); |
494 | (omni->cyo_offline)(omni->cyo_arg, CPU, oarg); |
495 | } |
496 | } |
497 | |
498 | void |
499 | cyclic_remove_omni(cyclic_id_list_t cyc_list) |
500 | { |
501 | ASSERT(cyc_list != NULL); |
502 | |
503 | dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list); |
504 | _FREE(cyc_list, M_TEMP); |
505 | } |
506 | |
507 | typedef struct wrap_thread_call { |
508 | thread_call_t TChdl; |
509 | cyc_handler_t hdlr; |
510 | cyc_time_t when; |
511 | uint64_t deadline; |
512 | } wrap_thread_call_t; |
513 | |
514 | /* |
515 | * _cyclic_apply will run on some thread under kernel_task. That's OK for the |
516 | * cleaner and the deadman, but too distant in time and place for the profile provider. |
517 | */ |
518 | static void |
519 | _cyclic_apply( void *ignore, void *vTChdl ) |
520 | { |
521 | #pragma unused(ignore) |
522 | wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl; |
523 | |
524 | (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg ); |
525 | |
526 | clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) ); |
527 | (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline ); |
528 | |
529 | /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */ |
530 | if (wrapTC->when.cyt_interval == WAKEUP_REAPER) |
531 | thread_wakeup((event_t)wrapTC); |
532 | } |
533 | |
534 | cyclic_id_t |
535 | cyclic_add(cyc_handler_t *handler, cyc_time_t *when) |
536 | { |
537 | uint64_t now; |
538 | |
539 | wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK); |
540 | if (NULL == wrapTC) |
541 | return CYCLIC_NONE; |
542 | |
543 | wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL ); |
544 | wrapTC->hdlr = *handler; |
545 | wrapTC->when = *when; |
546 | |
547 | ASSERT(when->cyt_when == 0); |
548 | ASSERT(when->cyt_interval < WAKEUP_REAPER); |
549 | |
550 | nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval); |
551 | |
552 | now = mach_absolute_time(); |
553 | wrapTC->deadline = now; |
554 | |
555 | clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) ); |
556 | (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline ); |
557 | |
558 | return (cyclic_id_t)wrapTC; |
559 | } |
560 | |
561 | static void |
562 | noop_cyh_func(void * ignore) |
563 | { |
564 | #pragma unused(ignore) |
565 | } |
566 | |
567 | void |
568 | cyclic_remove(cyclic_id_t cyclic) |
569 | { |
570 | wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic; |
571 | |
572 | ASSERT(cyclic != CYCLIC_NONE); |
573 | |
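	/*
	 * If the thread call cannot be cancelled it is currently executing: ask
	 * _cyclic_apply() (via WAKEUP_REAPER) to wake us once it has re-armed, then retry.
	 */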
574 | while (!thread_call_cancel(wrapTC->TChdl)) { |
575 | int ret = assert_wait(wrapTC, THREAD_UNINT); |
576 | ASSERT(ret == THREAD_WAITING); |
577 | |
578 | wrapTC->when.cyt_interval = WAKEUP_REAPER; |
579 | |
580 | ret = thread_block(THREAD_CONTINUE_NULL); |
581 | ASSERT(ret == THREAD_AWAKENED); |
582 | } |
583 | |
584 | if (thread_call_free(wrapTC->TChdl)) |
585 | _FREE(wrapTC, M_TEMP); |
586 | else { |
587 | /* Gut this cyclic and move on ... */ |
588 | wrapTC->hdlr.cyh_func = noop_cyh_func; |
589 | wrapTC->when.cyt_interval = NEARLY_FOREVER; |
590 | } |
591 | } |
592 | |
593 | kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t); |
594 | |
595 | kern_return_t |
596 | _dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements) |
597 | { |
598 | #pragma unused(name, data, nelements) |
599 | return KERN_FAILURE; |
600 | } |
601 | |
602 | int |
603 | ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); } |
604 | |
605 | int |
606 | ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type, |
607 | minor_t minor_num, const char *node_type, int flag) |
608 | { |
609 | #pragma unused(spec_type,node_type,flag) |
610 | dev_t dev = makedev( ddi_driver_major(dip), minor_num ); |
611 | |
612 | if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 )) |
613 | return DDI_FAILURE; |
614 | else |
615 | return DDI_SUCCESS; |
616 | } |
617 | |
618 | void |
619 | ddi_remove_minor_node(dev_info_t *dip, char *name) |
620 | { |
621 | #pragma unused(dip,name) |
622 | /* XXX called from dtrace_detach, so NOTREACHED for now. */ |
623 | } |
624 | |
625 | major_t |
626 | getemajor( dev_t d ) |
627 | { |
628 | return (major_t) major(d); |
629 | } |
630 | |
631 | minor_t |
632 | getminor ( dev_t d ) |
633 | { |
634 | return (minor_t) minor(d); |
635 | } |
636 | |
637 | extern void Debugger(const char*); |
638 | |
639 | void |
640 | debug_enter(char *c) { Debugger(c); } |
641 | |
642 | /* |
643 | * kmem |
644 | */ |
645 | |
646 | void * |
647 | dt_kmem_alloc_site(size_t size, int kmflag, vm_allocation_site_t *site) |
648 | { |
649 | #pragma unused(kmflag) |
650 | |
651 | /* |
652 | * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact). |
653 | * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock. |
654 | */ |
655 | vm_size_t vsize = size; |
656 | return kalloc_canblock(&vsize, TRUE, site); |
657 | } |
658 | |
659 | void * |
660 | dt_kmem_zalloc_site(size_t size, int kmflag, vm_allocation_site_t *site) |
661 | { |
662 | #pragma unused(kmflag) |
663 | |
664 | /* |
665 | * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact). |
666 | * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock. |
667 | */ |
668 | vm_size_t vsize = size; |
669 | void* buf = kalloc_canblock(&vsize, TRUE, site); |
670 | |
671 | if(!buf) |
672 | return NULL; |
673 | |
674 | bzero(buf, size); |
675 | |
676 | return buf; |
677 | } |
678 | |
679 | void |
680 | dt_kmem_free(void *buf, size_t size) |
681 | { |
682 | #pragma unused(size) |
683 | /* |
	 * DTrace relies on this; it does a lot of NULL frees.
	 * A NULL free causes the debug builds to panic.
686 | */ |
687 | if (buf == NULL) return; |
688 | |
689 | ASSERT(size > 0); |
690 | |
691 | kfree(buf, size); |
692 | } |
693 | |
694 | |
695 | |
696 | /* |
697 | * aligned dt_kmem allocator |
698 | * align should be a power of two |
699 | */ |
700 | |
701 | void* |
702 | dt_kmem_alloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *site) |
703 | { |
704 | void *mem, **addr_to_free; |
705 | intptr_t mem_aligned; |
706 | size_t *size_to_free, hdr_size; |
707 | |
708 | /* Must be a power of two. */ |
709 | assert(align != 0); |
710 | assert((align & (align - 1)) == 0); |
711 | |
712 | /* |
713 | * We are going to add a header to the allocation. It contains |
714 | * the address to free and the total size of the buffer. |
715 | */ |
716 | hdr_size = sizeof(size_t) + sizeof(void*); |
717 | mem = dt_kmem_alloc_site(size + align + hdr_size, kmflag, site); |
718 | if (mem == NULL) |
719 | return NULL; |
720 | |
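	/*
	 * Round up past the header to the next 'align' boundary. Layout, low to high:
	 * mem ... [size_to_free][addr_to_free][aligned buffer returned to the caller]
	 */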
721 | mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1)); |
722 | |
723 | /* Write the address to free in the header. */ |
724 | addr_to_free = (void**) (mem_aligned - sizeof(void*)); |
725 | *addr_to_free = mem; |
726 | |
727 | /* Write the size to free in the header. */ |
728 | size_to_free = (size_t*) (mem_aligned - hdr_size); |
729 | *size_to_free = size + align + hdr_size; |
730 | |
731 | return (void*) mem_aligned; |
732 | } |
733 | |
734 | void* |
735 | dt_kmem_zalloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *s) |
736 | { |
737 | void* buf; |
738 | |
739 | buf = dt_kmem_alloc_aligned_site(size, align, kmflag, s); |
740 | |
741 | if(!buf) |
742 | return NULL; |
743 | |
744 | bzero(buf, size); |
745 | |
746 | return buf; |
747 | } |
748 | |
749 | void |
750 | dt_kmem_free_aligned(void* buf, size_t size) |
751 | { |
752 | #pragma unused(size) |
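	/* Recover the header that dt_kmem_alloc_aligned_site() stashed just below the aligned pointer. */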
753 | intptr_t ptr = (intptr_t) buf; |
754 | void **addr_to_free = (void**) (ptr - sizeof(void*)); |
755 | size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*))); |
756 | |
757 | if (buf == NULL) |
758 | return; |
759 | |
760 | dt_kmem_free(*addr_to_free, *size_to_free); |
761 | } |
762 | |
763 | /* |
764 | * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and |
765 | * doesn't specify constructor, destructor, or reclaim methods. |
766 | * At present, it always zeroes the block it obtains from kmem_cache_alloc(). |
767 | * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE. |
768 | */ |
769 | kmem_cache_t * |
770 | kmem_cache_create( |
771 | const char *name, /* descriptive name for this cache */ |
772 | size_t bufsize, /* size of the objects it manages */ |
773 | size_t align, /* required object alignment */ |
774 | int (*constructor)(void *, void *, int), /* object constructor */ |
775 | void (*destructor)(void *, void *), /* object destructor */ |
776 | void (*reclaim)(void *), /* memory reclaim callback */ |
777 | void *private, /* pass-thru arg for constr/destr/reclaim */ |
778 | vmem_t *vmp, /* vmem source for slab allocation */ |
779 | int cflags) /* cache creation flags */ |
780 | { |
781 | #pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags) |
782 | return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */ |
783 | } |
784 | |
785 | void * |
786 | kmem_cache_alloc(kmem_cache_t *cp, int kmflag) |
787 | { |
788 | #pragma unused(kmflag) |
789 | size_t bufsize = (size_t)cp; |
790 | return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK); |
791 | } |
792 | |
793 | void |
794 | kmem_cache_free(kmem_cache_t *cp, void *buf) |
795 | { |
796 | #pragma unused(cp) |
797 | _FREE(buf, M_TEMP); |
798 | } |
799 | |
800 | void |
801 | kmem_cache_destroy(kmem_cache_t *cp) |
802 | { |
803 | #pragma unused(cp) |
804 | } |
805 | |
806 | /* |
807 | * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids |
808 | */ |
809 | typedef unsigned int u_daddr_t; |
810 | #include "blist.h" |
811 | |
812 | /* By passing around blist *handles*, the underlying blist can be resized as needed. */ |
813 | struct blist_hdl { |
814 | blist_t blist; |
815 | }; |
816 | |
817 | vmem_t * |
818 | vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5, |
819 | void *ignore6, vmem_t *source, size_t qcache_max, int vmflag) |
820 | { |
821 | #pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag) |
822 | blist_t bl; |
823 | struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK); |
824 | |
825 | ASSERT(quantum == 1); |
826 | ASSERT(NULL == ignore5); |
827 | ASSERT(NULL == ignore6); |
828 | ASSERT(NULL == source); |
829 | ASSERT(0 == qcache_max); |
830 | ASSERT(vmflag & VMC_IDENTIFIER); |
831 | |
832 | size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */ |
833 | |
834 | p->blist = bl = blist_create( size ); |
835 | blist_free(bl, 0, size); |
836 | if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */ |
837 | |
838 | return (vmem_t *)p; |
839 | } |
840 | |
841 | void * |
842 | vmem_alloc(vmem_t *vmp, size_t size, int vmflag) |
843 | { |
844 | #pragma unused(vmflag) |
845 | struct blist_hdl *q = (struct blist_hdl *)vmp; |
846 | blist_t bl = q->blist; |
847 | daddr_t p; |
848 | |
849 | p = blist_alloc(bl, (daddr_t)size); |
850 | |
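	/* A return of -1 means the blist is exhausted; double its capacity and retry once. */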
851 | if ((daddr_t)-1 == p) { |
852 | blist_resize(&bl, (bl->bl_blocks) << 1, 1); |
853 | q->blist = bl; |
854 | p = blist_alloc(bl, (daddr_t)size); |
855 | if ((daddr_t)-1 == p) |
856 | panic("vmem_alloc: failure after blist_resize!" ); |
857 | } |
858 | |
859 | return (void *)(uintptr_t)p; |
860 | } |
861 | |
862 | void |
863 | vmem_free(vmem_t *vmp, void *vaddr, size_t size) |
864 | { |
865 | struct blist_hdl *p = (struct blist_hdl *)vmp; |
866 | |
867 | blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size ); |
868 | } |
869 | |
870 | void |
871 | vmem_destroy(vmem_t *vmp) |
872 | { |
873 | struct blist_hdl *p = (struct blist_hdl *)vmp; |
874 | |
875 | blist_destroy( p->blist ); |
	_FREE( p, M_TEMP );
877 | } |
878 | |
879 | /* |
880 | * Timing |
881 | */ |
882 | |
883 | /* |
884 | * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at |
885 | * January 1, 1970. Because it can be called from probe context, it must take no locks. |
886 | */ |
887 | |
888 | hrtime_t |
889 | dtrace_gethrestime(void) |
890 | { |
891 | clock_sec_t secs; |
892 | clock_nsec_t nanosecs; |
893 | uint64_t secs64, ns64; |
894 | |
895 | clock_get_calendar_nanotime_nowait(&secs, &nanosecs); |
896 | secs64 = (uint64_t)secs; |
897 | ns64 = (uint64_t)nanosecs; |
898 | |
899 | ns64 = ns64 + (secs64 * 1000000000LL); |
900 | return ns64; |
901 | } |
902 | |
903 | /* |
904 | * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin. |
905 | * Hence its primary use is to specify intervals. |
906 | */ |
907 | |
908 | hrtime_t |
909 | dtrace_abs_to_nano(uint64_t elapsed) |
910 | { |
911 | static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 }; |
912 | |
913 | /* |
914 | * If this is the first time we've run, get the timebase. |
915 | * We can use denom == 0 to indicate that sTimebaseInfo is |
916 | * uninitialised because it makes no sense to have a zero |
917 | * denominator in a fraction. |
918 | */ |
919 | |
920 | if ( sTimebaseInfo.denom == 0 ) { |
921 | (void) clock_timebase_info(&sTimebaseInfo); |
922 | } |
923 | |
924 | /* |
925 | * Convert to nanoseconds. |
926 | * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom; |
927 | * |
928 | * Provided the final result is representable in 64 bits the following maneuver will |
929 | * deliver that result without intermediate overflow. |
930 | */ |
931 | if (sTimebaseInfo.denom == sTimebaseInfo.numer) |
932 | return elapsed; |
933 | else if (sTimebaseInfo.denom == 1) |
934 | return elapsed * (uint64_t)sTimebaseInfo.numer; |
935 | else { |
936 | /* Decompose elapsed = eta32 * 2^32 + eps32: */ |
937 | uint64_t eta32 = elapsed >> 32; |
938 | uint64_t eps32 = elapsed & 0x00000000ffffffffLL; |
939 | |
940 | uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom; |
941 | |
942 | /* Form product of elapsed64 (decomposed) and numer: */ |
943 | uint64_t mu64 = numer * eta32; |
944 | uint64_t lambda64 = numer * eps32; |
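		/* numer, eta32 and eps32 each fit in 32 bits, so neither product can overflow 64 bits. */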
945 | |
946 | /* Divide the constituents by denom: */ |
947 | uint64_t q32 = mu64/denom; |
948 | uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */ |
949 | |
950 | return (q32 << 32) + ((r32 << 32) + lambda64)/denom; |
951 | } |
952 | } |
953 | |
954 | hrtime_t |
955 | dtrace_gethrtime(void) |
956 | { |
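	/* Anchor at the first call; subsequent calls return nanoseconds elapsed since then. */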
957 | static uint64_t start = 0; |
958 | |
959 | if (start == 0) |
960 | start = mach_absolute_time(); |
961 | |
962 | return dtrace_abs_to_nano(mach_absolute_time() - start); |
963 | } |
964 | |
965 | /* |
966 | * Atomicity and synchronization |
967 | */ |
968 | uint32_t |
969 | dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new) |
970 | { |
971 | if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) |
972 | return cmp; |
973 | else |
974 | return ~cmp; /* Must return something *other* than cmp */ |
975 | } |
976 | |
977 | void * |
978 | dtrace_casptr(void *target, void *cmp, void *new) |
979 | { |
980 | if (OSCompareAndSwapPtr( cmp, new, (void**)target )) |
981 | return cmp; |
982 | else |
983 | return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */ |
984 | } |
985 | |
986 | /* |
987 | * Interrupt manipulation |
988 | */ |
989 | dtrace_icookie_t |
990 | dtrace_interrupt_disable(void) |
991 | { |
992 | return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE); |
993 | } |
994 | |
995 | void |
996 | dtrace_interrupt_enable(dtrace_icookie_t reenable) |
997 | { |
998 | (void)ml_set_interrupts_enabled((boolean_t)reenable); |
999 | } |
1000 | |
1001 | /* |
1002 | * MP coordination |
1003 | */ |
1004 | static void |
1005 | dtrace_sync_func(void) {} |
1006 | |
1007 | /* |
1008 | * dtrace_sync() is not called from probe context. |
1009 | */ |
1010 | void |
1011 | dtrace_sync(void) |
1012 | { |
1013 | dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL); |
1014 | } |
1015 | |
1016 | /* |
1017 | * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context. |
1018 | */ |
1019 | |
1020 | extern kern_return_t dtrace_copyio_preflight(addr64_t); |
1021 | extern kern_return_t dtrace_copyio_postflight(addr64_t); |
1022 | |
1023 | static int |
1024 | dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size) |
1025 | { |
1026 | #pragma unused(kaddr) |
1027 | |
1028 | vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */ |
1029 | dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */ |
1030 | |
1031 | ASSERT(kaddr + size >= kaddr); |
1032 | |
1033 | if ( uaddr + size < uaddr || /* Avoid address wrap. */ |
1034 | KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */ |
1035 | { |
1036 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1037 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; |
1038 | return (0); |
1039 | } |
1040 | return (1); |
1041 | } |
1042 | |
1043 | void |
1044 | dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags) |
1045 | { |
1046 | #pragma unused(flags) |
1047 | |
1048 | if (dtrace_copycheck( src, dst, len )) { |
1049 | if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) { |
1050 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1051 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src; |
1052 | } |
1053 | dtrace_copyio_postflight(src); |
1054 | } |
1055 | } |
1056 | |
1057 | void |
1058 | dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags) |
1059 | { |
1060 | #pragma unused(flags) |
1061 | |
1062 | size_t actual; |
1063 | |
1064 | if (dtrace_copycheck( src, dst, len )) { |
1065 | /* copyin as many as 'len' bytes. */ |
1066 | int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual); |
1067 | |
1068 | /* |
1069 | * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was |
1070 | * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on. |
1071 | * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left |
1072 | * to the caller. |
1073 | */ |
1074 | if (error && error != ENAMETOOLONG) { |
1075 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1076 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src; |
1077 | } |
1078 | dtrace_copyio_postflight(src); |
1079 | } |
1080 | } |
1081 | |
1082 | void |
1083 | dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags) |
1084 | { |
1085 | #pragma unused(flags) |
1086 | |
1087 | if (dtrace_copycheck( dst, src, len )) { |
1088 | if (copyout((const void *)src, dst, (vm_size_t)len)) { |
1089 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1090 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst; |
1091 | } |
1092 | dtrace_copyio_postflight(dst); |
1093 | } |
1094 | } |
1095 | |
1096 | void |
1097 | dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags) |
1098 | { |
1099 | #pragma unused(flags) |
1100 | |
1101 | size_t actual; |
1102 | |
1103 | if (dtrace_copycheck( dst, src, len )) { |
1104 | |
1105 | /* |
1106 | * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was |
1107 | * not encountered. We raise CPU_DTRACE_BADADDR in that case. |
1108 | * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left |
1109 | * to the caller. |
1110 | */ |
1111 | if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) { |
1112 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1113 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst; |
1114 | } |
1115 | dtrace_copyio_postflight(dst); |
1116 | } |
1117 | } |
1118 | |
1119 | extern const int copysize_limit_panic; |
1120 | |
1121 | int dtrace_copy_maxsize(void) |
1122 | { |
1123 | return copysize_limit_panic; |
1124 | } |
1125 | |
1126 | |
1127 | int |
1128 | dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes) |
1129 | { |
1130 | int maxsize = dtrace_copy_maxsize(); |
1131 | /* |
	 * Partition the copyout into copysize_limit_panic-sized chunks
1133 | */ |
1134 | while (nbytes >= (vm_size_t)maxsize) { |
1135 | if (copyout(kaddr, uaddr, maxsize) != 0) |
1136 | return (EFAULT); |
1137 | |
1138 | nbytes -= maxsize; |
1139 | uaddr += maxsize; |
1140 | kaddr += maxsize; |
1141 | } |
1142 | if (nbytes > 0) { |
1143 | if (copyout(kaddr, uaddr, nbytes) != 0) |
1144 | return (EFAULT); |
1145 | } |
1146 | |
1147 | return (0); |
1148 | } |
1149 | |
1150 | uint8_t |
1151 | dtrace_fuword8(user_addr_t uaddr) |
1152 | { |
1153 | uint8_t ret = 0; |
1154 | |
1155 | DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); |
1156 | if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { |
1157 | if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { |
1158 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1159 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; |
1160 | } |
1161 | dtrace_copyio_postflight(uaddr); |
1162 | } |
1163 | DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); |
1164 | |
1165 | return(ret); |
1166 | } |
1167 | |
1168 | uint16_t |
1169 | dtrace_fuword16(user_addr_t uaddr) |
1170 | { |
1171 | uint16_t ret = 0; |
1172 | |
1173 | DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); |
1174 | if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { |
1175 | if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { |
1176 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1177 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; |
1178 | } |
1179 | dtrace_copyio_postflight(uaddr); |
1180 | } |
1181 | DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); |
1182 | |
1183 | return(ret); |
1184 | } |
1185 | |
1186 | uint32_t |
1187 | dtrace_fuword32(user_addr_t uaddr) |
1188 | { |
1189 | uint32_t ret = 0; |
1190 | |
1191 | DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); |
1192 | if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { |
1193 | if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { |
1194 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1195 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; |
1196 | } |
1197 | dtrace_copyio_postflight(uaddr); |
1198 | } |
1199 | DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); |
1200 | |
1201 | return(ret); |
1202 | } |
1203 | |
1204 | uint64_t |
1205 | dtrace_fuword64(user_addr_t uaddr) |
1206 | { |
1207 | uint64_t ret = 0; |
1208 | |
1209 | DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); |
1210 | if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { |
1211 | if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { |
1212 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1213 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; |
1214 | } |
1215 | dtrace_copyio_postflight(uaddr); |
1216 | } |
1217 | DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); |
1218 | |
1219 | return(ret); |
1220 | } |
1221 | |
1222 | /* |
1223 | * Emulation of Solaris fuword / suword |
 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1225 | */ |
1226 | |
1227 | int |
1228 | fuword8(user_addr_t uaddr, uint8_t *value) |
1229 | { |
1230 | if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) { |
1231 | return -1; |
1232 | } |
1233 | |
1234 | return 0; |
1235 | } |
1236 | |
1237 | int |
1238 | fuword16(user_addr_t uaddr, uint16_t *value) |
1239 | { |
1240 | if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) { |
1241 | return -1; |
1242 | } |
1243 | |
1244 | return 0; |
1245 | } |
1246 | |
1247 | int |
1248 | fuword32(user_addr_t uaddr, uint32_t *value) |
1249 | { |
1250 | if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) { |
1251 | return -1; |
1252 | } |
1253 | |
1254 | return 0; |
1255 | } |
1256 | |
1257 | int |
1258 | fuword64(user_addr_t uaddr, uint64_t *value) |
1259 | { |
1260 | if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) { |
1261 | return -1; |
1262 | } |
1263 | |
1264 | return 0; |
1265 | } |
1266 | |
1267 | void |
1268 | fuword32_noerr(user_addr_t uaddr, uint32_t *value) |
1269 | { |
1270 | if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) { |
1271 | *value = 0; |
1272 | } |
1273 | } |
1274 | |
1275 | void |
1276 | fuword64_noerr(user_addr_t uaddr, uint64_t *value) |
1277 | { |
1278 | if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) { |
1279 | *value = 0; |
1280 | } |
1281 | } |
1282 | |
1283 | int |
1284 | suword64(user_addr_t addr, uint64_t value) |
1285 | { |
1286 | if (copyout((const void *)&value, addr, sizeof(value)) != 0) { |
1287 | return -1; |
1288 | } |
1289 | |
1290 | return 0; |
1291 | } |
1292 | |
1293 | int |
1294 | suword32(user_addr_t addr, uint32_t value) |
1295 | { |
1296 | if (copyout((const void *)&value, addr, sizeof(value)) != 0) { |
1297 | return -1; |
1298 | } |
1299 | |
1300 | return 0; |
1301 | } |
1302 | |
1303 | /* |
1304 | * Miscellaneous |
1305 | */ |
1306 | extern boolean_t dtrace_tally_fault(user_addr_t); |
1307 | |
1308 | boolean_t |
1309 | dtrace_tally_fault(user_addr_t uaddr) |
1310 | { |
1311 | DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); |
1312 | cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; |
1313 | return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE ); |
1314 | } |
1315 | |
1316 | #define TOTTY 0x02 |
1317 | extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */ |
1318 | |
1319 | int |
1320 | vuprintf(const char *format, va_list ap) |
1321 | { |
1322 | return prf(format, ap, TOTTY, NULL); |
1323 | } |
1324 | |
1325 | /* Not called from probe context */ |
1326 | void cmn_err( int level, const char *format, ... ) |
1327 | { |
1328 | #pragma unused(level) |
1329 | va_list alist; |
1330 | |
1331 | va_start(alist, format); |
1332 | vuprintf(format, alist); |
1333 | va_end(alist); |
1334 | uprintf("\n" ); |
1335 | } |
1336 | |
1337 | /* |
1338 | * History: |
1339 | * 2002-01-24 gvdl Initial implementation of strstr |
1340 | */ |
1341 | |
1342 | __private_extern__ const char * |
1343 | strstr(const char *in, const char *str) |
1344 | { |
1345 | char c; |
1346 | size_t len; |
1347 | if (!in || !str) |
1348 | return in; |
1349 | |
1350 | c = *str++; |
1351 | if (!c) |
1352 | return (const char *) in; // Trivial empty string case |
1353 | |
1354 | len = strlen(str); |
1355 | do { |
1356 | char sc; |
1357 | |
1358 | do { |
1359 | sc = *in++; |
1360 | if (!sc) |
1361 | return (char *) 0; |
1362 | } while (sc != c); |
1363 | } while (strncmp(in, str, len) != 0); |
1364 | |
1365 | return (const char *) (in - 1); |
1366 | } |
1367 | |
1368 | const void* |
1369 | bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *)) |
1370 | { |
1371 | const char *base = base0; |
1372 | size_t lim; |
1373 | int cmp; |
1374 | const void *p; |
1375 | for (lim = nmemb; lim != 0; lim >>= 1) { |
1376 | p = base + (lim >> 1) * size; |
1377 | cmp = (*compar)(key, p); |
1378 | if (cmp == 0) |
1379 | return p; |
1380 | if (cmp > 0) { /* key > p: move right */ |
1381 | base = (const char *)p + size; |
1382 | lim--; |
1383 | } /* else move left */ |
1384 | } |
1385 | return (NULL); |
1386 | } |
1387 | |
1388 | /* |
1389 | * Runtime and ABI |
1390 | */ |
1391 | uintptr_t |
1392 | dtrace_caller(int ignore) |
1393 | { |
1394 | #pragma unused(ignore) |
1395 | return -1; /* Just as in Solaris dtrace_asm.s */ |
1396 | } |
1397 | |
1398 | int |
1399 | dtrace_getstackdepth(int aframes) |
1400 | { |
1401 | struct frame *fp = (struct frame *)__builtin_frame_address(0); |
1402 | struct frame *nextfp, *minfp, *stacktop; |
1403 | int depth = 0; |
1404 | int on_intr; |
1405 | |
1406 | if ((on_intr = CPU_ON_INTR(CPU)) != 0) |
1407 | stacktop = (struct frame *)dtrace_get_cpu_int_stack_top(); |
1408 | else |
1409 | stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size); |
1410 | |
1411 | minfp = fp; |
1412 | |
1413 | aframes++; |
1414 | |
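	/*
	 * Walk the chain of saved frame pointers. Each successor frame must lie strictly
	 * above the current one and below the top of the stack; otherwise the walk either
	 * hops from the interrupt stack onto the thread's kernel stack or terminates.
	 */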
1415 | for (;;) { |
1416 | depth++; |
1417 | |
1418 | nextfp = *(struct frame **)fp; |
1419 | |
1420 | if (nextfp <= minfp || nextfp >= stacktop) { |
1421 | if (on_intr) { |
1422 | /* |
1423 | * Hop from interrupt stack to thread stack. |
1424 | */ |
1425 | vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread()); |
1426 | |
1427 | minfp = (struct frame *)kstack_base; |
1428 | stacktop = (struct frame *)(kstack_base + kernel_stack_size); |
1429 | |
1430 | on_intr = 0; |
1431 | continue; |
1432 | } |
1433 | break; |
1434 | } |
1435 | |
1436 | fp = nextfp; |
1437 | minfp = fp; |
1438 | } |
1439 | |
1440 | if (depth <= aframes) |
1441 | return (0); |
1442 | |
1443 | return (depth - aframes); |
1444 | } |
1445 | |
1446 | int |
1447 | dtrace_addr_in_module(void* addr, struct modctl *ctl) |
1448 | { |
1449 | return OSKextKextForAddress(addr) == (void*)ctl->mod_address; |
1450 | } |
1451 | |
1452 | /* |
1453 | * Unconsidered |
1454 | */ |
1455 | void |
1456 | dtrace_vtime_enable(void) {} |
1457 | |
1458 | void |
1459 | dtrace_vtime_disable(void) {} |
1460 | |
1461 | #else /* else ! CONFIG_DTRACE */ |
1462 | |
1463 | #include <sys/types.h> |
1464 | #include <mach/vm_types.h> |
1465 | #include <mach/kmod.h> |
1466 | |
1467 | /* |
1468 | * This exists to prevent build errors when dtrace is unconfigured. |
1469 | */ |
1470 | |
1471 | kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t); |
1472 | |
1473 | kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) { |
1474 | #pragma unused(arg1, arg2, arg3) |
1475 | |
1476 | return KERN_FAILURE; |
1477 | } |
1478 | |
1479 | #endif /* CONFIG_DTRACE */ |
1480 | |