/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/ipc_tt.h> /* port_name_to_task */
#include <kern/thread.h>
#include <kern/machine.h>
#include <kern/kalloc.h>
#include <mach/mach_types.h>
#include <sys/errno.h>
#include <sys/ktrace.h>

#include <kperf/action.h>
#include <kperf/buffer.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_timer.h>
#include <kperf/lazy.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

/* from libkern/libkern.h */
extern uint64_t strtouq(const char *, char **, int);

lck_grp_t kperf_lck_grp;

/* IDs of threads on CPUs before starting the PET thread */
uint64_t *kperf_tid_on_cpus = NULL;

/* one wired sample buffer per CPU */
static struct kperf_sample *intr_samplev;
static unsigned int intr_samplec = 0;

/* current sampling status */
static unsigned sampling_status = KPERF_SAMPLING_OFF;

/* only init once */
static boolean_t kperf_initted = FALSE;

/* whether to call back into kperf on context switch */
boolean_t kperf_on_cpu_active = FALSE;

unsigned int kperf_thread_blocked_action;
unsigned int kperf_cpu_sample_action;

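/*
 * Return this CPU's wired sample buffer for use from interrupt context.
 * Interrupts must be disabled (asserted below) so the current CPU cannot
 * change while the buffer is in use.
 */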
struct kperf_sample *
kperf_intr_sample_buffer(void)
{
	unsigned ncpu = cpu_number();

	assert(ml_get_interrupts_enabled() == FALSE);
	assert(ncpu < intr_samplec);

	return &(intr_samplev[ncpu]);
}

/* setup interrupt sample buffers */
int
kperf_init(void)
{
	static lck_grp_attr_t lck_grp_attr;

	unsigned ncpus = 0;
	int err;

	if (kperf_initted) {
		return 0;
	}

	lck_grp_attr_setdefault(&lck_grp_attr);
	lck_grp_init(&kperf_lck_grp, "kperf", &lck_grp_attr);

	ncpus = machine_info.logical_cpu_max;

	/* create buffers to remember which threads don't need to be sampled by PET */
	kperf_tid_on_cpus = kalloc_tag(ncpus * sizeof(*kperf_tid_on_cpus),
	    VM_KERN_MEMORY_DIAG);
	if (kperf_tid_on_cpus == NULL) {
		err = ENOMEM;
		goto error;
	}
	bzero(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus));

	/* create the interrupt buffers */
	intr_samplec = ncpus;
	intr_samplev = kalloc_tag(ncpus * sizeof(*intr_samplev),
	    VM_KERN_MEMORY_DIAG);
	if (intr_samplev == NULL) {
		err = ENOMEM;
		goto error;
	}
	bzero(intr_samplev, ncpus * sizeof(*intr_samplev));

	/* create kdebug trigger filter buffers */
	if ((err = kperf_kdebug_init())) {
		goto error;
	}

	kperf_initted = TRUE;
	return 0;

error:
	if (intr_samplev) {
		kfree(intr_samplev, ncpus * sizeof(*intr_samplev));
		intr_samplev = NULL;
		intr_samplec = 0;
	}

	if (kperf_tid_on_cpus) {
		kfree(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus));
		kperf_tid_on_cpus = NULL;
	}

	return err;
}

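/* tear down all kperf configuration and return to the unconfigured state */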
void
kperf_reset(void)
{
	/* turn off sampling first */
	(void)kperf_sampling_disable();

	/* clean up miscellaneous configuration */
	kperf_lazy_reset();
	(void)kperf_kdbg_cswitch_set(0);
	(void)kperf_set_lightweight_pet(0);
	kperf_kdebug_reset();

	/* timers before actions, since timers reference actions */
	kperf_timer_reset();
	kperf_action_reset();
}

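/*
 * Configure kperf from a boot-time string of the form:
 *
 *   [p]<action-samplers>,<timer-period-ns>[,<action-samplers>,<timer-period-ns>...]
 *
 * Each pair creates an action with the given samplers and a timer that fires
 * every timer-period-ns nanoseconds to trigger it.  A leading 'p' makes the
 * first timer the PET timer and enables lightweight PET.  For example
 * (illustrative values only), "p1,10000000" sets up a single action driven
 * by a 10ms PET timer.
 */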
void
kperf_kernel_configure(const char *config)
{
	int pairs = 0;
	char *end;
	bool pet = false;

	assert(config != NULL);

	ktrace_start_single_threaded();

	ktrace_kernel_configure(KTRACE_KPERF);

	if (config[0] == 'p') {
		pet = true;
		config++;
	}

	do {
		uint32_t action_samplers;
		uint64_t timer_period_ns;
		uint64_t timer_period;

		pairs += 1;
		kperf_action_set_count(pairs);
		kperf_timer_set_count(pairs);

		action_samplers = (uint32_t)strtouq(config, &end, 0);
		if (config == end) {
			kprintf("kperf: unable to parse '%s' as action sampler\n", config);
			goto out;
		}
		config = end;

		kperf_action_set_samplers(pairs, action_samplers);

		if (config[0] == '\0') {
			kprintf("kperf: missing timer period in config\n");
			goto out;
		}
		config++;

		timer_period_ns = strtouq(config, &end, 0);
		if (config == end) {
			kprintf("kperf: unable to parse '%s' as timer period\n", config);
			goto out;
		}
		nanoseconds_to_absolutetime(timer_period_ns, &timer_period);
		config = end;

		kperf_timer_set_period(pairs - 1, timer_period);
		kperf_timer_set_action(pairs - 1, pairs);

		if (pet) {
			kperf_timer_set_petid(pairs - 1);
			kperf_set_lightweight_pet(1);
			pet = false;
		}
	} while (*(config++) == ',');

	int error = kperf_sampling_enable();
	if (error) {
		kprintf("kperf: cannot enable sampling at boot: %d\n", error);
	}

out:
	ktrace_end_single_threaded();
}

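/*
 * Context-switch callback: runs when a thread goes on-CPU while any of the
 * on-CPU triggers below (kdebug context-switch tracing, lightweight PET, or
 * lazy wait sampling) are active.
 */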
void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
    uintptr_t *starting_fp);
void
kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
    uintptr_t *starting_fp)
{
	if (kperf_kdebug_cswitch) {
		/* trace the new thread's PID for Instruments */
		int pid = task_pid(get_threadtask(thread));
		BUF_DATA(PERF_TI_CSWITCH, thread_tid(thread), pid);
	}
	if (kperf_lightweight_pet_active) {
		kperf_pet_on_cpu(thread, continuation, starting_fp);
	}
	if (kperf_lazy_wait_action != 0) {
		kperf_lazy_wait_sample(thread, continuation, starting_fp);
	}
}

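/* recompute whether the context-switch callback needs to run at all */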
void
kperf_on_cpu_update(void)
{
	kperf_on_cpu_active = kperf_kdebug_cswitch ||
	    kperf_lightweight_pet_active ||
	    kperf_lazy_wait_action != 0;
}

/* random misc-ish functions */
uint32_t
kperf_get_thread_flags(thread_t thread)
{
	return thread->kperf_flags;
}

void
kperf_set_thread_flags(thread_t thread, uint32_t flags)
{
	thread->kperf_flags = flags;
}

unsigned int
kperf_sampling_status(void)
{
	return sampling_status;
}

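/*
 * Turn sampling on and start any configured timers.  Fails with ECANCELED
 * if kperf has not been initialized or no actions are configured.
 */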
int
kperf_sampling_enable(void)
{
	if (sampling_status == KPERF_SAMPLING_ON) {
		return 0;
	}

	if (sampling_status != KPERF_SAMPLING_OFF) {
		panic("kperf: sampling was %d when asked to enable", sampling_status);
	}

	/* make sure interrupt tables and actions are initted */
	if (!kperf_initted || (kperf_action_get_count() == 0)) {
		return ECANCELED;
	}

	/* mark as running */
	sampling_status = KPERF_SAMPLING_ON;
	kperf_lightweight_pet_active_update();

	/* tell timers to enable */
	kperf_timer_go();

	return 0;
}

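/* turn sampling off and stop the timers; safe to call when already off */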
int
kperf_sampling_disable(void)
{
	if (sampling_status != KPERF_SAMPLING_ON) {
		return 0;
	}

	/* mark as shutting down */
	sampling_status = KPERF_SAMPLING_SHUTDOWN;

	/* tell timers to disable */
	kperf_timer_stop();

	/* mark as off */
	sampling_status = KPERF_SAMPLING_OFF;
	kperf_lightweight_pet_active_update();

	return 0;
}

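/*
 * A thread is "dirty" if it has context-switched since kperf last marked it
 * clean.  Rather than keeping a separate flag, these helpers compare the
 * thread's context-switch count against the snapshot stored in kperf_c_switch.
 */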
boolean_t
kperf_thread_get_dirty(thread_t thread)
{
	return (thread->c_switch != thread->kperf_c_switch);
}

void
kperf_thread_set_dirty(thread_t thread, boolean_t dirty)
{
	if (dirty) {
		thread->kperf_c_switch = thread->c_switch - 1;
	} else {
		thread->kperf_c_switch = thread->c_switch;
	}
}

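/*
 * Translate a task port name in the caller's IPC space into a pid, or
 * return -1 if the name does not denote a valid task.
 */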
int
kperf_port_to_pid(mach_port_name_t portname)
{
	if (!MACH_PORT_VALID(portname)) {
		return -1;
	}

	task_t task = port_name_to_task(portname);
	if (task == TASK_NULL) {
		return -1;
	}
	pid_t pid = task_pid(task);
	/* drop the ref taken by port_name_to_task */
	task_deallocate_internal(task);

	return pid;
}