/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Sample thread data */

#include <kern/debug.h> /* panic */
#include <kern/thread.h> /* thread_* */
#include <kern/timer.h> /* timer_data_t */
#include <kern/policy_internal.h> /* TASK_POLICY_* */
#include <mach/mach_types.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/thread_samplers.h>
#include <kperf/ast.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

extern boolean_t stackshot_thread_is_idle_worker_unsafe(thread_t thread);

/*
 * XXX Deprecated, use the thread scheduling sampler instead.
 *
 * Taken from AppleProfileGetRunModeOfThread and CHUD. Still here for
 * backwards compatibility.
 */
| 56 | |
| 57 | #define KPERF_TI_RUNNING (1U << 0) |
| 58 | #define KPERF_TI_RUNNABLE (1U << 1) |
| 59 | #define KPERF_TI_WAIT (1U << 2) |
| 60 | #define KPERF_TI_UNINT (1U << 3) |
| 61 | #define KPERF_TI_SUSP (1U << 4) |
| 62 | #define KPERF_TI_TERMINATE (1U << 5) |
| 63 | #define KPERF_TI_IDLE (1U << 6) |
| 64 | |
static uint32_t
kperf_thread_info_runmode_legacy(thread_t thread)
{
    uint32_t kperf_state = 0;
    int sched_state = thread->state;
    processor_t last_processor = thread->last_processor;

    if ((last_processor != PROCESSOR_NULL) && (thread == last_processor->active_thread)) {
        kperf_state |= KPERF_TI_RUNNING;
    }
    if (sched_state & TH_RUN) {
        kperf_state |= KPERF_TI_RUNNABLE;
    }
    if (sched_state & TH_WAIT) {
        kperf_state |= KPERF_TI_WAIT;
    }
    if (sched_state & TH_UNINT) {
        kperf_state |= KPERF_TI_UNINT;
    }
    if (sched_state & TH_SUSP) {
        kperf_state |= KPERF_TI_SUSP;
    }
    if (sched_state & TH_TERMINATE) {
        kperf_state |= KPERF_TI_TERMINATE;
    }
    if (sched_state & TH_IDLE) {
        kperf_state |= KPERF_TI_IDLE;
    }

#if !CONFIG_EMBEDDED
    /* on desktop, if no state bits are set, report the thread as not idle */
    if (kperf_state == 0) {
        return (TH_IDLE << 16);
    }
#endif /* !CONFIG_EMBEDDED */

    /* high two bytes are inverted mask, low two bytes are normal */
    return (((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff));
}
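
/*
 * Worked example of the legacy encoding (illustrative, not part of the
 * original source): a thread that is on-core and runnable has
 * kperf_state = KPERF_TI_RUNNING | KPERF_TI_RUNNABLE = 0x0003, so the
 * function returns
 *
 *     ((~0x0003 & 0xffff) << 16) | 0x0003 == 0xfffc0003
 *
 * A consumer tests a bit in the low half to see that a state was observed
 * set, and the same bit in the high half to see that it was observed clear.
 */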

void
kperf_thread_info_sample(struct kperf_thread_info *ti, struct kperf_context *context)
{
    thread_t cur_thread = context->cur_thread;

    BUF_INFO(PERF_TI_SAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(cur_thread));

    ti->kpthi_pid = context->cur_pid;
    ti->kpthi_tid = thread_tid(cur_thread);
    ti->kpthi_dq_addr = thread_dispatchqaddr(cur_thread);
    ti->kpthi_runmode = kperf_thread_info_runmode_legacy(cur_thread);

    BUF_VERB(PERF_TI_SAMPLE | DBG_FUNC_END);
}

void
kperf_thread_info_log(struct kperf_thread_info *ti)
{
    BUF_DATA(PERF_TI_DATA, ti->kpthi_pid, ti->kpthi_tid /* K64-only */,
        ti->kpthi_dq_addr, ti->kpthi_runmode);
}

/*
 * Scheduling information reports inputs and outputs of the scheduler state
 * for a thread.
 */

void
kperf_thread_scheduling_sample(struct kperf_thread_scheduling *thsc,
    struct kperf_context *context)
{
    assert(thsc != NULL);
    assert(context != NULL);

    thread_t thread = context->cur_thread;

    BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

    thsc->kpthsc_user_time = timer_grab(&thread->user_timer);
    uint64_t system_time = timer_grab(&thread->system_timer);

    if (thread->precise_user_kernel_time) {
        thsc->kpthsc_system_time = system_time;
    } else {
        thsc->kpthsc_user_time += system_time;
        thsc->kpthsc_system_time = 0;
    }

    thsc->kpthsc_runnable_time = timer_grab(&thread->runnable_timer);
    thsc->kpthsc_state = thread->state;
    thsc->kpthsc_base_priority = thread->base_pri;
    thsc->kpthsc_sched_priority = thread->sched_pri;
    thsc->kpthsc_effective_qos = thread->effective_policy.thep_qos;
    thsc->kpthsc_requested_qos = thread->requested_policy.thrp_qos;
    thsc->kpthsc_requested_qos_override = MAX(thread->requested_policy.thrp_qos_override,
        thread->requested_policy.thrp_qos_workq_override);
    thsc->kpthsc_requested_qos_promote = thread->requested_policy.thrp_qos_promote;
    thsc->kpthsc_requested_qos_ipc_override = thread->requested_policy.thrp_qos_ipc_override;
    thsc->kpthsc_requested_qos_sync_ipc_override = thread->requested_policy.thrp_qos_sync_ipc_override;
    thsc->kpthsc_effective_latency_qos = thread->effective_policy.thep_latency_qos;

    BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_END);
}

void
kperf_thread_scheduling_log(struct kperf_thread_scheduling *thsc)
{
    assert(thsc != NULL);
#if defined(__LP64__)
    BUF_DATA(PERF_TI_SCHEDDATA_2, thsc->kpthsc_user_time,
        thsc->kpthsc_system_time,
        (((uint64_t)thsc->kpthsc_base_priority) << 48)
        | ((uint64_t)thsc->kpthsc_sched_priority << 32)
        | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24)
        | (thsc->kpthsc_effective_qos << 6)
        | (thsc->kpthsc_requested_qos << 3)
        | thsc->kpthsc_requested_qos_override,
        ((uint64_t)thsc->kpthsc_effective_latency_qos << 61)
        | ((uint64_t)thsc->kpthsc_requested_qos_promote << 58)
        | ((uint64_t)thsc->kpthsc_requested_qos_ipc_override << 55)
        | ((uint64_t)thsc->kpthsc_requested_qos_sync_ipc_override << 52));
    BUF_DATA(PERF_TI_SCHEDDATA_3, thsc->kpthsc_runnable_time);
#else
    BUF_DATA(PERF_TI_SCHEDDATA1_32, UPPER_32(thsc->kpthsc_user_time),
        LOWER_32(thsc->kpthsc_user_time),
        UPPER_32(thsc->kpthsc_system_time),
        LOWER_32(thsc->kpthsc_system_time));
    BUF_DATA(PERF_TI_SCHEDDATA2_32_2, (((uint32_t)thsc->kpthsc_base_priority) << 16)
        | thsc->kpthsc_sched_priority,
        ((thsc->kpthsc_state & 0xff) << 24)
        | (thsc->kpthsc_effective_qos << 6)
        | (thsc->kpthsc_requested_qos << 3)
        | thsc->kpthsc_requested_qos_override,
        ((uint32_t)thsc->kpthsc_effective_latency_qos << 29)
        | ((uint32_t)thsc->kpthsc_requested_qos_promote << 26)
        | ((uint32_t)thsc->kpthsc_requested_qos_ipc_override << 23)
        | ((uint32_t)thsc->kpthsc_requested_qos_sync_ipc_override << 20));
    BUF_DATA(PERF_TI_SCHEDDATA3_32, UPPER_32(thsc->kpthsc_runnable_time),
        LOWER_32(thsc->kpthsc_runnable_time));
#endif /* defined(__LP64__) */
}
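
/*
 * Decoding sketch (illustrative only, not a kernel interface): a trace
 * consumer could unpack the packed third argument of the 64-bit
 * PERF_TI_SCHEDDATA_2 event emitted above as:
 *
 *     base_pri      = (arg >> 48) & 0xffff;
 *     sched_pri     = (arg >> 32) & 0xffff;
 *     state         = (arg >> 24) & 0xff;
 *     effective_qos = (arg >> 6) & 0x7;
 *     requested_qos = (arg >> 3) & 0x7;
 *     qos_override  = arg & 0x7;
 */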

/*
 * Snapshot information maintains parity with the miscellaneous thread
 * information reported by stackshot.
 */

#define KPERF_THREAD_SNAPSHOT_DARWIN_BG  (1U << 0)
#define KPERF_THREAD_SNAPSHOT_PASSIVE_IO (1U << 1)
#define KPERF_THREAD_SNAPSHOT_GFI        (1U << 2)
#define KPERF_THREAD_SNAPSHOT_IDLE_WQ    (1U << 3)
/* max is 1U << 7 */

void
kperf_thread_snapshot_sample(struct kperf_thread_snapshot *thsn,
    struct kperf_context *context)
{
    assert(thsn != NULL);
    assert(context != NULL);

    thread_t thread = context->cur_thread;

    BUF_INFO(PERF_TI_SNAPSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

    thsn->kpthsn_last_made_runnable_time = thread->last_made_runnable_time;

    thsn->kpthsn_flags = 0;
    if (thread->effective_policy.thep_darwinbg) {
        thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_DARWIN_BG;
    }
    if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) {
        thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_PASSIVE_IO;
    }
    if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
        thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_GFI;
    }
    if (stackshot_thread_is_idle_worker_unsafe(thread)) {
        thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_IDLE_WQ;
    }

    thsn->kpthsn_suspend_count = thread->suspend_count;
    thsn->kpthsn_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);

    BUF_VERB(PERF_TI_SNAPSAMPLE | DBG_FUNC_END);
}

void
kperf_thread_snapshot_log(struct kperf_thread_snapshot *thsn)
{
    assert(thsn != NULL);
#if defined(__LP64__)
    BUF_DATA(PERF_TI_SNAPDATA, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
        | (thsn->kpthsn_io_tier << 24),
        thsn->kpthsn_last_made_runnable_time);
#else
    BUF_DATA(PERF_TI_SNAPDATA_32, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
        | (thsn->kpthsn_io_tier << 24),
        UPPER_32(thsn->kpthsn_last_made_runnable_time),
        LOWER_32(thsn->kpthsn_last_made_runnable_time));
#endif /* defined(__LP64__) */
}
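
/*
 * Decoding sketch (illustrative only): the first argument of PERF_TI_SNAPDATA
 * above packs three fields, recoverable as:
 *
 *     flags         = arg & 0xff;
 *     suspend_count = (arg >> 8) & 0xffff;
 *     io_tier       = (arg >> 24) & 0xff;
 */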

/*
 * Dispatch information only contains the dispatch queue serial number from
 * libdispatch.
 *
 * It's a separate sampler because queue data must be copied in from user space.
 */
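
/*
 * The chain of copies below, assuming the usual libdispatch thread-local
 * layout (illustrative):
 *
 *     thread_dispatchqaddr(thread)          TSD slot holding a queue pointer
 *       -> user_dq_addr                     the thread's dispatch queue
 *       -> user_dq_addr + serialno offset   the queue's serial number
 *
 * If either copyin() fails or reads a NULL pointer, the serial number is
 * reported as 0.
 */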

void
kperf_thread_dispatch_sample(struct kperf_thread_dispatch *thdi,
    struct kperf_context *context)
{
    assert(thdi != NULL);
    assert(context != NULL);

    thread_t thread = context->cur_thread;

    BUF_INFO(PERF_TI_DISPSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

    task_t task = thread->task;
    boolean_t task_64 = task_has_64Bit_addr(task);
    size_t user_addr_size = task_64 ? 8 : 4;

    assert(thread->task != kernel_task);
    uint64_t user_dq_key_addr = thread_dispatchqaddr(thread);
    if (user_dq_key_addr == 0) {
        goto error;
    }

    uint64_t user_dq_addr;
    if ((copyin((user_addr_t)user_dq_key_addr,
        (char *)&user_dq_addr,
        user_addr_size) != 0) ||
        (user_dq_addr == 0)) {
        goto error;
    }

    uint64_t user_dq_serialno_addr =
        user_dq_addr + get_task_dispatchqueue_serialno_offset(task);

    if (copyin((user_addr_t)user_dq_serialno_addr,
        (char *)&(thdi->kpthdi_dq_serialno),
        user_addr_size) == 0) {
        goto out;
    }

error:
    thdi->kpthdi_dq_serialno = 0;

out:
    BUF_VERB(PERF_TI_DISPSAMPLE | DBG_FUNC_END);
}

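/*
 * Copying in from user space is not safe from interrupt context, so pend an
 * AST to sample the dispatch queue information once the thread is back in a
 * context where copyin() is legal (rationale inferred; see kperf/ast.h).
 */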
int
kperf_thread_dispatch_pend(struct kperf_context *context)
{
    return kperf_ast_pend(context->cur_thread, T_KPERF_AST_DISPATCH);
}

void
kperf_thread_dispatch_log(struct kperf_thread_dispatch *thdi)
{
    assert(thdi != NULL);
#if defined(__LP64__)
    BUF_DATA(PERF_TI_DISPDATA, thdi->kpthdi_dq_serialno);
#else
    BUF_DATA(PERF_TI_DISPDATA_32, UPPER_32(thdi->kpthdi_dq_serialno),
        LOWER_32(thdi->kpthdi_dq_serialno));
#endif /* defined(__LP64__) */
}

/*
 * A bit different from other samplers -- since logging disables interrupts,
 * it's a fine place to sample the thread counters.
 */
void
kperf_thread_inscyc_log(struct kperf_context *context)
{
#if MONOTONIC
    thread_t cur_thread = current_thread();

    if (context->cur_thread != cur_thread) {
        /* can't safely access another thread's counters */
        return;
    }

    uint64_t counts[MT_CORE_NFIXED];

    int ret = mt_fixed_thread_counts(cur_thread, counts);
    if (ret) {
        return;
    }

#if defined(__LP64__)
    BUF_DATA(PERF_TI_INSCYCDATA, counts[MT_CORE_INSTRS], counts[MT_CORE_CYCLES]);
#else /* defined(__LP64__) */
    /* 32-bit platforms don't count instructions */
    BUF_DATA(PERF_TI_INSCYCDATA_32, 0, 0, UPPER_32(counts[MT_CORE_CYCLES]),
        LOWER_32(counts[MT_CORE_CYCLES]));
#endif /* !defined(__LP64__) */

#else
#pragma unused(context)
#endif /* MONOTONIC */
}
| 378 | |