/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <sys/errno.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/sample.h>
#include <kperf/action.h>
#include <kperf/kperf_kpc.h>
#include <kern/kpc.h>

#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif

/* global flag: whether to read PMCs on context switch */
int kpc_threads_counting = 0;

/* whether to call into KPC when a thread goes off CPU */
boolean_t kpc_off_cpu_active = FALSE;

/* current config and number of counters in that config */
static uint32_t kpc_thread_classes = 0;
static uint32_t kpc_thread_classes_count = 0;

static lck_grp_attr_t *kpc_thread_lckgrp_attr = NULL;
static lck_grp_t *kpc_thread_lckgrp = NULL;
static lck_mtx_t kpc_thread_lock;

void
kpc_thread_init(void)
{
	kpc_thread_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_thread_lckgrp = lck_grp_alloc_init("kpc", kpc_thread_lckgrp_attr);
	lck_mtx_init(&kpc_thread_lock, kpc_thread_lckgrp, LCK_ATTR_NULL);
}

uint32_t
kpc_get_thread_counting(void)
{
	uint32_t kpc_thread_classes_tmp;
	int kpc_threads_counting_tmp;

	/* Make sure we get a consistent snapshot of these values */
	lck_mtx_lock(&kpc_thread_lock);

	kpc_thread_classes_tmp = kpc_thread_classes;
	kpc_threads_counting_tmp = kpc_threads_counting;

	lck_mtx_unlock(&kpc_thread_lock);

	if (kpc_threads_counting_tmp) {
		return kpc_thread_classes_tmp;
	} else {
		return 0;
	}
}
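
/*
 * Illustrative caller (hypothetical, not xnu's actual plumbing): the
 * return value is the class mask that was handed to
 * kpc_set_thread_counting(), or 0 if thread counting is disabled.
 *
 *	uint32_t classes = kpc_get_thread_counting();
 *	if (classes == 0) {
 *		// per-thread counting is off
 *	} else {
 *		// classes is the active counter-class mask
 *	}
 */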

int
kpc_set_thread_counting(uint32_t classes)
{
	uint32_t count;

	lck_mtx_lock(&kpc_thread_lock);

	count = kpc_get_counter_count(classes);

	if ((classes == 0) || (count == 0)) {
		/* shut down */
		kpc_threads_counting = FALSE;
	} else {
		/* stash the config */
		kpc_thread_classes = classes;

		/* work out the size */
		kpc_thread_classes_count = count;
		assert(kpc_thread_classes_count <= KPC_MAX_COUNTERS);

		/* enable switch */
		kpc_threads_counting = TRUE;

		/* and schedule an AST for this thread... */
		if (!current_thread()->kpc_buf) {
			current_thread()->kperf_flags |= T_KPC_ALLOC;
			act_set_kperf(current_thread());
		}
	}

	kpc_off_cpu_update();
	lck_mtx_unlock(&kpc_thread_lock);

	return 0;
}
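
/*
 * Example usage (illustrative; class masks come from <kern/kpc.h>):
 *
 *	// enable per-thread accumulation of the fixed counters
 *	kpc_set_thread_counting(KPC_CLASS_FIXED_MASK);
 *
 *	// later, tear it down
 *	kpc_set_thread_counting(0);
 */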

/*
 * Snapshot the current PMCs and accumulate the delta since the last
 * context switch into the given (outgoing) thread.
 */
static void
kpc_update_thread_counters(thread_t thread)
{
	uint32_t i;
	uint64_t *tmp = NULL;
	cpu_data_t *cpu = NULL;

	cpu = current_cpu_datap();

	/* 1. stash current PMCs into latest CPU block */
	kpc_get_cpu_counters(FALSE, kpc_thread_classes,
	                     NULL, cpu->cpu_kpc_buf[1]);

	/* 2. apply delta to old thread */
	if (thread->kpc_buf) {
		for (i = 0; i < kpc_thread_classes_count; i++) {
			thread->kpc_buf[i] += cpu->cpu_kpc_buf[1][i] - cpu->cpu_kpc_buf[0][i];
		}
	}

	/* schedule any necessary allocations */
	if (!current_thread()->kpc_buf) {
		current_thread()->kperf_flags |= T_KPC_ALLOC;
		act_set_kperf(current_thread());
	}

	/* 3. switch the PMC block pointers */
	tmp = cpu->cpu_kpc_buf[1];
	cpu->cpu_kpc_buf[1] = cpu->cpu_kpc_buf[0];
	cpu->cpu_kpc_buf[0] = tmp;
}
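
/*
 * Worked example of the double-buffer scheme above (values invented for
 * illustration). Suppose a PMC read 100 at the previous switch, so
 * cpu_kpc_buf[0][i] == 100. When the thread goes off CPU the PMC reads
 * 175: step 1 stores 175 into cpu_kpc_buf[1][i], step 2 credits the
 * outgoing thread with 175 - 100 = 75, and step 3 swaps the buffers so
 * 175 becomes the baseline for the next thread's delta.
 */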

/* get counter values for the current thread */
int
kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf)
{
	thread_t thread = current_thread();
	boolean_t enabled;

	/* buffer too small :( */
	if (*inoutcount < kpc_thread_classes_count) {
		return EINVAL;
	}

	/* nothing to copy if the thread has no counter buffer */
	if (!thread->kpc_buf) {
		return EINVAL;
	}

	enabled = ml_set_interrupts_enabled(FALSE);

	/* snap latest version of counters for this thread */
	kpc_update_thread_counters(thread);

	/* copy out the data and report the actual size */
	memcpy(buf, thread->kpc_buf,
	       kpc_thread_classes_count * sizeof(*buf));
	*inoutcount = kpc_thread_classes_count;

	ml_set_interrupts_enabled(enabled);

	return 0;
}
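
/*
 * Caller sketch (hypothetical) showing the in/out count protocol: pass
 * the capacity of buf in, get the number of entries written back out.
 *
 *	uint64_t counters[KPC_MAX_COUNTERS];
 *	uint32_t count = KPC_MAX_COUNTERS;
 *	int err = kpc_get_curthread_counters(&count, counters);
 *	if (err == 0) {
 *		// counters[0..count-1] hold the accumulated values
 *	}
 */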

void
kpc_off_cpu_update(void)
{
	kpc_off_cpu_active = kpc_threads_counting;
}

void
kpc_off_cpu_internal(thread_t thread)
{
	if (kpc_threads_counting) {
		kpc_update_thread_counters(thread);
	}
}
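
/*
 * For reference: the fast-path wrapper the scheduler calls when a thread
 * goes off CPU checks kpc_off_cpu_active first, so the common (disabled)
 * case stays cheap. A sketch of that wrapper (exact form assumed here;
 * see kperf_kpc.h):
 *
 *	static inline void
 *	kpc_off_cpu(thread_t thread)
 *	{
 *		if (kpc_off_cpu_active) {
 *			kpc_off_cpu_internal(thread);
 *		}
 *	}
 */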

void
kpc_thread_create(thread_t thread)
{
	/* nothing to do if we're not counting */
	if (!kpc_threads_counting) {
		return;
	}

	/* give the new thread a counter buffer */
	thread->kpc_buf = kpc_counterbuf_alloc();
}

void
kpc_thread_destroy(thread_t thread)
{
	uint64_t *buf = NULL;

	/* usual case: no kpc buf, just return */
	if (!thread->kpc_buf) {
		return;
	}

	/* otherwise, don't leak */
	buf = thread->kpc_buf;
	thread->kpc_buf = NULL;
	kpc_counterbuf_free(buf);
}

/* AST callback on a thread */
void
kpc_thread_ast_handler(thread_t thread)
{
	/* see if we want an alloc */
	if (thread->kperf_flags & T_KPC_ALLOC) {
		thread->kpc_buf = kpc_counterbuf_alloc();
	}
}
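
/*
 * Allocation flow, end to end: kpc_set_thread_counting() and
 * kpc_update_thread_counters() set T_KPC_ALLOC on a thread that lacks a
 * kpc_buf and request a kperf AST via act_set_kperf(). When the thread
 * next passes the AST check, the kperf AST path invokes this handler,
 * which performs the allocation in a context where blocking is safe,
 * unlike the context-switch path.
 */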