/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    timer.c
 * Purpose: Routines for handling the machine independent timer.
 */

#include <mach/mach_types.h>

#include <kern/timer_queue.h>
#include <kern/timer_call.h>
#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/pms.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <i386/cpu_data.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>

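/*
 * Count of timer interrupts taken when neither the local timer queue nor
 * the power-management deadline had actually expired (see timer_intr()).
 */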
uint32_t spurious_timers;

/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 * that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(int user_mode,
    uint64_t rip)
{
    uint64_t abstime;
    rtclock_timer_t *mytimer;
    cpu_data_t *pp;
    int64_t latency;
    uint64_t pmdeadline;
    boolean_t timer_processed = FALSE;

    pp = current_cpu_datap();

    SCHED_STATS_TIMER_POP(current_processor());

    abstime = mach_absolute_time();     /* Get the time now */

    /* has a pending clock timer expired? */
    mytimer = &pp->rtclock_timer;       /* Point to the event timer */

    if ((timer_processed = ((mytimer->deadline <= abstime) ||
            (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
        /*
         * Log the interrupt service latency (a negative value is expected
         * by the latency tool); a non-PM event is expected next.
         * The requested deadline may be earlier than when it was set,
         * so use MAX to avoid reporting bogus latencies.
         */
        latency = (int64_t) (abstime - MAX(mytimer->deadline,
            mytimer->when_set));
        /* Log zero timer latencies when opportunistically processing
         * coalesced timers.
         */
        if (latency < 0) {
            TCOAL_DEBUG(0xEEEE0000, abstime, mytimer->queue.earliest_soft_deadline, abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
            latency = 0;
        }

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_TRAP_LATENCY | DBG_FUNC_NONE,
            -latency,
            ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
            user_mode, 0, 0);

        mytimer->has_expired = TRUE;    /* Remember that we popped */
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;

        /* Get the time again since we ran a bit */
        abstime = mach_absolute_time();
        mytimer->when_set = abstime;
    }

    /* is it time for power management state change? */
    if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_PM_DEADLINE | DBG_FUNC_START,
            0, 0, 0, 0, 0);
        pmCPUDeadline(pp);
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_PM_DEADLINE | DBG_FUNC_END,
            0, 0, 0, 0, 0);
        timer_processed = TRUE;
        abstime = mach_absolute_time(); /* Get the time again since we ran a bit */
    }

    uint64_t quantum_deadline = pp->quantum_timer_deadline;
    /* is it the quantum timer expiration? */
    if ((quantum_deadline <= abstime) && (quantum_deadline > 0)) {
        pp->quantum_timer_deadline = 0;
        quantum_timer_expire(abstime);
    }

    /* schedule our next deadline */
    x86_lcpu()->rtcDeadline = EndOfAllTime;
    timer_resync_deadlines();

    if (__improbable(timer_processed == FALSE))
        spurious_timers++;
}

/*
 * Set the clock deadline.
 */
void
timer_set_deadline(uint64_t deadline)
{
    rtclock_timer_t *mytimer;
    spl_t s;
    cpu_data_t *pp;

    s = splclock();                 /* no interruptions */
    pp = current_cpu_datap();

    mytimer = &pp->rtclock_timer;   /* Point to the timer itself */
    mytimer->deadline = deadline;   /* Set new expiration time */
    mytimer->when_set = mach_absolute_time();

    timer_resync_deadlines();

    splx(s);
}

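/*
 * Set this CPU's quantum timer deadline and re-evaluate the hardware
 * timer deadline. Callers must have interrupts disabled (asserted below).
 */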
void
quantum_timer_set_deadline(uint64_t deadline)
{
    cpu_data_t *pp;
    /* We should've only come into this path with interrupts disabled */
    assert(ml_get_interrupts_enabled() == FALSE);

    pp = current_cpu_datap();
    pp->quantum_timer_deadline = deadline;
    timer_resync_deadlines();
}

/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
    uint64_t deadline = EndOfAllTime;
    uint64_t pmdeadline;
    uint64_t quantum_deadline;
    rtclock_timer_t *mytimer;
    spl_t s = splclock();
    cpu_data_t *pp;
    uint32_t decr;

    pp = current_cpu_datap();
    if (!pp->cpu_running) {
        /* There's really nothing to do if this processor is down */
        splx(s);
        return;
    }

    /*
     * If we have a clock timer set, pick that.
     */
    mytimer = &pp->rtclock_timer;
    if (!mytimer->has_expired &&
        0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
        deadline = mytimer->deadline;

    /*
     * If we have a power management deadline, see if that's earlier.
     */
    pmdeadline = pmCPUGetDeadline(pp);
    if (0 < pmdeadline && pmdeadline < deadline)
        deadline = pmdeadline;

    /* If the quantum timer is set up, see whether it's earlier. */
    quantum_deadline = pp->quantum_timer_deadline;
    if ((quantum_deadline > 0) &&
        (quantum_deadline < deadline))
        deadline = quantum_deadline;

    /*
     * Go and set the "pop" event.
     */
    decr = (uint32_t) setPop(deadline);

    /* Record non-PM deadline for latency tool */
    if (decr != 0 && deadline != pmdeadline) {
        uint64_t queue_count = 0;
        if (deadline != quantum_deadline) {
            /*
             * For non-quantum timer put the queue count
             * in the tracepoint.
             */
            queue_count = mytimer->queue.count;
        }
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_SET_DEADLINE | DBG_FUNC_NONE,
            decr, 2,
            deadline,
            queue_count, 0);
    }
    splx(s);
}

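/*
 * Expire any timers that have come due on the calling CPU's local queue,
 * then re-evaluate and reprogram the next deadline.
 */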
void
timer_queue_expire_local(
    __unused void *arg)
{
    rtclock_timer_t *mytimer;
    uint64_t abstime;
    cpu_data_t *pp;

    pp = current_cpu_datap();

    mytimer = &pp->rtclock_timer;
    abstime = mach_absolute_time();

    mytimer->has_expired = TRUE;
    mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
    mytimer->has_expired = FALSE;
    mytimer->when_set = mach_absolute_time();

    timer_resync_deadlines();
}

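/*
 * As timer_queue_expire_local(), but expires the queue via
 * timer_queue_expire_with_options() with the rescan option enabled.
 * Callers must have interrupts disabled.
 */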
void
timer_queue_expire_rescan(
    __unused void *arg)
{
    rtclock_timer_t *mytimer;
    uint64_t abstime;
    cpu_data_t *pp;

    assert(ml_get_interrupts_enabled() == FALSE);
    pp = current_cpu_datap();

    mytimer = &pp->rtclock_timer;
    abstime = mach_absolute_time();

    mytimer->has_expired = TRUE;
    mytimer->deadline = timer_queue_expire_with_options(&mytimer->queue, abstime, TRUE);
    mytimer->has_expired = FALSE;
    mytimer->when_set = mach_absolute_time();

    timer_resync_deadlines();
}

#define TIMER_RESORT_THRESHOLD_ABSTIME (50 * NSEC_PER_MSEC)

#if TCOAL_PRIO_STATS
int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
#define TCOAL_PRIO_STAT(x) (x++)
#else
#define TCOAL_PRIO_STAT(x)
#endif

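/*
 * Return TRUE if a timer's deadline skew meets or exceeds
 * TIMER_RESORT_THRESHOLD_ABSTIME (50 ms).
 */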
boolean_t
timer_resort_threshold(uint64_t skew)
{
    if (skew >= TIMER_RESORT_THRESHOLD_ABSTIME)
        return TRUE;
    else
        return FALSE;
}

/*
 * Return the local timer queue for a running processor
 * else return the boot processor's timer queue.
 */
mpqueue_head_t *
timer_queue_assign(
    uint64_t deadline)
{
    cpu_data_t *cdp = current_cpu_datap();
    mpqueue_head_t *queue;

    if (cdp->cpu_running) {
        queue = &cdp->rtclock_timer.queue;

        if (deadline < cdp->rtclock_timer.deadline)
            timer_set_deadline(deadline);
    }
    else
        queue = &cpu_datap(master_cpu)->rtclock_timer.queue;

    return (queue);
}

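/*
 * If the given queue is this CPU's local timer queue and the cancelled
 * deadline precedes the queue's new earliest deadline, push the hardware
 * deadline out to the new value.
 */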
void
timer_queue_cancel(
    mpqueue_head_t *queue,
    uint64_t deadline,
    uint64_t new_deadline)
{
    if (queue == &current_cpu_datap()->rtclock_timer.queue) {
        if (deadline < new_deadline)
            timer_set_deadline(new_deadline);
    }
}

/*
 * timer_queue_migrate_cpu() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 */
uint32_t
timer_queue_migrate_cpu(int target_cpu)
{
    cpu_data_t *target_cdp = cpu_datap(target_cpu);
    cpu_data_t *cdp = current_cpu_datap();
    int ntimers_moved;

    assert(!ml_get_interrupts_enabled());
    assert(target_cpu != cdp->cpu_number);
    assert(target_cpu == master_cpu);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        DECR_TIMER_MIGRATE | DBG_FUNC_START,
        target_cpu,
        cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
        0, 0);

    /*
     * Move timer requests from the local queue to the target processor's.
     * The return value is the number of requests moved. If this is 0,
     * it indicates that the first (i.e. earliest) timer is earlier than
     * the earliest for the target processor. Since this would force a
     * resync, the move of this and all later requests is aborted.
     */
    ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
        &target_cdp->rtclock_timer.queue);

    /*
     * Assuming we moved stuff, clear local deadline.
     */
    if (ntimers_moved > 0) {
        cdp->rtclock_timer.deadline = EndOfAllTime;
        setPop(EndOfAllTime);
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        DECR_TIMER_MIGRATE | DBG_FUNC_END,
        target_cpu, ntimers_moved, 0, 0, 0);

    return ntimers_moved;
}

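/* Return the rtclock timer queue belonging to the given CPU. */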
mpqueue_head_t *
timer_queue_cpu(int cpu)
{
    return &cpu_datap(cpu)->rtclock_timer.queue;
}

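/*
 * Invoke fn(arg) on the specified CPU via a synchronous cross-call;
 * the caller waits for completion.
 */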
void
timer_call_cpu(int cpu, void (*fn)(void *), void *arg)
{
    mp_cpus_call(cpu_to_cpumask(cpu), SYNC, fn, arg);
}

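/*
 * As timer_call_cpu(), but the cross-call is issued NOSYNC, so the
 * caller does not wait for fn to complete on the target CPU.
 */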
void
timer_call_nosync_cpu(int cpu, void (*fn)(void *), void *arg)
{
    /* XXX Needs error checking and retry */
    mp_cpus_call(cpu_to_cpumask(cpu), NOSYNC, fn, arg);
}

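/*
 * Default timer coalescing parameters returned by
 * timer_call_get_priority_params(): per-thread-class coalescing shifts
 * and maximum coalescing windows, plus latency QoS tier scales, limits,
 * and rate-limit flags.
 */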
static timer_coalescing_priority_params_ns_t tcoal_prio_params_init =
{
    .idle_entry_timer_processing_hdeadline_threshold_ns = 5000ULL * NSEC_PER_USEC,
    .interrupt_timer_coalescing_ilat_threshold_ns = 30ULL * NSEC_PER_USEC,
    .timer_resort_threshold_ns = 50 * NSEC_PER_MSEC,
    .timer_coalesce_rt_shift = 0,
    .timer_coalesce_bg_shift = -5,
    .timer_coalesce_kt_shift = 3,
    .timer_coalesce_fp_shift = 3,
    .timer_coalesce_ts_shift = 3,
    .timer_coalesce_rt_ns_max = 0ULL,
    .timer_coalesce_bg_ns_max = 100 * NSEC_PER_MSEC,
    .timer_coalesce_kt_ns_max = 1 * NSEC_PER_MSEC,
    .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC,
    .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC,
    .latency_qos_scale = {3, 2, 1, -2, -15, -15},
    .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
        75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
    .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE},
};

timer_coalescing_priority_params_ns_t *
timer_call_get_priority_params(void)
{
    return &tcoal_prio_params_init;
}