/*
 * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/OSAtomic.h>
#include <kern/debug.h>
#include <machine/atomic.h>

enum {
    false = 0,
    true = 1
};

#ifndef NULL
#define NULL ((void *)0)
#endif

#define ATOMIC_DEBUG DEBUG

#if ATOMIC_DEBUG
#define ALIGN_TEST(p, t) do {                                       \
    if ((uintptr_t)(p) & (sizeof(t) - 1)) {                         \
        panic("Unaligned atomic pointer %p\n", (p));                \
    }                                                               \
} while (0)
#else
#define ALIGN_TEST(p, t) do { } while (0)
#endif

// 19831745 - start of big hammer!
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"

/*
 * Atomic operations
 *
 * These are _the_ atomic operations, now implemented via compiler
 * built-ins. Unlike the assembler implementations they replace, this C
 * implementation is a candidate for Link-Time-Optimization inlining.
 */

#undef OSCompareAndSwap8
Boolean OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address)
{
    return __c11_atomic_compare_exchange_strong((_Atomic UInt8 *)address, &oldValue, newValue,
        memory_order_acq_rel_smp, memory_order_relaxed);
}

#undef OSCompareAndSwap16
Boolean OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address)
{
    return __c11_atomic_compare_exchange_strong((_Atomic UInt16 *)address, &oldValue, newValue,
        memory_order_acq_rel_smp, memory_order_relaxed);
}

#undef OSCompareAndSwap
Boolean OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address)
{
    ALIGN_TEST(address, UInt32);
    return __c11_atomic_compare_exchange_strong((_Atomic UInt32 *)address, &oldValue, newValue,
        memory_order_acq_rel_smp, memory_order_relaxed);
}
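
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical read-modify-write loop built on OSCompareAndSwap. The caller
 * re-reads the current value and retries until the swap succeeds.
 * example_atomic_max() is a hypothetical name.
 */
#if 0
static void
example_atomic_max(UInt32 candidate, volatile UInt32 *maximum)
{
    UInt32 observed;

    do {
        observed = *maximum;
        if (candidate <= observed) {
            return;	/* the stored maximum already wins */
        }
    } while (!OSCompareAndSwap(observed, candidate, maximum));
}
#endif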

#undef OSCompareAndSwap64
Boolean OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address)
{
    /*
     * _Atomic UInt64 requires 8-byte alignment on all architectures.
     * Casting through uintptr_t silences the compiler's alignment
     * warning; when enabled, ALIGN_TEST() verifies at run time that
     * the cast was actually legal.
     */
    _Atomic UInt64 *aligned_addr = (_Atomic UInt64 *)(uintptr_t)address;

    ALIGN_TEST(address, UInt64);
    return __c11_atomic_compare_exchange_strong(aligned_addr, &oldValue, newValue,
        memory_order_acq_rel_smp, memory_order_relaxed);
}

#undef OSCompareAndSwapPtr
Boolean OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address)
{
#if __LP64__
    return OSCompareAndSwap64((UInt64)oldValue, (UInt64)newValue, (volatile UInt64 *)address);
#else
    return OSCompareAndSwap((UInt32)oldValue, (UInt32)newValue, (volatile UInt32 *)address);
#endif
}
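
/*
 * Usage sketch (illustrative only; struct example_node and
 * example_push are hypothetical): pushing onto a singly-linked list
 * head with OSCompareAndSwapPtr. A matching lock-free pop would also
 * have to handle the classic ABA problem; only the push side is shown.
 */
#if 0
struct example_node {
    struct example_node *next;
};

static void
example_push(struct example_node *node, struct example_node * volatile *head)
{
    do {
        node->next = *head;
    } while (!OSCompareAndSwapPtr(node->next, node, (void * volatile *)head));
}
#endif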

SInt8 OSAddAtomic8(SInt32 amount, volatile SInt8 *address)
{
    return __c11_atomic_fetch_add((_Atomic SInt8 *)address, amount, memory_order_relaxed);
}

SInt16 OSAddAtomic16(SInt32 amount, volatile SInt16 *address)
{
    return __c11_atomic_fetch_add((_Atomic SInt16 *)address, amount, memory_order_relaxed);
}

#undef OSAddAtomic
SInt32 OSAddAtomic(SInt32 amount, volatile SInt32 *address)
{
    ALIGN_TEST(address, SInt32);
    return __c11_atomic_fetch_add((_Atomic SInt32 *)address, amount, memory_order_relaxed);
}

#undef OSAddAtomic64
SInt64 OSAddAtomic64(SInt64 amount, volatile SInt64 *address)
{
    _Atomic SInt64 *aligned_address = (_Atomic SInt64 *)(uintptr_t)address;

    ALIGN_TEST(address, SInt64);
    return __c11_atomic_fetch_add(aligned_address, amount, memory_order_relaxed);
}

#undef OSAddAtomicLong
long
OSAddAtomicLong(long theAmount, volatile long *address)
{
#ifdef __LP64__
    return (long)OSAddAtomic64((SInt64)theAmount, (volatile SInt64 *)address);
#else
    return (long)OSAddAtomic((SInt32)theAmount, address);
#endif
}

#undef OSIncrementAtomic
SInt32 OSIncrementAtomic(volatile SInt32 *value)
{
    return OSAddAtomic(1, value);
}

#undef OSDecrementAtomic
SInt32 OSDecrementAtomic(volatile SInt32 *value)
{
    return OSAddAtomic(-1, value);
}
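
/*
 * Usage sketch (illustrative only; the object type and
 * example_destroy() are hypothetical): the add/increment/decrement
 * family returns the value stored *before* the operation, so a
 * decrement that returns 1 means the counter just reached zero. Note
 * that these operations are memory_order_relaxed; a production
 * reference count would want release/acquire ordering around the
 * final release.
 */
#if 0
struct example_object {
    SInt32 refcount;
};

static void
example_release(struct example_object *obj)
{
    if (OSDecrementAtomic(&obj->refcount) == 1) {
        /* the last reference just went away */
        example_destroy(obj);
    }
}
#endif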

#undef OSBitAndAtomic
UInt32 OSBitAndAtomic(UInt32 mask, volatile UInt32 *value)
{
    return __c11_atomic_fetch_and((_Atomic UInt32 *)value, mask, memory_order_relaxed);
}

#undef OSBitOrAtomic
UInt32 OSBitOrAtomic(UInt32 mask, volatile UInt32 *value)
{
    return __c11_atomic_fetch_or((_Atomic UInt32 *)value, mask, memory_order_relaxed);
}

#undef OSBitXorAtomic
UInt32 OSBitXorAtomic(UInt32 mask, volatile UInt32 *value)
{
    return __c11_atomic_fetch_xor((_Atomic UInt32 *)value, mask, memory_order_relaxed);
}
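
/*
 * Usage sketch (illustrative only; EXAMPLE_FLAG_DIRTY and
 * example_mark_dirty are hypothetical): because the bitwise operations
 * return the pre-operation value, OSBitOrAtomic can set a flag and
 * simultaneously tell the caller whether it performed the 0 -> 1
 * transition.
 */
#if 0
#define EXAMPLE_FLAG_DIRTY 0x00000001

static Boolean
example_mark_dirty(volatile UInt32 *flags)
{
    /* true only for the thread that actually set the bit */
    return (OSBitOrAtomic(EXAMPLE_FLAG_DIRTY, flags) & EXAMPLE_FLAG_DIRTY) == 0;
}
#endif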

/*
 * Set or clear a single bit in a byte array. Bits are numbered
 * most-significant-first within each byte: bit 0 is the 0x80 bit of
 * startAddress[0]. Returns true if the bit was already in the
 * requested state.
 */
static Boolean OSTestAndSetClear(UInt32 bit, Boolean wantSet, volatile UInt8 *startAddress)
{
    UInt8 mask = 1;
    UInt8 oldValue;
    UInt8 wantValue;

    startAddress += (bit / 8);
    mask <<= (7 - (bit % 8));
    wantValue = wantSet ? mask : 0;

    do {
        oldValue = *startAddress;
        if ((oldValue & mask) == wantValue) {
            /* Bit is already in the wanted state; nothing to swap. */
            break;
        }
    } while (!__c11_atomic_compare_exchange_strong((_Atomic UInt8 *)startAddress,
        &oldValue, (oldValue & ~mask) | wantValue, memory_order_relaxed, memory_order_relaxed));

    return (oldValue & mask) == wantValue;
}

Boolean OSTestAndSet(UInt32 bit, volatile UInt8 *startAddress)
{
    return OSTestAndSetClear(bit, true, startAddress);
}

Boolean OSTestAndClear(UInt32 bit, volatile UInt8 *startAddress)
{
    return OSTestAndSetClear(bit, false, startAddress);
}
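
/*
 * Usage sketch (illustrative only; example_bitmap and example_bits
 * are hypothetical): OSTestAndSet/OSTestAndClear number bits
 * most-significant-first within each byte, so bit 0 is the 0x80 bit
 * of startAddress[0] and bit 9 is the 0x40 bit of startAddress[1].
 * The return value reports whether the bit was already in the
 * requested state.
 */
#if 0
static UInt8 example_bitmap[4];	/* covers bits 0..31 */

static void
example_bits(void)
{
    Boolean was_set;

    was_set = OSTestAndSet(0, example_bitmap);	/* sets 0x80 in byte 0 */
    was_set = OSTestAndSet(9, example_bitmap);	/* sets 0x40 in byte 1 */
    (void)was_set;
}
#endif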

/*
 * Sub-word (8- and 16-bit) variants; no alignment tests are performed
 * on these.
 */

SInt8 OSIncrementAtomic8(volatile SInt8 *value)
{
    return OSAddAtomic8(1, value);
}

SInt8 OSDecrementAtomic8(volatile SInt8 *value)
{
    return OSAddAtomic8(-1, value);
}

UInt8 OSBitAndAtomic8(UInt32 mask, volatile UInt8 *value)
{
    return __c11_atomic_fetch_and((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}

UInt8 OSBitOrAtomic8(UInt32 mask, volatile UInt8 *value)
{
    return __c11_atomic_fetch_or((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}

UInt8 OSBitXorAtomic8(UInt32 mask, volatile UInt8 *value)
{
    return __c11_atomic_fetch_xor((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}

SInt16 OSIncrementAtomic16(volatile SInt16 *value)
{
    return OSAddAtomic16(1, value);
}

SInt16 OSDecrementAtomic16(volatile SInt16 *value)
{
    return OSAddAtomic16(-1, value);
}

UInt16 OSBitAndAtomic16(UInt32 mask, volatile UInt16 *value)
{
    return __c11_atomic_fetch_and((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}

UInt16 OSBitOrAtomic16(UInt32 mask, volatile UInt16 *value)
{
    return __c11_atomic_fetch_or((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}

UInt16 OSBitXorAtomic16(UInt32 mask, volatile UInt16 *value)
{
    return __c11_atomic_fetch_xor((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}

// 19831745 - end of big hammer!
#pragma clang diagnostic pop