/*
 * Copyright (c) 2015-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cprotect.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/filio.h>
#include <sys/content_protection.h>
#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>
// For write protection
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#define PTR_ADD(type, base, offset)	(type)((uintptr_t)(base) + (offset))

// -- struct cpx --

/*
 * This structure contains the unwrapped key and is passed to the lower
 * layers. It is private, so users must use the accessors declared in
 * sys/cprotect.h to read/write it.
 */

// cpx_flags
typedef uint32_t cpx_flags_t;
enum {
	CPX_SEP_WRAPPEDKEY          = 0x01,
	CPX_IV_AES_CTX_INITIALIZED  = 0x02,
	CPX_USE_OFFSET_FOR_IV       = 0x04,

	// Using AES IV context generated from key
	CPX_IV_AES_CTX_VFS          = 0x08,
	CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10,
	CPX_COMPOSITEKEY            = 0x20,

	// Write page protection
	CPX_WRITE_PROTECTABLE       = 0x40
};

struct cpx {
#if DEBUG
	uint32_t        cpx_magic1;
#endif
	aes_encrypt_ctx cpx_iv_aes_ctx;   // Context used for generating the IV
	cpx_flags_t     cpx_flags;
	uint16_t        cpx_max_key_len;
	uint16_t        cpx_key_len;
	uint8_t         cpx_cached_key[];
};

// -- cpx_t accessors --

size_t cpx_size(size_t key_size)
{
	size_t size = sizeof(struct cpx) + key_size;

#if DEBUG
	size += 4;	// Extra room for the trailing magic (cpx_magic2)
#endif

	return size;
}

size_t cpx_sizex(const struct cpx *cpx)
{
	return cpx_size(cpx->cpx_max_key_len);
}

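/*
 * Illustrative layout (a sketch, not authoritative): on a DEBUG kernel,
 * cpx_size() reserves room for a leading and a trailing magic word, so an
 * allocation of cpx_size(key_len) bytes looks like:
 *
 *	[ cpx_magic1 | cpx_iv_aes_ctx | cpx_flags | lens | cpx_cached_key[key_len] | cpx_magic2 ]
 *
 * e.g. cpx_size(32) == sizeof(struct cpx) + 32 + 4. cpx_init() writes
 * cpx_magic2 at offset cpx_size(key_len) - 4, and cpx_free() asserts on
 * both magics, which catches overruns of the variable-length key area.
 */
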
cpx_t cpx_alloc(size_t key_len)
{
	cpx_t cpx = NULL;

#if CONFIG_KEYPAGE_WP
	/*
	 * Macs only use one key per volume, so force the cpx into its own page.
	 * This way, we can write-protect it as needed.
	 */
	size_t cpsize = cpx_size(key_len);
	if (cpsize < PAGE_SIZE) {
		/*
		 * Don't use MALLOC to allocate the page-sized structure. Instead,
		 * use kmem_alloc to bypass KASAN, since we are supplying our own
		 * unilateral write protection on this page. Note that kmem_alloc
		 * can block.
		 */
		if (kmem_alloc(kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE, VM_KERN_MEMORY_FILE)) {
			/*
			 * Returning NULL at this point (due to the failed allocation)
			 * would just result in a panic, so fall back to attempting a
			 * normal MALLOC, and don't mark the cpx PROTECTABLE.
			 */
			MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK);
			cpx_init(cpx, key_len);
		}
		else {
			/*
			 * kmem_alloc succeeded. Initialize first -- cpx_init() resets
			 * cpx_flags -- and only then mark the page as protectable.
			 */
			cpx_init(cpx, key_len);
			cpx->cpx_flags |= CPX_WRITE_PROTECTABLE;
		}
	}
	else {
		panic("cpx_size too large! (%lu)", cpsize);
	}
#else
	/* If key page write protection is disabled, just use kernel MALLOC. */
	MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK);
	cpx_init(cpx, key_len);
#endif

	return cpx;
}
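
/*
 * Typical lifecycle, as a sketch (raw_key and key_len are placeholders;
 * CP_MAX_KEYSIZE is assumed to come from sys/cprotect.h):
 *
 *	cpx_t cpx = cpx_alloc(CP_MAX_KEYSIZE);	// may block
 *	cpx_set_key_len(cpx, key_len);
 *	memcpy(cpx_key(cpx), raw_key, key_len);
 *	cpx_writeprotect(cpx);			// optional; no-op unless protectable
 *	...
 *	cpx_free(cpx);				// unprotects, zeroes the key, frees
 */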

/*
 * Write-protect the cpx's page. This is a no-op unless CONFIG_KEYPAGE_WP is
 * set and the cpx was marked CPX_WRITE_PROTECTABLE at allocation time.
 */
void cpx_writeprotect(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
	void *cpxstart = (void *)cpx;
	void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
		vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, VM_PROT_READ, FALSE);
	}
#else
	(void)cpx;
#endif
}

#if DEBUG
static const uint32_t cpx_magic1 = 0x7b787063;	// cpx{
static const uint32_t cpx_magic2 = 0x7870637d;	// }cpx
#endif

void cpx_free(cpx_t cpx)
{
#if DEBUG
	assert(cpx->cpx_magic1 == cpx_magic1);
	assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);
#endif

#if CONFIG_KEYPAGE_WP
	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
		/* Unprotect the page before zeroing it. */
		void *cpxstart = (void *)cpx;
		void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
		vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, VM_PROT_DEFAULT, FALSE);

		bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);

		/* This page came from kmem_alloc; it must go back via kmem_free. */
		kmem_free(kernel_map, (vm_offset_t)cpx, PAGE_SIZE);
		return;
	}
#endif
	/*
	 * Common path: either write protection is compiled out, or this cpx
	 * was allocated via the MALLOC fallback.
	 */
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	FREE(cpx, M_TEMP);
}

void cpx_init(cpx_t cpx, size_t key_len)
{
#if DEBUG
	cpx->cpx_magic1 = cpx_magic1;
	*PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;
#endif
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
	cpx->cpx_max_key_len = key_len;
}

bool cpx_is_sep_wrapped_key(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}

void cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
	else
		CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}

bool cpx_is_composite_key(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY);
}

void cpx_set_is_composite_key(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_COMPOSITEKEY);
	else
		CLR(cpx->cpx_flags, CPX_COMPOSITEKEY);
}

bool cpx_use_offset_for_iv(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}

void cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
	else
		CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}

bool cpx_synthetic_offset_for_iv(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}

void cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
	else
		CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}

uint16_t cpx_max_key_len(const struct cpx *cpx)
{
	return cpx->cpx_max_key_len;
}

uint16_t cpx_key_len(const struct cpx *cpx)
{
	return cpx->cpx_key_len;
}

void cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
{
	cpx->cpx_key_len = key_len;

	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS)) {
		/*
		 * We assume that if the key length is being modified, the key
		 * has changed. As a result, un-set any bits related to the
		 * AES context, if needed. They should be re-generated
		 * on-demand.
		 */
		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS);
	}
}
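
/*
 * Sketch of the invalidation above (new_key and new_len are placeholders):
 * once a VFS-derived IV context exists, installing a new key forces Kiv to
 * be re-derived on the next cpx_iv_aes_ctx() call.
 *
 *	cpx_set_key_len(cpx, new_len);		// clears CPX_IV_AES_CTX_{INITIALIZED,VFS}
 *	memcpy(cpx_key(cpx), new_key, new_len);
 *	(void) cpx_iv_aes_ctx(cpx);		// re-hashes the new key into Kiv
 */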

bool cpx_has_key(const struct cpx *cpx)
{
	return cpx->cpx_key_len > 0;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
void *cpx_key(const struct cpx *cpx)
{
	return (void *)cpx->cpx_cached_key;
}
#pragma clang diagnostic pop

void cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
{
	aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
	CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);
}

aes_encrypt_ctx *cpx_iv_aes_ctx(struct cpx *cpx)
{
	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
		return &cpx->cpx_iv_aes_ctx;

	SHA1_CTX sha1ctxt;
	uint8_t digest[SHA_DIGEST_LENGTH];	/* Kiv */

	/* Derive the IV key (Kiv) by hashing the cached key. */
	SHA1Init(&sha1ctxt);

	/*
	 * We can only use this when the keys are generated in the AP; as a
	 * result, we only use the first 32 bytes of key length in the cached
	 * key.
	 */
	SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
	SHA1Final(digest, &sha1ctxt);

	cpx_set_aes_iv_key(cpx, digest);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);

	return &cpx->cpx_iv_aes_ctx;
}
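
/*
 * How Kiv is typically consumed (a sketch, not a contract of this file):
 * when CPX_USE_OFFSET_FOR_IV is set, a caller generating the IV for a file
 * block would AES-encrypt the (possibly synthetic) file offset with the
 * context returned above, conceptually:
 *
 *	IV = AES-128(Kiv, block_offset)
 *
 * The flag names (CPX_USE_OFFSET_FOR_IV, CPX_SYNTHETIC_OFFSET_FOR_IV) are
 * this file's only evidence for that scheme; the actual IV generation
 * lives in the filesystem code.
 */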

void cpx_flush(cpx_t cpx)
{
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx));
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
}

bool cpx_can_copy(const struct cpx *src, const struct cpx *dst)
{
	return src->cpx_key_len <= dst->cpx_max_key_len;
}

void cpx_copy(const struct cpx *src, cpx_t dst)
{
	uint16_t key_len = cpx_key_len(src);
	cpx_set_key_len(dst, key_len);
	memcpy(cpx_key(dst), cpx_key(src), key_len);
	dst->cpx_flags = src->cpx_flags;
	if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
		dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx;
}
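
/*
 * cpx_can_copy() is the guard for cpx_copy(): the destination must have
 * room for the source's key, or the memcpy above would overrun
 * dst->cpx_cached_key. A sketch:
 *
 *	if (cpx_can_copy(src, dst))
 *		cpx_copy(src, dst);
 */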

typedef struct {
	cp_lock_state_t state;
	int             valid_uuid;
	uuid_t          volume_uuid;
} cp_lock_vfs_callback_arg;

static int
cp_lock_vfs_callback(mount_t mp, void *arg)
{
	cp_lock_vfs_callback_arg *callback_arg = (cp_lock_vfs_callback_arg *)arg;

	if (callback_arg->valid_uuid) {
		struct vfs_attr va;
		VFSATTR_INIT(&va);
		VFSATTR_WANTED(&va, f_uuid);

		if (vfs_getattr(mp, &va, vfs_context_current()))
			return 0;

		if (!VFSATTR_IS_SUPPORTED(&va, f_uuid))
			return 0;

		if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t)))
			return 0;
	}

	VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->state, 0, vfs_context_kernel());
	return 0;
}

int
cp_key_store_action(cp_key_store_action_t action)
{
	cp_lock_vfs_callback_arg callback_arg;

	switch (action) {
	case CP_ACTION_LOCKED:
	case CP_ACTION_UNLOCKED:
		callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
		memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
		callback_arg.valid_uuid = 0;
		return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
	default:
		return -1;
	}
}

int
cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action)
{
	cp_lock_vfs_callback_arg callback_arg;

	switch (action) {
	case CP_ACTION_LOCKED:
	case CP_ACTION_UNLOCKED:
		callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
		memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
		callback_arg.valid_uuid = 1;
		return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
	default:
		return -1;
	}
}
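
/*
 * Usage sketch (vol_uuid is a placeholder): broadcast a lock-state change
 * to every mounted volume, or target one volume by UUID. Each matching
 * mount receives FIODEVICELOCKED with CP_LOCKED_STATE or CP_UNLOCKED_STATE
 * through its VFS ioctl handler.
 *
 *	cp_key_store_action(CP_ACTION_LOCKED);
 *	cp_key_store_action_for_volume(vol_uuid, CP_ACTION_UNLOCKED);
 */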

int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
	/*
	 * The valid protection classes are a contiguous range starting at 0.
	 * The argument is signed so that unassigned values (e.g. -1), left
	 * over from directory entry creation time in HFS, are rejected.
	 */
	if (isdir) {
		/* Directories may not be class F, but they may be "NONE". */
		return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
		        (protectionclass <= PROTECTION_CLASS_D));
	}
	else {
		return ((protectionclass >= PROTECTION_CLASS_A) &&
		        (protectionclass <= PROTECTION_CLASS_F));
	}
}
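
/*
 * Boundary examples (class constants from sys/content_protection.h):
 *
 *	cp_is_valid_class(0, PROTECTION_CLASS_F);		// true:  files may be F
 *	cp_is_valid_class(1, PROTECTION_CLASS_F);		// false: directories may not
 *	cp_is_valid_class(1, PROTECTION_CLASS_DIR_NONE);	// true:  "NONE" is directory-only
 *	cp_is_valid_class(0, -1);				// false: unassigned HFS value
 */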

/*
 * Parses build versions of the form 12A316, i.e.
 * <major><minor letter><revision>, and returns a uint32_t of the form
 * 0xaabbcccc, where aa = <major>, bb = <ASCII code of the minor letter>
 * and cccc = <revision>.
 */
static cp_key_os_version_t
parse_os_version(const char *vers)
{
	const char *p = vers;

	int a = 0;
	while (*p >= '0' && *p <= '9') {
		a = a * 10 + *p - '0';
		++p;
	}

	if (!a)
		return 0;

	int b = *p++;
	if (!b)
		return 0;

	int c = 0;
	while (*p >= '0' && *p <= '9') {
		c = c * 10 + *p - '0';
		++p;
	}

	if (!c)
		return 0;

	return (a & 0xff) << 24 | b << 16 | (c & 0xffff);
}
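
/*
 * Worked example: "12A316" parses as a = 12 (0x0c), b = 'A' (0x41) and
 * c = 316 (0x013c), so the result is 0x0c41013c. Strings with no leading
 * digits, no letter, or no trailing digits parse to 0 and are rejected.
 */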

cp_key_os_version_t
cp_os_version(void)
{
	static cp_key_os_version_t cp_os_version;

	if (cp_os_version)
		return cp_os_version;

	if (!osversion[0])
		return 0;

	cp_os_version = parse_os_version(osversion);
	if (!cp_os_version) {
		printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
		cp_os_version = 1;
	}

	return cp_os_version;
}