/*
 * Copyright (c) 1991-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <vm/vm_kern.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <mach-o/fat.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <libkern/OSByteOrder.h>
#include <machine/exec.h>

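/*
 * For reference, the on-disk layout of a fat (universal) file, as declared in
 * <mach-o/fat.h>: a struct fat_header (magic, nfat_arch) followed immediately
 * by nfat_arch struct fat_arch records (cputype, cpusubtype, offset, size,
 * align).  All of these fields are stored big-endian regardless of host byte
 * order, hence the OSSwapBigToHostInt32() calls throughout this file.
 * Callers typically validate the mapped header region with
 * fatfile_validate_fatarches() before selecting a slice with
 * fatfile_getbestarch() or fatfile_getbestarch_for_cputype().
 */
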
/**********************************************************************
 * Routine:	fatfile_getarch()
 *
 * Function:	Locate the architecture-dependent contents of a fat
 *		file that match this CPU.
 *
 * Args:	data_ptr:	Pointer to the fat file header in memory.
 *		data_size:	How large the mapped fat file header is
 *				(including the fat_arch array).
 *		req_cpu_type:	The required cpu type.
 *		mask_bits:	Bits to mask from the sub-image type when
 *				grading it vs. the req_cpu_type
 *		archret (out):	Pointer to fat_arch structure to hold
 *				the results (fields returned in host byte order).
 *
 * Returns:	LOAD_SUCCESS:	Valid architecture found.
 *		LOAD_BADARCH:	No valid architecture found.
 *		LOAD_FAILURE / LOAD_BADMACHO: Malformed or truncated fat header.
 **********************************************************************/
static load_return_t
fatfile_getarch(
	vm_offset_t data_ptr,
	vm_size_t data_size,
	cpu_type_t req_cpu_type,
	cpu_type_t mask_bits,
	struct fat_arch *archret)
{
	load_return_t lret;
	struct fat_arch *arch;
	struct fat_arch *best_arch;
	int grade;
	int best_grade;
	uint32_t nfat_arch, max_nfat_arch;
	cpu_type_t testtype;
	cpu_type_t testsubtype;
	struct fat_header *header;

	if (sizeof(struct fat_header) > data_size) {
		return (LOAD_FAILURE);
	}

	header = (struct fat_header *)data_ptr;
	nfat_arch = OSSwapBigToHostInt32(header->nfat_arch);

	max_nfat_arch = (data_size - sizeof(struct fat_header)) / sizeof(struct fat_arch);
	if (nfat_arch > max_nfat_arch) {
		/* nfat_arch would cause us to read off end of buffer */
		return (LOAD_BADMACHO);
	}

	/*
	 * Scan the fat_arch entries looking for the best one.
	 */
	best_arch = NULL;
	best_grade = 0;
	arch = (struct fat_arch *)(data_ptr + sizeof(struct fat_header));
	for (; nfat_arch-- > 0; arch++) {
		testtype = OSSwapBigToHostInt32(arch->cputype);
		testsubtype = OSSwapBigToHostInt32(arch->cpusubtype) & ~CPU_SUBTYPE_MASK;

		/*
		 * Check to see if right cpu type.
		 */
		if ((testtype & ~mask_bits) != (req_cpu_type & ~mask_bits)) {
			continue;
		}

		/*
		 * Get the grade of the cpu subtype (without feature flags)
		 */
		grade = grade_binary(testtype, testsubtype);
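		/*
		 * Note: grade_binary() is machine-dependent; a grade of 0 is
		 * taken here to mean the slice cannot execute on this CPU,
		 * while higher grades indicate a better match.
		 */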

		/*
		 * Remember it if it's the best we've seen.
		 */
		if (grade > best_grade) {
			best_grade = grade;
			best_arch = arch;
		}
	}

	/*
	 * Return our results.
	 */
	if (best_arch == NULL) {
		lret = LOAD_BADARCH;
	} else {
		archret->cputype =
		    OSSwapBigToHostInt32(best_arch->cputype);
		archret->cpusubtype =
		    OSSwapBigToHostInt32(best_arch->cpusubtype);
		archret->offset =
		    OSSwapBigToHostInt32(best_arch->offset);
		archret->size =
		    OSSwapBigToHostInt32(best_arch->size);
		archret->align =
		    OSSwapBigToHostInt32(best_arch->align);

		lret = LOAD_SUCCESS;
	}

	/*
	 * Nothing was allocated here; the caller owns data_ptr.
	 */
	return (lret);
}

load_return_t
fatfile_getbestarch(
	vm_offset_t data_ptr,
	vm_size_t data_size,
	struct fat_arch *archret)
{
	/*
	 * Ignore all architectural bits when determining if an image
	 * in a fat file should be skipped or graded.
	 */
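	/*
	 * (CPU_ARCH_MASK covers the architecture feature bits of a cpu_type_t,
	 * e.g. CPU_ARCH_ABI64, so passing it as mask_bits lets both 32-bit and
	 * 64-bit slices of the running CPU's base architecture be graded.)
	 */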
	return fatfile_getarch(data_ptr, data_size, cpu_type(), CPU_ARCH_MASK, archret);
}

load_return_t
fatfile_getbestarch_for_cputype(
	cpu_type_t cputype,
	vm_offset_t data_ptr,
	vm_size_t data_size,
	struct fat_arch *archret)
{
	/*
	 * Scan the fat_arch array for exact matches for this cpu_type_t only
	 */
	return fatfile_getarch(data_ptr, data_size, cputype, 0, archret);
}

/**********************************************************************
 * Routine:	fatfile_getarch_with_bits()
 *
 * Function:	Locate the architecture-dependent contents of a fat
 *		file that match this CPU.
 *
 * Args:	archbits:	Architecture specific feature bits
 *		data_ptr:	Pointer to the fat file header in memory.
 *		data_size:	How large the mapped fat file header is
 *				(including the fat_arch array).
 *		archret (out):	Pointer to fat_arch structure to hold
 *				the results (fields returned in host byte order).
 *
 * Returns:	LOAD_SUCCESS:	Valid architecture found.
 *		LOAD_BADARCH:	No valid architecture found.
 *		LOAD_FAILURE / LOAD_BADMACHO: Malformed or truncated fat header.
 **********************************************************************/
load_return_t
fatfile_getarch_with_bits(
	integer_t archbits,
	vm_offset_t data_ptr,
	vm_size_t data_size,
	struct fat_arch *archret)
{
	/*
	 * Scan the fat_arch array for matches with the requested
	 * architectural bits set, and for the current hardware CPU type.
	 */
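	/*
	 * (archbits & CPU_ARCH_MASK) keeps only the requested architecture
	 * feature bits (e.g. CPU_ARCH_ABI64), while (cpu_type() & ~CPU_ARCH_MASK)
	 * keeps the base cpu type of the running hardware; together they form
	 * the exact cpu_type_t a slice must match, since mask_bits is 0 here.
	 */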
	return fatfile_getarch(data_ptr, data_size, (archbits & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK), 0, archret);
}

/*
 * Validate the fat_header and fat_arch array in memory. We check that:
 *
 * 1) arch count would not exceed the data buffer
 * 2) arch list does not contain duplicate cputype/cpusubtype tuples
 * 3) arch list does not have two overlapping slices. The area
 *    at the front of the file containing the fat headers is implicitly
 *    a range that a slice should also not try to cover
 */
load_return_t
fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size)
{
	uint32_t magic, nfat_arch;
	uint32_t max_nfat_arch, i, j;
	uint32_t fat_header_size;

	struct fat_arch *arches;
	struct fat_header *header;

	if (sizeof(struct fat_header) > data_size) {
		return (LOAD_FAILURE);
	}

	header = (struct fat_header *)data_ptr;
	magic = OSSwapBigToHostInt32(header->magic);
	nfat_arch = OSSwapBigToHostInt32(header->nfat_arch);

	if (magic != FAT_MAGIC) {
		/* must be FAT_MAGIC big endian */
		return (LOAD_FAILURE);
	}

	max_nfat_arch = (data_size - sizeof(struct fat_header)) / sizeof(struct fat_arch);
	if (nfat_arch > max_nfat_arch) {
		/* nfat_arch would cause us to read off end of buffer */
		return (LOAD_BADMACHO);
	}

	/* now that we know the fat_arch list fits in the buffer, how much does it use? */
	fat_header_size = sizeof(struct fat_header) + nfat_arch * sizeof(struct fat_arch);
	arches = (struct fat_arch *)(data_ptr + sizeof(struct fat_header));

	for (i = 0; i < nfat_arch; i++) {
		uint32_t i_begin = OSSwapBigToHostInt32(arches[i].offset);
		uint32_t i_size = OSSwapBigToHostInt32(arches[i].size);
		uint32_t i_cputype = OSSwapBigToHostInt32(arches[i].cputype);
		uint32_t i_cpusubtype = OSSwapBigToHostInt32(arches[i].cpusubtype);

		if (i_begin < fat_header_size) {
			/* slice is trying to claim part of the file used by fat headers themselves */
			return (LOAD_BADMACHO);
		}

		if ((UINT32_MAX - i_size) < i_begin) {
			/* start + size would overflow */
			return (LOAD_BADMACHO);
		}
		uint32_t i_end = i_begin + i_size;

		for (j = i + 1; j < nfat_arch; j++) {
			uint32_t j_begin = OSSwapBigToHostInt32(arches[j].offset);
			uint32_t j_size = OSSwapBigToHostInt32(arches[j].size);
			uint32_t j_cputype = OSSwapBigToHostInt32(arches[j].cputype);
			uint32_t j_cpusubtype = OSSwapBigToHostInt32(arches[j].cpusubtype);

			if ((i_cputype == j_cputype) && (i_cpusubtype == j_cpusubtype)) {
				/* duplicate cputype/cpusubtype, results in ambiguous references */
				return (LOAD_BADMACHO);
			}

			if ((UINT32_MAX - j_size) < j_begin) {
				/* start + size would overflow */
				return (LOAD_BADMACHO);
			}
			uint32_t j_end = j_begin + j_size;

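			/*
			 * Two slices [i_begin, i_end) and [j_begin, j_end) are
			 * disjoint only if one ends at or before the other begins;
			 * anything else is an overlap and the file is rejected.
			 */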
			if (i_begin <= j_begin) {
				if (i_end <= j_begin) {
					/* I completely precedes J */
				} else {
					/* I started before J, but ends somewhere in or after J */
					return (LOAD_BADMACHO);
				}
			} else {
				if (i_begin >= j_end) {
					/* I started after J started but also after J ended */
				} else {
					/* I started after J started but before it ended, so there is overlap */
					return (LOAD_BADMACHO);
				}
			}
		}
	}

	return (LOAD_SUCCESS);
}