1 | /* |
2 | * Copyright (c) 2007-2016 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | #include <stdarg.h> |
29 | #include <string.h> |
30 | #include <mach-o/loader.h> |
31 | #include <mach-o/nlist.h> |
32 | #include <mach-o/reloc.h> |
33 | #if KERNEL |
34 | #include <kern/kalloc.h> |
35 | #include <libkern/libkern.h> |
36 | #include <mach/vm_param.h> |
37 | #include <vm/vm_kern.h> |
38 | #else |
39 | #include <stdio.h> |
40 | #include <stdlib.h> |
41 | #include <mach/mach_init.h> |
42 | #include <mach-o/swap.h> |
43 | #endif |
44 | |
45 | #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld" |
46 | #include <AssertMacros.h> |
47 | |
48 | #include "kxld_util.h" |
49 | |
50 | #if !KERNEL |
51 | static void unswap_macho_32(u_char *file, enum NXByteOrder host_order, |
52 | enum NXByteOrder target_order); |
53 | static void unswap_macho_64(u_char *file, enum NXByteOrder host_order, |
54 | enum NXByteOrder target_order); |
55 | #endif /* !KERNEL */ |
56 | |
57 | #if DEBUG |
58 | static unsigned long num_allocations = 0; |
59 | static unsigned long num_frees = 0; |
60 | static unsigned long bytes_allocated = 0; |
61 | static unsigned long bytes_freed = 0; |
62 | #endif |
63 | |
64 | static KXLDLoggingCallback s_logging_callback = NULL; |
static char s_callback_name[64] = "internal";
66 | static void *s_callback_data = NULL; |
67 | |
68 | #if !KERNEL |
69 | static boolean_t s_cross_link_enabled = FALSE; |
70 | static kxld_size_t s_cross_link_page_size = PAGE_SIZE; |
71 | #endif |
72 | |
73 | |
74 | /******************************************************************************* |
75 | *******************************************************************************/ |
76 | void |
77 | kxld_set_logging_callback(KXLDLoggingCallback logging_callback) |
78 | { |
79 | s_logging_callback = logging_callback; |
80 | } |
81 | |
82 | /******************************************************************************* |
83 | *******************************************************************************/ |
84 | void |
85 | kxld_set_logging_callback_data(const char *name, void *user_data) |
86 | { |
87 | if (name) { |
88 | (void)strlcpy(s_callback_name, name, sizeof(s_callback_name)); |
89 | /* disallow format strings in the kxld logging callback name */ |
90 | for (size_t i = 0; i < sizeof(s_callback_name); i++) { |
91 | if (s_callback_name[i] == '%') { |
92 | s_callback_name[i] = '.'; |
93 | } |
94 | } |
95 | } else { |
(void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
97 | } |
98 | |
99 | s_callback_data = user_data; |
100 | } |
101 | |
102 | /******************************************************************************* |
103 | *******************************************************************************/ |
104 | void |
105 | kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level, |
106 | const char *in_format, ...) |
107 | { |
108 | char stack_buffer[256]; |
109 | char *alloc_buffer = NULL; |
110 | char *format = stack_buffer; |
111 | u_int length = 0; |
112 | va_list ap; |
113 | |
114 | if (s_logging_callback) { |
115 | |
length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
117 | s_callback_name, in_format); |
118 | |
119 | if (length >= sizeof(stack_buffer)) { |
120 | length += 1; |
121 | alloc_buffer = kxld_alloc(length); |
122 | if (!alloc_buffer) return; |
123 | |
snprintf(alloc_buffer, length, "kxld[%s]: %s",
125 | s_callback_name, in_format); |
126 | format = alloc_buffer; |
127 | } |
128 | |
129 | va_start(ap, in_format); |
130 | s_logging_callback(subsystem, level, format, ap, s_callback_data); |
131 | va_end(ap); |
132 | |
133 | if (alloc_buffer) { |
134 | kxld_free(alloc_buffer, length); |
135 | } |
136 | } |
137 | } |
138 | |
139 | /* We'll use kalloc for any page-based allocations under this threshold, and |
140 | * kmem_alloc otherwise. |
141 | */ |
#define KALLOC_MAX (16 * 1024)
143 | |
144 | /******************************************************************************* |
145 | *******************************************************************************/ |
146 | void * |
147 | kxld_alloc(size_t size) |
148 | { |
149 | void * ptr = NULL; |
150 | |
151 | #if KERNEL |
152 | ptr = kalloc(size); |
153 | #else |
154 | ptr = malloc(size); |
155 | #endif |
156 | |
157 | #if DEBUG |
158 | if (ptr) { |
159 | ++num_allocations; |
160 | bytes_allocated += size; |
161 | } |
162 | #endif |
163 | |
164 | return ptr; |
165 | } |
166 | |
167 | /******************************************************************************* |
168 | *******************************************************************************/ |
169 | void * |
170 | kxld_page_alloc_untracked(size_t size) |
171 | { |
172 | void * ptr = NULL; |
173 | #if KERNEL |
174 | kern_return_t rval = 0; |
175 | vm_offset_t addr = 0; |
176 | #endif /* KERNEL */ |
177 | |
178 | size = round_page(size); |
179 | |
180 | #if KERNEL |
181 | if (size < KALLOC_MAX) { |
182 | ptr = kalloc(size); |
183 | } else { |
184 | rval = kmem_alloc(kernel_map, &addr, size, VM_KERN_MEMORY_OSKEXT); |
185 | if (!rval) ptr = (void *) addr; |
186 | } |
187 | #else /* !KERNEL */ |
188 | ptr = malloc(size); |
189 | #endif /* KERNEL */ |
190 | |
191 | return ptr; |
192 | } |
193 | |
194 | /******************************************************************************* |
195 | *******************************************************************************/ |
196 | void * |
197 | kxld_page_alloc(size_t size) |
198 | { |
199 | void * ptr = NULL; |
200 | |
201 | ptr = kxld_page_alloc_untracked(size); |
202 | #if DEBUG |
203 | if (ptr) { |
204 | ++num_allocations; |
205 | bytes_allocated += round_page(size); |
206 | } |
207 | #endif /* DEBUG */ |
208 | |
209 | return ptr; |
210 | } |
211 | |
212 | /******************************************************************************* |
213 | *******************************************************************************/ |
214 | void * |
215 | kxld_alloc_pageable(size_t size) |
216 | { |
217 | size = round_page(size); |
218 | |
219 | #if KERNEL |
220 | kern_return_t rval = 0; |
221 | vm_offset_t ptr = 0; |
222 | |
223 | rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT); |
224 | if (rval) ptr = 0; |
225 | |
226 | return (void *) ptr; |
227 | #else |
228 | return kxld_page_alloc_untracked(size); |
229 | #endif |
230 | } |
231 | |
232 | /******************************************************************************* |
233 | *******************************************************************************/ |
234 | void |
235 | kxld_free(void *ptr, size_t size __unused) |
236 | { |
237 | #if DEBUG |
238 | ++num_frees; |
239 | bytes_freed += size; |
240 | #endif |
241 | |
242 | #if KERNEL |
243 | kfree(ptr, size); |
244 | #else |
245 | free(ptr); |
246 | #endif |
247 | } |
248 | |
249 | /******************************************************************************* |
250 | *******************************************************************************/ |
251 | void |
252 | kxld_page_free_untracked(void *ptr, size_t size __unused) |
253 | { |
254 | #if KERNEL |
255 | size = round_page(size); |
256 | |
257 | if (size < KALLOC_MAX) { |
258 | kfree(ptr, size); |
259 | } else { |
260 | kmem_free(kernel_map, (vm_offset_t) ptr, size); |
261 | } |
262 | #else /* !KERNEL */ |
263 | free(ptr); |
264 | #endif /* KERNEL */ |
265 | } |
266 | |
267 | |
268 | /******************************************************************************* |
269 | *******************************************************************************/ |
270 | void |
271 | kxld_page_free(void *ptr, size_t size) |
272 | { |
273 | #if DEBUG |
274 | ++num_frees; |
275 | bytes_freed += round_page(size); |
276 | #endif /* DEBUG */ |
277 | kxld_page_free_untracked(ptr, size); |
278 | } |
279 | |
280 | /******************************************************************************* |
281 | *******************************************************************************/ |
282 | kern_return_t |
283 | validate_and_swap_macho_32(u_char *file, u_long size |
284 | #if !KERNEL |
285 | , enum NXByteOrder host_order |
286 | #endif /* !KERNEL */ |
287 | ) |
288 | { |
289 | kern_return_t rval = KERN_FAILURE; |
290 | struct mach_header *mach_hdr = (struct mach_header *) ((void *) file); |
291 | struct load_command *load_hdr = NULL; |
292 | struct segment_command *seg_hdr = NULL; |
293 | struct section *sects = NULL; |
294 | struct relocation_info *relocs = NULL; |
295 | struct symtab_command *symtab_hdr = NULL; |
296 | struct nlist *symtab = NULL; |
297 | u_long offset = 0; |
298 | u_int cmd = 0; |
299 | u_int cmdsize = 0; |
300 | u_int i = 0; |
301 | u_int j = 0; |
302 | #if !KERNEL |
303 | boolean_t swap = FALSE; |
304 | #endif /* !KERNEL */ |
305 | |
306 | check(file); |
307 | check(size); |
308 | |
309 | /* Verify that the file is big enough for the mach header */ |
310 | require_action(size >= sizeof(*mach_hdr), finish, |
311 | rval=KERN_FAILURE; |
312 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
313 | offset = sizeof(*mach_hdr); |
314 | |
315 | #if !KERNEL |
316 | /* Swap the mach header if necessary */ |
317 | if (mach_hdr->magic == MH_CIGAM) { |
318 | swap = TRUE; |
319 | (void) swap_mach_header(mach_hdr, host_order); |
320 | } |
321 | #endif /* !KERNEL */ |
322 | |
323 | /* Validate the mach_header's magic number */ |
324 | require_action(mach_hdr->magic == MH_MAGIC, finish, |
325 | rval=KERN_FAILURE; |
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
"Invalid magic number: 0x%x.", mach_hdr->magic));
328 | |
329 | /* If in the running kernel, and asked to validate the kernel |
330 | * (which is the only file of type MH_EXECUTE we should ever see), |
331 | * then just assume it's ok or we wouldn't be running to begin with. |
332 | */ |
333 | #if KERNEL |
334 | if (mach_hdr->filetype == MH_EXECUTE) { |
335 | rval = KERN_SUCCESS; |
336 | goto finish; |
337 | } |
338 | #endif /* KERNEL */ |
339 | |
340 | /* Validate and potentially swap the load commands */ |
341 | for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) { |
342 | |
343 | /* Get the load command and size */ |
344 | load_hdr = (struct load_command *) ((void *) (file + offset)); |
345 | cmd = load_hdr->cmd; |
346 | cmdsize = load_hdr->cmdsize; |
347 | |
348 | #if !KERNEL |
349 | if (swap) { |
350 | cmd = OSSwapInt32(load_hdr->cmd); |
351 | cmdsize = OSSwapInt32(load_hdr->cmdsize); |
352 | } |
353 | #endif /* !KERNEL */ |
354 | |
355 | /* Verify that the file is big enough to contain the load command */ |
356 | require_action(size >= offset + cmdsize, finish, |
357 | rval=KERN_FAILURE; |
358 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
359 | |
360 | switch(cmd) { |
361 | case LC_SEGMENT: |
362 | /* Get and swap the segment header */ |
363 | seg_hdr = (struct segment_command *) load_hdr; |
364 | #if !KERNEL |
365 | if (swap) swap_segment_command(seg_hdr, host_order); |
366 | #endif /* !KERNEL */ |
367 | |
368 | /* Get and swap the section headers */ |
369 | sects = (struct section *) &seg_hdr[1]; |
370 | #if !KERNEL |
371 | if (swap) swap_section(sects, seg_hdr->nsects, host_order); |
372 | #endif /* !KERNEL */ |
373 | |
374 | /* Ignore segments with no vm size */ |
375 | if (!seg_hdr->vmsize) continue; |
376 | |
377 | /* Verify that the file is big enough for the segment data. */ |
378 | require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish, |
379 | rval=KERN_FAILURE; |
380 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
381 | |
382 | for (j = 0; j < seg_hdr->nsects; ++j) { |
383 | |
/* Verify that, if the section is not to be zero-filled on
* demand, the file is big enough for the section's data.
*/
387 | require_action((sects[j].flags & S_ZEROFILL) || |
388 | (size >= sects[j].offset + sects[j].size), finish, |
389 | rval=KERN_FAILURE; |
390 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
391 | |
392 | /* Verify that the file is big enough for the section's |
393 | * relocation entries. |
394 | */ |
395 | require_action(size >= |
396 | sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish, |
397 | rval=KERN_FAILURE; |
398 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
399 | |
400 | /* Swap the relocation entries */ |
401 | relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff)); |
402 | #if !KERNEL |
403 | if (swap) { |
404 | swap_relocation_info(relocs, sects[j].nreloc, |
405 | host_order); |
406 | } |
407 | #endif /* !KERNEL */ |
408 | } |
409 | |
410 | break; |
411 | case LC_SYMTAB: |
412 | /* Get and swap the symtab header */ |
413 | symtab_hdr = (struct symtab_command *) load_hdr; |
414 | #if !KERNEL |
415 | if (swap) swap_symtab_command(symtab_hdr, host_order); |
416 | #endif /* !KERNEL */ |
417 | |
418 | /* Verify that the file is big enough for the symbol table */ |
419 | require_action(size >= |
420 | symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish, |
421 | rval=KERN_FAILURE; |
422 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
423 | |
424 | /* Verify that the file is big enough for the string table */ |
425 | require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish, |
426 | rval=KERN_FAILURE; |
427 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
428 | |
429 | #if !KERNEL |
430 | /* Swap the symbol table entries */ |
431 | symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff)); |
432 | if (swap) swap_nlist(symtab, symtab_hdr->nsyms, host_order); |
433 | #endif /* !KERNEL */ |
434 | |
435 | break; |
436 | default: |
437 | #if !KERNEL |
438 | /* Swap the load command */ |
439 | if (swap) swap_load_command(load_hdr, host_order); |
440 | #endif /* !KERNEL */ |
441 | break; |
442 | } |
443 | } |
444 | |
445 | rval = KERN_SUCCESS; |
446 | |
447 | finish: |
448 | return rval; |
449 | } |
450 | |
451 | /******************************************************************************* |
452 | *******************************************************************************/ |
453 | kern_return_t |
454 | validate_and_swap_macho_64(u_char *file, u_long size |
455 | #if !KERNEL |
456 | , enum NXByteOrder host_order |
457 | #endif /* !KERNEL */ |
458 | ) |
459 | { |
460 | kern_return_t rval = KERN_FAILURE; |
461 | struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file); |
462 | struct load_command *load_hdr = NULL; |
463 | struct segment_command_64 *seg_hdr = NULL; |
464 | struct section_64 *sects = NULL; |
465 | struct relocation_info *relocs = NULL; |
466 | struct symtab_command *symtab_hdr = NULL; |
467 | struct nlist_64 *symtab = NULL; |
468 | u_long offset = 0; |
469 | u_int cmd = 0; |
470 | u_int cmdsize = 0; |
471 | u_int i = 0; |
472 | u_int j = 0; |
473 | #if !KERNEL |
474 | boolean_t swap = FALSE; |
475 | #endif /* !KERNEL */ |
476 | |
477 | check(file); |
478 | check(size); |
479 | |
480 | /* Verify that the file is big enough for the mach header */ |
481 | require_action(size >= sizeof(*mach_hdr), finish, |
482 | rval=KERN_FAILURE; |
483 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
484 | offset = sizeof(*mach_hdr); |
485 | |
486 | #if !KERNEL |
487 | /* Swap the mach header if necessary */ |
488 | if (mach_hdr->magic == MH_CIGAM_64) { |
489 | swap = TRUE; |
490 | (void) swap_mach_header_64(mach_hdr, host_order); |
491 | } |
492 | #endif /* !KERNEL */ |
493 | |
494 | /* Validate the mach_header's magic number */ |
495 | require_action(mach_hdr->magic == MH_MAGIC_64, finish, |
496 | rval=KERN_FAILURE; |
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
"Invalid magic number: 0x%x.", mach_hdr->magic));
499 | |
500 | /* If in the running kernel, and asked to validate the kernel |
501 | * (which is the only file of type MH_EXECUTE we should ever see), |
502 | * then just assume it's ok or we wouldn't be running to begin with. |
503 | */ |
504 | #if KERNEL |
505 | if (mach_hdr->filetype == MH_EXECUTE) { |
506 | rval = KERN_SUCCESS; |
507 | goto finish; |
508 | } |
509 | #endif /* KERNEL */ |
510 | |
511 | /* Validate and potentially swap the load commands */ |
512 | for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) { |
513 | /* Get the load command and size */ |
514 | load_hdr = (struct load_command *) ((void *) (file + offset)); |
515 | cmd = load_hdr->cmd; |
516 | cmdsize = load_hdr->cmdsize; |
517 | |
518 | #if !KERNEL |
519 | if (swap) { |
520 | cmd = OSSwapInt32(load_hdr->cmd); |
521 | cmdsize = OSSwapInt32(load_hdr->cmdsize); |
522 | } |
523 | #endif /* !KERNEL */ |
524 | |
525 | /* Verify that the file is big enough to contain the load command */ |
526 | require_action(size >= offset + cmdsize, finish, |
527 | rval=KERN_FAILURE; |
528 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
529 | switch(cmd) { |
530 | case LC_SEGMENT_64: |
531 | /* Get and swap the segment header */ |
532 | seg_hdr = (struct segment_command_64 *) ((void *) load_hdr); |
533 | #if !KERNEL |
534 | if (swap) swap_segment_command_64(seg_hdr, host_order); |
535 | #endif /* !KERNEL */ |
536 | |
537 | /* Get and swap the section headers */ |
538 | sects = (struct section_64 *) &seg_hdr[1]; |
539 | #if !KERNEL |
540 | if (swap) swap_section_64(sects, seg_hdr->nsects, host_order); |
541 | #endif /* !KERNEL */ |
542 | |
543 | /* If the segment has no vm footprint, skip it */ |
544 | if (!seg_hdr->vmsize) continue; |
545 | |
546 | /* Verify that the file is big enough for the segment data. */ |
547 | require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish, |
548 | rval=KERN_FAILURE; |
549 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
550 | |
551 | for (j = 0; j < seg_hdr->nsects; ++j) { |
552 | |
/* Verify that, if the section is not to be zero-filled on
* demand, the file is big enough for the section's data.
*/
556 | require_action((sects[j].flags & S_ZEROFILL) || |
557 | (size >= sects[j].offset + sects[j].size), finish, |
558 | rval=KERN_FAILURE; |
559 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
560 | |
561 | /* Verify that the file is big enough for the section's |
562 | * relocation entries. |
563 | */ |
564 | require_action(size >= |
565 | sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish, |
566 | rval=KERN_FAILURE; |
567 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
568 | |
569 | /* Swap the relocation entries */ |
570 | relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff)); |
571 | #if !KERNEL |
572 | if (swap) { |
573 | swap_relocation_info(relocs, sects[j].nreloc, |
574 | host_order); |
575 | } |
576 | #endif /* !KERNEL */ |
577 | } |
578 | |
579 | break; |
580 | case LC_SYMTAB: |
581 | /* Get and swap the symtab header */ |
582 | symtab_hdr = (struct symtab_command *) load_hdr; |
583 | #if !KERNEL |
584 | if (swap) swap_symtab_command(symtab_hdr, host_order); |
585 | #endif /* !KERNEL */ |
586 | |
587 | /* Verify that the file is big enough for the symbol table */ |
588 | require_action(size >= |
589 | symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish, |
590 | rval=KERN_FAILURE; |
591 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
592 | |
593 | /* Verify that the file is big enough for the string table */ |
594 | require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish, |
595 | rval=KERN_FAILURE; |
596 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); |
597 | |
598 | #if !KERNEL |
599 | /* Swap the symbol table entries */ |
600 | symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff)); |
601 | if (swap) swap_nlist_64(symtab, symtab_hdr->nsyms, host_order); |
602 | #endif /* !KERNEL */ |
603 | |
604 | break; |
605 | default: |
606 | #if !KERNEL |
607 | /* Swap the load command */ |
608 | if (swap) swap_load_command(load_hdr, host_order); |
609 | #endif /* !KERNEL */ |
610 | break; |
611 | } |
612 | } |
613 | |
614 | rval = KERN_SUCCESS; |
615 | |
616 | finish: |
617 | return rval; |
618 | } |
619 | |
620 | #if !KERNEL |
621 | /******************************************************************************* |
622 | *******************************************************************************/ |
623 | void unswap_macho(u_char *file, enum NXByteOrder host_order, |
624 | enum NXByteOrder target_order) |
625 | { |
626 | struct mach_header *hdr = (struct mach_header *) ((void *) file); |
627 | |
628 | if (!hdr) return; |
629 | |
630 | if (hdr->magic == MH_MAGIC) { |
631 | unswap_macho_32(file, host_order, target_order); |
632 | } else if (hdr->magic == MH_MAGIC_64) { |
633 | unswap_macho_64(file, host_order, target_order); |
634 | } |
635 | } |
636 | |
637 | /******************************************************************************* |
638 | *******************************************************************************/ |
639 | static void |
640 | unswap_macho_32(u_char *file, enum NXByteOrder host_order, |
641 | enum NXByteOrder target_order) |
642 | { |
643 | struct mach_header *mach_hdr = (struct mach_header *) ((void *) file); |
644 | struct load_command *load_hdr = NULL; |
645 | struct segment_command *seg_hdr = NULL; |
646 | struct section *sects = NULL; |
647 | struct symtab_command *symtab_hdr = NULL; |
648 | struct nlist *symtab = NULL; |
649 | u_long offset = 0; |
650 | u_int cmd = 0; |
651 | u_int size = 0; |
652 | u_int i = 0; |
653 | |
654 | check(file); |
655 | |
656 | if (target_order == host_order) return; |
657 | |
658 | offset = sizeof(*mach_hdr); |
659 | for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) { |
660 | load_hdr = (struct load_command *) ((void *) (file + offset)); |
661 | cmd = load_hdr->cmd; |
662 | size = load_hdr->cmdsize; |
663 | |
664 | switch(cmd) { |
665 | case LC_SEGMENT: |
666 | seg_hdr = (struct segment_command *) load_hdr; |
667 | sects = (struct section *) &seg_hdr[1]; |
668 | |
669 | /* We don't need to unswap relocations because this function is |
670 | * called when linking is completed (so there are no relocations). |
671 | */ |
672 | |
673 | swap_section(sects, seg_hdr->nsects, target_order); |
674 | swap_segment_command(seg_hdr, target_order); |
675 | break; |
676 | case LC_SYMTAB: |
677 | symtab_hdr = (struct symtab_command *) load_hdr; |
678 | symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff)); |
679 | |
680 | swap_nlist(symtab, symtab_hdr->nsyms, target_order); |
681 | swap_symtab_command(symtab_hdr, target_order); |
682 | |
683 | break; |
684 | default: |
685 | swap_load_command(load_hdr, target_order); |
686 | break; |
687 | } |
688 | } |
689 | |
690 | (void) swap_mach_header(mach_hdr, target_order); |
691 | } |
692 | |
693 | /******************************************************************************* |
694 | *******************************************************************************/ |
695 | static void |
696 | unswap_macho_64(u_char *file, enum NXByteOrder host_order, |
697 | enum NXByteOrder target_order) |
698 | { |
699 | struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file); |
700 | struct load_command *load_hdr = NULL; |
701 | struct segment_command_64 *seg_hdr = NULL; |
702 | struct section_64 *sects = NULL; |
703 | struct symtab_command *symtab_hdr = NULL; |
704 | struct nlist_64 *symtab = NULL; |
705 | u_long offset = 0; |
706 | u_int cmd = 0; |
707 | u_int size = 0; |
708 | u_int i = 0; |
709 | |
710 | check(file); |
711 | |
712 | if (target_order == host_order) return; |
713 | |
714 | offset = sizeof(*mach_hdr); |
715 | for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) { |
716 | load_hdr = (struct load_command *) ((void *) (file + offset)); |
717 | cmd = load_hdr->cmd; |
718 | size = load_hdr->cmdsize; |
719 | |
720 | switch(cmd) { |
721 | case LC_SEGMENT_64: |
722 | seg_hdr = (struct segment_command_64 *) ((void *) load_hdr); |
723 | sects = (struct section_64 *) &seg_hdr[1]; |
724 | |
725 | /* We don't need to unswap relocations because this function is |
726 | * called when linking is completed (so there are no relocations). |
727 | */ |
728 | |
729 | swap_section_64(sects, seg_hdr->nsects, target_order); |
730 | swap_segment_command_64(seg_hdr, target_order); |
731 | break; |
732 | case LC_SYMTAB: |
733 | symtab_hdr = (struct symtab_command *) load_hdr; |
734 | symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff)); |
735 | |
736 | swap_nlist_64(symtab, symtab_hdr->nsyms, target_order); |
737 | swap_symtab_command(symtab_hdr, target_order); |
738 | |
739 | break; |
740 | default: |
741 | swap_load_command(load_hdr, target_order); |
742 | break; |
743 | } |
744 | } |
745 | |
746 | (void) swap_mach_header_64(mach_hdr, target_order); |
747 | } |
748 | #endif /* !KERNEL */ |
749 | |
750 | /******************************************************************************* |
751 | *******************************************************************************/ |
752 | kxld_addr_t |
753 | kxld_align_address(kxld_addr_t address, u_int align) |
754 | { |
kxld_addr_t alignment = ((kxld_addr_t) 1 << align);
756 | kxld_addr_t low_bits = 0; |
757 | |
758 | if (!align) return address; |
759 | |
760 | low_bits = (address) & (alignment - 1); |
761 | if (low_bits) { |
762 | address += (alignment - low_bits); |
763 | } |
764 | |
765 | return address; |
766 | } |
767 | |
768 | /******************************************************************************* |
769 | *******************************************************************************/ |
770 | boolean_t |
771 | kxld_is_32_bit(cpu_type_t cputype) |
772 | { |
773 | return !(cputype & CPU_ARCH_ABI64); |
774 | } |
775 | |
776 | /******************************************************************************* |
* Borrowed (and slightly modified) from the libc implementation, for use in
* the kernel until the kernel provides a supported strstr().
* Finds the first occurrence of find in s.
780 | *******************************************************************************/ |
781 | const char * |
782 | kxld_strstr(const char *s, const char *find) |
783 | { |
784 | #if KERNEL |
785 | char c, sc; |
786 | size_t len; |
787 | if (!s || !find) |
788 | return s; |
789 | if ((c = *find++) != 0) { |
790 | len = strlen(find); |
791 | do { |
792 | do { |
793 | if ((sc = *s++) == 0) |
794 | return (NULL); |
795 | } while (sc != c); |
796 | } while (strncmp(s, find, len) != 0); |
797 | s--; |
798 | } |
799 | return s; |
800 | #else |
801 | return strstr(s, find); |
802 | #endif /* KERNEL */ |
803 | } |
804 | |
805 | /******************************************************************************* |
806 | *******************************************************************************/ |
807 | void |
808 | kxld_print_memory_report(void) |
809 | { |
810 | #if DEBUG |
kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
"\tNumber of allocations: %8lu\n"
"\tNumber of frees: %8lu\n"
"\tAverage allocation size: %8lu\n"
"\tTotal bytes allocated: %8lu\n"
"\tTotal bytes freed: %8lu\n"
"\tTotal bytes leaked: %8lu",
num_allocations, num_frees,
num_allocations ? bytes_allocated / num_allocations : 0,
819 | bytes_allocated, bytes_freed, bytes_allocated - bytes_freed); |
820 | #endif |
821 | } |
822 | |
823 | /********************************************************************* |
824 | *********************************************************************/ |
825 | #if !KERNEL |
826 | boolean_t kxld_set_cross_link_page_size(kxld_size_t target_page_size) |
827 | { |
// verify the target page size is a nonzero power of two
829 | if ((target_page_size != 0) && |
830 | ((target_page_size & (target_page_size - 1)) == 0)) { |
831 | |
832 | s_cross_link_enabled = TRUE; |
833 | s_cross_link_page_size = target_page_size; |
834 | |
835 | return TRUE; |
836 | } else { |
837 | return FALSE; |
838 | } |
839 | } |
840 | #endif /* !KERNEL */ |
841 | |
842 | /********************************************************************* |
843 | *********************************************************************/ |
844 | kxld_size_t kxld_get_effective_page_size(void) |
845 | { |
846 | #if KERNEL |
847 | return PAGE_SIZE; |
848 | #else |
849 | if (s_cross_link_enabled) { |
850 | return s_cross_link_page_size; |
851 | } else { |
852 | return PAGE_SIZE; |
853 | } |
854 | #endif /* KERNEL */ |
855 | } |
856 | |
857 | /********************************************************************* |
858 | *********************************************************************/ |
859 | kxld_addr_t kxld_round_page_cross_safe(kxld_addr_t offset) |
860 | { |
861 | #if KERNEL |
862 | return round_page(offset); |
863 | #else |
864 | // assume s_cross_link_page_size is power of 2 |
865 | if (s_cross_link_enabled) { |
866 | return (offset + (s_cross_link_page_size - 1)) & |
867 | (~(s_cross_link_page_size - 1)); |
868 | } else { |
869 | return round_page(offset); |
870 | } |
871 | #endif /* KERNEL */ |
872 | } |
873 | |
874 | #if SPLIT_KEXTS_DEBUG |
875 | |
876 | void kxld_show_split_info(splitKextLinkInfo *info) |
877 | { |
kxld_log(kKxldLogLinking, kKxldLogErr,
"splitKextLinkInfo: \n"
"kextExecutable %p to %p kextSize %lu \n"
"linkedKext %p to %p linkedKextSize %lu \n"
"vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
"vmaddr_DATA %p vmaddr_DATA_CONST %p "
"vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
885 | (void *) info->kextExecutable, |
886 | (void *) (info->kextExecutable + info->kextSize), |
887 | info->kextSize, |
888 | (void*) info->linkedKext, |
889 | (void*) (info->linkedKext + info->linkedKextSize), |
890 | info->linkedKextSize, |
891 | (void *) info->vmaddr_TEXT, |
892 | (void *) info->vmaddr_TEXT_EXEC, |
893 | (void *) info->vmaddr_DATA, |
894 | (void *) info->vmaddr_DATA_CONST, |
895 | (void *) info->vmaddr_LLVM_COV, |
896 | (void *) info->vmaddr_LINKEDIT); |
897 | } |
898 | |
899 | boolean_t isTargetKextName(const char * the_name) |
900 | { |
901 | if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) { |
902 | return(TRUE); |
903 | } |
904 | return(FALSE); |
905 | } |
906 | #endif |
907 | |
908 | |