/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 * File: bsd/kern/kern_core.c
 *
 * This file contains machine independent code for performing core dumps.
 *
 */
#if CONFIG_COREDUMP

#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <sys/content_protection.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>   /* last */
#include <vm/vm_map.h>      /* current_map() */
#include <mach/mach_vm.h>   /* mach_vm_region_recurse() */
#include <mach/task.h>      /* task_suspend() */
#include <kern/task.h>      /* get_task_numacts() */

#include <security/audit/audit.h>

#if CONFIG_CSR
#include <sys/codesign.h>
#include <sys/csr.h>
#endif

typedef struct {
    int flavor;                     /* the number for this flavor */
    mach_msg_type_number_t count;   /* count of ints in this flavor */
} mythread_state_flavor_t;

#if defined (__i386__) || defined (__x86_64__)
mythread_state_flavor_t thread_flavor_array[] = {
    {x86_THREAD_STATE, x86_THREAD_STATE_COUNT},
    {x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT},
    {x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT},
};
int mynum_flavors = 3;
#elif defined (__arm__)
mythread_state_flavor_t thread_flavor_array[] = {
    {ARM_THREAD_STATE, ARM_THREAD_STATE_COUNT},
    {ARM_VFP_STATE, ARM_VFP_STATE_COUNT},
    {ARM_EXCEPTION_STATE, ARM_EXCEPTION_STATE_COUNT}
};
int mynum_flavors = 3;

#elif defined (__arm64__)
mythread_state_flavor_t thread_flavor_array[] = {
    {ARM_THREAD_STATE64, ARM_THREAD_STATE64_COUNT},
    /* ARM64_TODO: VFP */
    {ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT}
};
int mynum_flavors = 2;
#else
#error architecture not supported
#endif

typedef struct {
    vm_offset_t header;                 /* base of the in-kernel header buffer */
    int hoffset;                        /* current write offset into that buffer */
    mythread_state_flavor_t *flavors;   /* thread state flavors to collect */
    int tstate_size;                    /* size of one thread's state records */
    int flavor_count;                   /* number of entries in flavors[] */
} tir_t;

extern int freespace_mb(vnode_t vp);

/* XXX not in a Mach header anywhere */
kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);

#ifdef SECURE_KERNEL
__XNU_PRIVATE_EXTERN int do_coredump = 0;   /* default: don't dump cores */
#else
__XNU_PRIVATE_EXTERN int do_coredump = 1;   /* default: dump cores */
#endif
__XNU_PRIVATE_EXTERN int sugid_coredump = 0;    /* default: don't dump SUID/SGID binaries */
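
/*
 * Note: these two globals are the backing storage for sysctl knobs declared
 * elsewhere in the BSD sysctl tables (conventionally kern.coredump and
 * kern.sugid_coredump; the exact names are an assumption here and may vary
 * by release), so the policy can be changed at run time, e.g.:
 *
 *     sysctl -w kern.coredump=1 kern.sugid_coredump=0
 */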


/*
 * cpu_type returns only the most generic indication of the current CPU;
 * in a core file we want to record the kind of process, so derive the
 * CPU type and subtype from the process itself.
 */

static cpu_type_t
process_cpu_type(proc_t core_proc)
{
    cpu_type_t what_we_think;
#if defined (__i386__) || defined (__x86_64__)
    if (IS_64BIT_PROCESS(core_proc)) {
        what_we_think = CPU_TYPE_X86_64;
    } else {
        what_we_think = CPU_TYPE_I386;
    }
#elif defined (__arm__) || defined(__arm64__)
    if (IS_64BIT_PROCESS(core_proc)) {
        what_we_think = CPU_TYPE_ARM64;
    } else {
        what_we_think = CPU_TYPE_ARM;
    }
#endif

    return what_we_think;
}

static cpu_type_t
process_cpu_subtype(proc_t core_proc)
{
    cpu_type_t what_we_think;
#if defined (__i386__) || defined (__x86_64__)
    if (IS_64BIT_PROCESS(core_proc)) {
        what_we_think = CPU_SUBTYPE_X86_64_ALL;
    } else {
        what_we_think = CPU_SUBTYPE_I386_ALL;
    }
#elif defined (__arm__) || defined(__arm64__)
    if (IS_64BIT_PROCESS(core_proc)) {
        what_we_think = CPU_SUBTYPE_ARM64_ALL;
    } else {
        what_we_think = CPU_SUBTYPE_ARM_ALL;
    }
#endif
    return what_we_think;
}

static void
collectth_state(thread_t th_act, void *tirp)
{
    vm_offset_t header;
    int hoffset, i;
    mythread_state_flavor_t *flavors;
    struct thread_command *tc;
    tir_t *t = (tir_t *)tirp;

    /*
     * Fill in thread command structure.
     */
    header = t->header;
    hoffset = t->hoffset;
    flavors = t->flavors;

    tc = (struct thread_command *)(header + hoffset);
    tc->cmd = LC_THREAD;
    tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
    hoffset += sizeof(struct thread_command);
    /*
     * Follow with a struct thread_state_flavor and
     * the appropriate thread state struct for each
     * thread state flavor.
     */
    for (i = 0; i < t->flavor_count; i++) {
        *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
        hoffset += sizeof(mythread_state_flavor_t);
        thread_getstatus(th_act, flavors[i].flavor,
            (thread_state_t)(header + hoffset), &flavors[i].count);
        hoffset += flavors[i].count * sizeof(int);
    }

    t->hoffset = hoffset;
}
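
/*
 * For reference, the record collectth_state() emits for each thread has the
 * following shape (sizes follow directly from thread_flavor_array above; this
 * is a descriptive sketch of the code above, not additional on-disk state):
 *
 *     struct thread_command         cmd = LC_THREAD,
 *                                   cmdsize = sizeof(thread_command) + tstate_size
 *     mythread_state_flavor_t       { flavors[0].flavor, flavors[0].count }
 *     int state0[flavors[0].count]  raw state returned by thread_getstatus()
 *     mythread_state_flavor_t       { flavors[1].flavor, flavors[1].count }
 *     int state1[flavors[1].count]  ... and so on for each flavor
 */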

/*
 * coredump
 *
 * Description: Create a core image on the file "core" for the process
 *              indicated.
 *
 * Parameters:  core_proc       Process to dump core [*]
 *              reserve_mb      If non-zero, leave the filesystem with at
 *                              least this much free space (in MB).
 *              coredump_flags  Extra options (ignore rlimit, run fsync)
 *
 * Returns:     0               Success
 *              EFAULT          Dumping is disallowed, or the core file is
 *                              unsuitable
 *              ENOSPC          Not enough free space on the filesystem
 *              ENOMEM          Could not allocate the in-kernel header buffer
 *
 * IMPORTANT:   This function can only be called on the current process, due
 *              to assumptions below; see variable declaration section for
 *              details.
 */
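/*
 * A minimal sketch of the expected call pattern, for orientation only (the
 * real caller lives in the signal-delivery path, e.g. sigexit() in
 * bsd/kern/kern_sig.c; the exact call site and flag handling are assumptions
 * and vary by release):
 *
 *     if (coredump(p, 0, 0) == 0)
 *         signum |= WCOREFLAG;    -- report "core dumped" in the wait status
 *
 * Callers must pass the current process only; see the panic check at the top
 * of the function.
 */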
#define MAX_TSTATE_FLAVORS 10
int
coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags)
{
    /* Begin assumptions that limit us to only the current process */
    vfs_context_t ctx = vfs_context_current();
    vm_map_t map = current_map();
    task_t task = current_task();
    /* End assumptions */
    kauth_cred_t cred = vfs_context_ucred(ctx);
    int error = 0;
    struct vnode_attr va;
    int thread_count, segment_count;
    int command_size, header_size, tstate_size;
    int hoffset;
    off_t foffset;
    mach_vm_offset_t vmoffset;
    vm_offset_t header;
    mach_vm_size_t vmsize;
    vm_prot_t prot;
    vm_prot_t maxprot;
    vm_inherit_t inherit;
    int error1 = 0;
    char stack_name[MAXCOMLEN + 6];
    char *alloced_name = NULL;
    char *name;
    mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
    vm_size_t mapsize;
    int i;
    uint32_t nesting_depth = 0;
    kern_return_t kret;
    struct vm_region_submap_info_64 vbr;
    mach_msg_type_number_t vbrcount = 0;
    tir_t tir1;
    struct vnode *vp;
    struct mach_header *mh = NULL;      /* protected by is_64 */
    struct mach_header_64 *mh64 = NULL; /* protected by is_64 */
    int is_64 = 0;
    size_t mach_header_sz = sizeof(struct mach_header);
    size_t segment_command_sz = sizeof(struct segment_command);

    if (current_proc() != core_proc) {
        panic("coredump() called against proc that is not current_proc: %p", core_proc);
    }

    if (do_coredump == 0 ||             /* Not dumping at all */
        ((sugid_coredump == 0) &&       /* Not dumping SUID/SGID binaries */
        ((kauth_cred_getsvuid(cred) != kauth_cred_getruid(cred)) ||
        (kauth_cred_getsvgid(cred) != kauth_cred_getrgid(cred))))) {
#if CONFIG_AUDIT
        audit_proc_coredump(core_proc, NULL, EFAULT);
#endif
        return (EFAULT);
    }

#if CONFIG_CSR
    /* If the process is restricted, CSR isn't configured to allow
     * restricted processes to be debugged, and CSR isn't configured in
     * AppleInternal mode, then don't dump core. */
    if (cs_restricted(core_proc) &&
        csr_check(CSR_ALLOW_TASK_FOR_PID) &&
        csr_check(CSR_ALLOW_APPLE_INTERNAL)) {
#if CONFIG_AUDIT
        audit_proc_coredump(core_proc, NULL, EFAULT);
#endif
        return (EFAULT);
    }
#endif

    if (IS_64BIT_PROCESS(core_proc)) {
        is_64 = 1;
        mach_header_sz = sizeof(struct mach_header_64);
        segment_command_sz = sizeof(struct segment_command_64);
    }

    mapsize = get_vmmap_size(map);

    if (((coredump_flags & COREDUMP_IGNORE_ULIMIT) == 0) &&
        (mapsize >= core_proc->p_rlimit[RLIMIT_CORE].rlim_cur))
        return (EFAULT);

    (void) task_suspend_internal(task);

    MALLOC(alloced_name, char *, MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);

    /* create name according to sysctl'able format string */
    /* if name creation fails, fall back to historical behaviour... */
    if (alloced_name == NULL ||
        proc_core_name(core_proc->p_comm, kauth_cred_getuid(cred),
            core_proc->p_pid, alloced_name, MAXPATHLEN)) {
        snprintf(stack_name, sizeof(stack_name),
            "/cores/core.%d", core_proc->p_pid);
        name = stack_name;
    } else
        name = alloced_name;

    if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx)))
        goto out2;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_nlink);
    /* Don't dump to non-regular files or files with links. */
    if (vp->v_type != VREG ||
        vnode_getattr(vp, &va, ctx) || va.va_nlink != 1) {
        error = EFAULT;
        goto out;
    }

    VATTR_INIT(&va);    /* better to do it here than waste more stack in vnode_setsize */
    VATTR_SET(&va, va_data_size, 0);
    if (core_proc == initproc) {
        VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D);
    }
    vnode_setattr(vp, &va, ctx);
    core_proc->p_acflag |= ACORE;

    if ((reserve_mb > 0) &&
        ((freespace_mb(vp) - (mapsize >> 20)) < reserve_mb)) {
        error = ENOSPC;
        goto out;
    }
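
    /*
     * Worked example of the reservation check above: with reserve_mb == 4096
     * and a 1 GiB task map (mapsize >> 20 == 1024), freespace_mb(vp) must be
     * at least 4096 + 1024 MB or the dump is refused with ENOSPC; i.e. the
     * space left on the filesystem after writing the core must stay at or
     * above reserve_mb.
     */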

    /*
     * If the task is modified while dumping the file
     * (e.g., changes in threads or VM), the resulting
     * file will not necessarily be correct.
     */

    thread_count = get_task_numacts(task);
    segment_count = get_vmmap_entries(map); /* XXX */
    tir1.flavor_count = sizeof(thread_flavor_array) / sizeof(mythread_state_flavor_t);
    bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
    tstate_size = 0;
    for (i = 0; i < tir1.flavor_count; i++)
        tstate_size += sizeof(mythread_state_flavor_t) +
            (flavors[i].count * sizeof(int));
    command_size = segment_count * segment_command_sz +
        thread_count * sizeof(struct thread_command) +
        tstate_size * thread_count;

    header_size = command_size + mach_header_sz;

    if (kmem_alloc(kernel_map, &header, (vm_size_t)header_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
        error = ENOMEM;
        goto out;
    }

    /*
     * Set up Mach-O header.
     */
    if (is_64) {
        mh64 = (struct mach_header_64 *)header;
        mh64->magic = MH_MAGIC_64;
        mh64->cputype = process_cpu_type(core_proc);
        mh64->cpusubtype = process_cpu_subtype(core_proc);
        mh64->filetype = MH_CORE;
        mh64->ncmds = segment_count + thread_count;
        mh64->sizeofcmds = command_size;
        mh64->reserved = 0;     /* 8 byte alignment */
    } else {
        mh = (struct mach_header *)header;
        mh->magic = MH_MAGIC;
        mh->cputype = process_cpu_type(core_proc);
        mh->cpusubtype = process_cpu_subtype(core_proc);
        mh->filetype = MH_CORE;
        mh->ncmds = segment_count + thread_count;
        mh->sizeofcmds = command_size;
    }
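
    /*
     * Resulting file layout, as implied by the offsets set up below (a
     * descriptive sketch of what this function writes, not a format change):
     *
     *     file offset 0               mach_header{,_64}, followed by
     *                                 LC_SEGMENT{,_64} * segment_count and
     *                                 LC_THREAD * thread_count
     *     round_page(header_size)     data of the first dumped segment
     *     ...                         remaining segments back to back, each at
     *                                 the fileoff recorded in its command
     */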

    hoffset = mach_header_sz;           /* offset into header */
    foffset = round_page(header_size);  /* offset into file */
    vmoffset = MACH_VM_MIN_ADDRESS;     /* offset into VM */

    /*
     * We used to stop at the first error here; now we try to get
     * as much as we can.
     */
    while (segment_count > 0) {
        struct segment_command *sc;
        struct segment_command_64 *sc64;

        /*
         * Get region information for next region.
         */

        while (1) {
            vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
            if ((kret = mach_vm_region_recurse(map,
                &vmoffset, &vmsize, &nesting_depth,
                (vm_region_recurse_info_t)&vbr,
                &vbrcount)) != KERN_SUCCESS) {
                break;
            }
            /*
             * If we get a valid mapping back, but we're dumping
             * a 32 bit process, and it's over the allowable
             * address space of a 32 bit process, it's the same
             * as if mach_vm_region_recurse() failed.
             */
            if (!(is_64) &&
                (vmoffset + vmsize > VM_MAX_ADDRESS)) {
                kret = KERN_INVALID_ADDRESS;
                break;
            }
            if (vbr.is_submap) {
                nesting_depth++;
                continue;
            } else {
                break;
            }
        }
        if (kret != KERN_SUCCESS)
            break;

        prot = vbr.protection;
        maxprot = vbr.max_protection;
        inherit = vbr.inheritance;
        /*
         * Fill in segment command structure.
         */
        if (is_64) {
            sc64 = (struct segment_command_64 *)(header + hoffset);
            sc64->cmd = LC_SEGMENT_64;
            sc64->cmdsize = sizeof(struct segment_command_64);
            /* segment name is zeroed by kmem_alloc */
            sc64->segname[0] = 0;
            sc64->vmaddr = vmoffset;
            sc64->vmsize = vmsize;
            sc64->fileoff = foffset;
            sc64->filesize = vmsize;
            sc64->maxprot = maxprot;
            sc64->initprot = prot;
            sc64->nsects = 0;
            sc64->flags = 0;
        } else {
            sc = (struct segment_command *)(header + hoffset);
            sc->cmd = LC_SEGMENT;
            sc->cmdsize = sizeof(struct segment_command);
            /* segment name is zeroed by kmem_alloc */
            sc->segname[0] = 0;
            sc->vmaddr = CAST_DOWN_EXPLICIT(vm_offset_t, vmoffset);
            sc->vmsize = CAST_DOWN_EXPLICIT(vm_size_t, vmsize);
            sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t, foffset);    /* will never truncate */
            sc->filesize = CAST_DOWN_EXPLICIT(uint32_t, vmsize);    /* will never truncate */
            sc->maxprot = maxprot;
            sc->initprot = prot;
            sc->nsects = 0;
            sc->flags = 0;
        }

        /*
         * Write segment out. Try as hard as possible to
         * get read access to the data.
         */
        if ((prot & VM_PROT_READ) == 0) {
            mach_vm_protect(map, vmoffset, vmsize, FALSE,
                prot | VM_PROT_READ);
        }
        /*
         * Only actually perform write if we can read.
         * Note: if we can't read, then we end up with
         * a hole in the file.
         */
        if ((maxprot & VM_PROT_READ) == VM_PROT_READ
            && vbr.user_tag != VM_MEMORY_IOKIT
            && coredumpok(map, vmoffset)) {
            error = vn_rdwr_64(UIO_WRITE, vp, vmoffset, vmsize, foffset,
                (IS_64BIT_PROCESS(core_proc) ? UIO_USERSPACE64 : UIO_USERSPACE32),
                IO_NOCACHE | IO_NODELOCKED | IO_UNIT, cred, (int64_t *) 0, core_proc);
        }

        hoffset += segment_command_sz;
        foffset += vmsize;
        vmoffset += vmsize;
        segment_count--;
    }

    /*
     * If there are remaining segments which have not been written
     * out because of a break in the loop above, then they were not
     * counted because they exceed the real address space of the
     * executable type: remove them from the header's count.  This
     * is OK, since we are allowed to have a sparse area following
     * the segments.
     */
    if (is_64) {
        mh64->ncmds -= segment_count;
        mh64->sizeofcmds -= segment_count * segment_command_sz;
    } else {
        mh->ncmds -= segment_count;
        mh->sizeofcmds -= segment_count * segment_command_sz;
    }

    tir1.header = header;
    tir1.hoffset = hoffset;
    tir1.flavors = flavors;
    tir1.tstate_size = tstate_size;
    task_act_iterate_wth_args(task, collectth_state, &tir1);

    /*
     * Write out the Mach header at the beginning of the
     * file.  OK to use a 32 bit write for this.
     */
    error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
        UIO_SYSSPACE, IO_NOCACHE | IO_NODELOCKED | IO_UNIT, cred, (int *) 0, core_proc);
    kmem_free(kernel_map, header, header_size);

    if ((coredump_flags & COREDUMP_FULLFSYNC) && error == 0)
        error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
out:
    error1 = vnode_close(vp, FWRITE, ctx);
out2:
#if CONFIG_AUDIT
    audit_proc_coredump(core_proc, name, error);
#endif
    if (alloced_name != NULL)
        FREE(alloced_name, M_TEMP);
    if (error == 0)
        error = error1;

    return (error);
}
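
/*
 * The file produced above is an ordinary Mach-O MH_CORE image, so it can be
 * examined from user space with standard tools; illustrative invocations
 * (the path depends on the generated core name):
 *
 *     otool -l /cores/core.1234   -- list the LC_SEGMENT/LC_THREAD commands
 *     lldb -c /cores/core.1234    -- load the core for post-mortem debugging
 */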

#else /* CONFIG_COREDUMP */

/* When core dumps aren't needed, no need to compile this file at all */

#error assertion failed: this section is not compiled

#endif /* CONFIG_COREDUMP */