/*
 * pmap_traverse_present_mappings() callback used while sending a kernel
 * coredump: for each present mapping [start, end) it transmits one
 * LC_SEGMENT_KERNEL load command into the Mach-O header area, then
 * transmits the region's contents at the current file offset.
 *
 * context points at a struct kern_dump_send_context carrying the running
 * header offset (hoffset), file offset (foffset) and the reserved header
 * size; hoffset/foffset are advanced on success.
 *
 * Returns KERN_SUCCESS, KERN_NO_SPACE when the reserved header area is
 * exhausted, or the negative error returned by the KDP transport.
 */
int
kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
				      vm_map_offset_t end,
				      void *context)
{
	struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
	int ret = KERN_SUCCESS;
	kernel_segment_command_t sc;
	vm_size_t size = (vm_size_t)(end - start);

	/* Refuse to overflow the area reserved for load commands. */
	if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
		return (KERN_NO_SPACE);
	}

	/*
	 * Fill in segment command structure.
	 */
	sc.cmd = LC_SEGMENT_KERNEL;
	sc.cmdsize = sizeof(kernel_segment_command_t);
	/*
	 * Zero the entire 16-byte segment name: the previous code cleared
	 * only segname[0], leaking uninitialized stack bytes into the
	 * corefile through segname[1..15].
	 */
	{
		unsigned int j;
		for (j = 0; j < sizeof(sc.segname); j++) {
			sc.segname[j] = 0;
		}
	}
	sc.vmaddr = (vm_address_t)start;
	sc.vmsize = size;
	sc.fileoff = (vm_address_t)kdc->foffset;
	sc.filesize = size;
	sc.maxprot = VM_PROT_READ;
	sc.initprot = VM_PROT_READ;
	sc.nsects = 0;
	sc.flags = 0;

	/* Seek the server to the current header offset, then append the
	 * segment command. */
	if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(kdc->hoffset) , &kdc->hoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(kernel_segment_command_t) , (caddr_t) &sc)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	kdc->hoffset += sizeof(kernel_segment_command_t);

	/* Seek to the current file offset and stream the region's bytes. */
	if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(kdc->foffset) , &kdc->foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size, (caddr_t)(uintptr_t)start)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	kdc->foffset += size;

out:
	return (ret);
}
/*
 * Last-resort handler invoked from the kernel trap path when an
 * unrecoverable fault occurs in the middle of a crashdump (which should
 * not happen, since mappings are validated beforehand). This should be
 * reworked to attempt some form of recovery.
 */
int
kdp_dump_trap(int type, __unused x86_saved_state64_t *saved_state)
{
	printf ("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type);

	/* Tell the remote server the transfer is over, then abandon it. */
	kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
	abort_panic_transfer();

	/* Clear every panic-dump mode flag in a single operation. */
	kdp_flag &= ~(KDP_PANIC_DUMP_ENABLED | PANIC_CORE_ON_NMI | PANIC_LOG_DUMP);

	kdp_reset();

	/* Re-enter the debugger with the state saved at panic time. */
	kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
	return (0);
}
int kern_dump(void) { vm_map_t map; unsigned int thread_count, segment_count; unsigned int command_size = 0, header_size = 0, tstate_size = 0; uint64_t hoffset = 0, foffset = 0, nfoffset = 0; unsigned int max_header_size = 0; vm_offset_t header, txstart; vm_map_offset_t vmoffset; struct mach_header_64 *mh64; struct segment_command_64 *sc64; mach_vm_size_t size = 0; vm_prot_t prot = 0; vm_prot_t maxprot = 0; mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS]; vm_size_t nflavors; vm_size_t i; uint32_t nesting_depth = 0; kern_return_t kret = 0; struct vm_region_submap_info_64 vbr; mach_msg_type_number_t vbrcount = 0; tir_t tir1; int error = 0; int panic_error = 0; map = kernel_map; thread_count = 1; segment_count = get_vmmap_entries(map); printf("Kernel map has %d entries\n", segment_count); nflavors = kdp_mynum_flavors; bcopy((char *)thread_flavor_array,(char *) flavors,sizeof(thread_flavor_array)); for (i = 0; i < nflavors; i++) tstate_size += (uint32_t)(sizeof(mythread_state_flavor_t) + (flavors[i].count * sizeof(int))); command_size = (uint32_t)((segment_count) * sizeof(struct segment_command_64) + thread_count * sizeof(struct thread_command) + tstate_size * thread_count); header_size = command_size + (uint32_t)sizeof(struct mach_header_64); header = (vm_offset_t) command_buffer; /* * Set up Mach-O header for currently executing 32 bit kernel. 
*/ printf ("Generated Mach-O header size was %d\n", header_size); mh64 = (struct mach_header_64 *) header; mh64->magic = MH_MAGIC_64; mh64->cputype = cpu_type(); mh64->cpusubtype = cpu_subtype(); mh64->filetype = MH_CORE; mh64->ncmds = segment_count + thread_count; mh64->sizeofcmds = command_size; mh64->flags = 0; mh64->reserved = 0; hoffset = sizeof(struct mach_header_64); /* offset into header */ foffset = (uint32_t)round_page(header_size); /* offset into file */ /* Padding */ if ((foffset - header_size) < (4*sizeof(struct segment_command_64))) { foffset += (uint32_t)((4*sizeof(struct segment_command_64)) - (foffset-header_size)); } max_header_size = (unsigned int)foffset; vmoffset = vm_map_min(map); /* Transmit the Mach-O MH_CORE header, and seek forward past the * area reserved for the segment and thread commands * to begin data transmission */ if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset) , &nfoffset)) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); error = panic_error; goto out; } if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header_64), (caddr_t) mh64) < 0)) { printf ("kdp_send_crashdump_data failed with error %d\n", panic_error); error = panic_error; goto out; } if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset) < 0)) { printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); error = panic_error; goto out; } printf ("Transmitting kernel state, please wait: "); while ((segment_count > 0) || (kret == KERN_SUCCESS)){ while (1) { /* * Get region information for next region. 
*/ vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64; if((kret = mach_vm_region_recurse(map, &vmoffset, &size, &nesting_depth, (vm_region_recurse_info_t)&vbr, &vbrcount)) != KERN_SUCCESS) { break; } if(vbr.is_submap) { nesting_depth++; continue; } else { break; } } if(kret != KERN_SUCCESS) break; prot = vbr.protection; maxprot = vbr.max_protection; /* * Fill in segment command structure. */ if (hoffset > max_header_size) break; sc64 = (struct segment_command_64 *) (header); sc64->cmd = LC_SEGMENT_64; sc64->cmdsize = sizeof(struct segment_command_64); sc64->segname[0] = 0; sc64->vmaddr = vmoffset; sc64->vmsize = size; sc64->fileoff = foffset; sc64->filesize = size; sc64->maxprot = maxprot; sc64->initprot = prot; sc64->nsects = 0; if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); error = panic_error; goto out; } if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command_64) , (caddr_t) sc64)) < 0) { printf ("kdp_send_crashdump_data failed with error %d\n", panic_error); error = panic_error; goto out; } /* Do not transmit memory tagged VM_MEMORY_IOKIT - instead, * seek past that region on the server - this creates a * hole in the file. 
*/ if ((vbr.user_tag != VM_MEMORY_IOKIT)) { if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); error = panic_error; goto out; } txstart = vmoffset; if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size, (caddr_t) txstart)) < 0) { printf ("kdp_send_crashdump_data failed with error %d\n", panic_error); error = panic_error; goto out; } } hoffset += (unsigned int)sizeof(struct segment_command_64); foffset += (unsigned int)size; vmoffset += size; segment_count--; } tir1.header = header; tir1.hoffset = 0; tir1.flavors = flavors; tir1.tstate_size = tstate_size; /* Now send out the LC_THREAD load command, with the thread information * for the current activation. * Note that the corefile can contain LC_SEGMENT commands with file * offsets that point past the edge of the corefile, in the event that * the last N VM regions were all I/O mapped or otherwise * non-transferable memory, not followed by a normal VM region; * i.e. there will be no hole that reaches to the end of the core file. */ kern_collectth_state (current_thread(), &tir1); if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); error = panic_error; goto out; } if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset , (caddr_t) header)) < 0) { printf ("kdp_send_crashdump_data failed with error %d\n", panic_error); error = panic_error; goto out; } /* last packet */ if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); error = panic_error; goto out; } out: return (error); }
int kern_dump(void) { int ret; struct kern_dump_preflight_context kdc_preflight; struct kern_dump_send_context kdc_send; uint32_t segment_count; size_t command_size = 0, header_size = 0, tstate_size = 0; uint64_t hoffset = 0, foffset = 0; kernel_mach_header_t mh; kdc_preflight.region_count = 0; kdc_preflight.dumpable_bytes = 0; ret = pmap_traverse_present_mappings(kernel_pmap, VM_MIN_KERNEL_AND_KEXT_ADDRESS, VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_preflight_callback, &kdc_preflight); if (ret) { printf("pmap traversal failed: %d\n", ret); return (ret); } printf("Kernel dump region count: %u\n", kdc_preflight.region_count); printf("Kernel dump byte count: %llu\n", kdc_preflight.dumpable_bytes); segment_count = kdc_preflight.region_count; tstate_size = sizeof(struct thread_command) + kern_collectth_state_size(); command_size = segment_count * sizeof(kernel_segment_command_t) + tstate_size; header_size = command_size + sizeof(kernel_mach_header_t); /* * Set up Mach-O header for currently executing kernel. 
*/ printf ("Generated Mach-O header size was %lu\n", header_size); mh.magic = _mh_execute_header.magic; mh.cputype = _mh_execute_header.cputype;; mh.cpusubtype = _mh_execute_header.cpusubtype; mh.filetype = MH_CORE; mh.ncmds = segment_count + 1 /* thread */; mh.sizeofcmds = (uint32_t)command_size; mh.flags = 0; #if defined(__LP64__) mh.reserved = 0; #endif hoffset = 0; /* offset into header */ foffset = (uint32_t)round_page(header_size); /* offset into file */ /* Transmit the Mach-O MH_CORE header, and seek forward past the * area reserved for the segment and thread commands * to begin data transmission */ if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", ret); goto out; } if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(kernel_mach_header_t), (caddr_t) &mh) < 0)) { printf ("kdp_send_crashdump_data failed with error %d\n", ret); goto out; } hoffset += sizeof(kernel_mach_header_t); if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset) < 0)) { printf ("kdp_send_crashdump_pkt failed with error %d\n", ret); goto out; } printf ("Transmitting kernel state, please wait: "); kdc_send.hoffset = hoffset; kdc_send.foffset = foffset; kdc_send.header_size = header_size; ret = pmap_traverse_present_mappings(kernel_pmap, VM_MIN_KERNEL_AND_KEXT_ADDRESS, VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_callback, &kdc_send); if (ret) { kprintf("pmap traversal failed: %d\n", ret); return (ret); } /* Reload mutated offsets */ hoffset = kdc_send.hoffset; foffset = kdc_send.foffset; /* * Now send out the LC_THREAD load command, with the thread information * for the current activation. 
*/ if (tstate_size > 0) { char tstate[tstate_size]; kern_collectth_state (current_thread(), tstate, tstate_size); if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", ret); goto out; } if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, tstate_size, tstate)) < 0) { printf ("kdp_send_crashdump_data failed with error %d\n", ret); goto out; } hoffset += tstate_size; } /* last packet */ if ((ret = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) { printf ("kdp_send_crashdump_pkt failed with error %d\n", ret); goto out; } out: return (ret); }