/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm__ */
}
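The hw.packages value above is pure arithmetic: the number of logical CPUs reported as sharing at level 0 is rounded up to a multiple of the hardware threads per package, then divided by that count. A minimal worked sketch of the same expression with made-up numbers (16 logical CPUs, 8 threads per package, hence two packages); roundup() is written out as in sys/param.h:

#include <stdio.h>

/* roundup() as defined in <sys/param.h>. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	/* Hypothetical machine: ml_cpu_cache_sharing(0) reports 16 logical
	 * CPUs, cpuid_info()->thread_count reports 8 threads per package. */
	unsigned int sharing      = 16;
	unsigned int thread_count = 8;

	unsigned int packages = roundup(sharing, thread_count) / thread_count;

	printf("hw.packages = %u\n", packages);	/* prints 2 */
	return 0;
}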
void
kdp_machine_hostinfo(
	kdp_hostinfo_t		*hostinfo
)
{
	int			i;

	hostinfo->cpus_mask = 0;

	for (i = 0; i < machine_info.max_cpus; i++) {
		if (cpu_data_ptr[i] == NULL)
			continue;

		hostinfo->cpus_mask |= (1 << i);
	}

	hostinfo->cpu_type = cpu_type();
	hostinfo->cpu_subtype = cpu_subtype();
}
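hostinfo->cpus_mask is a plain bitmap: bit i is set when cpu_data_ptr[i] is non-NULL, i.e. when logical CPU i has been brought up. A small stand-alone sketch of decoding such a mask on the debugger side (the mask value is a made-up sample, not output from a real target):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Sample mask: bits 0-3 and 5 set, so CPU slot 4 is empty. */
	uint32_t cpus_mask = 0x2f;

	for (int i = 0; i < 32; i++) {
		if (cpus_mask & (1u << i))
			printf("cpu %d present\n", i);
	}
	return 0;
}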
int
kern_dump(void)
{
	vm_map_t	map;
	unsigned int	thread_count, segment_count;
	unsigned int	command_size = 0, header_size = 0, tstate_size = 0;
	uint64_t	hoffset = 0, foffset = 0, nfoffset = 0;
	unsigned int	max_header_size = 0;
	vm_offset_t	header, txstart;
	vm_map_offset_t	vmoffset;
	struct mach_header_64		*mh64;
	struct segment_command_64	*sc64;
	mach_vm_size_t	size = 0;
	vm_prot_t	prot = 0;
	vm_prot_t	maxprot = 0;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t	nflavors;
	vm_size_t	i;
	uint32_t	nesting_depth = 0;
	kern_return_t	kret = 0;
	struct vm_region_submap_info_64 vbr;
	mach_msg_type_number_t vbrcount = 0;
	tir_t		tir1;
	int		error = 0;
	int		panic_error = 0;

	map = kernel_map;

	thread_count = 1;
	segment_count = get_vmmap_entries(map);

	printf("Kernel map has %d entries\n", segment_count);

	nflavors = kdp_mynum_flavors;
	bcopy((char *)thread_flavor_array, (char *)flavors, sizeof(thread_flavor_array));

	for (i = 0; i < nflavors; i++)
		tstate_size += (uint32_t)(sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int)));

	command_size = (uint32_t)((segment_count) * sizeof(struct segment_command_64) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count);

	header_size = command_size + (uint32_t)sizeof(struct mach_header_64);
	header = (vm_offset_t) command_buffer;

	/*
	 *	Set up Mach-O header for currently executing 64 bit kernel.
	 */
	printf ("Generated Mach-O header size was %d\n", header_size);

	mh64 = (struct mach_header_64 *) header;
	mh64->magic = MH_MAGIC_64;
	mh64->cputype = cpu_type();
	mh64->cpusubtype = cpu_subtype();
	mh64->filetype = MH_CORE;
	mh64->ncmds = segment_count + thread_count;
	mh64->sizeofcmds = command_size;
	mh64->flags = 0;
	mh64->reserved = 0;

	hoffset = sizeof(struct mach_header_64);	/* offset into header */
	foffset = (uint32_t)round_page(header_size);	/* offset into file */

	/* Padding */
	if ((foffset - header_size) < (4*sizeof(struct segment_command_64))) {
		foffset += (uint32_t)((4*sizeof(struct segment_command_64)) - (foffset-header_size));
	}

	max_header_size = (unsigned int)foffset;

	vmoffset = vm_map_min(map);

	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header_64),
	    (caddr_t) mh64)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	printf ("Transmitting kernel state, please wait: ");

	while ((segment_count > 0) || (kret == KERN_SUCCESS)) {

		while (1) {

			/*
			 *	Get region information for next region.
			 */
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if ((kret = mach_vm_region_recurse(map,
			    &vmoffset, &size, &nesting_depth,
			    (vm_region_recurse_info_t)&vbr,
			    &vbrcount)) != KERN_SUCCESS) {
				break;
			}

			if (vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}

		if (kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;
		maxprot = vbr.max_protection;

		/*
		 *	Fill in segment command structure.
		 */
		if (hoffset > max_header_size)
			break;

		sc64 = (struct segment_command_64 *) (header);
		sc64->cmd = LC_SEGMENT_64;
		sc64->cmdsize = sizeof(struct segment_command_64);
		sc64->segname[0] = 0;
		sc64->vmaddr = vmoffset;
		sc64->vmsize = size;
		sc64->fileoff = foffset;
		sc64->filesize = size;
		sc64->maxprot = maxprot;
		sc64->initprot = prot;
		sc64->nsects = 0;

		if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}

		if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command_64),
		    (caddr_t) sc64)) < 0) {
			printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}

		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
		 * seek past that region on the server - this creates a
		 * hole in the file.
		 */
		if (vbr.user_tag != VM_MEMORY_IOKIT) {

			if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
				printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}

			txstart = vmoffset;

			if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size,
			    (caddr_t) txstart)) < 0) {
				printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}
		}

		hoffset += (unsigned int)sizeof(struct segment_command_64);
		foffset += (unsigned int)size;
		vmoffset += size;
		segment_count--;
	}

	tir1.header = header;
	tir1.hoffset = 0;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;

	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file
	 * offsets that point past the edge of the corefile, in the event that
	 * the last N VM regions were all I/O mapped or otherwise
	 * non-transferable memory, not followed by a normal VM region;
	 * i.e. there will be no hole that reaches to the end of the core file.
	 */
	kern_collectth_state (current_thread(), &tir1);

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset,
	    (caddr_t) header)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	/* last packet */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

out:
	return (error);
}
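What kern_dump() streams out is an ordinary Mach-O MH_CORE image: one mach_header_64, a run of LC_SEGMENT_64 commands plus the LC_THREAD data collected by kern_collectth_state(), then, after the page-rounded header area, the raw region contents at the recorded fileoff values. A minimal user-space sketch that walks the load commands of such a dump; the core file path is hypothetical and error handling is trimmed:

#include <stdio.h>
#include <stdint.h>
#include <mach-o/loader.h>

int
main(void)
{
	/* Hypothetical location of a core produced by kern_dump(). */
	FILE *f = fopen("/var/tmp/kernel-core", "rb");
	struct mach_header_64 mh;

	if (f == NULL || fread(&mh, sizeof(mh), 1, f) != 1)
		return 1;
	if (mh.magic != MH_MAGIC_64 || mh.filetype != MH_CORE)
		return 1;

	for (uint32_t i = 0; i < mh.ncmds; i++) {
		struct load_command lc;
		long pos = ftell(f);

		if (fread(&lc, sizeof(lc), 1, f) != 1)
			return 1;
		if (lc.cmd == LC_SEGMENT_64) {
			struct segment_command_64 sc;

			fseek(f, pos, SEEK_SET);
			if (fread(&sc, sizeof(sc), 1, f) != 1)
				return 1;
			printf("segment: vmaddr 0x%llx size 0x%llx fileoff 0x%llx\n",
			    (unsigned long long)sc.vmaddr,
			    (unsigned long long)sc.vmsize,
			    (unsigned long long)sc.fileoff);
		}
		/* Skip to the next load command. */
		fseek(f, pos + lc.cmdsize, SEEK_SET);
	}
	fclose(f);
	return 0;
}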
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined(__arm__)
	kprintf("sysctl_mib_init: NEED ARM DEFINES\n");
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
#define is_capability_set(k) (((_get_cpu_capabilities() & (k)) == (k)) ? 1 : 0)
	mmx_flag		= is_capability_set(kHasMMX);
	sse_flag		= is_capability_set(kHasSSE);
	sse2_flag		= is_capability_set(kHasSSE2);
	sse3_flag		= is_capability_set(kHasSSE3);
	supplementalsse3_flag	= is_capability_set(kHasSupplementalSSE3);
	sse4_1_flag		= is_capability_set(kHasSSE4_1);
	sse4_2_flag		= is_capability_set(kHasSSE4_2);
	x86_64_flag		= is_capability_set(k64Bit);
	aes_flag		= is_capability_set(kHasAES);
	avx1_0_flag		= is_capability_set(kHasAVX1_0);
	rdrand_flag		= is_capability_set(kHasRDRAND);
	f16c_flag		= is_capability_set(kHasF16C);
	enfstrg_flag		= is_capability_set(kHasENFSTRG);

	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;
#elif defined(__arm__)
	kprintf("sysctl_mib_init: shortcircuiting to finish, reimplement\n");
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm__ */
}
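Once these globals are registered, user space reads them through the ordinary sysctl interface. A short sketch using sysctlbyname(3) to query a few of the hw.* nodes populated above; which nodes exist depends on the hardware and the OS release, so every read is checked:

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	uint32_t cpufamily = 0;
	uint64_t cachesize[5];
	int sse4_2 = 0;
	size_t len;

	len = sizeof(cpufamily);
	if (sysctlbyname("hw.cpufamily", &cpufamily, &len, NULL, 0) == 0)
		printf("hw.cpufamily = 0x%x\n", cpufamily);

	len = sizeof(cachesize);
	if (sysctlbyname("hw.cachesize", cachesize, &len, NULL, 0) == 0) {
		for (size_t i = 0; i < len / sizeof(cachesize[0]); i++)
			printf("hw.cachesize[%zu] = %llu\n", i,
			    (unsigned long long)cachesize[i]);
	}

	/* hw.optional.* nodes are only present when the feature exists. */
	len = sizeof(sse4_2);
	if (sysctlbyname("hw.optional.sse4_2", &sse4_2, &len, NULL, 0) == 0)
		printf("SSE4.2: %s\n", sse4_2 ? "yes" : "no");

	return 0;
}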