/*
 * Return the process's default malloc zone.
 *
 * On OSX 10.12, malloc_default_zone() returns a special zone that is not
 * present in the list of registered zones.  That zone uses a "lite zone"
 * if one is present (apparently enabled when malloc stack logging is
 * enabled), or the first registered zone otherwise.  In practice this
 * means that unless malloc stack logging is enabled, the first registered
 * zone is the default — so consult the registered-zone list instead of
 * relying on malloc_default_zone().
 */
static malloc_zone_t *
zone_default_get(void) {
	malloc_zone_t **zones = NULL;
	unsigned int num_zones = 0;

	if (malloc_get_all_zones(0, NULL, (vm_address_t **)&zones,
	    &num_zones) != KERN_SUCCESS) {
		/*
		 * The call may have stored a count before failing;
		 * discard it so we don't read an unset zone list.
		 */
		num_zones = 0;
	}

	return (num_zones != 0) ? zones[0] : malloc_default_zone();
}
// Invoke info->zone_callback once for every malloc zone registered in the
// current process.  Does nothing if info or its callback is NULL, or if the
// zone list cannot be obtained.
//
// NOTE: the return type was "static const void"; a const qualifier on a void
// return type is meaningless (and draws -Wignored-qualifiers), so it is
// dropped here.
static void
foreach_zone_in_this_process (range_callback_info_t *info)
{
    if (info == NULL || info->zone_callback == NULL)
        return;

    vm_address_t *zones = NULL;
    unsigned int num_zones = 0;

    // Task 0 selects the current task here (the function only inspects this
    // process); task_peek supplies the memory-reader callback.
    kern_return_t err = malloc_get_all_zones (0, task_peek, &zones, &num_zones);
    if (KERN_SUCCESS == err)
    {
        for (unsigned int i = 0; i < num_zones; ++i)
            info->zone_callback (info, (const malloc_zone_t *)zones[i]);
    }
}
//
// send_all_zones
//
// Send the address and name of every malloc zone registered in this task,
// bracketed by "zones ..." / "\zones" marker lines.  The Zone singleton
// (if present) is reported first; the loop then reports every other zone.
//
void Monitor::send_all_zones() {
    Zone *zone = Zone::zone();
    print("zones %s\n", _args[1]);
    if (zone) {
        print("zone %p %p %s\n", zone, zone,
              malloc_get_zone_name((malloc_zone_t *)zone));
    }
    vm_address_t *zone_addresses = NULL;
    unsigned count = 0;
    // malloc_get_all_zones() can fail; check its result so we never walk an
    // uninitialized address list (previously the return value was ignored).
    if (malloc_get_all_zones(mach_task_self(), NULL, &zone_addresses,
                             &count) != KERN_SUCCESS) {
        count = 0;
    }
    for (unsigned i = 0; i < count; i++) {
        malloc_zone_t *malloc_zone = (malloc_zone_t *)zone_addresses[i];
        // The Zone singleton was already reported above; skip it here.
        if (malloc_zone != (malloc_zone_t *)zone)
            print("zone %p 0x00000000 \"%s\"\n", malloc_zone,
                  malloc_get_zone_name(malloc_zone));
    }
    print("\\zones\n");
}
// Invoke info->zone_callback once for every malloc zone registered in the
// current process.  If info->check_vm_regions is set, additionally walk the
// task's VM map (recursing into submaps) and run range_info_callback over
// every readable+writable region — this covers thread stacks and any memory
// not owned by a malloc zone.  Does nothing if info or its callback is NULL.
//
// NOTE: the return type was "static const void"; a const qualifier on a void
// return type is meaningless (and draws -Wignored-qualifiers), so it is
// dropped here.
static void
foreach_zone_in_this_process (range_callback_info_t *info)
{
    if (info == NULL || info->zone_callback == NULL)
        return;

    vm_address_t *zones = NULL;
    unsigned int num_zones = 0;

    // Task 0 selects the current task; task_peek supplies the memory reader.
    kern_return_t err = malloc_get_all_zones (0, task_peek, &zones, &num_zones);
    if (KERN_SUCCESS == err)
    {
        for (unsigned int i = 0; i < num_zones; ++i)
        {
            info->zone_callback (info, (const malloc_zone_t *)zones[i]);
        }
    }

    if (info->check_vm_regions)
    {
#if defined (VM_REGION_SUBMAP_SHORT_INFO_COUNT_64)
        typedef vm_region_submap_short_info_data_64_t RegionInfo;
        enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
#else
        typedef vm_region_submap_info_data_64_t RegionInfo;
        enum { kRegionInfoSize = VM_REGION_SUBMAP_INFO_COUNT_64 };
#endif
        task_t task = mach_task_self();
        mach_vm_address_t vm_region_base_addr;
        mach_vm_size_t vm_region_size;
        natural_t vm_region_depth;
        RegionInfo vm_region_info;

        // NOTE(review): assumes info->baton points at a
        // range_contains_data_callback_info_t when check_vm_regions is set —
        // confirm against callers.
        ((range_contains_data_callback_info_t *)info->baton)->unique = true;

        // mach_vm_region_recurse advances vm_region_base_addr/vm_region_size
        // through the address space; vm_region_size starts non-zero so the
        // loop body runs at least once.
        for (vm_region_base_addr = 0, vm_region_size = 1;
             vm_region_size != 0;
             vm_region_base_addr += vm_region_size)
        {
            mach_msg_type_number_t vm_region_info_size = kRegionInfoSize;
            // Renamed from "err" to avoid shadowing the outer err above.
            const kern_return_t region_err =
                mach_vm_region_recurse (task,
                                        &vm_region_base_addr,
                                        &vm_region_size,
                                        &vm_region_depth,
                                        (vm_region_recurse_info_t)&vm_region_info,
                                        &vm_region_info_size);
            if (region_err)
                break;
            // Check all read + write regions. This will cover the thread
            // stacks and any regions of memory that aren't covered by the heap
            if (vm_region_info.protection & VM_PROT_WRITE &&
                vm_region_info.protection & VM_PROT_READ)
            {
                //printf ("checking vm_region: [0x%16.16llx - 0x%16.16llx)\n", (uint64_t)vm_region_base_addr, (uint64_t)vm_region_base_addr + vm_region_size);
                range_info_callback (task,
                                     info->baton,
                                     stack_logging_type_vm_region,
                                     vm_region_base_addr,
                                     vm_region_size);
            }
        }
    }
}