Example #1
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

int main() {
  mach_port_t process_to_write;
  kern_return_t error;

  if(getuid() && geteuid()) {
    printf("You need to be root to vm_write!\n");
  } else{
    error = task_for_pid(mach_task_self(), PID, &process_to_write);

    if ((error != KERN_SUCCESS) || !MACH_PORT_VALID(process_to_write)) {
      printf("Error getting the process!\n");
    }
    
    mach_vm_address_t vmoffset = 0;   /* in: where to start the search; out: start of the region found */
    mach_vm_size_t vmsize;
    uint32_t nesting_depth = 0;
    struct vm_region_submap_info_64 vbr;
    mach_msg_type_number_t vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
    kern_return_t kr;

    if ((kr = mach_vm_region_recurse(process_to_write, &vmoffset, &vmsize,
                                    &nesting_depth,
                                    (vm_region_recurse_info_t)&vbr,
                                    &vbrcount)) != KERN_SUCCESS) 
    {
      printf("Error");
    }

    printf("%p\n", (void *) (uintptr_t)vmoffset);
  }
  
  return 0;
}
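
For quick experimentation without root or task_for_pid entitlements, the same call can be pointed at the calling process itself. The following is a minimal, self-contained sketch (not part of the example above) that reads the first region of the current task:

#include <stdio.h>
#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/mach_error.h>

int main(void) {
  mach_vm_address_t addr = 0;   /* in: where to start the walk; out: start of the region found */
  mach_vm_size_t size = 0;
  natural_t depth = 0;
  struct vm_region_submap_info_64 info;
  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;

  kern_return_t kr = mach_vm_region_recurse(mach_task_self(), &addr, &size,
                                            &depth,
                                            (vm_region_recurse_info_t)&info,
                                            &count);
  if (kr != KERN_SUCCESS) {
    fprintf(stderr, "mach_vm_region_recurse: %s\n", mach_error_string(kr));
    return 1;
  }

  printf("first region: %p - %p\n",
         (void *)(uintptr_t)addr, (void *)(uintptr_t)(addr + size));
  return 0;
}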
Example #2
void
gum_darwin_enumerate_ranges (mach_port_t task,
                             GumPageProtection prot,
                             GumFoundRangeFunc func,
                             gpointer user_data)
{
  mach_vm_address_t address = MACH_VM_MIN_ADDRESS;
  mach_vm_size_t size = (mach_vm_size_t) 0;
  natural_t depth = 0;

  while (TRUE)
  {
    struct vm_region_submap_info_64 info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;
    GumPageProtection cur_prot;

    while (TRUE)
    {
      info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
      kr = mach_vm_region_recurse (task, &address, &size, &depth,
          (vm_region_recurse_info_t) &info, &info_count);
      if (kr != KERN_SUCCESS)
        break;

      if (info.is_submap)
      {
        depth++;
        continue;
      }
      else
      {
        break;
      }
    }

    if (kr != KERN_SUCCESS)
      break;

    cur_prot = gum_page_protection_from_mach (info.protection);

    if ((cur_prot & prot) == prot)
    {
      GumMemoryRange range;
      GumRangeDetails details;

      range.base_address = address;
      range.size = size;

      details.range = &range;
      details.prot = cur_prot;

      if (!func (&details, user_data))
        return;
    }

    address += size;
    size = 0;
  }
}
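
A possible caller for the Frida helper above, assuming frida-gum's headers are available and the usual GumFoundRangeFunc shape (a callback that returns a gboolean, where returning FALSE stops the enumeration). This is a hedged usage sketch, not code from the quoted file:

static gboolean
print_range (const GumRangeDetails * details, gpointer user_data)
{
  (void) user_data;

  g_print ("0x%" G_GINT64_MODIFIER "x (%" G_GSIZE_FORMAT " bytes)\n",
      details->range->base_address, details->range->size);

  return TRUE; /* keep going; returning FALSE ends the walk above early */
}

/* e.g. gum_darwin_enumerate_ranges (mach_task_self (), GUM_PAGE_RW, print_range, NULL); */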
Example #3
static kern_return_t
macosx_vm_region_recurse_long (task_t task, 
			       mach_vm_address_t addr,
			       mach_vm_address_t *r_start,
			       mach_vm_size_t *r_size,
			       vm_prot_t *prot,
			       vm_prot_t *max_prot)
{
  vm_region_submap_info_data_64_t r_long_data;
  mach_msg_type_number_t r_info_size;
  natural_t r_depth;
  kern_return_t kret;

  r_info_size = VM_REGION_SUBMAP_INFO_COUNT_64;
  r_depth = 1000;
  *r_start = addr;
    
  kret = mach_vm_region_recurse (task, 
				 r_start, r_size,
				 & r_depth,
				 (vm_region_recurse_info_t) &r_long_data, 
				 &r_info_size);
  if (kret == KERN_SUCCESS)
    {
      *prot = r_long_data.protection;
      *max_prot = r_long_data.max_protection;
#ifdef DEBUG_MACOSX_MUTILS
      mutils_debug ("macosx_vm_region_recurse_long ( 0x%8.8llx ): [ 0x%8.8llx - 0x%8.8llx ) "
		    "depth = %d, prot = %c%c%s max_prot = %c%c%s\n",
		    (uint64_t) addr,
		    (uint64_t) (*r_start), 
		    (uint64_t) (*r_start + *r_size), 
		    r_depth, 
		    *prot & VM_PROT_COPY ? 'c' : '-',
		    *prot & VM_PROT_NO_CHANGE ? '!' : '-',
		    g_macosx_protection_strs[*prot & 7],
		    *max_prot & VM_PROT_COPY ? 'c' : '-',
		    *max_prot & VM_PROT_NO_CHANGE ? '!' : '-',
		    g_macosx_protection_strs[*max_prot & 7]);
#endif
    }
  else
    {
#ifdef DEBUG_MACOSX_MUTILS
      mutils_debug ("macosx_vm_region_recurse_long ( 0x%8.8llx ): ERROR %s\n",
		    (uint64_t) addr, MACH_ERROR_STRING (kret));
#endif
      *r_start = 0;
      *r_size = 0;
      *prot = VM_PROT_NONE;
      *max_prot = VM_PROT_NONE;
    }

  return kret;
}
Example #4
mach_vm_address_t getTaskBaseAddress(mach_port_name_t taskPort) {
    mach_vm_address_t vmoffset = 0; // start the search at the lowest address
    mach_vm_size_t vmsize;
    uint32_t nesting_depth = 0;
    struct vm_region_submap_info_64 vbr;
    mach_msg_type_number_t vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
    kern_return_t kr;

    //assume the first region is the task __text
    kr = mach_vm_region_recurse(taskPort, &vmoffset, &vmsize,
        &nesting_depth,
        (vm_region_recurse_info_t)&vbr,
        &vbrcount);
    assert(kr == KERN_SUCCESS);

    return vmoffset;
}
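
A hypothetical caller (printBaseAddressOf and target_pid are illustrative names, not part of the example) that obtains the task port via task_for_pid, which requires root or the appropriate entitlement:

void printBaseAddressOf(int target_pid) {
    mach_port_name_t port = MACH_PORT_NULL;
    if (task_for_pid(mach_task_self(), target_pid, &port) != KERN_SUCCESS) {
        fprintf(stderr, "task_for_pid failed\n");
        return;
    }
    printf("base address: 0x%llx\n", (unsigned long long)getTaskBaseAddress(port));
    mach_port_deallocate(mach_task_self(), port); /* drop the extra task port reference */
}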
Example #5
bool ZGRegionSubmapInfo(ZGMemoryMap processTask, ZGMemoryAddress *address, ZGMemorySize *size, ZGMemorySubmapInfo *regionInfo)
{
	bool success = true;
	mach_msg_type_number_t infoCount;
	natural_t depth = 0;
	
	while (true)
	{
		infoCount = VM_REGION_SUBMAP_INFO_COUNT_64;
		success = success && mach_vm_region_recurse(processTask, address, size, &depth, (vm_region_recurse_info_t)regionInfo, &infoCount) == KERN_SUCCESS;
		if (!success || !regionInfo->is_submap)
		{
			break;
		}
		depth++;
	}
	return success;
}
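
A hypothetical walk over every leaf region using this helper (walkRegions is illustrative; the ZG* typedefs wrap the Mach types used elsewhere in this section):

void walkRegions(ZGMemoryMap processTask)
{
	ZGMemoryAddress address = 0;
	ZGMemorySize size = 0;
	ZGMemorySubmapInfo info;
	while (ZGRegionSubmapInfo(processTask, &address, &size, &info))
	{
		/* [address, address + size) is a non-submap region; fields such as
		   info.protection are valid here */
		address += size;
	}
}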
Example #6
File: mach_vm_utils.c Project: Tyilo/hydra
protection_backup *backup_protection(vm_map_t task, mach_vm_address_t address, mach_vm_size_t size) {
	mach_vm_address_t max_address = address + size;
	
	protection_backup *first = NULL;
	protection_backup *last = NULL;
	
	natural_t depth = 1;
	while(address < max_address) {
		struct vm_region_submap_info_64 info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		if(mach_vm_region_recurse(task, &address, &size, &depth, (vm_region_info_64_t)&info, &count) == KERN_INVALID_ADDRESS) {
			break;
		}
		if(info.is_submap) {
			depth++;
		} else {
			protection_backup *current = malloc(sizeof(protection_backup));
			current->address = address;
			current->size = size;
			current->protection = info.protection;
			current->maxprotection = info.max_protection;
			current->next = NULL;
			
			if(!first) {
				first = current;
			} else {
				last->next = current;
			}
			last = current;
			
			address += size;
		}
	}
	
	return first;
}
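
The backup list naturally pairs with a restore pass; restore_protection below is a hypothetical sketch (not part of the quoted file) that re-applies the saved protections with mach_vm_protect and frees the nodes:

void restore_protection(vm_map_t task, protection_backup *backup) {
	while (backup) {
		protection_backup *next = backup->next;
		/* set_maximum = FALSE: only the current protection is restored here;
		   maxprotection would need a second call with set_maximum = TRUE */
		mach_vm_protect(task, backup->address, backup->size, FALSE, backup->protection);
		free(backup);
		backup = next;
	}
}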
Example #7
static const void
foreach_zone_in_this_process (range_callback_info_t *info)
{
    if (info == NULL || info->zone_callback == NULL)
        return;

    vm_address_t *zones = NULL;
    unsigned int num_zones = 0;
        
    kern_return_t err = malloc_get_all_zones (0, task_peek, &zones, &num_zones);
    if (KERN_SUCCESS == err)
    {
        for (unsigned int i=0; i<num_zones; ++i)
        {
            info->zone_callback (info, (const malloc_zone_t *)zones[i]);
        }
    }
    
    if (info->check_vm_regions)
    {
#if defined (VM_REGION_SUBMAP_SHORT_INFO_COUNT_64)
        typedef vm_region_submap_short_info_data_64_t RegionInfo;
        enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
#else
        typedef vm_region_submap_info_data_64_t RegionInfo;
        enum { kRegionInfoSize = VM_REGION_SUBMAP_INFO_COUNT_64 };
#endif
        task_t task = mach_task_self();
    	mach_vm_address_t vm_region_base_addr;
    	mach_vm_size_t vm_region_size;
    	natural_t vm_region_depth;
    	RegionInfo vm_region_info;

        ((range_contains_data_callback_info_t *)info->baton)->unique = true;

        for (vm_region_base_addr = 0, vm_region_size = 1; vm_region_size != 0; vm_region_base_addr += vm_region_size)
        {
            mach_msg_type_number_t vm_region_info_size = kRegionInfoSize;
            const kern_return_t err = mach_vm_region_recurse (task,
                                                              &vm_region_base_addr,
                                                              &vm_region_size,
                                                              &vm_region_depth,
                                                              (vm_region_recurse_info_t)&vm_region_info,
                                                              &vm_region_info_size);
            if (err)
                break;
            // Check all read + write regions. This will cover the thread stacks 
            // and any regions of memory that aren't covered by the heap
            if (vm_region_info.protection & VM_PROT_WRITE && 
                vm_region_info.protection & VM_PROT_READ)
            {
                //printf ("checking vm_region: [0x%16.16llx - 0x%16.16llx)\n", (uint64_t)vm_region_base_addr, (uint64_t)vm_region_base_addr + vm_region_size);
                range_info_callback (task, 
                                     info->baton, 
                                     stack_logging_type_vm_region, 
                                     vm_region_base_addr, 
                                     vm_region_size);
            }
        }
    }
}
Example #8
File: kdp_vm.c Project: Prajna/xnu
int
kern_dump(void)
{
	vm_map_t	map;
	unsigned int	thread_count, segment_count;
	unsigned int	command_size = 0, header_size = 0, tstate_size = 0;
	uint64_t	hoffset = 0, foffset = 0, nfoffset = 0;
	unsigned int	max_header_size = 0;
	vm_offset_t	header, txstart;
	vm_map_offset_t vmoffset;
	struct mach_header_64		*mh64;
	struct segment_command_64	*sc64;
	mach_vm_size_t	size = 0;
	vm_prot_t	prot = 0;
	vm_prot_t	maxprot = 0;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t	nflavors;
	vm_size_t	i;
	uint32_t	nesting_depth = 0;
	kern_return_t	kret = 0;
	struct vm_region_submap_info_64	vbr;
	mach_msg_type_number_t	vbrcount  = 0;
	tir_t tir1;

	int error = 0;
	int panic_error = 0;

	map = kernel_map;

	thread_count = 1;
	segment_count = get_vmmap_entries(map); 
  
	printf("Kernel map has %d entries\n", segment_count);

	nflavors = kdp_mynum_flavors;
	bcopy((char *)thread_flavor_array,(char *) flavors,sizeof(thread_flavor_array));

	for (i = 0; i < nflavors; i++)
		tstate_size += (uint32_t)(sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int)));

	command_size = (uint32_t)((segment_count) *
	    sizeof(struct segment_command_64) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count);

	header_size = command_size + (uint32_t)sizeof(struct mach_header_64);
	header = (vm_offset_t) command_buffer;
	
	/*
	 *	Set up Mach-O header for the currently executing 64 bit kernel.
	 */
	printf ("Generated Mach-O header size was %d\n", header_size);

	mh64 = (struct mach_header_64 *) header;
	mh64->magic = MH_MAGIC_64;
	mh64->cputype = cpu_type();
	mh64->cpusubtype = cpu_subtype();
	mh64->filetype = MH_CORE;
	mh64->ncmds = segment_count + thread_count;
	mh64->sizeofcmds = command_size;
	mh64->flags = 0;
	mh64->reserved = 0;

	hoffset = sizeof(struct mach_header_64);	/* offset into header */
	foffset = (uint32_t)round_page(header_size);	/* offset into file */
	/* Padding */
	if ((foffset - header_size) < (4*sizeof(struct segment_command_64))) {
		foffset += (uint32_t)((4*sizeof(struct segment_command_64)) - (foffset-header_size)); 
	}

	max_header_size = (unsigned int)foffset;

	vmoffset = vm_map_min(map);

	/* Transmit the Mach-O MH_CORE header, and seek forward past the 
	 * area reserved for the segment and thread commands 
	 * to begin data transmission 
	 */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset) , &nfoffset)) < 0) { 
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	} 

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header_64), (caddr_t) mh64)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
	printf ("Transmitting kernel state, please wait: ");

	while ((segment_count > 0) || (kret == KERN_SUCCESS)){

		while (1) {

			/*
			 *	Get region information for next region.
			 */

			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if((kret = mach_vm_region_recurse(map, 
				    &vmoffset, &size, &nesting_depth, 
				    (vm_region_recurse_info_t)&vbr,
				    &vbrcount)) != KERN_SUCCESS) {
				break;
			}

			if(vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}

		if(kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;
		maxprot = vbr.max_protection;

		/*
		 *	Fill in segment command structure.
		 */
    
		if (hoffset > max_header_size)
			break;
		sc64 = (struct segment_command_64 *) (header);
		sc64->cmd = LC_SEGMENT_64;
		sc64->cmdsize = sizeof(struct segment_command_64);
		sc64->segname[0] = 0;
		sc64->vmaddr = vmoffset;
		sc64->vmsize = size;
		sc64->fileoff = foffset;
		sc64->filesize = size;
		sc64->maxprot = maxprot;
		sc64->initprot = prot;
		sc64->nsects = 0;

		if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { 
			printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		} 
    
		if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command_64) , (caddr_t) sc64)) < 0) {
			printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}

		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
		 * seek past that region on the server - this creates a
		 * hole in the file.
		 */

		if ((vbr.user_tag != VM_MEMORY_IOKIT)) {

			if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) {
				printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}

			txstart = vmoffset;

			if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size, (caddr_t) txstart)) < 0)	{
				printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}
		}

		hoffset += (unsigned int)sizeof(struct segment_command_64);
		foffset += (unsigned int)size;
		vmoffset += size;
		segment_count--;
	}
	tir1.header = header;
	tir1.hoffset = 0;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;

	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file
	 * offsets that point past the edge of the corefile, in the event that
	 * the last N VM regions were all I/O mapped or otherwise
	 * non-transferable memory,  not followed by a normal VM region;
	 * i.e. there will be no hole that reaches to the end of the core file.
	 */
	kern_collectth_state (current_thread(), &tir1);

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { 
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
  
	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset , (caddr_t) header)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
    
	/* last packet */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0)
	{
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
out:
	return (error);
}
Example #9
static RList *ios_dbg_maps(RDebug *dbg, int only_modules) {
	boolt contiguous = R_FALSE;
	ut32 oldprot = UT32_MAX;
	ut32 oldmaxprot = UT32_MAX;
	char buf[1024];
	char module_name[MAXPATHLEN];
	mach_vm_address_t address = MACH_VM_MIN_ADDRESS;
	mach_vm_size_t size = (mach_vm_size_t) 0;
	mach_vm_size_t osize = (mach_vm_size_t) 0;
	natural_t depth = 0;
	int tid = dbg->pid;
	task_t task = pid_to_task (tid);
	RDebugMap *mr = NULL;
	RList *list = NULL;
	int i = 0;
	if (only_modules) {
		return xnu_dbg_modules (dbg);
	}
#if __arm64__ || __aarch64__
	size = osize = 16384; // according to frida
#else
	size = osize = 4096;
#endif
#if 0
	if (dbg->pid == 0) {
		vm_address_t base = get_kernel_base (task);
		eprintf ("Kernel Base Address: 0x%"PFMT64x"\n", (ut64)base);
		return NULL;
	}
#endif
	kern_return_t kr;
	for (;;) {
		struct vm_region_submap_info_64 info;
		mach_msg_type_number_t info_count;

		info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
		memset (&info, 0, sizeof (info));
		kr = mach_vm_region_recurse (task, &address, &size, &depth,
			(vm_region_recurse_info_t) &info, &info_count);
		if (kr != KERN_SUCCESS) {
			//eprintf ("Cannot kern succ recurse\n");
			break;
		}
		if (info.is_submap) {
			depth++;
			continue;
		}
		if (!list) {
			list = r_list_new ();
			//list->free = (RListFree*)r_debug_map_free;
		}
		{
			module_name[0] = 0;
			int ret = proc_regionfilename (tid, address,
				module_name, sizeof (module_name));
			module_name[ret] = 0;
		}
#if 0
		oldprot = info.protection;
		oldmaxprot = info.max_protection;
// contiguous pages seems to hide some map names
		if (mr) {
			if (address == mr->addr + mr->size) {
				if (oldmaxprot == info.max_protection) {
					contiguous = R_FALSE;
				} else if (oldprot != UT32_MAX && oldprot == info.protection) {
					/* expand region */
					mr->size += size;
					contiguous = R_TRUE;
				} else {
					contiguous = R_FALSE;
				}
			} else {
				contiguous = R_FALSE;
			}
		} else contiguous = R_FALSE;
		//if (info.max_protection == oldprot && !contiguous) {
#endif
		if (1) {
			#define xwr2rwx(x) ((x&1)<<2) | (x&2) | ((x&4)>>2)
			// XXX: if its shared, it cannot be read?
			snprintf (buf, sizeof (buf), "%s %02x %s%s%s%s%s %s depth=%d",
				r_str_rwx_i (xwr2rwx (info.max_protection)), i,
				unparse_inheritance (info.inheritance),
				info.user_tag? " user": "",
				info.is_submap? " sub": "",
				info.inheritance? " inherit": "",
				info.is_submap ? " submap": "",
				module_name, depth);
				//info.shared ? "shar" : "priv", 
				//info.reserved ? "reserved" : "not-reserved",
				//""); //module_name);
			mr = r_debug_map_new (buf, address, address+size,
					xwr2rwx (info.protection), 0);
			if (mr == NULL) {
				eprintf ("Cannot create r_debug_map_new\n");
				break;
			}
			mr->file = strdup (module_name);
			i++;
			r_list_append (list, mr);
		}
		if (size<1) {
			eprintf ("EFUCK\n");
			size = osize; // f**k
		}
		address += size;
		size = 0;
	}
	return list;
}
Example #10
File: MachVMMemory.cpp Project: Keno/lldb
// rsize and dirty_size are not adjusted for the dyld shared cache and multiple __LINKEDIT segments, as they are in vmmap. In practice, dirty_size doesn't differ much but rsize may. There is a performance penalty for the adjustment. Right now, only use the dirty_size.
void 
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
    
    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;
    
    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;
    
#else
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;
    
    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;
        
        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }
        
        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;
            
            address = address+size;
        }
        
        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }
    
    vm_size_t pagesize = PageSize (task);
    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
    
#endif
}
Example #11
static void
darwin_debug_regions_recurse (task_t task)
{
  mach_vm_address_t r_addr;
  mach_vm_address_t r_start;
  mach_vm_size_t r_size;
  natural_t r_depth;
  mach_msg_type_number_t r_info_size;
  vm_region_submap_short_info_data_64_t r_info;
  kern_return_t kret;
  int ret;
  struct cleanup *table_chain;
  struct ui_out *uiout = current_uiout;

  table_chain = make_cleanup_ui_out_table_begin_end (uiout, 9, -1, "regions");

  if (gdbarch_addr_bit (target_gdbarch ()) <= 32)
    {
      ui_out_table_header (uiout, 10, ui_left, "start", "Start");
      ui_out_table_header (uiout, 10, ui_left, "end", "End");
    }
  else
    {
      ui_out_table_header (uiout, 18, ui_left, "start", "Start");
      ui_out_table_header (uiout, 18, ui_left, "end", "End");
    }
  ui_out_table_header (uiout, 3, ui_left, "min-prot", "Min");
  ui_out_table_header (uiout, 3, ui_left, "max-prot", "Max");
  ui_out_table_header (uiout, 5, ui_left, "inheritence", "Inh");
  ui_out_table_header (uiout, 9, ui_left, "share-mode", "Shr");
  ui_out_table_header (uiout, 1, ui_left, "depth", "D");
  ui_out_table_header (uiout, 3, ui_left, "submap", "Sm");
  ui_out_table_header (uiout, 0, ui_noalign, "tag", "Tag");

  ui_out_table_body (uiout);

  r_start = 0;
  r_depth = 0;
  while (1)
    {
      const char *tag;
      struct cleanup *row_chain;

      r_info_size = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
      r_size = -1;
      kret = mach_vm_region_recurse (task, &r_start, &r_size, &r_depth,
				     (vm_region_recurse_info_t) &r_info,
				     &r_info_size);
      if (kret != KERN_SUCCESS)
	break;
      row_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "regions-row");

      ui_out_field_core_addr (uiout, "start", target_gdbarch (), r_start);
      ui_out_field_core_addr (uiout, "end", target_gdbarch (), r_start + r_size);
      ui_out_field_string (uiout, "min-prot", 
			   unparse_protection (r_info.protection));
      ui_out_field_string (uiout, "max-prot", 
			   unparse_protection (r_info.max_protection));
      ui_out_field_string (uiout, "inheritence",
			   unparse_inheritance (r_info.inheritance));
      ui_out_field_string (uiout, "share-mode",
			   unparse_share_mode (r_info.share_mode));
      ui_out_field_int (uiout, "depth", r_depth);
      ui_out_field_string (uiout, "submap",
			   r_info.is_submap ? _("sm ") : _("obj"));
      tag = unparse_user_tag (r_info.user_tag);
      if (tag)
	ui_out_field_string (uiout, "tag", tag);
      else
	ui_out_field_int (uiout, "tag", r_info.user_tag);

      do_cleanups (row_chain);

      if (!ui_out_is_mi_like_p (uiout))
	ui_out_text (uiout, "\n");

      if (r_info.is_submap)
	r_depth++;
      else
	r_start += r_size;
    }
  do_cleanups (table_chain);

}
Example #12
File: dtrace_glue.c Project: argp/xnu
/* Not called from probe context */
int
uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
	kern_return_t ret;

	ASSERT(p != NULL);
	ASSERT(p->task != NULL);

	task_t task = p->task;

	/*
	 * Grab a reference to the task vm_map_t to make sure
	 * the map isn't pulled out from under us.
	 *
	 * Because the proc_lock is not held at all times on all code
	 * paths leading here, it is possible for the proc to have
	 * exited. If the map is null, fail.
	 */
	vm_map_t map = get_task_map_reference(task);
	if (map) {
		/* Find the memory permissions. */
		uint32_t nestingDepth=999999;
		vm_region_submap_short_info_data_64_t info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		mach_vm_address_t address = (mach_vm_address_t)a;
		mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
	
		ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
		if (ret != KERN_SUCCESS)
			goto done;

		vm_prot_t reprotect;

		if (!(info.protection & VM_PROT_WRITE)) {
			/* Save the original protection values for restoration later */
			reprotect = info.protection;

			if (info.max_protection & VM_PROT_WRITE) {
				/* The memory is not currently writable, but can be made writable. */
				ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);
			} else {
				/*
				 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
				 *
				 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
				 */
				ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
			}

			if (ret != KERN_SUCCESS)
				goto done;

		} else {
			/* The memory was already writable. */
			reprotect = VM_PROT_NONE;
		}

		ret = vm_map_write_user( map,
					 buf,
					 (vm_map_address_t)a,
					 (vm_size_t)len);

		dtrace_flush_caches();

		if (ret != KERN_SUCCESS)
			goto done;

		if (reprotect != VM_PROT_NONE) {
			ASSERT(reprotect & VM_PROT_EXECUTE);
			ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
		}

done:
		vm_map_deallocate(map);
	} else 
		ret = KERN_TERMINATED;

	return (int)ret;
}
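
From user space, the same make-writable/write/restore pattern is commonly expressed with mach_vm_protect and mach_vm_write. The following write_remote is a hypothetical sketch under those assumptions, not a drop-in replacement for the kernel routine above:

#include <mach/mach.h>
#include <mach/mach_vm.h>

kern_return_t write_remote(task_t task, mach_vm_address_t addr,
                           const void *buf, mach_vm_size_t len)
{
	mach_vm_address_t region_addr = addr;
	mach_vm_size_t region_size = 0;
	natural_t depth = 0;
	vm_region_submap_short_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;

	/* Look up the current protection of the region containing addr. */
	kern_return_t kr = mach_vm_region_recurse(task, &region_addr, &region_size,
	                                          &depth,
	                                          (vm_region_recurse_info_t)&info,
	                                          &count);
	if (kr != KERN_SUCCESS)
		return kr;

	/* Make the target range writable if it is not already. */
	if (!(info.protection & VM_PROT_WRITE)) {
		kr = mach_vm_protect(task, addr, len, FALSE,
		                     VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
		if (kr != KERN_SUCCESS)
			return kr;
	}

	kr = mach_vm_write(task, addr, (vm_offset_t)buf, (mach_msg_type_number_t)len);

	/* Restore the original protection whether or not the write succeeded. */
	if (!(info.protection & VM_PROT_WRITE))
		mach_vm_protect(task, addr, len, FALSE, info.protection);

	return kr;
}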