Example #1
File: proc.c Project: nugxperience/yara
YR_MEMORY_BLOCK* _yr_get_next_block(
    YR_MEMORY_BLOCK_ITERATOR* iterator)
{
  YR_PROC_ITERATOR_CTX* context = (YR_PROC_ITERATOR_CTX*) iterator->context;

  kern_return_t kr;
  mach_msg_type_number_t info_count;
  mach_port_t object;
  vm_region_basic_info_data_64_t info;
  vm_size_t size = 0;
  vm_address_t address = context->current_block.base
                       + context->current_block.size;
  do
  {
    info_count = VM_REGION_BASIC_INFO_COUNT_64;

    kr = vm_region_64(
        context->task,
        &address,
        &size,
        VM_REGION_BASIC_INFO,
        (vm_region_info_t) &info,
        &info_count,
        &object);

    if (kr == KERN_SUCCESS)
    {
      context->current_block.base = address;
      context->current_block.size = size;

      return &context->current_block;
    }

  } while (kr != KERN_INVALID_ADDRESS);

  return NULL;
}
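A minimal self-contained sketch of the same region walk, stripped of the YARA iterator plumbing (walk_regions is a made-up name; only the Mach headers are assumed):

#include <mach/mach.h>
#include <stdio.h>

/* Hypothetical standalone sketch of the loop above: walk every region
   of a task until vm_region_64() stops succeeding (typically with
   KERN_INVALID_ADDRESS at the end of the address space). */
static void walk_regions(task_t task)
{
  vm_address_t address = 0;
  vm_size_t size = 0;

  for (;;)
  {
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t object;

    kern_return_t kr = vm_region_64(
        task, &address, &size, VM_REGION_BASIC_INFO,
        (vm_region_info_t) &info, &info_count, &object);

    if (kr != KERN_SUCCESS)
      break;

    printf("%p - %p\n", (void*) address, (void*) (address + size));
    address += size;  /* continue after this region */
  }
}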
Example #2
static int tsk_getperm(RIO *io, task_t task, vm_address_t addr) {
	kern_return_t kr;
	mach_port_t object;
	vm_size_t vmsize;
	mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
	vm_region_flavor_t flavor = VM_REGION_BASIC_INFO_64;
	vm_region_basic_info_data_64_t info;
	kr = vm_region_64 (task, &addr, &vmsize, flavor, (vm_region_info_t)&info, &info_count, &object);
	return (kr != KERN_SUCCESS ? 0 : info.protection);
}
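The returned value is a vm_prot_t bitmask, or 0 on failure. A small hedged sketch of decoding it (print_perm is a hypothetical helper, not part of the project above):

#include <mach/vm_prot.h>
#include <stdio.h>

/* Hypothetical helper: render the bits returned by tsk_getperm() as rwx. */
static void print_perm(int perm) {
	printf ("%c%c%c\n",
		(perm & VM_PROT_READ)    ? 'r' : '-',
		(perm & VM_PROT_WRITE)   ? 'w' : '-',
		(perm & VM_PROT_EXECUTE) ? 'x' : '-');
}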
Example #3
static int mach_write_at(RIOMach *riom, const void *buff, int len, ut64 addr) {
	task_t task = riom->task;
#if 0
	/* get page perms */
	kern_return_t err;
	int ret, _basic64[VM_REGION_BASIC_INFO_COUNT_64];
	vm_region_basic_info_64_t basic64 = (vm_region_basic_info_64_t)_basic64;
	mach_msg_type_number_t infocnt;
	const int pagesize = 4096;
	vm_offset_t addrbase;
	mach_port_t objname;
	vm_size_t size = pagesize;

	eprintf ("   0x%llx\n", addr);
	infocnt = VM_REGION_BASIC_INFO_COUNT_64;
	addrbase = addr;
	size = len;
	// intentionally use VM_REGION_BASIC_INFO and get up-converted
	ret = vm_region_64 (task, &addrbase, &size, VM_REGION_BASIC_INFO_64,
			(vm_region_info_t)basic64, &infocnt, &objname);
	eprintf ("+ PERMS (%x) %llx\n", basic64->protection, addr);
	if (ret != KERN_SUCCESS) {
		eprintf ("Can't get vm region info\n");
	}
#endif
	/* get page perms */

	// XXX SHOULD RESTORE PERMS LATER!!!
	if (vm_protect (task, addr, len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE) != KERN_SUCCESS)
		//if (mach_vm_protect (task, addr, len, 0, VM_PROT_READ | VM_PROT_WRITE) != KERN_SUCCESS)
		if (vm_protect (task, addr, len, 0, VM_PROT_WRITE) != KERN_SUCCESS)
			eprintf ("can't change page perms to rw at 0x%"PFMT64x" with len = %d\n", addr, len);
	if (vm_write (task, (vm_address_t)addr,
			(vm_offset_t)buff, (mach_msg_type_number_t)len) != KERN_SUCCESS)
		eprintf ("can't write on memory\n");
	//if (vm_read_overwrite (task, addr, 4, buff, &sz)) { eprintf ("cannot overwrite\n"); }

#if 0
	eprintf ("addrbase: %x\n", addrbase);
	eprintf ("change perms back to %x\n", basic64->protection);
	int prot = 0;
	if (basic64->protection & VM_PROT_READ)    prot |= VM_PROT_READ;
	if (basic64->protection & VM_PROT_WRITE)   prot |= VM_PROT_WRITE;
	if (basic64->protection & VM_PROT_EXECUTE) prot |= VM_PROT_EXECUTE;
	printf ("%d vs %d\n", prot, basic64->protection);
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
	if (vm_protect (task, addr, len, 0, prot) != KERN_SUCCESS) { // basic64->protection
		eprintf ("Oops (0x%"PFMT64x") error (%s)\n", addr,
			MACH_ERROR_STRING (err));
		eprintf ("can't change page perms to rx\n");
	}
#endif
	return len;
}
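For the read direction hinted at by the commented-out vm_read_overwrite() call, a hedged counterpart might look like this (mach_read_at is a made-up name, taking the task directly rather than an RIOMach):

/* Hypothetical counterpart to mach_write_at(): read len bytes at addr
   into buff. Returns the number of bytes actually copied, or -1. */
static int mach_read_at(task_t task, void *buff, int len, ut64 addr) {
	vm_size_t copied = 0;
	if (vm_read_overwrite (task, (vm_address_t)addr,
			(vm_size_t)len, (vm_address_t)buff, &copied) != KERN_SUCCESS)
		return -1;
	return (int)copied;
}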
Example #4
static mach_error_t
allocateBranchIslandAux(
		BranchIsland	**island,
		void *originalFunctionAddress,
		bool forward)
{
	assert( island );
	assert( sizeof( BranchIsland ) <= kPageSize );

	vm_map_t task_self = mach_task_self();
	vm_address_t original_address = (vm_address_t) originalFunctionAddress;
	vm_address_t address = original_address;

	for (;;) {
		vm_size_t vmsize = 0;
		memory_object_name_t object = 0;
		kern_return_t kr = 0;
		vm_region_flavor_t flavor = VM_REGION_BASIC_INFO;
		// Find the region the address is in.
#if __WORDSIZE == 32
		vm_region_basic_info_data_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
		kr = vm_region(task_self, &address, &vmsize, flavor,
			       (vm_region_info_t)&info, &info_count, &object);
#else
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
		kr = vm_region_64(task_self, &address, &vmsize, flavor,
				  (vm_region_info_t)&info, &info_count, &object);
#endif
		if (kr != KERN_SUCCESS)
			return kr;
		assert((address & (kPageSize - 1)) == 0);

		// Go to the first page before or after this region
		vm_address_t new_address = forward ? address + vmsize : address - kPageSize;
#if __WORDSIZE == 64
		if(!jump_in_range(original_address, new_address))
			break;
#endif
		address = new_address;

		// Try to allocate this page.
		kr = vm_allocate(task_self, &address, kPageSize, 0);
		if (kr == KERN_SUCCESS) {
			*island = (BranchIsland*) address;
			return err_none;
		}
		if (kr != KERN_NO_SPACE)
			return kr;
	}

	return KERN_NO_SPACE;
}
Example #5
/* On Mac OS X, the only way to get enough information is to become root. Pretty frustrating!*/
int run_get_dynamic_proc_info(pid_t pid, RunProcDyn *rpd)
{
	task_t task;
	kern_return_t error;
	mach_msg_type_number_t count;
	thread_array_t thread_table;
	thread_basic_info_t thi;
	thread_basic_info_data_t thi_data;
	unsigned table_size;
	struct task_basic_info ti;

	error = task_for_pid(mach_task_self(), pid, &task);
	if (error != KERN_SUCCESS) {
		/* fprintf(stderr, "++ Probably you have to set suid or become root.\n"); */
		rpd->rss = rpd->vsize = 0;
		rpd->utime = rpd->stime = 0;
		return 0;
	}
	count = TASK_BASIC_INFO_COUNT;
	error = task_info(task, TASK_BASIC_INFO, (task_info_t)&ti, &count);
	assert(error == KERN_SUCCESS);
	{ /* adapted from ps/tasks.c */
		vm_region_basic_info_data_64_t b_info;
		vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT;
		vm_size_t size;
		mach_port_t object_name;
		count = VM_REGION_BASIC_INFO_COUNT_64;
		error = vm_region_64(task, &address, &size, VM_REGION_BASIC_INFO,
							 (vm_region_info_t)&b_info, &count, &object_name);
		if (error == KERN_SUCCESS) {
			if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
				ti.virtual_size > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE))
			{
				ti.virtual_size -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
			}
		}
		rpd->rss = ti.resident_size;
		rpd->vsize = ti.virtual_size;
	}
	{ /* calculate CPU times, adapted from top/libtop.c */
		unsigned i;
		rpd->utime = ti.user_time.seconds + ti.user_time.microseconds * 1e-6;
		rpd->stime = ti.system_time.seconds + ti.system_time.microseconds * 1e-6;
		error = task_threads(task, &thread_table, &table_size);
		assert(error == KERN_SUCCESS);
		thi = &thi_data;
		for (i = 0; i != table_size; ++i) {
			count = THREAD_BASIC_INFO_COUNT;
			error = thread_info(thread_table[i], THREAD_BASIC_INFO, (thread_info_t)thi, &count);
			assert(error == KERN_SUCCESS);
			if ((thi->flags & TH_FLAGS_IDLE) == 0) {
				rpd->utime += thi->user_time.seconds + thi->user_time.microseconds * 1e-6;
				rpd->stime += thi->system_time.seconds + thi->system_time.microseconds * 1e-6;
			}
			if (task != mach_task_self()) {
				error = mach_port_deallocate(mach_task_self(), thread_table[i]);
				assert(error == KERN_SUCCESS);
			}
		}
		/* element size: one thread port, not a thread_array_t pointer */
		error = vm_deallocate(mach_task_self(), (vm_offset_t)thread_table, table_size * sizeof(*thread_table));
		assert(error == KERN_SUCCESS);
	}
	mach_port_deallocate(mach_task_self(), task);
	return 0;
}
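A minimal caller sketch for the function above (the RunProcDyn field types are assumptions inferred from the assignments in the code: integral rss/vsize, floating-point utime/stime):

#include <stdio.h>

/* Hypothetical caller: sample a process and print its memory/CPU usage. */
void print_proc_dyn(pid_t pid)
{
	RunProcDyn rpd;
	run_get_dynamic_proc_info(pid, &rpd);
	printf("rss=%llu vsize=%llu utime=%.3fs stime=%.3fs\n",
	       (unsigned long long)rpd.rss, (unsigned long long)rpd.vsize,
	       rpd.utime, rpd.stime);
}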
Example #6
File: proc.c Project: GeeksXtreme/yara
int yr_process_get_memory(
    pid_t pid,
    YR_MEMORY_BLOCK** first_block)
{
  task_t task;
  kern_return_t kr;

  vm_size_t size = 0;
  vm_address_t address = 0;
  vm_region_basic_info_data_64_t info;
  mach_msg_type_number_t info_count;
  mach_port_t object;

  unsigned char* data;

  YR_MEMORY_BLOCK* new_block;
  YR_MEMORY_BLOCK* current_block = NULL;

  *first_block = NULL;

  if ((kr = task_for_pid(mach_task_self(), pid, &task)) != KERN_SUCCESS)
    return ERROR_COULD_NOT_ATTACH_TO_PROCESS;

  do {

    info_count = VM_REGION_BASIC_INFO_COUNT_64;

    kr = vm_region_64(
        task,
        &address,
        &size,
        VM_REGION_BASIC_INFO,
        (vm_region_info_t) &info,
        &info_count,
        &object);

    if (kr == KERN_SUCCESS)
    {
      data = (unsigned char*) yr_malloc(size);

      if (data == NULL)
        return ERROR_INSUFICIENT_MEMORY;

      if (vm_read_overwrite(
              task,
              address,
              size,
              (vm_address_t)
              data,
              &size) == KERN_SUCCESS)
      {
        new_block = (YR_MEMORY_BLOCK*) yr_malloc(sizeof(YR_MEMORY_BLOCK));

        if (new_block == NULL)
        {
          yr_free(data);
          return ERROR_INSUFICIENT_MEMORY;
        }

        if (*first_block == NULL)
          *first_block = new_block;

        new_block->base = address;
        new_block->size = size;
        new_block->data = data;
        new_block->next = NULL;

        if (current_block != NULL)
          current_block->next = new_block;

        current_block = new_block;
      }
      else
      {
        yr_free(data);
      }

      address += size;
    }


  } while (kr != KERN_INVALID_ADDRESS);

  if (task != MACH_PORT_NULL)
    mach_port_deallocate(mach_task_self(), task);

  return ERROR_SUCCESS;
}
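The function above hands the caller a singly linked list of blocks that must eventually be released. A hedged sketch of the matching cleanup, using only the fields and allocator seen above (the function name is made up here, though YARA ships a similar routine):

/* Hypothetical cleanup for the list built above: free each block's
   data buffer, then the block itself. */
void yr_process_free_memory(
    YR_MEMORY_BLOCK* first_block)
{
  YR_MEMORY_BLOCK* block = first_block;

  while (block != NULL)
  {
    YR_MEMORY_BLOCK* next = block->next;
    yr_free(block->data);
    yr_free(block);
    block = next;
  }
}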
Example #7
/*
 * Return a tuple of RSS and VMS memory usage.
 */
static PyObject*
get_memory_info(PyObject* self, PyObject* args)
{
    long pid;
    int err;
    unsigned int info_count = TASK_BASIC_INFO_COUNT;
    mach_port_t task;
    struct task_basic_info tasks_info;
    vm_region_basic_info_data_64_t  b_info;
    vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT;
    vm_size_t size;
    mach_port_t object_name;

    // the argument passed should be a process id
    if (! PyArg_ParseTuple(args, "l", &pid)) {
        return NULL;
    }

    /* task_for_pid() requires special privileges
     * "This function can be called only if the process is owned by the
     * procmod group or if the caller is root."
     * - http://developer.apple.com/documentation/MacOSX/Conceptual/universal_binary/universal_binary_tips/chapter_5_section_19.html */
    err = task_for_pid(mach_task_self(), pid, &task);
    if ( err == KERN_SUCCESS) {
        info_count = TASK_BASIC_INFO_COUNT;
        err = task_info(task, TASK_BASIC_INFO, (task_info_t)&tasks_info, &info_count);
        if (err != KERN_SUCCESS) {
            if (err == 4) {
                // errcode 4 is KERN_INVALID_ARGUMENT (here: access denied)
                return AccessDenied();
            }
            // otherwise throw a runtime error with appropriate error code
            return PyErr_Format(PyExc_RuntimeError,
                                "task_info(TASK_BASIC_INFO) failed");
        }

        /* Issue #73 http://code.google.com/p/psutil/issues/detail?id=73
         * adjust the virtual memory size down to account for
         * shared memory that task_info.virtual_size includes w/every process
         */
        info_count = VM_REGION_BASIC_INFO_COUNT_64;
        err = vm_region_64(task, &address, &size, VM_REGION_BASIC_INFO,
            (vm_region_info_t)&b_info, &info_count, &object_name);
        if (err == KERN_SUCCESS) {
            if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
                tasks_info.virtual_size > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE))
            {
                tasks_info.virtual_size -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
            }
        }
    }

    else {
        if (! pid_exists(pid) ) {
            return NoSuchProcess();
        }

        // pid exists, so return AccessDenied error since task_for_pid() failed
        return AccessDenied();
    }

    return Py_BuildValue("(ll)", tasks_info.resident_size, tasks_info.virtual_size);
}
Example #8
void
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ /* || defined __CYGWIN__ */

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/self/maps") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start and end.  */
      if (!(rof_scanf_lx (&rof, &start) >= 0
            && rof_getchar (&rof) == '-'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __FreeBSD__ || defined __NetBSD__

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/curproc/map") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &start) >= 0))
        break;
      while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
        rof_getchar (&rof);
      /* Then end.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return;

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }

#elif (defined _WIN32 || defined __WIN32__) || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  unsigned long address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            unsigned long start, end;
            unsigned int flags;

            start = (unsigned long)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (unsigned long)info.BaseAddress + info.RegionSize;
    }

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }

#elif HAVE_MQUERY /* OpenBSD */

  uintptr_t pagesize;
  uintptr_t address;
  int /*bool*/ address_known_mapped;

  pagesize = getpagesize ();
  /* Avoid calling mquery with a NULL first argument, because this argument
     value has a specific meaning.  We know the NULL page is unmapped.  */
  address = pagesize;
  address_known_mapped = 0;
  for (;;)
    {
      /* Test whether the page at address is mapped.  */
      if (address_known_mapped
          || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
             == (void *) -1)
        {
          /* The page at address is mapped.
             This is the start of an interval.  */
          uintptr_t start = address;
          uintptr_t end;

          /* Find the end of the interval.  */
          end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
          if (end == (uintptr_t) (void *) -1)
            end = 0; /* wrap around */
          address = end;

          /* It's too complicated to find out about the flags.  Just pass 0.  */
          if (callback (data, start, end, 0))
            break;

          if (address < pagesize) /* wrap around? */
            break;
        }
      /* Here we know that the page at address is unmapped.  */
      {
        uintptr_t query_size = pagesize;

        address += pagesize;

        /* Query larger and larger blocks, to get through the unmapped address
           range with few mquery() calls.  */
        for (;;)
          {
            if (2 * query_size > query_size)
              query_size = 2 * query_size;
            if (address + query_size - 1 < query_size) /* wrap around? */
              {
                address_known_mapped = 0;
                break;
              }
            if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                == (void *) -1)
              {
                /* Not all the interval [address .. address + query_size - 1]
                   is unmapped.  */
                address_known_mapped = (query_size == pagesize);
                break;
              }
            /* The interval [address .. address + query_size - 1] is
               unmapped.  */
            address += query_size;
          }
        /* Reduce the query size again, to determine the precise size of the
           unmapped interval that starts at address.  */
        while (query_size > pagesize)
          {
            query_size = query_size / 2;
            if (address + query_size - 1 >= query_size)
              {
                if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                    != (void *) -1)
                  {
                    /* The interval [address .. address + query_size - 1] is
                       unmapped.  */
                    address += query_size;
                    address_known_mapped = 0;
                  }
                else
                  address_known_mapped = (query_size == pagesize);
              }
          }
        /* Here again query_size = pagesize, and
           either address + pagesize - 1 < pagesize, or
           mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
           So, the unmapped area ends at address.  */
      }
      if (address + pagesize - 1 < pagesize) /* wrap around? */
        break;
    }

#endif
}
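A minimal usage sketch for vma_iterate(): the callback signature below mirrors the calls visible above (data, start, end, flags), and returning nonzero stops the walk; print_vma is a made-up name:

/* Hypothetical callback: print each VMA with its rwx flags and continue.  */
static int
print_vma (void *data, unsigned long start, unsigned long end,
           unsigned int flags)
{
  printf ("0x%lx-0x%lx %c%c%c\n", start, end,
          (flags & VMA_PROT_READ) ? 'r' : '-',
          (flags & VMA_PROT_WRITE) ? 'w' : '-',
          (flags & VMA_PROT_EXECUTE) ? 'x' : '-');
  return 0;
}

/* Usage: vma_iterate (print_vma, NULL);  */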
Example #9
//------------------------------------------------------------------------------
// Name:
// Desc:
//------------------------------------------------------------------------------
QList<IRegion::pointer> DebuggerCore::memory_regions() const {

#if 0
    static const char * inheritance_strings[] = {
		"SHARE", "COPY", "NONE", "DONATE_COPY",
	};

	static const char * behavior_strings[] = {
		"DEFAULT", "RANDOM", "SEQUENTIAL", "RESQNTL", "WILLNEED", "DONTNEED",
	};
#endif

	QList<IRegion::pointer> regions;
	if(pid_ != 0) {
		task_t the_task;
		kern_return_t kr = task_for_pid(mach_task_self(), pid_, &the_task);
		if(kr != KERN_SUCCESS) {
			qDebug("task_for_pid failed");
			return QList<IRegion::pointer>();
		}

		vm_size_t vmsize;
		vm_address_t address;
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t info_count;
		vm_region_flavor_t flavor;
		memory_object_name_t object;

		kr = KERN_SUCCESS;
		address = 0;

		do {
			flavor     = VM_REGION_BASIC_INFO_64;
			info_count = VM_REGION_BASIC_INFO_COUNT_64;
			kr = vm_region_64(the_task, &address, &vmsize, flavor, (vm_region_info_64_t)&info, &info_count, &object);
			if(kr == KERN_SUCCESS) {

				const edb::address_t start               = address;
				const edb::address_t end                 = address + vmsize;
				const edb::address_t base                = address;
				const QString name                       = QString();
				const IRegion::permissions_t permissions =
					((info.protection & VM_PROT_READ)    ? PROT_READ  : 0) |
					((info.protection & VM_PROT_WRITE)   ? PROT_WRITE : 0) |
					((info.protection & VM_PROT_EXECUTE) ? PROT_EXEC  : 0);

				regions.push_back(std::make_shared<PlatformRegion>(start, end, base, name, permissions));

				/*
				printf("%016llx-%016llx %8uK %c%c%c/%c%c%c %11s %6s %10s uwir=%hu sub=%u\n",
				address, (address + vmsize), (vmsize >> 10),
				(info.protection & VM_PROT_READ)        ? 'r' : '-',
				(info.protection & VM_PROT_WRITE)       ? 'w' : '-',
				(info.protection & VM_PROT_EXECUTE)     ? 'x' : '-',
				(info.max_protection & VM_PROT_READ)    ? 'r' : '-',
				(info.max_protection & VM_PROT_WRITE)   ? 'w' : '-',
				(info.max_protection & VM_PROT_EXECUTE) ? 'x' : '-',
				inheritance_strings[info.inheritance],
				(info.shared) ? "shared" : "-",
				behavior_strings[info.behavior],
				info.user_wired_count,
				info.reserved);
				*/

				address += vmsize;
			} else if(kr != KERN_INVALID_ADDRESS) {
				if(the_task != MACH_PORT_NULL) {
					mach_port_deallocate(mach_task_self(), the_task);
				}
				return QList<IRegion::pointer>();
			}
		} while(kr != KERN_INVALID_ADDRESS);

		if(the_task != MACH_PORT_NULL) {
			mach_port_deallocate(mach_task_self(), the_task);
		}
	}

	return regions;
}
Example #10
	mach_error_t
allocateBranchIsland(
		BranchIsland	**island,
		void *originalFunctionAddress)
{
	assert( island );
	assert( sizeof( BranchIsland ) <= kPageSize );

	vm_map_t task_self = mach_task_self();
	vm_address_t original_address = (vm_address_t) originalFunctionAddress;
	static vm_address_t last_allocated = 0;
	vm_address_t address =
		last_allocated ? last_allocated : original_address;

	for (;;) {
		vm_size_t vmsize = 0;
		memory_object_name_t object = 0;
		kern_return_t kr = 0;
		vm_region_flavor_t flavor = VM_REGION_BASIC_INFO;
		// Find the page the address is in.
#if __WORDSIZE == 32
		vm_region_basic_info_data_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
		kr = vm_region(task_self, &address, &vmsize, flavor,
			       (vm_region_info_t)&info, &info_count, &object);
#else
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
		kr = vm_region_64(task_self, &address, &vmsize, flavor,
				  (vm_region_info_t)&info, &info_count, &object);
#endif
		if (kr != KERN_SUCCESS)
			return kr;

		// Don't underflow. This could be made to work, but this is a
		// convenient place to give up.
		assert((address & (kPageSize - 1)) == 0);
		if (address == 0)
			break;

		// Go back one page.
		vm_address_t new_address = address - kPageSize;
#if __WORDSIZE == 64
		if(original_address - new_address - 5 > INT32_MAX)
			break;
#endif
		address = new_address;

		// Try to allocate this page.
		kr = vm_allocate(task_self, &address, kPageSize, 0);
		if (kr == KERN_SUCCESS) {
			*island = (BranchIsland*) address;
			last_allocated = address;
			return err_none;
		}
		if (kr != KERN_NO_SPACE)
			return kr;
	}

	return KERN_NO_SPACE;
}
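Example #4 calls a jump_in_range() helper that is not shown there; judging from the inline 5-byte rel32 displacement check in this variant, a hedged reconstruction could be:

#include <stdint.h>

/* Hypothetical reconstruction of jump_in_range() from Example #4, based
   on the inline check above: a 5-byte rel32 JMP can only reach targets
   within a signed 32-bit displacement of the instruction's end. */
static int jump_in_range(intptr_t from, intptr_t to) {
	int64_t displacement = (int64_t)to - ((int64_t)from + 5);
	return displacement <= INT32_MAX && displacement >= INT32_MIN;
}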
Example #11
int
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (callback, data);
  if (retval == 0)
      return 0;

  return vma_iterate_proc (callback, data);
# else
  /* On the other platforms, try the /proc approach first, and the sysctl()
     as a fallback.  */
  int retval = vma_iterate_proc (callback, data);
  if (retval == 0)
      return 0;

  return vma_iterate_bsd (callback, data);
# endif

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

#elif defined __sun /* Solaris */

  /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
     _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
                                  32-bit   64-bit
         _STRUCTURED_PROC = 0       32       56
         _STRUCTURED_PROC = 1       96      104
     Therefore, if the include files provide the newer API, prmap_t has
     the bigger size, and thus you MUST use the newer API.  And if the
     include files provide the older API, prmap_t has the smaller size,
     and thus you MUST use the older API.  */

# if defined PIOCNMAP && defined PIOCMAP
  /* We must use the older /proc interface.  */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# else
  /* We must use the newer /proc interface.
     Documentation:
     https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
     The contents of /proc/<pid>/map consists of records of type
     prmap_t.  These are different in 32-bit and 64-bit processes,
     but here we are fortunately accessing only the current process.  */

  size_t pagesize;
  char fnamebuf[6+10+4+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* maps_end;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  {
    struct stat statbuf;
    if (fstat (fd, &statbuf) < 0)
      goto fail2;
    nmaps = statbuf.st_size / sizeof (prmap_t);
  }

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  /* Read up to memneed bytes from fd into maps.  */
  {
    size_t remaining = memneed;
    size_t total_read = 0;
    char *ptr = (char *) maps;

    do
      {
        size_t nread = read (fd, ptr, remaining);
        if (nread == (size_t)-1)
          {
            if (errno == EINTR)
              continue;
            goto fail1;
          }
        if (nread == 0)
          /* EOF */
          break;
        total_read += nread;
        ptr += nread;
        remaining -= nread;
      }
    while (remaining > 0);

    nmaps = (memneed - remaining) / sizeof (prmap_t);
    maps_end = maps + nmaps;
  }

  for (mp = maps; mp < maps_end; mp++)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# endif

#elif HAVE_PSTAT_GETPROCVM /* HP-UX */

  unsigned long pagesize = getpagesize ();
  int i;

  for (i = 0; ; i++)
    {
      struct pst_vm_status info;
      int ret = pstat_getprocvm (&info, sizeof (info), 0, i);
      if (ret < 0)
        return -1;
      if (ret == 0)
        break;
      {
        unsigned long start = info.pst_vaddr;
        unsigned long end = start + info.pst_length * pagesize;
        unsigned int flags = 0;
        if (info.pst_permission & PS_PROT_READ)
          flags |= VMA_PROT_READ;
        if (info.pst_permission & PS_PROT_WRITE)
          flags |= VMA_PROT_WRITE;
        if (info.pst_permission & PS_PROT_EXECUTE)
          flags |= VMA_PROT_EXECUTE;

        if (callback (data, start, end, flags))
          break;
      }
    }

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined __GNU__ /* GNU/Hurd */

  /* The Hurd has a /proc/self/maps that looks like the Linux one, but it
     lacks the VMAs created through anonymous mmap.  Therefore use the Mach
     API.
     Documentation:
     https://www.gnu.org/software/hurd/gnumach-doc/Memory-Attributes.html */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = 0;; address += size)
    {
      vm_prot_t protection;
      vm_prot_t max_protection;
      vm_inherit_t inheritance;
      boolean_t shared;
      memory_object_name_t object_name;
      vm_offset_t offset;
      unsigned int flags;

      if (!(vm_region (task, &address, &size, &protection, &max_protection,
                         &inheritance, &shared, &object_name, &offset)
            == KERN_SUCCESS))
        break;
      mach_port_deallocate (task, object_name);
      flags = 0;
      if (protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined _WIN32 || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  uintptr_t address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            uintptr_t start, end;
            unsigned int flags;

            start = (uintptr_t)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (uintptr_t)info.BaseAddress + info.RegionSize;
    }
  return 0;

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }
  return 0;

#elif HAVE_MQUERY /* OpenBSD */

# if defined __OpenBSD__
  /* Try sysctl() first.  It is more efficient than the mquery() loop below
     and also provides the flags.  */
  {
    int retval = vma_iterate_bsd (callback, data);
    if (retval == 0)
      return 0;
  }
# endif

  {
    uintptr_t pagesize;
    uintptr_t address;
    int /*bool*/ address_known_mapped;

    pagesize = getpagesize ();
    /* Avoid calling mquery with a NULL first argument, because this argument
       value has a specific meaning.  We know the NULL page is unmapped.  */
    address = pagesize;
    address_known_mapped = 0;
    for (;;)
      {
        /* Test whether the page at address is mapped.  */
        if (address_known_mapped
            || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
               == (void *) -1)
          {
            /* The page at address is mapped.
               This is the start of an interval.  */
            uintptr_t start = address;
            uintptr_t end;

            /* Find the end of the interval.  */
            end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
            if (end == (uintptr_t) (void *) -1)
              end = 0; /* wrap around */
            address = end;

            /* It's too complicated to find out about the flags.
               Just pass 0.  */
            if (callback (data, start, end, 0))
              break;

            if (address < pagesize) /* wrap around? */
              break;
          }
        /* Here we know that the page at address is unmapped.  */
        {
          uintptr_t query_size = pagesize;

          address += pagesize;

          /* Query larger and larger blocks, to get through the unmapped address
             range with few mquery() calls.  */
          for (;;)
            {
              if (2 * query_size > query_size)
                query_size = 2 * query_size;
              if (address + query_size - 1 < query_size) /* wrap around? */
                {
                  address_known_mapped = 0;
                  break;
                }
              if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                  == (void *) -1)
                {
                  /* Not all the interval [address .. address + query_size - 1]
                     is unmapped.  */
                  address_known_mapped = (query_size == pagesize);
                  break;
                }
              /* The interval [address .. address + query_size - 1] is
                 unmapped.  */
              address += query_size;
            }
          /* Reduce the query size again, to determine the precise size of the
             unmapped interval that starts at address.  */
          while (query_size > pagesize)
            {
              query_size = query_size / 2;
              if (address + query_size - 1 >= query_size)
                {
                  if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                      != (void *) -1)
                    {
                      /* The interval [address .. address + query_size - 1] is
                         unmapped.  */
                      address += query_size;
                      address_known_mapped = 0;
                    }
                  else
                    address_known_mapped = (query_size == pagesize);
                }
            }
          /* Here again query_size = pagesize, and
             either address + pagesize - 1 < pagesize, or
             mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
             So, the unmapped area ends at address.  */
        }
        if (address + pagesize - 1 < pagesize) /* wrap around? */
          break;
      }
    return 0;
  }

#else

  /* Not implemented.  */
  return -1;

#endif
}
Example #12
static int
set_task(struct psi_process *proci, struct kinfo_proc *p)
{
    task_port_t task;
    unsigned int info_count;
    struct task_basic_info tasks_info;
    thread_array_t thread_list;
    unsigned int thread_count;


    if (task_for_pid(mach_task_self(),
                     p->kp_proc.p_pid, &task) != KERN_SUCCESS) {
        proci->pcpu_status     = PSI_STATUS_PRIVS;
        proci->utime_status    = PSI_STATUS_PRIVS;
        proci->stime_status    = PSI_STATUS_PRIVS;
        proci->nthreads_status = PSI_STATUS_PRIVS;
        proci->rss_status      = PSI_STATUS_PRIVS;
        proci->vsz_status      = PSI_STATUS_PRIVS;
        return 0;
    }

    if (task_threads(task, &thread_list, &thread_count) == KERN_SUCCESS) {
        int i;
        struct timespec utime = { 0, 0 };
        struct timespec stime = { 0, 0 };
        int t_cpu = 0;
        int failed = 0;

        proci->nthreads = thread_count;
        proci->nthreads_status = PSI_STATUS_OK;

        for (i = 0; i < thread_count; ++i) {
            struct thread_basic_info t_info;
            unsigned int             icount = THREAD_BASIC_INFO_COUNT;

            if (thread_info(thread_list[i], THREAD_BASIC_INFO,
                            (thread_info_t)&t_info, &icount) == KERN_SUCCESS) {
                utime.tv_sec  += t_info.user_time.seconds;
                utime.tv_nsec += t_info.user_time.microseconds * 1000;
                stime.tv_sec  += t_info.system_time.seconds;
                stime.tv_nsec += t_info.system_time.microseconds * 1000;
                t_cpu         += t_info.cpu_usage;
            } else {
                failed = 1;
            }
        }

        if (failed) {
            proci->pcpu_status  = PSI_STATUS_PRIVS;
            proci->utime_status = PSI_STATUS_PRIVS;
            proci->stime_status = PSI_STATUS_PRIVS;
        } else {
            proci->pcpu = 100.0 * (double)(t_cpu) / TH_USAGE_SCALE;
            proci->pcpu_status = PSI_STATUS_OK;

            proci->utime = utime;
            proci->utime_status = PSI_STATUS_OK;

            proci->stime = stime;
            proci->stime_status = PSI_STATUS_OK;
        }
    } else {
        proci->pcpu_status     = PSI_STATUS_PRIVS;
        proci->utime_status    = PSI_STATUS_PRIVS;
        proci->stime_status    = PSI_STATUS_PRIVS;
        proci->nthreads_status = PSI_STATUS_PRIVS;
        thread_list = NULL;
        thread_count = 0;
    }
    if (thread_list != NULL) {
        /* free the port array allocated by task_threads(); the element
           size is that of one thread port, not of thread_array_t */
        vm_deallocate(mach_task_self(),
            (vm_address_t)thread_list, sizeof(*thread_list) * thread_count);
    }

    info_count = TASK_BASIC_INFO_COUNT;
    if (task_info(task, TASK_BASIC_INFO,
                  (task_info_t)&tasks_info, &info_count) == KERN_SUCCESS) {
        vm_region_basic_info_data_64_t  b_info;
        vm_address_t                    address = GLOBAL_SHARED_TEXT_SEGMENT;
        vm_size_t                       size;
        mach_port_t                     object_name;

        /*
         * try to determine if this task has the split libraries mapped in... if
         * so, adjust its virtual size down by the 2 segments that are used for
         * split libraries
         */
        info_count = VM_REGION_BASIC_INFO_COUNT_64;
        if (vm_region_64(task, &address, &size, VM_REGION_BASIC_INFO,
                        (vm_region_info_t)&b_info, &info_count,
                        &object_name) == KERN_SUCCESS) {
            if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
                    tasks_info.virtual_size >
                    (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
                tasks_info.virtual_size -=
                    (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
            }
        }

        proci->rss        = tasks_info.resident_size;
        proci->rss_status = PSI_STATUS_OK;
        proci->vsz        = tasks_info.virtual_size;
        proci->vsz_status = PSI_STATUS_OK;
    } else {
        proci->rss_status = PSI_STATUS_PRIVS;
        proci->vsz_status = PSI_STATUS_PRIVS;
    }

    return 0;
}