Example No. 1
static mach_error_t
allocateBranchIslandAux(
		BranchIsland	**island,
		void *originalFunctionAddress,
		bool forward)
{
	assert( island );
	assert( sizeof( BranchIsland ) <= kPageSize );

	vm_map_t task_self = mach_task_self();
	vm_address_t original_address = (vm_address_t) originalFunctionAddress;
	vm_address_t address = original_address;

	for (;;) {
		vm_size_t vmsize = 0;
		memory_object_name_t object = 0;
		kern_return_t kr = 0;
		vm_region_flavor_t flavor = VM_REGION_BASIC_INFO;
		// Find the region the address is in.
#if __WORDSIZE == 32
		vm_region_basic_info_data_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
		kr = vm_region(task_self, &address, &vmsize, flavor,
			       (vm_region_info_t)&info, &info_count, &object);
#else
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
		kr = vm_region_64(task_self, &address, &vmsize, flavor,
				  (vm_region_info_t)&info, &info_count, &object);
#endif
		if (kr != KERN_SUCCESS)
			return kr;
		assert((address & (kPageSize - 1)) == 0);

		// Go to the first page before or after this region
		vm_address_t new_address = forward ? address + vmsize : address - kPageSize;
#if __WORDSIZE == 64
		if(!jump_in_range(original_address, new_address))
			break;
#endif
		address = new_address;

		// Try to allocate this page.
		kr = vm_allocate(task_self, &address, kPageSize, 0);
		if (kr == KERN_SUCCESS) {
			*island = (BranchIsland*) address;
			return err_none;
		}
		if (kr != KERN_NO_SPACE)
			return kr;
	}

	return KERN_NO_SPACE;
}
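Example No. 1 relies on a jump_in_range helper that is not shown in the excerpt. Below is a minimal sketch of what such a check could look like, assuming the patch is a 5-byte relative JMP (opcode plus signed 32-bit displacement); the helper name, signature, and the 5-byte constant are assumptions, not taken from the original source. Example No. 12 below inlines essentially the same test for the backward direction.

#include <stdint.h>
#include <stdbool.h>
#include <mach/mach.h>

// Hypothetical sketch: can a 5-byte rel32 JMP placed at `from` reach `to`?
// The displacement is measured from the end of the instruction.
static bool
jump_in_range(vm_address_t from, vm_address_t to)
{
	intptr_t displacement = (intptr_t)to - ((intptr_t)from + 5);
	return displacement >= INT32_MIN && displacement <= INT32_MAX;
}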
Example No. 2
/* Wire down all memory currently allocated at START for LEN bytes;
   host_priv is the privileged host port. */
static void
wire_segment_internal (vm_address_t start,
		       vm_size_t len,
		       host_priv_t host_priv)
{
  vm_address_t addr;
  vm_size_t size;
  vm_prot_t protection;
  vm_prot_t max_protection;
  vm_inherit_t inheritance;
  boolean_t shared;
  mach_port_t object_name;
  vm_offset_t offset;
  error_t err;
  volatile char *poke;

  do
    {
      addr = start;
      err = vm_region (mach_task_self (), &addr, &size, &protection,
		       &max_protection, &inheritance, &shared, &object_name,
		       &offset);
      if (err)
	return;

      /* The current region begins at ADDR and is SIZE long.  If it
	 extends beyond LEN, prune it.  */
      if (addr + size > start + len)
	size = len - (addr - start);

      /* Set protection to allow all access possible */
      vm_protect (mach_task_self (), addr, size, 0, max_protection);

      /* Generate write faults */
      for (poke = (char *) addr;
	   (vm_address_t) poke < addr + size;
	   poke += vm_page_size)
	*poke = *poke;

      /* Wire pages */
      vm_wire (host_priv, mach_task_self (), addr, size, max_protection);

      /* Set protection back to what it was */
      vm_protect (mach_task_self (), addr, size, 0, protection);


      mach_port_deallocate (mach_task_self (), object_name);

      len -= (addr - start) + size;
      start = addr + size;
    }
  while (len);
}
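A short usage sketch for wire_segment_internal, assuming a GNU/Hurd program that obtains the privileged host port with get_privileged_ports; the wrapper name and error handling are illustrative only.

#include <hurd.h>
#include <mach.h>
#include <stddef.h>

/* Hypothetical caller: wire an already-allocated buffer so it cannot be
   paged out.  Only works for tasks allowed to get the privileged ports.  */
static void
wire_buffer (void *buf, size_t len)
{
  host_priv_t host_priv;

  if (get_privileged_ports (&host_priv, NULL))
    return;			/* not privileged; give up */

  wire_segment_internal ((vm_address_t) buf, (vm_size_t) len, host_priv);

  mach_port_deallocate (mach_task_self (), host_priv);
}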
Example No. 3
kern_return_t memmgr::getMemoryProtection(void *addr, vm_prot_t &prot)
{
    vm_region_basic_info_data_t info;
    vm_size_t vmsize;
    vm_address_t address = (vm_address_t)addr;
    mach_msg_type_number_t infoCount = VM_REGION_BASIC_INFO_COUNT;
    memory_object_name_t object;
    kern_return_t status = vm_region(mach_task_self(), &address, &vmsize,
                                     VM_REGION_BASIC_INFO, (vm_region_info_t)&info, &infoCount,
                                     &object);
    // Only report the protection if the lookup succeeded; otherwise
    // `info` is uninitialized.
    if (status == KERN_SUCCESS)
        prot = info.protection;
    return status;
}
Example No. 4
static PyObject *MachTask_get_mmaps(PyObject *self, PyObject *args) {
    kern_return_t err;
    vm_address_t address = 0;
    unsigned int mapsize = 0;
    unsigned int nextaddr = 0;
    unsigned int prot = 0;

    PyObject *memlist = NULL;
    PyObject *tup = NULL;
    vm_region_basic_info_data_64_t info;
    mach_port_t name; //FIXME leak?
    unsigned int count = VM_REGION_BASIC_INFO_COUNT_64;

    memlist = PyList_New(0);

    do {
        address = nextaddr;
        err = vm_region(((MachPort*)self)->port,
                &address, &mapsize, VM_REGION_BASIC_INFO_64, 
                (vm_region_info_t)&info, &count, &name);
        if (err != KERN_SUCCESS) {
            address += 4096;
            continue;
        }

        prot = 0;
        if (info.protection & VM_PROT_READ)
            prot |= 4;
        if (info.protection & VM_PROT_WRITE)
            prot |= 2;
        if (info.protection & VM_PROT_EXECUTE)
            prot |= 1;
        if (info.shared != 0)
            prot |= 8;

        if (prot != 0) {
            tup = PyTuple_New(4);
            PyTuple_SetItem(tup, 0, PyLong_FromUnsignedLong(address));
            PyTuple_SetItem(tup, 1, PyLong_FromUnsignedLong(mapsize));
            PyTuple_SetItem(tup, 2, PyLong_FromUnsignedLong(prot));
            PyTuple_SetItem(tup, 3, PyString_FromString("")); //FIXME mmap names

            PyList_Append(memlist, tup);
        }

        nextaddr = address + mapsize;
    } while (nextaddr > address);

    return memlist;
}
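The example above passes the 64-bit flavor and a vm_region_basic_info_data_64_t to plain vm_region, and stores the size in an unsigned int, which truncates on 64-bit targets. Here is a minimal sketch of the same region walk with matching Mach types, using vm_region_64 and leaving out the Python wrapping; the function and variable names are illustrative:

#include <stdio.h>
#include <mach/mach.h>

// Sketch only: walk every region of `task` and print address, size and
// protection.  Error handling beyond "stop at the first failure" is omitted.
static void walk_regions(vm_map_t task) {
    vm_address_t address = 0;

    for (;;) {
        vm_size_t size = 0;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        memory_object_name_t object = MACH_PORT_NULL;

        kern_return_t kr = vm_region_64(task, &address, &size,
                                        VM_REGION_BASIC_INFO_64,
                                        (vm_region_info_t)&info,
                                        &count, &object);
        if (kr != KERN_SUCCESS)
            break;                          // no more regions
        if (object != MACH_PORT_NULL)
            mach_port_deallocate(mach_task_self(), object);

        // info.protection carries the VM_PROT_READ/WRITE/EXECUTE bits.
        printf("%#lx-%#lx prot=%d\n",
               (unsigned long)address,
               (unsigned long)(address + size),
               (int)info.protection);

        address += size;
    }
}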
Example No. 5
static int
get_data_region(
		vm_address_t *address,
		vm_size_t *size
		)
{
	region_t region;
	kern_return_t ret;
	struct section *sect;

	sect = (struct section *) getsectbyname(SEG_DATA, SECT_DATA);
	region.address = 0;
	*address = 0;
	for (;;) {
		ret = vm_region(task_self(), 
				&region.address, 
				&region.size, 
				&region.protection, 
				&region.max_protection, 
				&region.inheritance,
				&region.shared, 
				&region.object_name, 
				&region.offset);
		if (ret != KERN_SUCCESS || region.address >= VM_HIGHDATA) {
			break;
		}
		if (*address != 0) {
			if (region.address > *address + *size) {
				if (!filldatagap(*address, size, 
						 region.address)) {
					return (0);
				}
			} 
			*size += region.size;
		} else {
			if (region.address == sect->addr) {
				*address = region.address;
				*size = region.size;
			} 
		}
		region.address += region.size;
	}
	return (1);
}
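get_data_region calls a filldatagap helper that is not part of the excerpt. Judging from the caller, it has to keep the data region contiguous by allocating the hole up to the next region and extending *size accordingly. A hypothetical sketch, with the behaviour inferred from the call site and using the same old task_self()-era API as the example:

/* Hypothetical filldatagap: allocate the hole between the end of the data
   collected so far (address + *size) and the start of the next region, so
   that the accumulated data segment stays contiguous.  Returns nonzero on
   success, zero if the hole could not be allocated. */
static int
filldatagap(vm_address_t address, vm_size_t *size, vm_address_t next_address)
{
	vm_address_t gap_start = address + *size;
	vm_size_t gap_size = next_address - gap_start;

	if (vm_allocate(task_self(), &gap_start, gap_size, FALSE) != KERN_SUCCESS)
		return (0);
	*size += gap_size;
	return (1);
}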
Example No. 6
/* Returns the starting address and the protection. Pass in mach_task_self() and the starting address. */
static bool GetRegionInfo( mach_port_t self, const void *address, vm_address_t &startOut, vm_prot_t &protectionOut )
{
	struct vm_region_basic_info_64 info;
	mach_msg_type_number_t infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t unused;
	vm_size_t size = 0;
	vm_address_t start = vm_address_t( address );
	kern_return_t ret = vm_region( self, &start, &size, VM_REGION_BASIC_INFO_64,
				       (vm_region_info_t)&info, &infoCnt, &unused );
	
	if( ret != KERN_SUCCESS ||
		start > (vm_address_t)address ||
		(vm_address_t)address >= start + size )
	{
		return false;
	}
	startOut = start;
	protectionOut = info.protection;
	return true;
}
Example No. 7
static void
print_regions (void)
{
  task_t target_task = mach_task_self ();
  vm_address_t address = (vm_address_t) 0;
  vm_size_t size;
  struct vm_region_basic_info info;
  mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
  mach_port_t object_name;

  printf ("   address     size prot maxp\n");

  while (vm_region (target_task, &address, &size, VM_REGION_BASIC_INFO,
		    (vm_region_info_t) &info, &info_count, &object_name)
	 == KERN_SUCCESS && info_count == VM_REGION_BASIC_INFO_COUNT)
    {
      print_region (address, size, info.protection, info.max_protection);

      if (object_name != MACH_PORT_NULL)
	mach_port_deallocate (target_task, object_name);

      address += size;
    }
}
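print_region is not included in the excerpt (it is also used by Example No. 9 below). A minimal sketch consistent with the column header printed above (address, size, prot, maxp); the formatting of the original helper may differ.

#include <stdio.h>
#include <mach/mach.h>

/* Hypothetical print_region: one line per region, protections shown as
   rwx flags.  */
static void
print_protection (vm_prot_t prot)
{
  putchar ((prot & VM_PROT_READ) ? 'r' : '-');
  putchar ((prot & VM_PROT_WRITE) ? 'w' : '-');
  putchar ((prot & VM_PROT_EXECUTE) ? 'x' : '-');
}

static void
print_region (vm_address_t address, vm_size_t size,
	      vm_prot_t prot, vm_prot_t max_prot)
{
  printf ("%10lx %8lx ", (unsigned long) address, (unsigned long) size);
  print_protection (prot);
  printf ("  ");
  print_protection (max_prot);
  putchar ('\n');
}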
Example No. 8
int
sigsegv_get_vma (unsigned long req_address, struct vma_struct *vma)
{
  unsigned long prev_address = 0, prev_size = 0;
  unsigned long join_address = 0, join_size = 0;
  int more = 1;
  vm_address_t address;
  vm_size_t size;
  mach_port_t object_name;
#ifdef VM_REGION_BASIC_INFO
  task_t task = mach_task_self ();
  struct vm_region_basic_info info;
  mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
#else
  task_t task = task_self ();
  vm_prot_t protection, max_protection;
  vm_inherit_t inheritance;
  boolean_t shared;
  vm_offset_t offset;
#endif

  for (address = VM_MIN_ADDRESS; more; address += size)
    {
#ifdef VM_REGION_BASIC_INFO
      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
#else
      more = (vm_region (task, &address, &size, &protection, &max_protection,
                         &inheritance, &shared, &object_name, &offset)
              == KERN_SUCCESS);
#endif
      if (!more)
        {
          address = join_address + join_size;
          size = 0;
        }

      if ((unsigned long) address == join_address + join_size)
        join_size += size;
      else
        {
          prev_address = join_address;
          prev_size = join_size;
          join_address = (unsigned long) address;
          join_size = size;
        }

#ifdef VM_REGION_BASIC_INFO
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      info_count = VM_REGION_BASIC_INFO_COUNT;
#endif

#if STACK_DIRECTION < 0
      if (join_address <= req_address && join_address + join_size > req_address)
        {
          vma->start = join_address;
          vma->end = join_address + join_size;
          vma->prev_end = prev_address + prev_size;
          vma->is_near_this = simple_is_near_this;
          return 0;
        }
#else
      if (prev_address <= req_address && prev_address + prev_size > req_address)
        {
          vma->start = prev_address;
          vma->end = prev_address + prev_size;
          vma->next_start = join_address;
          vma->is_near_this = simple_is_near_this;
          return 0;
        }
#endif
    }

#if STACK_DIRECTION > 0
  if (join_address <= req_address && join_address + size > req_address)
    {
      vma->start = prev_address;
      vma->end = prev_address + prev_size;
      vma->next_start = ~0UL;
      vma->is_near_this = simple_is_near_this;
      return 0;
    }
#endif

  return -1;
}
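The vma_struct type filled in by sigsegv_get_vma is defined elsewhere in the library. Its shape, as far as this excerpt uses it, can be inferred from the assignments above; the real header may contain additional members and may guard some fields by STACK_DIRECTION:

/* Inferred shape of the VMA descriptor used above (sketch only). */
struct vma_struct
{
  unsigned long start;       /* start address of the VMA */
  unsigned long end;         /* end address (exclusive) */
  unsigned long prev_end;    /* end of the previous VMA (grows-down stacks) */
  unsigned long next_start;  /* start of the next VMA (grows-up stacks) */
  int (*is_near_this) (unsigned long addr, struct vma_struct *vma);
};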
Example No. 9
/* Build the list of regions that need to be dumped.  Regions with
   addresses above VM_DATA_TOP are omitted.  Adjacent regions with
   identical protection are merged.  Note that non-writable regions
   cannot be omitted because some regions created at run time are
   read-only.  */
static void
build_region_list (void)
{
  task_t target_task = mach_task_self ();
  vm_address_t address = (vm_address_t) 0;
  vm_size_t size;
  struct vm_region_basic_info info;
  mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
  mach_port_t object_name;
  struct region_t *r;

#if VERBOSE
  printf ("--- List of All Regions ---\n");
  printf ("   address     size prot maxp\n");
#endif

  while (vm_region (target_task, &address, &size, VM_REGION_BASIC_INFO,
		    (vm_region_info_t) &info, &info_count, &object_name)
	 == KERN_SUCCESS && info_count == VM_REGION_BASIC_INFO_COUNT)
    {
      /* Done when we reach addresses of shared libraries, which are
	 loaded in high memory.  */
      if (address >= VM_DATA_TOP)
	break;

#if VERBOSE
      print_region (address, size, info.protection, info.max_protection);
#endif

      /* If a region immediately follows the previous one (the one
	 most recently added to the list) and has identical
	 protection, merge it with the latter.  Otherwise create a
	 new list element for it.  */
      if (region_list_tail
	  && info.protection == region_list_tail->protection
	  && info.max_protection == region_list_tail->max_protection
	  && region_list_tail->address + region_list_tail->size == address)
	{
	  region_list_tail->size += size;
	}
      else
	{
	  r = malloc (sizeof *r);

	  if (!r)
	    unexec_error ("cannot allocate region structure");

	  r->address = address;
	  r->size = size;
	  r->protection = info.protection;
	  r->max_protection = info.max_protection;

	  r->next = 0;
	  if (region_list_head == 0)
	    {
	      region_list_head = r;
	      region_list_tail = r;
	    }
	  else
	    {
	      region_list_tail->next = r;
	      region_list_tail = r;
	    }

	  /* Deallocate (unused) object name returned by
	     vm_region.  */
	  if (object_name != MACH_PORT_NULL)
	    mach_port_deallocate (target_task, object_name);
	}

      address += size;
    }

  printf ("--- List of Regions to be Dumped ---\n");
  print_region_list ();
}
Example No. 10
void
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ /* || defined __CYGWIN__ */

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/self/maps") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start and end.  */
      if (!(rof_scanf_lx (&rof, &start) >= 0
            && rof_getchar (&rof) == '-'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __FreeBSD__ || defined __NetBSD__

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/curproc/map") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &start) >= 0))
        break;
      while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
        rof_getchar (&rof);
      /* Then end.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return;

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }

#elif (defined _WIN32 || defined __WIN32__) || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  unsigned long address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            unsigned long start, end;
            unsigned int flags;

            start = (unsigned long)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (unsigned long)info.BaseAddress + info.RegionSize;
    }

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }

#elif HAVE_MQUERY /* OpenBSD */

  uintptr_t pagesize;
  uintptr_t address;
  int /*bool*/ address_known_mapped;

  pagesize = getpagesize ();
  /* Avoid calling mquery with a NULL first argument, because this argument
     value has a specific meaning.  We know the NULL page is unmapped.  */
  address = pagesize;
  address_known_mapped = 0;
  for (;;)
    {
      /* Test whether the page at address is mapped.  */
      if (address_known_mapped
          || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
             == (void *) -1)
        {
          /* The page at address is mapped.
             This is the start of an interval.  */
          uintptr_t start = address;
          uintptr_t end;

          /* Find the end of the interval.  */
          end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
          if (end == (uintptr_t) (void *) -1)
            end = 0; /* wrap around */
          address = end;

          /* It's too complicated to find out about the flags.  Just pass 0.  */
          if (callback (data, start, end, 0))
            break;

          if (address < pagesize) /* wrap around? */
            break;
        }
      /* Here we know that the page at address is unmapped.  */
      {
        uintptr_t query_size = pagesize;

        address += pagesize;

        /* Query larger and larger blocks, to get through the unmapped address
           range with few mquery() calls.  */
        for (;;)
          {
            if (2 * query_size > query_size)
              query_size = 2 * query_size;
            if (address + query_size - 1 < query_size) /* wrap around? */
              {
                address_known_mapped = 0;
                break;
              }
            if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                == (void *) -1)
              {
                /* Not all the interval [address .. address + query_size - 1]
                   is unmapped.  */
                address_known_mapped = (query_size == pagesize);
                break;
              }
            /* The interval [address .. address + query_size - 1] is
               unmapped.  */
            address += query_size;
          }
        /* Reduce the query size again, to determine the precise size of the
           unmapped interval that starts at address.  */
        while (query_size > pagesize)
          {
            query_size = query_size / 2;
            if (address + query_size - 1 >= query_size)
              {
                if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                    != (void *) -1)
                  {
                    /* The interval [address .. address + query_size - 1] is
                       unmapped.  */
                    address += query_size;
                    address_known_mapped = 0;
                  }
                else
                  address_known_mapped = (query_size == pagesize);
              }
          }
        /* Here again query_size = pagesize, and
           either address + pagesize - 1 < pagesize, or
           mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
           So, the unmapped area ends at address.  */
      }
      if (address + pagesize - 1 < pagesize) /* wrap around? */
        break;
    }

#endif
}
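vma_iterate just drives a caller-supplied callback over all mappings; returning nonzero from the callback stops the walk. A small usage sketch follows. The callback typedef lives in the library's header, so the parameter types below match how the excerpt invokes it, the VMA_PROT_* flag names are taken from the code above, and the function name is illustrative.

#include <stdio.h>

/* Illustrative callback: print one line per VMA.  Always returns 0 so
   the iteration visits every mapping. */
static int
print_vma (void *data, unsigned long start, unsigned long end,
           unsigned int flags)
{
  (void) data;
  printf ("%#lx-%#lx %c%c%c\n", start, end,
          (flags & VMA_PROT_READ) ? 'r' : '-',
          (flags & VMA_PROT_WRITE) ? 'w' : '-',
          (flags & VMA_PROT_EXECUTE) ? 'x' : '-');
  return 0;
}

/* Usage:  vma_iterate (print_vma, NULL);  */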
Example No. 11
void
x86_64_reset_hook (void)
{
  int offset = (COMPILER_REGBLOCK_N_FIXED * (sizeof (SCHEME_OBJECT)));
  unsigned char * rsi_value = ((unsigned char *) Registers);

  x86_64_interface_initialize ();

  /* These must match machines/x86-64/lapgen.scm */

  SETUP_REGISTER (asm_scheme_to_interface); 		/* 0 */
  SETUP_REGISTER (asm_scheme_to_interface_call);	/* 1 */

  if (offset != RSI_TRAMPOLINE_TO_INTERFACE_OFFSET)
    {
      outf_fatal ("\nx86_64_reset_hook: RSI_TRAMPOLINE_TO_INTERFACE_OFFSET\n");
      Microcode_Termination (TERM_EXIT);
    }
  SETUP_REGISTER (asm_trampoline_to_interface);		/* 2 */

  SETUP_REGISTER (asm_interrupt_procedure);		/* 3 */
  SETUP_REGISTER (asm_interrupt_continuation);		/* 4 */
  SETUP_REGISTER (asm_interrupt_closure);		/* 5 */
  SETUP_REGISTER (asm_interrupt_dlink);			/* 6 */
  SETUP_REGISTER (asm_primitive_apply);			/* 7 */
  SETUP_REGISTER (asm_primitive_lexpr_apply);		/* 8 */
  SETUP_REGISTER (asm_assignment_trap);			/* 9 */
  SETUP_REGISTER (asm_reference_trap);			/* 10 */
  SETUP_REGISTER (asm_safe_reference_trap);		/* 11 */
  SETUP_REGISTER (asm_link);				/* 12 */
  SETUP_REGISTER (asm_error);				/* 13 */
  SETUP_REGISTER (asm_primitive_error);			/* 14 */
  SETUP_REGISTER (asm_generic_add);			/* 15 */
  SETUP_REGISTER (asm_generic_subtract);		/* 16 */
  SETUP_REGISTER (asm_generic_multiply);		/* 17 */
  SETUP_REGISTER (asm_generic_divide);			/* 18 */
  SETUP_REGISTER (asm_generic_equal);			/* 19 */
  SETUP_REGISTER (asm_generic_less);			/* 20 */
  SETUP_REGISTER (asm_generic_greater);			/* 21 */
  SETUP_REGISTER (asm_generic_increment);		/* 22 */
  SETUP_REGISTER (asm_generic_decrement);		/* 23 */
  SETUP_REGISTER (asm_generic_zero);			/* 24 */
  SETUP_REGISTER (asm_generic_positive);		/* 25 */
  SETUP_REGISTER (asm_generic_negative);		/* 26 */
  SETUP_REGISTER (asm_generic_quotient);		/* 27 */
  SETUP_REGISTER (asm_generic_remainder);		/* 28 */
  SETUP_REGISTER (asm_generic_modulo);			/* 29 */
  SETUP_REGISTER (asm_sc_apply);			/* 30 */
  SETUP_REGISTER (asm_sc_apply_size_1);			/* 31 */
  SETUP_REGISTER (asm_sc_apply_size_2);			/* 32 */
  SETUP_REGISTER (asm_sc_apply_size_3);			/* 33 */
  SETUP_REGISTER (asm_sc_apply_size_4);			/* 34 */
  SETUP_REGISTER (asm_sc_apply_size_5);			/* 35 */
  SETUP_REGISTER (asm_sc_apply_size_6);			/* 36 */
  SETUP_REGISTER (asm_sc_apply_size_7);			/* 37 */
  SETUP_REGISTER (asm_sc_apply_size_8);			/* 38 */

  /* Logically, this should be up by the other interrupt routines, but
     I just wrote all those numbers above by hand and am too exhausted
     by that gruelling effort to be inclined to go to the trouble to
     renumber them now.  */
  SETUP_REGISTER (asm_interrupt_continuation_2);	/* 39 */

  SETUP_REGISTER (asm_fixnum_shift);			/* 40 */

#ifdef _MACH_UNIX
  {
    vm_address_t addr;
    vm_size_t size;
    vm_prot_t prot;
    vm_prot_t max_prot;
    vm_inherit_t inheritance;
    boolean_t shared;
    port_t object;
    vm_offset_t offset;

    addr = ((vm_address_t) Heap);
    if ((vm_region ((task_self ()), &addr, &size, &prot, &max_prot,
		    &inheritance, &shared, &object, &offset))
	!= KERN_SUCCESS)
      {
	outf_fatal ( "compiler_reset: vm_region() failed.\n");
	Microcode_Termination (TERM_EXIT);
	/*NOTREACHED*/
      }
    if ((prot & VM_PROT_SCHEME) != VM_PROT_SCHEME)
      {
	if ((max_prot & VM_PROT_SCHEME) != VM_PROT_SCHEME)
	  {
	    outf_fatal (
			"compiler_reset: inadequate protection for Heap.\n");
	    outf_fatal ( "maximum = 0x%lx; desired = 0x%lx\n",
			((unsigned long) (max_prot & VM_PROT_SCHEME)),
			((unsigned long) VM_PROT_SCHEME));
	    Microcode_Termination (TERM_EXIT);
	    /*NOTREACHED*/
	  }
	if ((vm_protect ((task_self ()), ((vm_address_t) Heap),
			 (((char *) constant_end) - ((char *) Heap)),
			 0, VM_PROT_SCHEME))
	    != KERN_SUCCESS)
	  {
	    outf_fatal ("Unable to change protection for Heap.\n");
	    outf_fatal ("actual = 0x%lx; desired = 0x%lx\n",
			((unsigned long) (prot & VM_PROT_SCHEME)),
			((unsigned long) VM_PROT_SCHEME));
	    Microcode_Termination (TERM_EXIT);
	    /*NOTREACHED*/
	  }
      }
  }
#endif /* _MACH_UNIX */
}
Example No. 12
	mach_error_t
allocateBranchIsland(
		BranchIsland	**island,
		void *originalFunctionAddress)
{
	assert( island );
	assert( sizeof( BranchIsland ) <= kPageSize );

	vm_map_t task_self = mach_task_self();
	vm_address_t original_address = (vm_address_t) originalFunctionAddress;
	static vm_address_t last_allocated = 0;
	vm_address_t address =
		last_allocated ? last_allocated : original_address;

	for (;;) {
		vm_size_t vmsize = 0;
		memory_object_name_t object = 0;
		kern_return_t kr = 0;
		vm_region_flavor_t flavor = VM_REGION_BASIC_INFO;
		// Find the page the address is in.
#if __WORDSIZE == 32
		vm_region_basic_info_data_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
		kr = vm_region(task_self, &address, &vmsize, flavor,
			       (vm_region_info_t)&info, &info_count, &object);
#else
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
		kr = vm_region_64(task_self, &address, &vmsize, flavor,
				  (vm_region_info_t)&info, &info_count, &object);
#endif
		if (kr != KERN_SUCCESS)
			return kr;

		// Don't underflow. This could be made to work, but this is a
		// convenient place to give up.
		assert((address & (kPageSize - 1)) == 0);
		if (address == 0)
			break;

		// Go back one page.
		vm_address_t new_address = address - kPageSize;
#if __WORDSIZE == 64
		if(original_address - new_address - 5 > INT32_MAX)
			break;
#endif
		address = new_address;

		// Try to allocate this page.
		kr = vm_allocate(task_self, &address, kPageSize, 0);
		if (kr == KERN_SUCCESS) {
			*island = (BranchIsland*) address;
			last_allocated = address;
			return err_none;
		}
		if (kr != KERN_NO_SPACE)
			return kr;
	}

	return KERN_NO_SPACE;
}
Example No. 13
int __make_writable( void * addr )
{
    kern_return_t kr = KERN_SUCCESS;
    vm_address_t region_start = ( vm_address_t ) addr;
    vm_size_t region_size = 0;
    vm_region_flavor_t region_flavor = VM_REGION_BASIC_INFO;
    struct vm_region_basic_info region_info;
    mach_msg_type_number_t region_info_count = sizeof( struct vm_region_basic_info );
    memory_object_name_t region_object_name = 0;
    int good_to_go = 1;

    bzero( &region_info, sizeof( struct vm_region_basic_info ) );

    // make sure we can write to this region of virtual memory...
    kr = vm_region( mach_task_self( ), &region_start, &region_size, region_flavor,
                    ( vm_region_info_t ) &region_info, &region_info_count, &region_object_name );

    if ( kr == KERN_SUCCESS )
    {
        if ( ( region_info.max_protection & VM_PROT_WRITE ) == 0 )
        {
            good_to_go = 0;

            // VM_PROT_COPY along with VM_WRITE essentially makes this region a
            //  'copy on write' area of shared memory. We can't just make the region
            //  writeable, because it's shared between multiple processes - it's loaded
            //  once into physical memory, and simply mapped into the virtual memory
            //  of any process using it. Enabling copy-on-write simply means that when
            //  any process tries to write to this region of memory, the whole page is
            //  duplicated in physical memory for the benefit of that process alone.
            kr = vm_protect( mach_task_self( ), region_start, region_size, TRUE,
                             region_info.max_protection | VM_PROT_WRITE | VM_PROT_COPY );

            if ( kr == KERN_SUCCESS )
            {
                good_to_go = 1;
            }
            else
            {
                LogEmergency( "Failed to make target memory writable ! %d (%s)",
                              kr, mach_error_string(kr) );
            }
        }

        if ( ( good_to_go ) && ( region_info.protection & VM_PROT_WRITE ) == 0 )
        {
            // no write permission, have to add it...
            good_to_go = 0;
            kr = vm_protect( mach_task_self( ), region_start, region_size, FALSE,
                             region_info.protection | VM_PROT_WRITE );

            if ( kr == KERN_SUCCESS )
            {
                good_to_go = 1;
            }
            else
            {
                LogEmergency( "Failed to make target memory writable ! %d (%s)",
                              kr, mach_error_string(kr) );
            }
        }
    }

    return ( good_to_go );
}
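A brief usage sketch for __make_writable: force the region containing a target instruction to be writable before patching it. The wrapper name, the single-byte patch and the return convention are illustrative only.

#include <string.h>

// Hypothetical caller: patch one byte at `target` after making the
// containing region writable.  Returns nonzero on success.
static int patch_byte( void *target, unsigned char value )
{
    if ( !__make_writable( target ) )
        return ( 0 );   // could not obtain write access

    memcpy( target, &value, 1 );    // the actual patch
    return ( 1 );
}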
Example No. 14
int
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (callback, data);
  if (retval == 0)
      return 0;

  return vma_iterate_proc (callback, data);
# else
  /* On the other platforms, try the /proc approach first, and the sysctl()
     as a fallback.  */
  int retval = vma_iterate_proc (callback, data);
  if (retval == 0)
      return 0;

  return vma_iterate_bsd (callback, data);
# endif

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

#elif defined __sun /* Solaris */

  /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
     _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
                                  32-bit   64-bit
         _STRUCTURED_PROC = 0       32       56
         _STRUCTURED_PROC = 1       96      104
     Therefore, if the include files provide the newer API, prmap_t has
     the bigger size, and thus you MUST use the newer API.  And if the
     include files provide the older API, prmap_t has the smaller size,
     and thus you MUST use the older API.  */

# if defined PIOCNMAP && defined PIOCMAP
  /* We must use the older /proc interface.  */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# else
  /* We must use the newer /proc interface.
     Documentation:
     https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
     The contents of /proc/<pid>/map consists of records of type
     prmap_t.  These are different in 32-bit and 64-bit processes,
     but here we are fortunately accessing only the current process.  */

  size_t pagesize;
  char fnamebuf[6+10+4+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* maps_end;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  {
    struct stat statbuf;
    if (fstat (fd, &statbuf) < 0)
      goto fail2;
    nmaps = statbuf.st_size / sizeof (prmap_t);
  }

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  /* Read up to memneed bytes from fd into maps.  */
  {
    size_t remaining = memneed;
    size_t total_read = 0;
    char *ptr = (char *) maps;

    do
      {
        size_t nread = read (fd, ptr, remaining);
        if (nread == (size_t)-1)
          {
            if (errno == EINTR)
              continue;
            goto fail1;
          }
        if (nread == 0)
          /* EOF */
          break;
        total_read += nread;
        ptr += nread;
        remaining -= nread;
      }
    while (remaining > 0);

    nmaps = (memneed - remaining) / sizeof (prmap_t);
    maps_end = maps + nmaps;
  }

  for (mp = maps; mp < maps_end; mp++)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# endif

#elif HAVE_PSTAT_GETPROCVM /* HP-UX */

  unsigned long pagesize = getpagesize ();
  int i;

  for (i = 0; ; i++)
    {
      struct pst_vm_status info;
      int ret = pstat_getprocvm (&info, sizeof (info), 0, i);
      if (ret < 0)
        return -1;
      if (ret == 0)
        break;
      {
        unsigned long start = info.pst_vaddr;
        unsigned long end = start + info.pst_length * pagesize;
        unsigned int flags = 0;
        if (info.pst_permission & PS_PROT_READ)
          flags |= VMA_PROT_READ;
        if (info.pst_permission & PS_PROT_WRITE)
          flags |= VMA_PROT_WRITE;
        if (info.pst_permission & PS_PROT_EXECUTE)
          flags |= VMA_PROT_EXECUTE;

        if (callback (data, start, end, flags))
          break;
      }
    }

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined __GNU__ /* GNU/Hurd */

  /* The Hurd has a /proc/self/maps that looks like the Linux one, but it
     lacks the VMAs created through anonymous mmap.  Therefore use the Mach
     API.
     Documentation:
     https://www.gnu.org/software/hurd/gnumach-doc/Memory-Attributes.html */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = 0;; address += size)
    {
      vm_prot_t protection;
      vm_prot_t max_protection;
      vm_inherit_t inheritance;
      boolean_t shared;
      memory_object_name_t object_name;
      vm_offset_t offset;
      unsigned int flags;

      if (!(vm_region (task, &address, &size, &protection, &max_protection,
                         &inheritance, &shared, &object_name, &offset)
            == KERN_SUCCESS))
        break;
      mach_port_deallocate (task, object_name);
      flags = 0;
      if (protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined _WIN32 || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  uintptr_t address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            uintptr_t start, end;
            unsigned int flags;

            start = (uintptr_t)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (uintptr_t)info.BaseAddress + info.RegionSize;
    }
  return 0;

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }
  return 0;

#elif HAVE_MQUERY /* OpenBSD */

# if defined __OpenBSD__
  /* Try sysctl() first.  It is more efficient than the mquery() loop below
     and also provides the flags.  */
  {
    int retval = vma_iterate_bsd (callback, data);
    if (retval == 0)
      return 0;
  }
# endif

  {
    uintptr_t pagesize;
    uintptr_t address;
    int /*bool*/ address_known_mapped;

    pagesize = getpagesize ();
    /* Avoid calling mquery with a NULL first argument, because this argument
       value has a specific meaning.  We know the NULL page is unmapped.  */
    address = pagesize;
    address_known_mapped = 0;
    for (;;)
      {
        /* Test whether the page at address is mapped.  */
        if (address_known_mapped
            || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
               == (void *) -1)
          {
            /* The page at address is mapped.
               This is the start of an interval.  */
            uintptr_t start = address;
            uintptr_t end;

            /* Find the end of the interval.  */
            end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
            if (end == (uintptr_t) (void *) -1)
              end = 0; /* wrap around */
            address = end;

            /* It's too complicated to find out about the flags.
               Just pass 0.  */
            if (callback (data, start, end, 0))
              break;

            if (address < pagesize) /* wrap around? */
              break;
          }
        /* Here we know that the page at address is unmapped.  */
        {
          uintptr_t query_size = pagesize;

          address += pagesize;

          /* Query larger and larger blocks, to get through the unmapped address
             range with few mquery() calls.  */
          for (;;)
            {
              if (2 * query_size > query_size)
                query_size = 2 * query_size;
              if (address + query_size - 1 < query_size) /* wrap around? */
                {
                  address_known_mapped = 0;
                  break;
                }
              if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                  == (void *) -1)
                {
                  /* Not all the interval [address .. address + query_size - 1]
                     is unmapped.  */
                  address_known_mapped = (query_size == pagesize);
                  break;
                }
              /* The interval [address .. address + query_size - 1] is
                 unmapped.  */
              address += query_size;
            }
          /* Reduce the query size again, to determine the precise size of the
             unmapped interval that starts at address.  */
          while (query_size > pagesize)
            {
              query_size = query_size / 2;
              if (address + query_size - 1 >= query_size)
                {
                  if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                      != (void *) -1)
                    {
                      /* The interval [address .. address + query_size - 1] is
                         unmapped.  */
                      address += query_size;
                      address_known_mapped = 0;
                    }
                  else
                    address_known_mapped = (query_size == pagesize);
                }
            }
          /* Here again query_size = pagesize, and
             either address + pagesize - 1 < pagesize, or
             mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
             So, the unmapped area ends at address.  */
        }
        if (address + pagesize - 1 < pagesize) /* wrap around? */
          break;
      }
    return 0;
  }

#else

  /* Not implemented.  */
  return -1;

#endif
}
Example No. 15
static void unexec_doit(int infd,int outfd)
   {
   int i,j,hpos,opos;
   extern int malloc_freezedry(void);
   struct region
      {
      struct region *next;
      unsigned long addr;
      unsigned long size;
      vm_prot_t prot;
      vm_prot_t mprot;
      } *regions=0,*cregion,**pregions;
   struct mach_header mh;
   struct segment_command *lc,*sp;
   struct symtab_command *st;
   struct section *sect;

   malloc_cookie=malloc_freezedry();
   
      {
      vm_task_t task=task_self();
      vm_address_t addr;
      vm_size_t size;
      vm_prot_t prot,mprot;
      vm_inherit_t inhe;
      boolean_t shrd;
      port_t name;
      vm_offset_t offset;
      
      for(addr=VM_MIN_ADDRESS,pregions=&regions;
          vm_region(task,&addr,&size,&prot,&mprot,
                    &inhe,&shrd,&name,&offset)==KERN_SUCCESS;
          addr += size)
         {
         (*pregions)=alloca(sizeof(struct region));
         (*pregions)->addr=addr;
         (*pregions)->size=size;
         (*pregions)->prot=prot;
         (*pregions)->mprot=mprot;
         (*pregions)->next=0;
         pregions=&((*pregions)->next);
         }
      }
   
   for(cregion=regions;cregion;cregion=cregion->next)
      while ((cregion->next) &&
             (cregion->next->addr==cregion->addr+cregion->size) &&
             (cregion->next->prot==cregion->prot) &&
             (cregion->next->mprot==cregion->mprot))
         {
         cregion->size += cregion->next->size;
         cregion->next = cregion->next->next;
         }

   mcopy(infd,-1,0,(unsigned long) &mh,sizeof(mh));
   lc=alloca(mh.sizeofcmds);
   mcopy(infd,-1,sizeof(mh),(unsigned long) lc,mh.sizeofcmds);
   
   for(pregions=&regions;*pregions;)
      {
      if (!((*pregions)->prot&VM_PROT_WRITE)
          || ((*pregions)->addr>=0x3000000))
         goto kill_region;
      
      for(sp=lc,i=0;
          i<mh.ncmds;
          i++,sp=(struct segment_command *)(((char *)sp)+sp->cmdsize))
         {
         unsigned long ob,oe;
         if (sp->cmd!=LC_SEGMENT||(strcmp(sp->segname,SEG_DATA)==0)) continue;
         ob=MAX((*pregions)->addr,sp->vmaddr);
         oe=MIN((*pregions)->addr+(*pregions)->size,sp->vmaddr+sp->vmsize);
         if (ob >= oe) continue;
         if (ob==(*pregions)->addr)
            if (oe==(*pregions)->addr+(*pregions)->size)
               {
               goto kill_region;
               }
            else
               {
               (*pregions)->addr=oe;
               (*pregions)->size-=(oe-ob);
               }
         else
            if (oe==(*pregions)->addr+(*pregions)->size)
               {
               (*pregions)->size-=(oe-ob);
               }
            else
               {
               cregion=alloca(sizeof(*cregion));
               cregion->addr=oe;
               cregion->size=((*pregions)->addr+(*pregions)->size)-oe;
               cregion->prot=(*pregions)->prot;
               cregion->mprot=(*pregions)->mprot;
               cregion->next=(*pregions)->next;
               (*pregions)->size=ob-(*pregions)->addr;
               (*pregions)->next=cregion;
               }
         }
      pregions=&((*pregions)->next);
      continue;
    kill_region:
      *pregions=(*pregions)->next;
      }

   for(sp=lc,i=mh.ncmds,hpos=sizeof(mh),opos=0;
       i>0;
       i--,sp=(struct segment_command *)(((char *)sp)+sp->cmdsize))
      switch (sp->cmd)
         {
       case LC_SEGMENT:
         if (strcmp(sp->segname,SEG_DATA)==0)
            {
            mh.ncmds--;
            j=sp->cmdsize;
            while (regions)
               {
               mcopy(-1,outfd,regions->addr,opos,regions->size);
               sp->cmd=LC_SEGMENT;
               sp->cmdsize=sizeof(*sp);
               strncpy(sp->segname,SEG_DATA,sizeof(sp->segname));
               sp->vmaddr=regions->addr;
               sp->vmsize=regions->size;
               sp->filesize=regions->size;
               sp->maxprot=regions->prot;
               sp->initprot=regions->mprot;
               sp->nsects=0;
               sp->flags=0;
               sp->fileoff=opos;
               opos+=sp->filesize;
               mcopy(-1,outfd,(unsigned long)sp,hpos,sp->cmdsize);
               hpos+=sp->cmdsize;
               mh.ncmds++;
               regions=regions->next;
               }
            sp->cmdsize=j;
            regions=0;
            }
         else if (strcmp(sp->segname,SEG_LINKEDIT)==0)
            {
            mh.ncmds--;
            }
         else
            {
            mcopy(infd,outfd,sp->fileoff,opos,sp->filesize);
            sect=(struct section *) (((char *)sp)+sizeof(*sp));
            for(j=0;j<sp->nsects;j++)
               {
               if (sect[j].offset!=0)
                  sect[j].offset=(sect[j].offset-sp->fileoff)+opos;
               if (sect[j].reloff!=0)
                  sect[j].reloff=(sect[j].reloff-sp->fileoff)+opos;
               }
            sp->fileoff=opos;
            opos+=sp->filesize;
            mcopy(-1,outfd,(unsigned long)sp,hpos,sp->cmdsize);
            hpos+=sp->cmdsize;
            }
	 break;
       case LC_SYMTAB:
         st=(struct symtab_command *)sp;
         
         mcopy(infd,outfd,st->symoff,opos,st->nsyms*sizeof(struct nlist));
         st->symoff=opos;
         opos+=sizeof(struct nlist)*st->nsyms;
         
         mcopy(infd,outfd,st->stroff,opos,st->strsize);
         ((struct symtab_command *)sp)->stroff=opos;
         opos+=((struct symtab_command *)sp)->strsize;
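         /* fall through: the adjusted symtab command is copied out by the default case below */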
       default:
         mcopy(-1,outfd,(unsigned long)sp,hpos,sp->cmdsize);
         hpos+=sp->cmdsize;
         }
   mh.sizeofcmds=hpos-sizeof(mh);
   mcopy(-1,outfd,(unsigned long) &mh,0,sizeof(mh));
   }
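unexec_doit leans on an mcopy helper that is not shown. From the call sites, mcopy(infd, outfd, src, dst, len) copies LEN bytes, where a descriptor of -1 means the corresponding offset is a memory address rather than a file offset. A hypothetical sketch along those lines (error handling and short read/write handling omitted):

#include <unistd.h>
#include <string.h>

/* Hypothetical mcopy: copy LEN bytes from (infd, src) to (outfd, dst).
   A descriptor of -1 means "memory", in which case the offset is used
   as an address; otherwise it is a position within that file. */
static void mcopy(int infd, int outfd,
                  unsigned long src, unsigned long dst, unsigned long len)
   {
   char buf[16384];
   unsigned long n;

   if (infd >= 0)
      lseek(infd, (off_t) src, SEEK_SET);
   if (outfd >= 0)
      lseek(outfd, (off_t) dst, SEEK_SET);

   while (len > 0)
      {
      n = (len > sizeof(buf)) ? sizeof(buf) : len;
      if (infd >= 0)
         read(infd, buf, n);
      else
         {
         memcpy(buf, (void *) src, n);
         src += n;
         }
      if (outfd >= 0)
         write(outfd, buf, n);
      else
         {
         memcpy((void *) dst, buf, n);
         dst += n;
         }
      len -= n;
      }
   }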