Example #1
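This appears to be the HP-UX flavor of LibreSSL's getentropy compatibility code (the pstat_getvminfo/pstat_getdynamic calls are the tell). When no kernel entropy source is available, the fallback repeatedly folds volatile system state into a SHA512 digest — timestamps, clocks, process and session IDs, load averages, signal masks, resource usage, and the addresses of deliberately prime-sized anonymous mappings — and fills the caller's buffer from successive digests.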
static int
getentropy_fallback(void *buf, size_t len)
{
	uint8_t results[SHA512_DIGEST_LENGTH];
	int save_errno = errno, e, pgs = sysconf(_SC_PAGESIZE), faster = 0, repeat;
	static int cnt;
	struct timespec ts;
	struct timeval tv;
	struct pst_vminfo pvi;
	struct pst_vm_status pvs;
	struct pst_dynamic pdy;
	struct rusage ru;
	sigset_t sigset;
	struct stat st;
	SHA512_CTX ctx;
	static pid_t lastpid;
	pid_t pid;
	size_t i, ii, m;
	char *p;

	pid = getpid();
	if (lastpid == pid) {
		faster = 1;
		repeat = 2;
	} else {
		faster = 0;
		lastpid = pid;
		repeat = REPEAT;
	}
	for (i = 0; i < len; ) {
		int j;
		SHA512_Init(&ctx);
		for (j = 0; j < repeat; j++) {
			HX((e = gettimeofday(&tv, NULL)) == -1, tv);
			if (e != -1) {
				cnt += (int)tv.tv_sec;
				cnt += (int)tv.tv_usec;
			}

			HX(pstat_getvminfo(&pvi, sizeof(pvi), 1, 0) != 1, pvi);
			HX(pstat_getprocvm(&pvs, sizeof(pvs), 0, 0) != 1, pvs);

			for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++)
				HX(clock_gettime(cl[ii], &ts) == -1, ts);

			HX((pid = getpid()) == -1, pid);
			HX((pid = getsid(pid)) == -1, pid);
			HX((pid = getppid()) == -1, pid);
			HX((pid = getpgid(0)) == -1, pid);
			HX((e = getpriority(0, 0)) == -1, e);

			if (pstat_getdynamic(&pdy, sizeof(pdy), 1, 0) != 1) {
				HD(errno);
			} else {
				HD(pdy.psd_avg_1_min);
				HD(pdy.psd_avg_5_min);
				HD(pdy.psd_avg_15_min);
			}

			if (!faster) {
				ts.tv_sec = 0;
				ts.tv_nsec = 1;
				(void) nanosleep(&ts, NULL);
			}

			HX(sigpending(&sigset) == -1, sigset);
			HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1,
			    sigset);

			HF(getentropy);	/* an addr in this library */
			HF(printf);		/* an addr in libc */
			p = (char *)&p;
			HD(p);		/* an addr on stack */
			p = (char *)&errno;
			HD(p);		/* the addr of errno */

			if (i == 0) {
				struct sockaddr_storage ss;
				struct statvfs stvfs;
				struct termios tios;
				socklen_t ssl;
				off_t off;

				/*
				 * Prime-sized mappings encourage fragmentation;
				 * thus exposing some address entropy.
				 */
				struct mm {
					size_t	npg;
					void	*p;
				} mm[] =	 {
					{ 17, MAP_FAILED }, { 3, MAP_FAILED },
					{ 11, MAP_FAILED }, { 2, MAP_FAILED },
					{ 5, MAP_FAILED }, { 3, MAP_FAILED },
					{ 7, MAP_FAILED }, { 1, MAP_FAILED },
					{ 57, MAP_FAILED }, { 3, MAP_FAILED },
					{ 131, MAP_FAILED }, { 1, MAP_FAILED },
				};

				for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
					HX(mm[m].p = mmap(NULL,
					    mm[m].npg * pgs,
					    PROT_READ|PROT_WRITE,
					    MAP_PRIVATE|MAP_ANON, -1,
					    (off_t)0), mm[m].p);
					if (mm[m].p != MAP_FAILED) {
						size_t mo;

						/* Touch some memory... */
						p = mm[m].p;
						mo = cnt %
						    (mm[m].npg * pgs - 1);
						p[mo] = 1;
						cnt += (int)((long)(mm[m].p)
						    / pgs);
					}

					/* Check cnts and times... */
					for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]);
					    ii++) {
						HX((e = clock_gettime(cl[ii],
						    &ts)) == -1, ts);
						if (e != -1)
							cnt += (int)ts.tv_nsec;
					}

					HX((e = getrusage(RUSAGE_SELF,
					    &ru)) == -1, ru);
					if (e != -1) {
						cnt += (int)ru.ru_utime.tv_sec;
						cnt += (int)ru.ru_utime.tv_usec;
					}
				}

				for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
					if (mm[m].p != MAP_FAILED)
						munmap(mm[m].p, mm[m].npg * pgs);
					mm[m].p = MAP_FAILED;
				}

				HX(stat(".", &st) == -1, st);
				HX(statvfs(".", &stvfs) == -1, stvfs);

				HX(stat("/", &st) == -1, st);
				HX(statvfs("/", &stvfs) == -1, stvfs);

				HX((e = fstat(0, &st)) == -1, st);
				if (e != -1) {	/* examine st only if fstat succeeded */
					if (S_ISREG(st.st_mode) ||
					    S_ISFIFO(st.st_mode) ||
					    S_ISSOCK(st.st_mode)) {
						HX(fstatvfs(0, &stvfs) == -1,
						    stvfs);
						HX((off = lseek(0, (off_t)0,
						    SEEK_CUR)) < 0, off);
					}
					if (S_ISCHR(st.st_mode)) {
						HX(tcgetattr(0, &tios) == -1,
						    tios);
					} else if (S_ISSOCK(st.st_mode)) {
						memset(&ss, 0, sizeof ss);
						ssl = sizeof(ss);
						HX(getpeername(0,
						    (void *)&ss, &ssl) == -1,
						    ss);
					}
				}

				HX((e = getrusage(RUSAGE_CHILDREN,
				    &ru)) == -1, ru);
				if (e != -1) {
					cnt += (int)ru.ru_utime.tv_sec;
					cnt += (int)ru.ru_utime.tv_usec;
				}
			} else {
				/* Subsequent hashes absorb previous result */
				HD(results);
			}

			HX((e = gettimeofday(&tv, NULL)) == -1, tv);
			if (e != -1) {
				cnt += (int)tv.tv_sec;
				cnt += (int)tv.tv_usec;
			}

			HD(cnt);
		}
		SHA512_Final(results, &ctx);
		memcpy((char *)buf + i, results, min(sizeof(results), len - i));
		i += min(sizeof(results), len - i);
	}
	explicit_bzero(&ctx, sizeof ctx);
	explicit_bzero(results, sizeof results);
	if (gotdata(buf, len) == 0) {
		errno = save_errno;
		return 0;		/* satisfied */
	}
	errno = EIO;
	return -1;
}
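The snippet relies on helpers defined elsewhere in the same file: the HX/HD/HF hashing macros, the cl clock list, REPEAT, min, and gotdata. Below is a minimal sketch of plausible definitions, modeled on LibreSSL's compat getentropy sources; treat it as a reconstruction rather than the exact upstream text. Note that the macros expand in a scope where a SHA512_CTX named ctx is live.

/* Sketch (reconstruction): helpers assumed by getentropy_fallback. */
#define REPEAT 5
#define min(a, b) (((a) < (b)) ? (a) : (b))

/* Fold a datum, or a function's address, into the in-scope SHA512 ctx. */
#define HD(x)	(SHA512_Update(&ctx, (char *)&(x), sizeof (x)))
#define HF(x)	(SHA512_Update(&ctx, (char *)&(x), sizeof (void *)))

/* HX(failed, v): hash errno when the call failed, else hash its result. */
#define HX(a, b) \
	do { \
		if ((a)) \
			HD(errno); \
		else \
			HD(b); \
	} while (0)

/* Clocks sampled by the clock_gettime() loops. */
static int cl[] = {
	CLOCK_REALTIME,
#ifdef CLOCK_MONOTONIC
	CLOCK_MONOTONIC,
#endif
};

/* Sanity check: reject an output buffer that came back all zeros. */
static int
gotdata(char *buf, size_t len)
{
	char any_set = 0;
	size_t i;

	for (i = 0; i < len; ++i)
		any_set |= buf[i];
	return (any_set == 0) ? -1 : 0;
}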
Example #2
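This is a gnulib-style vma_iterate (the vma-iter module, or a close variant). It enumerates the virtual memory areas of the current process and calls callback once per area with its start address, end address, and VMA_PROT_* flags; a nonzero return from the callback stops the walk. Each platform gets its own strategy: /proc parsing or sysctl() on Linux and the BSDs, the PIOCNMAP/PIOCMAP ioctls on IRIX, OSF/1, and older Solaris, reading /proc/<pid>/map on newer Solaris, pstat_getprocvm on HP-UX, Mach vm_region on macOS and the Hurd, VirtualQuery on Windows, get_next_area_info on BeOS/Haiku, and an mquery probe loop on OpenBSD. The mmap/munmap dance around the prmap_t buffer exists because malloc could itself call mmap and perturb the map being read; the helper mapping is then subtracted back out of the reported intervals.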
int
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_proc (callback, data);
# else
  /* On the other platforms, try the /proc approach first, and sysctl()
     as a fallback.  */
  int retval = vma_iterate_proc (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_bsd (callback, data);
# endif

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

#elif defined __sun /* Solaris */

  /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
     _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
                                  32-bit   64-bit
         _STRUCTURED_PROC = 0       32       56
         _STRUCTURED_PROC = 1       96      104
     Therefore, if the include files provide the newer API, prmap_t has
     the bigger size, and thus you MUST use the newer API.  And if the
     include files provide the older API, prmap_t has the smaller size,
     and thus you MUST use the older API.  */

# if defined PIOCNMAP && defined PIOCMAP
  /* We must use the older /proc interface.  */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# else
  /* We must use the newer /proc interface.
     Documentation:
     https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
     The contents of /proc/<pid>/map consists of records of type
     prmap_t.  These are different in 32-bit and 64-bit processes,
     but here we are fortunately accessing only the current process.  */

  size_t pagesize;
  char fnamebuf[6+10+4+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* maps_end;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  {
    struct stat statbuf;
    if (fstat (fd, &statbuf) < 0)
      goto fail2;
    nmaps = statbuf.st_size / sizeof (prmap_t);
  }

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  /* Read up to memneed bytes from fd into maps.  */
  {
    size_t remaining = memneed;
    char *ptr = (char *) maps;

    do
      {
        ssize_t nread = read (fd, ptr, remaining);
        if (nread < 0)
          {
            if (errno == EINTR)
              continue;
            goto fail1;
          }
        if (nread == 0)
          /* EOF */
          break;
        ptr += nread;
        remaining -= nread;
      }
      }
    while (remaining > 0);

    nmaps = (memneed - remaining) / sizeof (prmap_t);
    maps_end = maps + nmaps;
  }

  for (mp = maps; mp < maps_end; mp++)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# endif

#elif HAVE_PSTAT_GETPROCVM /* HP-UX */

  unsigned long pagesize = getpagesize ();
  int i;

  for (i = 0; ; i++)
    {
      struct pst_vm_status info;
      int ret = pstat_getprocvm (&info, sizeof (info), 0, i);
      if (ret < 0)
        return -1;
      if (ret == 0)
        break;
      {
        unsigned long start = info.pst_vaddr;
        unsigned long end = start + info.pst_length * pagesize;
        unsigned int flags = 0;
        if (info.pst_permission & PS_PROT_READ)
          flags |= VMA_PROT_READ;
        if (info.pst_permission & PS_PROT_WRITE)
          flags |= VMA_PROT_WRITE;
        if (info.pst_permission & PS_PROT_EXECUTE)
          flags |= VMA_PROT_EXECUTE;

        if (callback (data, start, end, flags))
          break;
      }
    }
  return 0;

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name = MACH_PORT_NULL; /* stays null if the query fails */
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined __GNU__ /* GNU/Hurd */

  /* The Hurd has a /proc/self/maps that looks like the Linux one, but it
     lacks the VMAs created through anonymous mmap.  Therefore use the Mach
     API.
     Documentation:
     https://www.gnu.org/software/hurd/gnumach-doc/Memory-Attributes.html */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = 0;; address += size)
    {
      vm_prot_t protection;
      vm_prot_t max_protection;
      vm_inherit_t inheritance;
      boolean_t shared;
      memory_object_name_t object_name;
      vm_offset_t offset;
      unsigned int flags;

      if (!(vm_region (task, &address, &size, &protection, &max_protection,
                         &inheritance, &shared, &object_name, &offset)
            == KERN_SUCCESS))
        break;
      mach_port_deallocate (task, object_name);
      flags = 0;
      if (protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined _WIN32 || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  uintptr_t address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            uintptr_t start, end;
            unsigned int flags;

            start = (uintptr_t)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (uintptr_t)info.BaseAddress + info.RegionSize;
    }
  return 0;

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }
  return 0;

#elif HAVE_MQUERY /* OpenBSD */

# if defined __OpenBSD__
  /* Try sysctl() first.  It is more efficient than the mquery() loop below
     and also provides the flags.  */
  {
    int retval = vma_iterate_bsd (callback, data);
    if (retval == 0)
      return 0;
  }
# endif

  {
    uintptr_t pagesize;
    uintptr_t address;
    int /*bool*/ address_known_mapped;

    pagesize = getpagesize ();
    /* Avoid calling mquery with a NULL first argument, because this argument
       value has a specific meaning.  We know the NULL page is unmapped.  */
    address = pagesize;
    address_known_mapped = 0;
    for (;;)
      {
        /* Test whether the page at address is mapped.  */
        if (address_known_mapped
            || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
               == (void *) -1)
          {
            /* The page at address is mapped.
               This is the start of an interval.  */
            uintptr_t start = address;
            uintptr_t end;

            /* Find the end of the interval.  */
            end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
            if (end == (uintptr_t) (void *) -1)
              end = 0; /* wrap around */
            address = end;

            /* It's too complicated to find out about the flags.
               Just pass 0.  */
            if (callback (data, start, end, 0))
              break;

            if (address < pagesize) /* wrap around? */
              break;
          }
        /* Here we know that the page at address is unmapped.  */
        {
          uintptr_t query_size = pagesize;

          address += pagesize;

          /* Query larger and larger blocks, to get through the unmapped address
             range with few mquery() calls.  */
          for (;;)
            {
              if (2 * query_size > query_size)
                query_size = 2 * query_size;
              if (address + query_size - 1 < query_size) /* wrap around? */
                {
                  address_known_mapped = 0;
                  break;
                }
              if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                  == (void *) -1)
                {
                  /* Not all the interval [address .. address + query_size - 1]
                     is unmapped.  */
                  address_known_mapped = (query_size == pagesize);
                  break;
                }
              /* The interval [address .. address + query_size - 1] is
                 unmapped.  */
              address += query_size;
            }
          /* Reduce the query size again, to determine the precise size of the
             unmapped interval that starts at address.  */
          while (query_size > pagesize)
            {
              query_size = query_size / 2;
              if (address + query_size - 1 >= query_size)
                {
                  if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                      != (void *) -1)
                    {
                      /* The interval [address .. address + query_size - 1] is
                         unmapped.  */
                      address += query_size;
                      address_known_mapped = 0;
                    }
                  else
                    address_known_mapped = (query_size == pagesize);
                }
            }
          /* Here again query_size = pagesize, and
             either address + pagesize - 1 < pagesize, or
             mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
             So, the unmapped area ends at address.  */
        }
        if (address + pagesize - 1 < pagesize) /* wrap around? */
          break;
      }
    return 0;
  }

#else

  /* Not implemented.  */
  return -1;

#endif
}
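For reference, here is a hypothetical driver, assuming gnulib's vma-iter.h declares the callback with uintptr_t bounds and the VMA_PROT_* flags used above. It prints each region of the current process in /proc/self/maps style; returning nonzero from the callback would end the walk early.

#include <stdio.h>
#include <stdint.h>
#include "vma-iter.h"  /* assumed header: vma_iterate() and VMA_PROT_* */

/* Print one region; return 0 so the iteration continues. */
static int
print_vma (void *data, uintptr_t start, uintptr_t end, unsigned int flags)
{
  (void) data;  /* unused in this example */
  printf ("0x%lx-0x%lx %c%c%c\n",
          (unsigned long) start, (unsigned long) end,
          (flags & VMA_PROT_READ) ? 'r' : '-',
          (flags & VMA_PROT_WRITE) ? 'w' : '-',
          (flags & VMA_PROT_EXECUTE) ? 'x' : '-');
  return 0;
}

int
main (void)
{
  return vma_iterate (print_vma, NULL) == 0 ? 0 : 1;
}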