Example #1
void
MemoryBarMenuItem::BarUpdate()
{
	area_info areaInfo;
	ssize_t cookie = 0;
	int64 lram_size = 0;
	int64 lwram_size = 0;
	bool exists = false;

	while (get_next_area_info(fTeamID, &cookie, &areaInfo) == B_OK) {
		exists = true;
		lram_size += areaInfo.ram_size;

		// TODO: this won't work this way anymore under Haiku!
//		int zone = (int (areaInfo.address) & 0xf0000000) >> 24;
		if ((areaInfo.protection & B_WRITE_AREA) != 0)
			lwram_size += areaInfo.ram_size;
//			&& (zone & 0xf0) != 0xA0			// Exclude media buffers
//			&& (fTeamID != gAppServerTeamID || zone != 0x90))	// Exclude app_server side of bitmaps
	}
	if (!exists) {
		team_info info;
		exists = get_team_info(fTeamID, &info) == B_OK;
	}
	if (exists) {
		fWriteMemory = lwram_size / 1024;
		fAllMemory = lram_size / 1024;
		DrawBar(false);
	} else
		fWriteMemory = -1;
}
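The loop above shows the canonical cookie pattern: a cursor initialized to zero that get_next_area_info() advances on every call. Below is a minimal self-contained sketch of the same pattern, assuming only Haiku's <OS.h> (where the cookie is an ssize_t); it sums the resident size of the calling team:

#include <OS.h>
#include <stdio.h>

int
main()
{
	area_info info;
	ssize_t cookie = 0;
	uint64 ramSize = 0;

	// Walk every area of the calling team; the cookie tracks progress.
	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK)
		ramSize += info.ram_size;

	printf("resident: %" B_PRIu64 " KiB\n", ramSize / 1024);
	return 0;
}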
Example #2
static void
list_areas_for_id(team_id id)
{
	int32 cookie = 0;
	team_info teamInfo;
	area_info areaInfo;

	if (id != 1 && get_team_info(id, &teamInfo) == B_BAD_TEAM_ID) {
		printf("\nteam %ld unknown\n", id);
		return;
	} else if (id == 1)
		strcpy(teamInfo.args, "KERNEL SPACE");

	printf("\n%s (team %ld)\n", teamInfo.args, id);
	printf("   ID                             name   address     size   alloc. #-cow  #-in #-out\n");
	printf("------------------------------------------------------------------------------------\n");

	while (get_next_area_info(id, &cookie, &areaInfo) == B_OK) {
		printf("%5ld %32s  %08lx %8lx %8lx %5ld %5ld %5ld\n",
			areaInfo.area,
			areaInfo.name,
//			(addr_t)areaInfo.address,
			(uint32)areaInfo.address,
			areaInfo.size,
			areaInfo.ram_size,
			areaInfo.copy_count,
			areaInfo.in_count,
			areaInfo.out_count);
	}
}
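A hypothetical driver (not part of the original listing) would enumerate every team with get_next_team_info() and hand each ID to the function above:

	int32 teamCookie = 0;
	team_info teamInfo;

	// Dump the areas of every team on the system, kernel included.
	while (get_next_team_info(&teamCookie, &teamInfo) == B_OK)
		list_areas_for_id(teamInfo.team);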
Example #3
void
__set_stack_protection(void)
{
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU) {
		area_info info;
		ssize_t cookie = 0;

		while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
			if ((info.protection & B_STACK_AREA) != 0) {
				_kern_set_area_protection(info.area,
					B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA | B_STACK_AREA);
			}
		}
	}
}
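To see the effect, a caller could inspect its own stack area afterwards. This is only a sketch using the standard <OS.h> calls area_for() and get_area_info(); it is not part of the runtime loader:

	area_info info;
	int onStack = 0;					// any local variable lives on the stack
	area_id stackArea = area_for(&onStack);

	if (stackArea >= 0 && get_area_info(stackArea, &info) == B_OK
		&& (info.protection & B_EXECUTE_AREA) != 0) {
		// the gcc2 ABI compatibility path made this stack executable
	}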
Example #4
status_t
LocalDebuggerInterface::GetAreaInfos(BObjectList<AreaInfo>& infos)
{
	// get the team's areas
	area_info areaInfo;
	ssize_t cookie = 0;
	while (get_next_area_info(fTeamID, &cookie, &areaInfo) == B_OK) {
		AreaInfo* info = new(std::nothrow) AreaInfo(fTeamID, areaInfo.area,
			areaInfo.name, (addr_t)areaInfo.address, areaInfo.size,
			areaInfo.ram_size, areaInfo.lock, areaInfo.protection);
		if (info == NULL || !infos.AddItem(info)) {
			delete info;
			return B_NO_MEMORY;
		}
	}

	return B_OK;
}
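A usage sketch; the interface pointer and the AreaInfo accessors (AreaID(), Name()) are assumptions about the surrounding Debugger code, not confirmed API. The list should own its items so that entries added before a B_NO_MEMORY failure are still freed:

	BObjectList<AreaInfo> infos(20, true);	// true: the list deletes its items
	status_t error = fDebuggerInterface->GetAreaInfos(infos);
	if (error != B_OK)
		return error;

	for (int32 i = 0; AreaInfo* info = infos.ItemAt(i); i++) {
		printf("area %" B_PRId32 ": %s\n", info->AreaID(),
			info->Name().String());
	}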
Example #5
/**
* BeOS entropy poll
*/
void BeOS_EntropySource::poll(Entropy_Accumulator& accum)
   {
   system_info info_sys;
   get_system_info(&info_sys);
   accum.add(info_sys, BOTAN_ENTROPY_ESTIMATE_SYSTEM_DATA);

   key_info info_key; // current state of the keyboard
   get_key_info(&info_key);
   accum.add(info_key, BOTAN_ENTROPY_ESTIMATE_SYSTEM_DATA);

   team_info info_team;
   int32 cookie_team = 0;

   while(get_next_team_info(&cookie_team, &info_team) == B_OK)
      {
      accum.add(info_team, BOTAN_ENTROPY_ESTIMATE_SYSTEM_DATA);

      team_id id = info_team.team;
      int32 cookie = 0;

      thread_info info_thr;
      while(get_next_thread_info(id, &cookie, &info_thr) == B_OK)
         accum.add(info_thr, BOTAN_ENTROPY_ESTIMATE_SYSTEM_DATA);

      cookie = 0;
      image_info info_img;
      while(get_next_image_info(id, &cookie, &info_img) == B_OK)
         accum.add(info_img, BOTAN_ENTROPY_ESTIMATE_SYSTEM_DATA);

      cookie = 0;
      sem_info info_sem;
      while(get_next_sem_info(id, &cookie, &info_sem) == B_OK)
         accum.add(info_sem, BOTAN_ENTROPY_ESTIMATE_SYSTEM_DATA);

      cookie = 0;
      area_info info_area;
      while(get_next_area_info(id, &cookie, &info_area) == B_OK)
         accum.add(info_area, BOTAN_ENTROPY_ESTIMATE_SYSTEM_DATA);

      if(accum.polling_finished())
         break;
      }
   }
Example #6
void
beos_backend_startup(void)
{
	char		nom[50];
	char		nvnom[50];
	area_info	inf;
	int32		cook = 0;

	/* Perform the remapping process */

	/* Loop in all our team areas */
	while (get_next_area_info(0, &cook, &inf) == B_OK)
	{
		strcpy(nom, inf.name);
		strcpy(nvnom, inf.name);
		nom[9] = 0;
		nvnom[5] = 'i';
		/* Is it a SysV area? */
		if (!strcmp(nom, "SYSV_IPC_"))
		{
			void	   *area_address;
			area_id		area_postmaster;

			/* Get the area address */
			area_address = inf.address;
			/* Destroy the bad area */
			delete_area(inf.area);
			/* Find the postmaster area */
			area_postmaster = find_area(inf.name);
			/* Compute new area name */
			sprintf(nvnom, "SYSV_IPC %d", area_postmaster);
			/* Clone it at the exact same address */
			clone_area(nvnom, &area_address, B_CLONE_ADDRESS, B_READ_AREA | B_WRITE_AREA, area_postmaster);
		}
	}

	/* remapping done release semaphore to allow other backend to startup */

	release_sem(beos_shm_sem);
}
Example #7
void
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ /* || defined __CYGWIN__ */

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/self/maps") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start and end.  */
      if (!(rof_scanf_lx (&rof, &start) >= 0
            && rof_getchar (&rof) == '-'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __FreeBSD__ || defined __NetBSD__

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/curproc/map") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &start) >= 0))
        break;
      while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
        rof_getchar (&rof);
      /* Then end.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return;

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }

#elif (defined _WIN32 || defined __WIN32__) || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  unsigned long address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            unsigned long start, end;
            unsigned int flags;

            start = (unsigned long)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (unsigned long)info.BaseAddress + info.RegionSize;
    }

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }

#elif HAVE_MQUERY /* OpenBSD */

  uintptr_t pagesize;
  uintptr_t address;
  int /*bool*/ address_known_mapped;

  pagesize = getpagesize ();
  /* Avoid calling mquery with a NULL first argument, because this argument
     value has a specific meaning.  We know the NULL page is unmapped.  */
  address = pagesize;
  address_known_mapped = 0;
  for (;;)
    {
      /* Test whether the page at address is mapped.  */
      if (address_known_mapped
          || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
             == (void *) -1)
        {
          /* The page at address is mapped.
             This is the start of an interval.  */
          uintptr_t start = address;
          uintptr_t end;

          /* Find the end of the interval.  */
          end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
          if (end == (uintptr_t) (void *) -1)
            end = 0; /* wrap around */
          address = end;

          /* It's too complicated to find out about the flags.  Just pass 0.  */
          if (callback (data, start, end, 0))
            break;

          if (address < pagesize) /* wrap around? */
            break;
        }
      /* Here we know that the page at address is unmapped.  */
      {
        uintptr_t query_size = pagesize;

        address += pagesize;

        /* Query larger and larger blocks, to get through the unmapped address
           range with few mquery() calls.  */
        for (;;)
          {
            if (2 * query_size > query_size)
              query_size = 2 * query_size;
            if (address + query_size - 1 < query_size) /* wrap around? */
              {
                address_known_mapped = 0;
                break;
              }
            if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                == (void *) -1)
              {
                /* Not all the interval [address .. address + query_size - 1]
                   is unmapped.  */
                address_known_mapped = (query_size == pagesize);
                break;
              }
            /* The interval [address .. address + query_size - 1] is
               unmapped.  */
            address += query_size;
          }
        /* Reduce the query size again, to determine the precise size of the
           unmapped interval that starts at address.  */
        while (query_size > pagesize)
          {
            query_size = query_size / 2;
            if (address + query_size - 1 >= query_size)
              {
                if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                    != (void *) -1)
                  {
                    /* The interval [address .. address + query_size - 1] is
                       unmapped.  */
                    address += query_size;
                    address_known_mapped = 0;
                  }
                else
                  address_known_mapped = (query_size == pagesize);
              }
          }
        /* Here again query_size = pagesize, and
           either address + pagesize - 1 < pagesize, or
           mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
           So, the unmapped area ends at address.  */
      }
      if (address + pagesize - 1 < pagesize) /* wrap around? */
        break;
    }

#endif
}
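A usage sketch for this iterator: the callback receives one VMA's bounds and protection flags per call and aborts the walk by returning nonzero. Only names already used above (vma_iterate, the VMA_PROT_* flags) plus <stdio.h> are assumed:

static int
print_vma (void *data, unsigned long start, unsigned long end,
           unsigned int flags)
{
  printf ("0x%08lx-0x%08lx %c%c%c\n", start, end,
          (flags & VMA_PROT_READ) ? 'r' : '-',
          (flags & VMA_PROT_WRITE) ? 'w' : '-',
          (flags & VMA_PROT_EXECUTE) ? 'x' : '-');
  return 0; /* keep iterating; nonzero would stop the walk */
}

/* somewhere in the program: */
vma_iterate (print_vma, NULL);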
Example #8
void get_memusage(uint64_t * rss, uint64_t * vsz) {

#ifdef UNBIT
	*vsz = syscall(356);
#elif defined(__linux__)
	FILE *procfile;
	int i;
	procfile = fopen("/proc/self/stat", "r");
	if (procfile) {
		i = fscanf(procfile, "%*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %llu %lld", (unsigned long long *) vsz, (unsigned long long *) rss);
		if (i != 2) {
			uwsgi_log("warning: invalid record in /proc/self/stat\n");
		}
		fclose(procfile);
	}
	*rss = *rss * uwsgi.page_size;
#elif defined (__sun__)
	psinfo_t info;
	int procfd;

	procfd = open("/proc/self/psinfo", O_RDONLY);
	if (procfd >= 0) {
		if (read(procfd, (char *) &info, sizeof(info)) > 0) {
			*rss = (uint64_t) info.pr_rssize * 1024;
			*vsz = (uint64_t) info.pr_size * 1024;
		}
		close(procfd);
	}

#elif defined(__APPLE__)
	/* darwin documentation says that the values are in pages, but they are bytes! */
	struct task_basic_info t_info;
	mach_msg_type_number_t t_size = sizeof(struct task_basic_info);

	if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t) & t_info, &t_size) == KERN_SUCCESS) {
		*rss = t_info.resident_size;
		*vsz = t_info.virtual_size;
	}
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || defined(__OpenBSD__)
	kvm_t *kv;
	int cnt;

#if defined(__FreeBSD__)
	kv = kvm_open(NULL, "/dev/null", NULL, O_RDONLY, NULL);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	kv = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, NULL);
#else
	kv = kvm_open(NULL, NULL, NULL, O_RDONLY, NULL);
#endif
	if (kv) {
#if defined(__FreeBSD__) || defined(__DragonFly__)

		struct kinfo_proc *kproc;
		kproc = kvm_getprocs(kv, KERN_PROC_PID, uwsgi.mypid, &cnt);
		if (kproc && cnt > 0) {
			*vsz = kproc->ki_size;
			*rss = kproc->ki_rssize * uwsgi.page_size;
		}
#elif defined(UWSGI_NEW_OPENBSD)
		struct kinfo_proc *kproc; 
		kproc = kvm_getprocs(kv, KERN_PROC_PID, uwsgi.mypid, sizeof(struct kinfo_proc), &cnt);
		if (kproc && cnt > 0) { 
			*vsz = (kproc->p_vm_dsize + kproc->p_vm_ssize + kproc->p_vm_tsize) * uwsgi.page_size;
			*rss = kproc->p_vm_rssize * uwsgi.page_size;
		}
#elif defined(__NetBSD__) || defined(__OpenBSD__)
		struct kinfo_proc2 *kproc2;

		kproc2 = kvm_getproc2(kv, KERN_PROC_PID, uwsgi.mypid, sizeof(struct kinfo_proc2), &cnt);
		if (kproc2 && cnt > 0) {
#ifdef __OpenBSD__
			*vsz = (kproc2->p_vm_dsize + kproc2->p_vm_ssize + kproc2->p_vm_tsize) * uwsgi.page_size;
#else
			*vsz = kproc2->p_vm_msize * uwsgi.page_size;
#endif
			*rss = kproc2->p_vm_rssize * uwsgi.page_size;
		}
#endif

		kvm_close(kv);
	}
#elif defined(__HAIKU__)
	area_info ai;
	int32 cookie = 0;	/* the iteration cookie must start at zero */

	*vsz = 0;
	*rss = 0;
	while (get_next_area_info(0, &cookie, &ai) == B_OK) {
		*vsz += ai.ram_size;
		if ((ai.protection & B_WRITE_AREA) != 0) {
			*rss += ai.ram_size;
		}
	}
#endif

}
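Callers would best zero both outputs first, since several branches leave them untouched when /proc or the kvm handle cannot be opened; a minimal usage sketch:

	uint64_t rss = 0, vsz = 0;

	get_memusage(&rss, &vsz);
	uwsgi_log("rss: %llu bytes, vsz: %llu bytes\n",
		(unsigned long long) rss, (unsigned long long) vsz);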
Example #9
int
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */

# if defined __FreeBSD__
  /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     function vma_iterate_proc does not return the virtual memory areas that
     were created by anonymous mmap.  See
     <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     So use vma_iterate_proc only as a fallback.  */
  int retval = vma_iterate_bsd (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_proc (callback, data);
# else
  /* On the other platforms, try the /proc approach first, and the sysctl()
     as a fallback.  */
  int retval = vma_iterate_proc (callback, data);
  if (retval == 0)
    return 0;

  return vma_iterate_bsd (callback, data);
# endif

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

#elif defined __sun /* Solaris */

  /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
     _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
                                  32-bit   64-bit
         _STRUCTURED_PROC = 0       32       56
         _STRUCTURED_PROC = 1       96      104
     Therefore, if the include files provide the newer API, prmap_t has
     the bigger size, and thus you MUST use the newer API.  And if the
     include files provide the older API, prmap_t has the smaller size,
     and thus you MUST use the older API.  */

# if defined PIOCNMAP && defined PIOCMAP
  /* We must use the older /proc interface.  */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# else
  /* We must use the newer /proc interface.
     Documentation:
     https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
     The contents of /proc/<pid>/map consists of records of type
     prmap_t.  These are different in 32-bit and 64-bit processes,
     but here we are fortunately accessing only the current process.  */

  size_t pagesize;
  char fnamebuf[6+10+4+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
#  if HAVE_MAP_ANONYMOUS
#   define zero_fd -1
#   define map_flags MAP_ANONYMOUS
#  else /* Solaris <= 7 */
  int zero_fd;
#   define map_flags 0
#  endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* maps_end;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
  memcpy (fname, "/map", 4 + 1);
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return -1;

  {
    struct stat statbuf;
    if (fstat (fd, &statbuf) < 0)
      goto fail2;
    nmaps = statbuf.st_size / sizeof (prmap_t);
  }

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
#  if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
#  endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
#  if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
#  endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  /* Read up to memneed bytes from fd into maps.  */
  {
    size_t remaining = memneed;
    size_t total_read = 0;
    char *ptr = (char *) maps;

    do
      {
        size_t nread = read (fd, ptr, remaining);
        if (nread == (size_t)-1)
          {
            if (errno == EINTR)
              continue;
            goto fail1;
          }
        if (nread == 0)
          /* EOF */
          break;
        total_read += nread;
        ptr += nread;
        remaining -= nread;
      }
    while (remaining > 0);

    nmaps = (memneed - remaining) / sizeof (prmap_t);
    maps_end = maps + nmaps;
  }

  for (mp = maps; mp < maps_end; mp++)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return 0;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return -1;

# endif

#elif HAVE_PSTAT_GETPROCVM /* HP-UX */

  unsigned long pagesize = getpagesize ();
  int i;

  for (i = 0; ; i++)
    {
      struct pst_vm_status info;
      int ret = pstat_getprocvm (&info, sizeof (info), 0, i);
      if (ret < 0)
        return -1;
      if (ret == 0)
        break;
      {
        unsigned long start = info.pst_vaddr;
        unsigned long end = start + info.pst_length * pagesize;
        unsigned int flags = 0;
        if (info.pst_permission & PS_PROT_READ)
          flags |= VMA_PROT_READ;
        if (info.pst_permission & PS_PROT_WRITE)
          flags |= VMA_PROT_WRITE;
        if (info.pst_permission & PS_PROT_EXECUTE)
          flags |= VMA_PROT_EXECUTE;

        if (callback (data, start, end, flags))
          break;
      }
    }

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined __GNU__ /* GNU/Hurd */

  /* The Hurd has a /proc/self/maps that looks like the Linux one, but it
     lacks the VMAs created through anonymous mmap.  Therefore use the Mach
     API.
     Documentation:
     https://www.gnu.org/software/hurd/gnumach-doc/Memory-Attributes.html */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = 0;; address += size)
    {
      vm_prot_t protection;
      vm_prot_t max_protection;
      vm_inherit_t inheritance;
      boolean_t shared;
      memory_object_name_t object_name;
      vm_offset_t offset;
      unsigned int flags;

      if (!(vm_region (task, &address, &size, &protection, &max_protection,
                         &inheritance, &shared, &object_name, &offset)
            == KERN_SUCCESS))
        break;
      mach_port_deallocate (task, object_name);
      flags = 0;
      if (protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }
  return 0;

#elif defined _WIN32 || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  uintptr_t address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            uintptr_t start, end;
            unsigned int flags;

            start = (uintptr_t)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (uintptr_t)info.BaseAddress + info.RegionSize;
    }
  return 0;

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }
  return 0;

#elif HAVE_MQUERY /* OpenBSD */

# if defined __OpenBSD__
  /* Try sysctl() first.  It is more efficient than the mquery() loop below
     and also provides the flags.  */
  {
    int retval = vma_iterate_bsd (callback, data);
    if (retval == 0)
      return 0;
  }
# endif

  {
    uintptr_t pagesize;
    uintptr_t address;
    int /*bool*/ address_known_mapped;

    pagesize = getpagesize ();
    /* Avoid calling mquery with a NULL first argument, because this argument
       value has a specific meaning.  We know the NULL page is unmapped.  */
    address = pagesize;
    address_known_mapped = 0;
    for (;;)
      {
        /* Test whether the page at address is mapped.  */
        if (address_known_mapped
            || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
               == (void *) -1)
          {
            /* The page at address is mapped.
               This is the start of an interval.  */
            uintptr_t start = address;
            uintptr_t end;

            /* Find the end of the interval.  */
            end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
            if (end == (uintptr_t) (void *) -1)
              end = 0; /* wrap around */
            address = end;

            /* It's too complicated to find out about the flags.
               Just pass 0.  */
            if (callback (data, start, end, 0))
              break;

            if (address < pagesize) /* wrap around? */
              break;
          }
        /* Here we know that the page at address is unmapped.  */
        {
          uintptr_t query_size = pagesize;

          address += pagesize;

          /* Query larger and larger blocks, to get through the unmapped address
             range with few mquery() calls.  */
          for (;;)
            {
              if (2 * query_size > query_size)
                query_size = 2 * query_size;
              if (address + query_size - 1 < query_size) /* wrap around? */
                {
                  address_known_mapped = 0;
                  break;
                }
              if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                  == (void *) -1)
                {
                  /* Not all the interval [address .. address + query_size - 1]
                     is unmapped.  */
                  address_known_mapped = (query_size == pagesize);
                  break;
                }
              /* The interval [address .. address + query_size - 1] is
                 unmapped.  */
              address += query_size;
            }
          /* Reduce the query size again, to determine the precise size of the
             unmapped interval that starts at address.  */
          while (query_size > pagesize)
            {
              query_size = query_size / 2;
              if (address + query_size - 1 >= query_size)
                {
                  if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                      != (void *) -1)
                    {
                      /* The interval [address .. address + query_size - 1] is
                         unmapped.  */
                      address += query_size;
                      address_known_mapped = 0;
                    }
                  else
                    address_known_mapped = (query_size == pagesize);
                }
            }
          /* Here again query_size = pagesize, and
             either address + pagesize - 1 < pagesize, or
             mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
             So, the unmapped area ends at address.  */
        }
        if (address + pagesize - 1 < pagesize) /* wrap around? */
          break;
      }
    return 0;
  }

#else

  /* Not implemented.  */
  return -1;

#endif
}
Example #10
void slowPoll( void )
	{
	RANDOM_STATE randomState;
	BYTE buffer[ RANDOM_BUFSIZE + 8 ];
	key_info keyInfo;
	team_info teami;
	thread_info threadi;
	area_info areai;
	port_info porti;
	sem_info semi;
	image_info imagei;
	double temperature;
	int32 devID, cookie;
	int fd, value;

	if( ( fd = open( "/dev/urandom", O_RDONLY ) ) >= 0 )
		{
		MESSAGE_DATA msgData;
		BYTE buffer[ ( DEVRANDOM_BITS / 8 ) + 8 ];
		static const int quality = 100;

		/* Read data from /dev/urandom, which won't block (although the
		   quality of the noise is lower). */
		read( fd, buffer, DEVRANDOM_BITS / 8 );
		setMessageData( &msgData, buffer, DEVRANDOM_BITS / 8 );
		krnlSendMessage( SYSTEM_OBJECT_HANDLE, IMESSAGE_SETATTRIBUTE_S,
						 &msgData, CRYPT_IATTRIBUTE_ENTROPY );
		zeroise( buffer, DEVRANDOM_BITS / 8 );
		close( fd );

		krnlSendMessage( SYSTEM_OBJECT_HANDLE, IMESSAGE_SETATTRIBUTE,
						 ( MESSAGE_CAST ) &quality,
						 CRYPT_IATTRIBUTE_ENTROPY_QUALITY );
		return;
		}

	initRandomData( randomState, buffer, RANDOM_BUFSIZE );

	/* Get the state of all keys on the keyboard and various other
	   system states */
#if 0	/* See comment at start */
	if( get_key_info( &keyInfo ) == B_NO_ERROR )
		addRandomData( randomState, &keyInfo, sizeof( key_info ) );
#endif /* 0 */
	value = is_computer_on();	/* Returns 1 if computer is on */
	addRandomValue( randomState, value );
	temperature = is_computer_on_fire();	/* MB temp. if on fire */
	addRandomData( randomState, &temperature, sizeof( double ) );

	/* Get information on all running teams (thread groups, ie applications).
	   This returns the team ID, number of threads, images, and areas,
	   debugger port and thread ID, program args, and uid and gid */
	cookie = 0;
	while( get_next_team_info( &cookie, &teami ) == B_NO_ERROR )
		addRandomData( randomState, &teami, sizeof( teami ) );

	/* Get information on all running threads.  This returns the thread ID,
	   team ID, thread name and state (eg running, suspended, asleep,
	   blocked), the thread priority, elapsed user and kernel time, and
	   thread stack information */
	cookie = 0;
	while( get_next_thread_info( 0, &cookie, &threadi ) == B_NO_ERROR )
		{
		addRandomValue( randomState, has_data( threadi.thread ) );
		addRandomData( randomState, &threadi, sizeof( threadi ) );
		}

	/* Get information on all memory areas (chunks of virtual memory).  This
	   returns the area ID, name, size, locking scheme and protection bits,
	   ID of the owning team, start address, number of resident bytes, copy-
	   on-write count, and number of pages swapped in and out */
	cookie = 0;
	while( get_next_area_info( 0, &cookie, &areai ) == B_NO_ERROR )
		addRandomData( randomState, &areai, sizeof( areai ) );

	/* Get information on all message ports.  This returns the port ID, ID of
	   the owning team, message queue length, number of messages in the
	   queue, and total number of messages processed */
	cookie = 0;
	while( get_next_port_info( 0, &cookie, &porti ) == B_NO_ERROR )
		addRandomData( randomState, &porti, sizeof( porti ) );

	/* Get information on all semaphores.  This returns the semaphore and
	   owning team ID, the name, thread count, and the ID of the last thread
	   which acquired the semaphore */
	cookie = 0;
	while( get_next_sem_info( 0, &cookie, &semi ) == B_NO_ERROR )
		addRandomData( randomState, &semi, sizeof( semi ) );

	/* Get information on all images (code blocks, eg applications, shared
	   libraries, and add-on images (DLL's on steroids).  This returns the
	   image ID and type (app, library, or add-on), the order in which the
	   image was loaded compared to other images, the address of the init
	   and shutdown routines, the device and node where the image lives,
	   and the image text and data sizes) */
	cookie = 0;
	while( get_next_image_info( 0, &cookie, &imagei ) == B_NO_ERROR )
		addRandomData( randomState, &imagei, sizeof( imagei ) );

	/* Get information on all storage devices.  This returns the device
	   number, root inode, various device parameters such as I/O block size,
	   and the number of free and used blocks and inodes */
	devID = 0;
	while( next_dev( &devID ) >= 0 )
		{
		fs_info fsInfo;

		if( fs_stat_dev( devID, &fsInfo ) == B_NO_ERROR )
			addRandomData( randomState, &fsInfo, sizeof( fs_info ) );
		}

	/* Flush any remaining data through */
	endRandomData( randomState, 100 );
	}