Example #1
int
getloadavg (double loadavg[], int nelem)
{
  int elem = 0;			/* Return value.  */

# ifdef NO_GET_LOAD_AVG
#  define LDAV_DONE
  /* Set errno to zero to indicate that there was no particular error;
     this function just can't work at all on this system.  */
  errno = 0;
  elem = -1;
# endif

# if !defined (LDAV_DONE) && defined (HAVE_LIBKSTAT)
/* Use libkstat because we don't have to be root.  */
#  define LDAV_DONE
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *kn;

  kc = kstat_open ();
  if (kc == 0)
    return -1;
  ksp = kstat_lookup (kc, "unix", 0, "system_misc");
  if (ksp == 0)
    {
      kstat_close (kc);
      return -1;
    }
  if (kstat_read (kc, ksp, 0) == -1)
    {
      kstat_close (kc);
      return -1;
    }

  kn = kstat_data_lookup (ksp, "avenrun_1min");
  if (kn == 0)
    {
      /* Return -1 if no load average information is available.  */
      nelem = 0;
      elem = -1;
    }

  if (nelem >= 1)
    loadavg[elem++] = (double) kn->value.ul/FSCALE;

  if (nelem >= 2)
    {
      kn = kstat_data_lookup (ksp, "avenrun_5min");
      if (kn != 0)
	{
	  loadavg[elem++] = (double) kn->value.ul/FSCALE;

	  if (nelem >= 3)
	    {
	      kn = kstat_data_lookup (ksp, "avenrun_15min");
	      if (kn != 0)
		loadavg[elem++] = (double) kn->value.ul/FSCALE;
	    }
	}
    }

  kstat_close (kc);
# endif /* HAVE_LIBKSTAT */

# if !defined (LDAV_DONE) && defined (hpux) && defined (HAVE_PSTAT_GETDYNAMIC)
/* Use pstat_getdynamic() because we don't have to be root.  */
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

  struct pst_dynamic dyn_info;
  if (pstat_getdynamic (&dyn_info, sizeof (dyn_info), 0, 0) < 0)
    return -1;
  if (nelem > 0)
    loadavg[elem++] = dyn_info.psd_avg_1_min;
  if (nelem > 1)
    loadavg[elem++] = dyn_info.psd_avg_5_min;
  if (nelem > 2)
    loadavg[elem++] = dyn_info.psd_avg_15_min;

# endif /* hpux && HAVE_PSTAT_GETDYNAMIC */

# if !defined (LDAV_DONE) && defined (__linux__)
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

#  ifndef LINUX_LDAV_FILE
#   define LINUX_LDAV_FILE "/proc/loadavg"
#  endif
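  /* /proc/loadavg typically reads "0.20 0.18 0.12 1/80 11206"; only the
     first three whitespace-separated fields (the 1-, 5- and 15-minute
     averages) are used here.  */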

  char ldavgbuf[40];
  double load_ave[3];
  int fd, count;

  fd = open (LINUX_LDAV_FILE, O_RDONLY);
  if (fd == -1)
    return -1;
  count = read (fd, ldavgbuf, sizeof ldavgbuf - 1);
  (void) close (fd);
  if (count <= 0)
    return -1;
  ldavgbuf[count] = '\0';

  /* The following sscanf must use the C locale, because in other locales
     the radix character may not be '.'.  */
  setlocale (LC_NUMERIC, "C");
  count = sscanf (ldavgbuf, "%lf %lf %lf",
		  &load_ave[0], &load_ave[1], &load_ave[2]);
  setlocale (LC_NUMERIC, "");
  if (count < 1)
    return -1;

  for (elem = 0; elem < nelem && elem < count; elem++)
    loadavg[elem] = load_ave[elem];

  return elem;

# endif /* __linux__ */

# if !defined (LDAV_DONE) && defined (__NetBSD__)
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

#  ifndef NETBSD_LDAV_FILE
#   define NETBSD_LDAV_FILE "/kern/loadavg"
#  endif
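  /* The kernfs file holds the three averages as scaled integers followed
     by the scale factor, e.g. "1679 1316 1076 2048".  */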

  unsigned long int load_ave[3], scale;
  int count;
  FILE *fp;

  fp = fopen (NETBSD_LDAV_FILE, "r");
  if (fp == NULL)
    return -1;
  count = fscanf (fp, "%lu %lu %lu %lu\n",
		  &load_ave[0], &load_ave[1], &load_ave[2],
		  &scale);
  (void) fclose (fp);
  if (count != 4)
    return -1;

  for (elem = 0; elem < nelem; elem++)
    loadavg[elem] = (double) load_ave[elem] / (double) scale;

  return elem;

# endif /* __NetBSD__ */

# if !defined (LDAV_DONE) && defined (NeXT)
#  define LDAV_DONE
  /* The NeXT code was adapted from iscreen 3.2.  */

  host_t host;
  struct processor_set_basic_info info;
  unsigned info_count;

  /* We only know how to get the 1-minute average for this system,
     so even if the caller asks for more than 1, we only return 1.  */

  if (!getloadavg_initialized)
    {
      if (processor_set_default (host_self (), &default_set) == KERN_SUCCESS)
	getloadavg_initialized = 1;
    }

  if (getloadavg_initialized)
    {
      info_count = PROCESSOR_SET_BASIC_INFO_COUNT;
      if (processor_set_info (default_set, PROCESSOR_SET_BASIC_INFO, &host,
			      (processor_set_info_t) &info, &info_count)
	  != KERN_SUCCESS)
	getloadavg_initialized = 0;
      else
	{
	  if (nelem > 0)
	    loadavg[elem++] = (double) info.load_average / LOAD_SCALE;
	}
    }

  if (!getloadavg_initialized)
    return -1;
# endif /* NeXT */

# if !defined (LDAV_DONE) && defined (UMAX)
#  define LDAV_DONE
/* UMAX 4.2, which runs on the Encore Multimax multiprocessor, does not
   have a /dev/kmem.  Information about the workings of the running kernel
   can be gathered with inq_stats system calls.
   We only know how to get the 1-minute average for this system.  */

  struct proc_summary proc_sum_data;
  struct stat_descr proc_info;
  double load;
  register unsigned int i, j;

  if (cpus == 0)
    {
      register unsigned int c, i;
      struct cpu_config conf;
      struct stat_descr desc;

      desc.sd_next = 0;
      desc.sd_subsys = SUBSYS_CPU;
      desc.sd_type = CPUTYPE_CONFIG;
      desc.sd_addr = (char *) &conf;
      desc.sd_size = sizeof conf;

      if (inq_stats (1, &desc))
	return -1;

      c = 0;
      for (i = 0; i < conf.config_maxclass; ++i)
	{
	  struct class_stats stats;
	  memset (&stats, '\0', sizeof stats);

	  desc.sd_type = CPUTYPE_CLASS;
	  desc.sd_objid = i;
	  desc.sd_addr = (char *) &stats;
	  desc.sd_size = sizeof stats;

	  if (inq_stats (1, &desc))
	    return -1;

	  c += stats.class_numcpus;
	}
      cpus = c;
      samples = cpus < 2 ? 3 : (2 * cpus / 3);
    }

  proc_info.sd_next = 0;
  proc_info.sd_subsys = SUBSYS_PROC;
  proc_info.sd_type = PROCTYPE_SUMMARY;
  proc_info.sd_addr = (char *) &proc_sum_data;
  proc_info.sd_size = sizeof (struct proc_summary);
  proc_info.sd_sizeused = 0;

  if (inq_stats (1, &proc_info) != 0)
    return -1;

  load = proc_sum_data.ps_nrunnable;
  j = 0;
  for (i = samples - 1; i > 0; --i)
    {
      load += proc_sum_data.ps_nrun[j];
      if (j++ == PS_NRUNSIZE)
	j = 0;
    }

  if (nelem > 0)
    loadavg[elem++] = load / samples / cpus;
# endif /* UMAX */

# if !defined (LDAV_DONE) && defined (DGUX)
#  define LDAV_DONE
  /* This call can return -1 for an error, but with good args
     it's not supposed to fail.  The first argument is for no
     apparent reason of type 'long int *'.  */
  dg_sys_info ((long int *) &load_info,
	       DG_SYS_INFO_LOAD_INFO_TYPE,
	       DG_SYS_INFO_LOAD_VERSION_0);

  if (nelem > 0)
    loadavg[elem++] = load_info.one_minute;
  if (nelem > 1)
    loadavg[elem++] = load_info.five_minute;
  if (nelem > 2)
    loadavg[elem++] = load_info.fifteen_minute;
# endif /* DGUX */

# if !defined (LDAV_DONE) && defined (apollo)
#  define LDAV_DONE
/* Apollo code from [email protected] (Ray Lischner).

   This system call is not documented.  The load average is obtained as
   three long integers, for the load average over the past minute,
   five minutes, and fifteen minutes.  Each value is a scaled integer,
   with 16 bits of integer part and 16 bits of fraction part.

   I'm not sure which operating system first supported this system call,
   but I know that SR10.2 supports it.  */
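/* For example, a raw load_ave value of 0x00018000 decodes as
   1 + 32768/65536 = 1.5.  */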

  extern void proc1_$get_loadav ();
  unsigned long load_ave[3];

  proc1_$get_loadav (load_ave);

  if (nelem > 0)
    loadavg[elem++] = load_ave[0] / 65536.0;
  if (nelem > 1)
    loadavg[elem++] = load_ave[1] / 65536.0;
  if (nelem > 2)
    loadavg[elem++] = load_ave[2] / 65536.0;
# endif /* apollo */

# if !defined (LDAV_DONE) && defined (OSF_MIPS)
#  define LDAV_DONE

  struct tbl_loadavg load_ave;
  table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave));
  if (nelem > 0)
    loadavg[elem++]
      = (load_ave.tl_lscale == 0
         ? load_ave.tl_avenrun.d[0]
         : (load_ave.tl_avenrun.l[0] / (double) load_ave.tl_lscale));
# endif	/* OSF_MIPS */

# if !defined (LDAV_DONE) && (defined (__MSDOS__) || defined (WINDOWS32))
#  define LDAV_DONE

  /* A faithful emulation is going to have to be saved for a rainy day.  */
  for ( ; elem < nelem; elem++)
    {
      loadavg[elem] = 0.0;
    }
# endif  /* __MSDOS__ || WINDOWS32 */

# if !defined (LDAV_DONE) && defined (OSF_ALPHA)
#  define LDAV_DONE

  struct tbl_loadavg load_ave;
  table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave));
  for (elem = 0; elem < nelem; elem++)
    loadavg[elem]
      = (load_ave.tl_lscale == 0
       ? load_ave.tl_avenrun.d[elem]
       : (load_ave.tl_avenrun.l[elem] / (double) load_ave.tl_lscale));
# endif /* OSF_ALPHA */

# if !defined (LDAV_DONE) && defined (VMS)
  /* VMS specific code -- read from the Load Ave driver.  */

  LOAD_AVE_TYPE load_ave[3];
  static int getloadavg_initialized = 0;
#  ifdef eunice
  struct
  {
    int dsc$w_length;
    char *dsc$a_pointer;
  } descriptor;
#  endif

  /* Ensure that there is a channel open to the load ave device.  */
  if (!getloadavg_initialized)
    {
      /* Attempt to open the channel.  */
#  ifdef eunice
      descriptor.dsc$w_length = 18;
      descriptor.dsc$a_pointer = "$$VMS_LOAD_AVERAGE";
#  else
      $DESCRIPTOR (descriptor, "LAV0:");
#  endif
      if (sys$assign (&descriptor, &channel, 0, 0) & 1)
	getloadavg_initialized = 1;
    }

  /* Read the load average vector.  */
  if (getloadavg_initialized
      && !(sys$qiow (0, channel, IO$_READVBLK, 0, 0, 0,
		     load_ave, 12, 0, 0, 0, 0) & 1))
    {
      sys$dassgn (channel);
      getloadavg_initialized = 0;
    }

  if (!getloadavg_initialized)
    return -1;
# endif /* VMS */

# if !defined (LDAV_DONE) && defined(LOAD_AVE_TYPE) && !defined(VMS)

  /* UNIX-specific code -- read the average from /dev/kmem.  */

#  define LDAV_PRIVILEGED		/* This code requires special installation.  */

  LOAD_AVE_TYPE load_ave[3];

  /* Get the address of LDAV_SYMBOL.  */
  if (offset == 0)
    {
#  ifndef sgi
#   ifndef NLIST_STRUCT
      strcpy (nl[0].n_name, LDAV_SYMBOL);
      strcpy (nl[1].n_name, "");
#   else /* NLIST_STRUCT */
#    ifdef HAVE_STRUCT_NLIST_N_UN_N_NAME
      nl[0].n_un.n_name = LDAV_SYMBOL;
      nl[1].n_un.n_name = 0;
#    else /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */
      nl[0].n_name = LDAV_SYMBOL;
      nl[1].n_name = 0;
#    endif /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */
#   endif /* NLIST_STRUCT */

#   ifndef SUNOS_5
      if (
#    if !(defined (_AIX) && !defined (ps2))
	  nlist (KERNEL_FILE, nl)
#    else  /* _AIX */
	  knlist (nl, 1, sizeof (nl[0]))
#    endif
	  >= 0)
	  /* Omit "&& nl[0].n_type != 0 " -- it breaks on Sun386i.  */
	  {
#    ifdef FIXUP_KERNEL_SYMBOL_ADDR
	    FIXUP_KERNEL_SYMBOL_ADDR (nl);
#    endif
	    offset = nl[0].n_value;
	  }
#   endif /* !SUNOS_5 */
#  else  /* sgi */
      int ldav_off;

      ldav_off = sysmp (MP_KERNADDR, MPKA_AVENRUN);
      if (ldav_off != -1)
	offset = (long) ldav_off & 0x7fffffff;
#  endif /* sgi */
    }

  /* Make sure we have /dev/kmem open.  */
  if (!getloadavg_initialized)
    {
#  ifndef SUNOS_5
      channel = open ("/dev/kmem", 0);
      if (channel >= 0)
	{
	  /* Set the channel to close on exec, so it does not
	     litter any child's descriptor table.  */
#   ifdef F_SETFD
#    ifndef FD_CLOEXEC
#     define FD_CLOEXEC 1
#    endif
	  (void) fcntl (channel, F_SETFD, FD_CLOEXEC);
#   endif
	  getloadavg_initialized = 1;
	}
#  else /* SUNOS_5 */
      /* We pass 0 for the kernel, corefile, and swapfile names
	 to use the currently running kernel.  */
      kd = kvm_open (0, 0, 0, O_RDONLY, 0);
      if (kd != 0)
	{
	  /* nlist the currently running kernel.  */
	  kvm_nlist (kd, nl);
	  offset = nl[0].n_value;
	  getloadavg_initialized = 1;
	}
#  endif /* SUNOS_5 */
    }

  /* If we can, get the load average values.  */
  if (offset && getloadavg_initialized)
    {
      /* Try to read the load.  */
#  ifndef SUNOS_5
      if (lseek (channel, offset, 0) == -1L
	  || read (channel, (char *) load_ave, sizeof (load_ave))
	  != sizeof (load_ave))
	{
	  close (channel);
	  getloadavg_initialized = 0;
	}
#  else  /* SUNOS_5 */
      if (kvm_read (kd, offset, (char *) load_ave, sizeof (load_ave))
	  != sizeof (load_ave))
        {
          kvm_close (kd);
          getloadavg_initialized = 0;
	}
#  endif /* SUNOS_5 */
    }

  if (offset == 0 || !getloadavg_initialized)
    return -1;
# endif /* LOAD_AVE_TYPE and not VMS */

# if !defined (LDAV_DONE) && defined (LOAD_AVE_TYPE) /* Including VMS.  */
  if (nelem > 0)
    loadavg[elem++] = LDAV_CVT (load_ave[0]);
  if (nelem > 1)
    loadavg[elem++] = LDAV_CVT (load_ave[1]);
  if (nelem > 2)
    loadavg[elem++] = LDAV_CVT (load_ave[2]);

#  define LDAV_DONE
# endif /* !LDAV_DONE && LOAD_AVE_TYPE */

# ifdef LDAV_DONE
  return elem;
# else
  /* Set errno to zero to indicate that there was no particular error;
     this function just can't work at all on this system.  */
  errno = 0;
  return -1;
# endif
}
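
A minimal caller sketch (not part of the original source), assuming the getloadavg contract shown above and the declaration that glibc and the BSDs provide in <stdlib.h>: the function fills up to nelem entries and returns how many were stored, or -1 on failure.

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  double avg[3];
  int n = getloadavg (avg, 3);   /* ask for the 1-, 5- and 15-minute averages */

  if (n < 0)
    {
      fputs ("load averages are unavailable on this system\n", stderr);
      return EXIT_FAILURE;
    }
  for (int i = 0; i < n; i++)
    printf ("%.2f%s", avg[i], i + 1 < n ? " " : "\n");
  return EXIT_SUCCESS;
}
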
Example #2
get_system_info(struct system_info *si)
{
    long avenrun[3];
    long total;

    /* get the cp_time array */
    (void) getkval(cp_time_offset, (int *)cp_time, sizeof(cp_time),
                   "_cp_time");

    /* get load average array */
    (void) getkval(avenrun_offset, (int *)avenrun, sizeof(avenrun),
                   "_avenrun");

    /* get mpid -- process id of last process */
    (void) getkval(mpid_offset, &(si->last_pid), sizeof(si->last_pid),
                   "_mpid");

    /* convert load averages to doubles */
    {
        register int i;
        for(i=0; i<3; i++)
            si->load_avg[i] = ((double)avenrun[i])/LSCALE;
    }

    /* convert cp_time counts to percentages */
    total = percentages(CPUSTATES, cpu_states, cp_time, cp_old, cp_diff);

    /* sum memory statistics */
    {
        /* get total -- systemwide main memory usage structure */
        /* Does not work on NeXT system.  Use vm_statistics() for paging info. */
        /* struct vmtotal total;
         * (void) getkval(total_offset, (int *)(&total), sizeof(total),
         *	       "_total");
         */
        /* convert memory stats to Kbytes */
        /* memory_stats[0] = -1;
         * memory_stats[1] = pagetok(total.t_arm);
         * memory_stats[2] = pagetok(total.t_rm);
         * memory_stats[3] = -1;
         * memory_stats[4] = pagetok(total.t_avm);
         * memory_stats[5] = pagetok(total.t_vm);
         * memory_stats[6] = -1;
         * memory_stats[7] = pagetok(total.t_free);
         */
        kern_return_t status;
        unsigned int count=HOST_BASIC_INFO_COUNT;
        status = vm_statistics(task_self(), &vm_stats);
#ifdef DEBUG
        if(status != KERN_SUCCESS)
            mach_error("An error calling vm_statistics()!", status);
#endif
        status = host_info(host_self(), HOST_BASIC_INFO, (host_info_t)&host_stats, &count);
#ifdef DEBUG
        if(status != KERN_SUCCESS)
            mach_error("An error calling host_info()!", status);
#endif
        /* convert memory stats to Kbytes */
        memory_stats[0] = pagetok(host_stats.memory_size / vm_stats.pagesize);
        memory_stats[1] = pagetok(vm_stats.active_count);
        memory_stats[2] = pagetok(vm_stats.inactive_count);
        memory_stats[3] = pagetok(vm_stats.wire_count);
        memory_stats[4] = pagetok(vm_stats.free_count);
        if (swappgsin < 0)
        {
            memory_stats[5] = 1;
            memory_stats[6] = 1;
        } else {
            memory_stats[5] = pagetok(((vm_stats.pageins - swappgsin)));
            memory_stats[6] = pagetok(((vm_stats.pageouts - swappgsout)));
        }
        swappgsin = vm_stats.pageins;
        swappgsout = vm_stats.pageouts;
    }

    /* set arrays and strings */
    si->cpustates = cpu_states;
    si->memory = memory_stats;
}
Example #3
/* 
 * This function will send a single Mach message in an attempt 
 * to obtain the kernel version.  Once it has the reply from Mach, 
 * it will iterate the string and obtain a release number, system 
 * name, and kernel version string. 
 * 
 * This is trying to be as efficient as possible, but will probably 
 * end up being a very gnarly hack with no real value whatsoever. 
 */ 
void detectOpSysVersion(void) 
{ 
  kern_return_t kret; 
  kernel_version_t kver; 
  
  kret = host_kernel_version(host_self(), kver); 
  
  if (kret != KERN_SUCCESS) { 
    mach_error("host_kernel_version() failed.", kret); 
  } else { 
    int idx = 0, len = 0; 
    
    /* Copy the kernel version to a string */ 
    strcpy(version, kver); 
    len = strlen(version); 
    
    /* Remove trailing \n, if it exists */ 
    if (len > 0 && version[len - 1] == '\n')
      version[len - 1] = '\0'; 

    /* This is the slowest part of the whole thing... */ 
    for (idx = 0; idx < len; idx++) { 
      
      /* Version: [0-9].[0-9]: */ 
      if (isdigit(version[idx]) && version[idx + 1] == '.' && 
          isdigit(version[idx + 2]) && version[idx + 3] == ':') 
      { 
        sprintf(release, "%c.%c", version[idx], version[idx + 2]);
        break; 
      } 
      
      /* Version: [0-9].[0-9].[0-9]: */ 
      if (isdigit(version[idx]) && version[idx + 1] == '.' && 
          isdigit(version[idx + 2]) && version[idx + 3] == '.' && 
          isdigit(version[idx + 4]) && version[idx + 5] == ':') 
      { 
        sprintf(release, "%c.%c.%c",
                version[idx], version[idx + 2], version[idx + 4]);
        break; 
      } 
      
      /* Version: [0-9][0-9].[0-9].[0-9]: */ 
      if (isdigit(version[idx]) && isdigit(version[idx + 1]) && 
          version[idx + 2] == '.' && isdigit(version[idx + 3]) && 
          version[idx + 4] == '.' && isdigit(version[idx + 5]) && 
          version[idx + 6] == ':') 
      { 
        sprintf(release, "%c%c.%c.%c",
                version[idx], version[idx + 1],
                version[idx + 3], version[idx + 5]);
        break; 
      } 
    } 
    
    /* 
     * Compute the system name using the major version. 
     * 
     * If release[1] != '.', then it's Darwin... and this program would 
     * look very stupid if it printed 'NEXTSTEP 10.0' on an OSX box. 
     */ 
    if (release[1] == '.') {
      switch (release[0]) {
        case '0':
        case '1':
        case '2':
        case '3':
          sprintf(sysname, "NEXTSTEP");
          break;

        case '4':
          sprintf(sysname, "OPENSTEP");
          break;

        case '5':
          sprintf(sysname, "Rhapsody");
          /* barf barf, Apple. */
          for (idx = 0; idx < len; idx++)
            if (version[idx] == '\n')
              version[idx] = ' ';
          break;

        default:
          sprintf(sysname, "Darwin");
      }
    } else {
      sprintf(sysname, "Darwin");
    }
  } /* if (kret != ... */ 

  /* 
   * Let's get the hostname on our way out 
   */ 
  gethostname(nodename, 255); 
} 
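
For context, a stand-alone sketch of the single Mach call described in the comment above (not part of the example; the parsing is omitted). It assumes the standard Darwin Mach headers, where kernel_version_t is a fixed-size character buffer and the host port comes from mach_host_self().

#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_host.h>

int main(void)
{
  kernel_version_t kver;   /* char buffer sized by the Mach headers */

  if (host_kernel_version(mach_host_self(), kver) != KERN_SUCCESS) {
    fprintf(stderr, "host_kernel_version() failed\n");
    return 1;
  }
  /* Prints something like "Darwin Kernel Version 10.8.0: ..." */
  printf("%s\n", kver);
  return 0;
}
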
Example #4
/*
 * This function is called very early on in the Mach startup, from the
 * function start_kernel_threads() in osfmk/kern/startup.c.  It's called
 * in the context of the current (startup) task using a call to the
 * function kernel_thread_create() to jump into start_kernel_threads().
 * Internally, kernel_thread_create() calls thread_create_internal(),
 * which calls uthread_alloc().  The function of uthread_alloc() is
 * normally to allocate a uthread structure, and fill out the uu_sigmask,
 * uu_context fields.  It skips filling these out in the case of the "task"
 * being "kernel_task", because the order of operation is inverted.  To
 * account for that, we need to manually fill in at least the contents
 * of the uu_context.vc_ucred field so that the uthread structure can be
 * used like any other.
 */
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
#if __i386__ || __x86_64__
	int error;
#endif	
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	kernel_flock = funnel_alloc(KERNEL_FUNNEL);
	if (kernel_flock == (funnel_t *)0 ) {
		panic("bsd_init: Failed to allocate kernel funnel");
	}
        
	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#ifndef CONFIG_EMBEDDED
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#ifdef CONFIG_EMBEDDED
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#else	
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#endif

	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;
#endif /* MAC */

	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_EMBEDDED
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);	
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#endif
	/* There is no other bsd thread at this point, so this is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_EMBEDDED
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

#if CONFIG_LCTX
	kernproc->p_lctx = NULL;
#endif

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	temp_cred.cr_ngroups = 1;

	temp_cred.cr_audit.as_aia_p = &audit_default_aia;
        /* XXX the following will go away with cr_au */
	temp_cred.cr_au.ai_auid = AU_DEFAUDITID;

	bsd_init_kprintf("calling kauth_cred_create\n");
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
	mac_task_label_update_cred (kernproc->p_ucred, (struct task *) kernproc->task);
#endif

	/* Create the file descriptor table. */
	filedesc0.fd_refcnt = 1+1;	/* +1 so shutdown will not _FREE_ZONE */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for two processes: init and mach_init.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE,
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	start_kern_tracing(new_nkdbufs);
	if (turn_on_log_leaks)
		log_leaks = 1;

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();

	/* Stack snapshot facility lock */
	stackshot_lock_init();
	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_EMBEDDED
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling kern_memorystatus_init\n");
	kern_memorystatus_init();
#endif

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	/* kick off timeout driven events by calling first time */
	thread_wakeup(&lbolt);
	timeout(lightning_bolt, 0, hz);

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
	/* register user tunnel kernel control handler */
	utun_register_control();
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();
#if 0
	/* XXX Hack for early debug stop */
	printf("\nabout to sleep for 10 seconds\n");
	IOSleep( 10 * 1000 );
	/* Debugger("hello"); */
#endif

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

#if CONFIG_EMBEDDED
	{
		/* print out early VM statistics */
		kern_return_t kr1;
		vm_statistics_data_t stat;
		mach_msg_type_number_t count;

		count = HOST_VM_INFO_COUNT;
		kr1 = host_statistics(host_self(),
				      HOST_VM_INFO,
				      (host_info_t)&stat,
				      &count);
		kprintf("Mach Virtual Memory Statistics (page size of 4096) bytes\n"
			"Pages free:\t\t\t%u.\n"
			"Pages active:\t\t\t%u.\n"
			"Pages inactive:\t\t\t%u.\n"
			"Pages wired down:\t\t%u.\n"
			"\"Translation faults\":\t\t%u.\n"
			"Pages copy-on-write:\t\t%u.\n"
			"Pages zero filled:\t\t%u.\n"
			"Pages reactivated:\t\t%u.\n"
			"Pageins:\t\t\t%u.\n"
			"Pageouts:\t\t\t%u.\n"
			"Object cache: %u hits of %u lookups (%d%% hit rate)\n",

			stat.free_count,
			stat.active_count,
			stat.inactive_count,
			stat.wire_count,
			stat.faults,
			stat.cow_faults,
			stat.zero_fill_count,
			stat.reactivations,
			stat.pageins,
			stat.pageouts,
			stat.hits,
			stat.lookups,
			(stat.lookups == 0) ? 100 :
			                      ((stat.hits * 100) / stat.lookups));
	}
#endif /* CONFIG_EMBEDDED */
	
	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (mountroot == netboot_mountroot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (mountroot == netboot_mountroot) {
		int err;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
  
	if(imageboot_needed()) {
		int err;

		/* An image was found */
		if((err = imageboot_setup())) {
			/*
			 * this is not fatal. Keep trying to root
			 * off the original media
			 */
			printf("%s: imageboot could not find root, %d\n",
				__FUNCTION__, err);
		}
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is already zero'ed */
	microtime(&kernproc->p_start);
	kernproc->p_stats->p_start = kernproc->p_start;	/* for compat */

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */
	
	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
	printf("Kernel is LP64\n");
#endif
#if __i386__ || __x86_64__
	/* this should be done after the root filesystem is mounted */
	error = set_archhandler(kernproc, CPU_TYPE_POWERPC);
	// 10/30/08 - gab: <rdar://problem/6324501>
	// if default 'translate' can't be found, see if the understudy is available
	if (ENOENT == error) {
		strlcpy(exec_archhandler_ppc.path, kRosettaStandIn_str, MAXPATHLEN);
		error = set_archhandler(kernproc, CPU_TYPE_POWERPC);
	}
	if (error) /* XXX make more generic */
		exec_archhandler_ppc.path[0] = 0;
#endif	

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif

	bsd_init_kprintf("done\n");
}