Example #1
File: main.c Project: mwilbur/minix
/*===========================================================================*
 *		            sef_cb_init_fresh                                *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the reincarnation server. */
  struct boot_image *ip;
  int s,i;
  int nr_image_srvs, nr_image_priv_srvs, nr_uncaught_init_srvs;
  struct rproc *rp;
  struct rproc *replica_rp;
  struct rprocpub *rpub;
  struct boot_image image[NR_BOOT_PROCS];
  struct boot_image_priv *boot_image_priv;
  struct boot_image_sys *boot_image_sys;
  struct boot_image_dev *boot_image_dev;
  int pid, replica_pid;
  endpoint_t replica_endpoint;
  int ipc_to;
  int *calls;
  int all_c[] = { ALL_C, NULL_C };
  int no_c[] = {  NULL_C };

  /* See if we run in verbose mode. */
  env_parse("rs_verbose", "d", 0, &rs_verbose, 0, 1);

  if ((s = sys_getinfo(GET_HZ, &system_hz, sizeof(system_hz), 0, 0)) != OK)
	  panic("Cannot get system timer frequency\n");

  /* Initialize the global init descriptor. */
  rinit.rproctab_gid = cpf_grant_direct(ANY, (vir_bytes) rprocpub,
      sizeof(rprocpub), CPF_READ);
  if(!GRANT_VALID(rinit.rproctab_gid)) {
      panic("unable to create rprocpub table grant: %d", rinit.rproctab_gid);
  }

  /* Initialize some global variables. */
  rupdate.flags = 0;
  shutting_down = FALSE;

  /* Get a copy of the boot image table. */
  if ((s = sys_getimage(image)) != OK) {
      panic("unable to get copy of boot image table: %d", s);
  }

  /* Determine the number of system services in the boot image table. */
  nr_image_srvs = 0;
  for(i=0;i<NR_BOOT_PROCS;i++) {
      ip = &image[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(ip->endpoint))) {
          continue;
      }
      nr_image_srvs++;
  }

  /* Determine the number of entries in the boot image priv table and make sure
   * it matches the number of system services in the boot image table.
   */
  nr_image_priv_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }
      nr_image_priv_srvs++;
  }
  if(nr_image_srvs != nr_image_priv_srvs) {
	panic("boot image table and boot image priv table mismatch");
  }

  /* Reset the system process table. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
      rp->r_flags = 0;
      rp->r_pub = &rprocpub[rp - rproc];
      rp->r_pub->in_use = FALSE;
  }

  /* Initialize the system process table in 4 steps, each of them following
   * the appearance of system services in the boot image priv table.
   * - Step 1: set privileges, sys properties, and dev properties (if any)
   * for every system service.
   */
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }

      /* Look up the corresponding entries in other tables. */
      boot_image_info_lookup(boot_image_priv->endpoint, image,
          &ip, NULL, &boot_image_sys, &boot_image_dev);
      rp = &rproc[boot_image_priv - boot_image_priv_table];
      rpub = rp->r_pub;

      /*
       * Set privileges.
       */
      /* Get label. */
      strcpy(rpub->label, boot_image_priv->label);

      /* Force a static priv id for system services in the boot image. */
      rp->r_priv.s_id = static_priv_id(
          _ENDPOINT_P(boot_image_priv->endpoint));
      
      /* Initialize privilege bitmaps and signal manager. */
      rp->r_priv.s_flags = boot_image_priv->flags;          /* priv flags */
      rp->r_priv.s_trap_mask= SRV_OR_USR(rp, SRV_T, USR_T); /* traps */
      ipc_to = SRV_OR_USR(rp, SRV_M, USR_M);                /* targets */
      fill_send_mask(&rp->r_priv.s_ipc_to, ipc_to == ALL_M);
      rp->r_priv.s_sig_mgr= SRV_OR_USR(rp, SRV_SM, USR_SM); /* sig mgr */
      rp->r_priv.s_bak_sig_mgr = NONE;                      /* backup sig mgr */
      
      /* Initialize kernel call mask bitmap. */
      calls = SRV_OR_USR(rp, SRV_KC, USR_KC) == ALL_C ? all_c : no_c;
      fill_call_mask(calls, NR_SYS_CALLS,
          rp->r_priv.s_k_call_mask, KERNEL_CALL, TRUE);

      /* Set the privilege structure. */
      if(boot_image_priv->endpoint != RS_PROC_NR) {
          if ((s = sys_privctl(ip->endpoint, SYS_PRIV_SET_SYS, &(rp->r_priv)))
              != OK) {
              panic("unable to set privilege structure: %d", s);
          }
      }

      /* Synch the privilege structure with the kernel. */
      if ((s = sys_getpriv(&(rp->r_priv), ip->endpoint)) != OK) {
          panic("unable to synch privilege structure: %d", s);
      }

      /*
       * Set sys properties.
       */
      rpub->sys_flags = boot_image_sys->flags;        /* sys flags */

      /*
       * Set dev properties.
       */
      rpub->dev_flags = boot_image_dev->flags;        /* device flags */
      rpub->dev_nr = boot_image_dev->dev_nr;          /* major device number */
      rpub->dev_style = boot_image_dev->dev_style;    /* device style */
      rpub->dev_style2 = boot_image_dev->dev_style2;  /* device style 2 */

      /* Get process name. */
      strcpy(rpub->proc_name, ip->proc_name);

      /* Build command settings. */
      rp->r_cmd[0]= '\0';
      rp->r_script[0]= '\0';
      build_cmd_dep(rp);

      /* Initialize vm call mask bitmap. */
      calls = SRV_OR_USR(rp, SRV_VC, USR_VC) == ALL_C ? all_c : no_c;
      fill_call_mask(calls, NR_VM_CALLS, rpub->vm_call_mask, VM_RQ_BASE, TRUE);

      /* Scheduling parameters. */
      rp->r_scheduler = SRV_OR_USR(rp, SRV_SCH, USR_SCH);
      rp->r_priority = SRV_OR_USR(rp, SRV_Q, USR_Q);
      rp->r_quantum = SRV_OR_USR(rp, SRV_QT, USR_QT);

      /* Get some settings from the boot image table. */
      rpub->endpoint = ip->endpoint;

      /* Set some defaults. */
      rp->r_old_rp = NULL;                     /* no old version yet */
      rp->r_new_rp = NULL;                     /* no new version yet */
      rp->r_prev_rp = NULL;                    /* no prev replica yet */
      rp->r_next_rp = NULL;                    /* no next replica yet */
      rp->r_uid = 0;                           /* root */
      rp->r_check_tm = 0;                      /* not checked yet */
      getuptime(&rp->r_alive_tm);              /* currently alive */
      rp->r_stop_tm = 0;                       /* not exiting yet */
      rp->r_restarts = 0;                      /* no restarts so far */
      rp->r_period = 0;                        /* no period yet */
      rp->r_exec = NULL;                       /* no in-memory copy yet */
      rp->r_exec_len = 0;

      /* Mark as in use and active. */
      rp->r_flags = RS_IN_USE | RS_ACTIVE;
      rproc_ptr[_ENDPOINT_P(rpub->endpoint)]= rp;
      rpub->in_use = TRUE;
  }

  /* - Step 2: allow every system service in the boot image to run. */
  nr_uncaught_init_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }

      /* Look up the corresponding slot in the system process table. */
      rp = &rproc[boot_image_priv - boot_image_priv_table];
      rpub = rp->r_pub;

      /* RS is already running as we speak. */
      if(boot_image_priv->endpoint == RS_PROC_NR) {
          if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
              panic("unable to initialize RS: %d", s);
          }
          continue;
      }

      /* Allow the service to run. */
      if ((s = sched_init_proc(rp)) != OK) {
          panic("unable to initialize scheduling: %d", s);
      }
      if ((s = sys_privctl(rpub->endpoint, SYS_PRIV_ALLOW, NULL)) != OK) {
          panic("unable to initialize privileges: %d", s);
      }

      /* Initialize service. We assume every service will always get
       * back to us here at boot time.
       */
      if(boot_image_priv->flags & SYS_PROC) {
          if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
              panic("unable to initialize service: %d", s);
          }
          if(rpub->sys_flags & SF_SYNCH_BOOT) {
              /* Catch init ready message now to synchronize. */
              catch_boot_init_ready(rpub->endpoint);
          }
          else {
              /* Catch init ready message later. */
              nr_uncaught_init_srvs++;
          }
      }
  }

  /* - Step 3: let every system service complete initialization by
   * catching all the init ready messages left.
   */
  while(nr_uncaught_init_srvs) {
      catch_boot_init_ready(ANY);
      nr_uncaught_init_srvs--;
  }

  /* - Step 4: all the system services in the boot image are now running.
   * Complete the initialization of the system process table in collaboration
   * with other system services.
   */
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }

      /* Look up the corresponding slot in the system process table. */
      rp = &rproc[boot_image_priv - boot_image_priv_table];
      rpub = rp->r_pub;

      /* Get pid from PM. */
      rp->r_pid = getnpid(rpub->endpoint);
      if(rp->r_pid == -1) {
          panic("unable to get pid");
      }
  }

  /* Set alarm to periodically check service status. */
  if (OK != (s=sys_setalarm(RS_DELTA_T, 0)))
      panic("couldn't set alarm: %d", s);

  /* Now create a new RS instance with a private page table and let the current
   * instance live update into the replica. Clone RS' own slot first.
   */
  rp = rproc_ptr[_ENDPOINT_P(RS_PROC_NR)];
  if((s = clone_slot(rp, &replica_rp)) != OK) {
      panic("unable to clone current RS instance: %d", s);
  }

  /* Fork a new RS instance. */
  pid = srv_fork();
  if(pid == -1) {
      panic("unable to fork a new RS instance");
  }
  replica_pid = pid ? pid : getpid();
  replica_endpoint = getnprocnr(replica_pid);
  replica_rp->r_pid = replica_pid;
  replica_rp->r_pub->endpoint = replica_endpoint;

  if(pid == 0) {
      /* New RS instance running. */

      /* Live update the old instance into the new one. */
      s = update_service(&rp, &replica_rp, RS_SWAP);
      if(s != OK) {
          panic("unable to live update RS: %d", s);
      }
      cpf_reload();

      /* Clean up the old RS instance; the new instance will take over. */
      cleanup_service(rp);

      /* Map out our own text and data. */
      unmap_ok = 1;
      _minix_unmapzero();

      /* Ask VM to pin memory for the new RS instance. */
      if((s = vm_memctl(RS_PROC_NR, VM_RS_MEM_PIN)) != OK) {
          panic("unable to pin memory for the new RS instance: %d", s);
      }
  }
  else {
      /* Old RS instance running. */

      /* Set up privileges for the new instance and let it run. */
      s = sys_privctl(replica_endpoint, SYS_PRIV_SET_SYS, &(replica_rp->r_priv));
      if(s != OK) {
          panic("unable to set privileges for the new RS instance: %d", s);
      }
      if ((s = sched_init_proc(replica_rp)) != OK) {
          panic("unable to initialize RS replica scheduling: %d", s);
      }
      s = sys_privctl(replica_endpoint, SYS_PRIV_YIELD, NULL);
      if(s != OK) {
          panic("unable to yield control to the new RS instance: %d", s);
      }
      NOT_REACHABLE;
  }

  return(OK);
}
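
The privilege setup in this example builds the kernel and VM call masks from an unordered list of call numbers terminated by a sentinel (the all_c/no_c arrays handed to fill_call_mask). Below is a minimal, self-contained sketch of that idiom; NCALLS, CALL_BASE, SENTINEL and the mask layout are invented for illustration and are not the real MINIX definitions.

/* Sketch of the call-mask idiom (hypothetical constants, not MINIX's). */
#include <stdio.h>

#define NCALLS     56                   /* assumed size of the call range */
#define CALL_BASE  0x600                /* assumed base call number */
#define SENTINEL   (-1)                 /* end-of-list marker, like NULL_C */
#define MASK_BITS  (8 * (int)sizeof(unsigned int))
#define MASK_WORDS ((NCALLS + MASK_BITS - 1) / MASK_BITS)

/* Set one bit per listed call number, relative to CALL_BASE. */
static void fill_mask(const int *calls, unsigned int mask[MASK_WORDS])
{
    int i;
    for (i = 0; i < MASK_WORDS; i++) mask[i] = 0;
    for (; *calls != SENTINEL; calls++) {
        int idx = *calls - CALL_BASE;
        if (idx >= 0 && idx < NCALLS)
            mask[idx / MASK_BITS] |= 1u << (idx % MASK_BITS);
    }
}

int main(void)
{
    int allowed[] = { CALL_BASE + 3, CALL_BASE + 17, SENTINEL };
    unsigned int mask[MASK_WORDS];

    fill_mask(allowed, mask);
    printf("mask word 0 = %#x\n", mask[0]);     /* bits 3 and 17 set */
    return 0;
}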
Example #2
/*===========================================================================*
 *			kmain 	                             		*
 *===========================================================================*/
void kmain(kinfo_t *local_cbi)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register int i, j;

  /* save a global copy of the boot parameters */
  memcpy(&kinfo, local_cbi, sizeof(kinfo));
  memcpy(&kmess, kinfo.kmess, sizeof(kmess));

#ifdef __arm__
  /* We want to initialize serial before we do any output */
  omap3_ser_init();
#endif
  /* We can talk now */
  printf("MINIX booting\n");

  /* Kernel may use bits of main memory before VM is started */
  kernel_may_alloc = 1;

  assert(sizeof(kinfo.boot_procs) == sizeof(image));
  memcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs));

  cstart();

  BKL_LOCK();
 
   DEBUGEXTRA(("main()\n"));

   proc_init();

   if(NR_BOOT_MODULES != kinfo.mbi.mods_count)
   	panic("expecting %d boot processes/modules, found %d",
		NR_BOOT_MODULES, kinfo.mbi.mods_count);

  /* Set up proc table entries for processes in boot image. */
  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc;
	proc_nr_t proc_nr;
	int ipc_to_m, kcalls;
	sys_map_t map;

	ip = &image[i];				/* process' attributes */
	DEBUGEXTRA(("initializing %s... ", ip->proc_name));
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	make_zero64(rp->p_cpu_time_left);
	if(i < NR_TASKS)			/* name (tasks only) */
		strlcpy(rp->p_name, ip->proc_name, sizeof(rp->p_name));

	if(i >= NR_TASKS) {
		/* Remember this so it can be passed to VM */
		multiboot_module_t *mb_mod = &kinfo.module_list[i - NR_TASKS];
		ip->start_addr = mb_mod->mod_start;
		ip->len = mb_mod->mod_end - mb_mod->mod_start;
	}
	
	reset_proc_accounting(rp);

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run immediately.
	 * All the other system processes are inhibited from running by the
	 * RTS_NO_PRIV flag. They can only be scheduled once the root system
	 * process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr) ||
		proc_nr == VM_PROC_NR);
	if(schedulable_proc) {
	    /* Assign privilege structure. Force a static privilege id. */
            (void) get_priv(rp, static_priv_id(proc_nr));

            /* Privileges for kernel tasks. */
	    if(proc_nr == VM_PROC_NR) {
                priv(rp)->s_flags = VM_F;
                priv(rp)->s_trap_mask = SRV_T;
		ipc_to_m = SRV_M;
		kcalls = SRV_KC;
                priv(rp)->s_sig_mgr = SELF;
                rp->p_priority = SRV_Q;
                rp->p_quantum_size_ms = SRV_QT;
	    }
	    else if(iskerneln(proc_nr)) {
                /* Privilege flags. */
                priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
                /* Allowed traps. */
                priv(rp)->s_trap_mask = (proc_nr == CLOCK 
                    || proc_nr == SYSTEM  ? CSK_T : TSK_T);
                ipc_to_m = TSK_M;                  /* allowed targets */
                kcalls = TSK_KC;                   /* allowed kernel calls */
            }
            /* Privileges for the root system process. */
            else {
	    	assert(isrootsysn(proc_nr));
                priv(rp)->s_flags= RSYS_F;        /* privilege flags */
                priv(rp)->s_trap_mask= SRV_T;     /* allowed traps */
                ipc_to_m = SRV_M;                 /* allowed targets */
                kcalls = SRV_KC;                  /* allowed kernel calls */
                priv(rp)->s_sig_mgr = SRV_SM;     /* signal manager */
                rp->p_priority = SRV_Q;	          /* priority queue */
                rp->p_quantum_size_ms = SRV_QT;   /* quantum size */
            }

            /* Fill in target mask. */
            memset(&map, 0, sizeof(map));

            if (ipc_to_m == ALL_M) {
                for(j = 0; j < NR_SYS_PROCS; j++)
                    set_sys_bit(map, j);
            }

            fill_sendto_mask(rp, &map);

            /* Fill in kernel call mask. */
            for(j = 0; j < SYS_CALL_MASK_SIZE; j++) {
                priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
            }
	}
	else {
	    /* Don't let the process run for now. */
            RTS_SET(rp, RTS_NO_PRIV | RTS_NO_QUANTUM);
	}

	/* Arch-specific state initialization. */
	arch_boot_proc(ip, rp);

	/* scheduling functions depend on proc_ptr pointing somewhere. */
	if(!get_cpulocal_var(proc_ptr))
		get_cpulocal_var(proc_ptr) = rp;

	/* Process isn't scheduled until VM has set up a pagetable for it. */
	if(rp->p_nr != VM_PROC_NR && rp->p_nr >= 0) {
		rp->p_rts_flags |= RTS_VMINHIBIT;
		rp->p_rts_flags |= RTS_BOOTINHIBIT;
	}

	rp->p_rts_flags |= RTS_PROC_STOP;
	rp->p_rts_flags &= ~RTS_SLOT_FREE;
	DEBUGEXTRA(("done\n"));
  }

  /* update boot procs info for VM */
  memcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs));

#define IPCNAME(n) { \
	assert((n) >= 0 && (n) <= IPCNO_HIGHEST); \
	assert(!ipc_call_names[n]);	\
	ipc_call_names[n] = #n; \
}

  arch_post_init();

  IPCNAME(SEND);
  IPCNAME(RECEIVE);
  IPCNAME(SENDREC);
  IPCNAME(NOTIFY);
  IPCNAME(SENDNB);
  IPCNAME(SENDA);

  /* System and processes initialization */
  memory_init();
  DEBUGEXTRA(("system_init()... "));
  system_init();
  DEBUGEXTRA(("done\n"));

  /* The bootstrap phase is over, so we can add the physical
   * memory used for it to the free list.
   */
  add_memmap(&kinfo, kinfo.bootstrap_start, kinfo.bootstrap_len);

#ifdef CONFIG_SMP
  if (config_no_apic) {
	  BOOT_VERBOSE(printf("APIC disabled, disables SMP, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else if (config_no_smp) {
	  BOOT_VERBOSE(printf("SMP disabled, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else {
	  smp_init();
	  /*
	   * If smp_init() returns, it failed; try to finish booting on a
	   * single CPU.
	   */
	  bsp_finish_booting();
  }
#else
  /*
   * If configured for a single CPU, we are already on the kernel stack,
   * which we will use every time we execute kernel code. We finish booting
   * and never return here.
   */
  bsp_finish_booting();
#endif

  NOT_REACHABLE;
}
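
The IPCNAME macro in kmain() uses the preprocessor's # operator to record a printable name for each IPC call number, so later diagnostics can print "SENDREC" instead of a raw number. Here is a minimal, self-contained sketch of that pattern; the call numbers below are invented for the example and are not the MINIX values.

/* Sketch of the IPCNAME stringification pattern (hypothetical numbers). */
#include <assert.h>
#include <stdio.h>

#define SEND          1
#define RECEIVE       2
#define SENDREC       3
#define IPCNO_HIGHEST 31

static const char *ipc_call_names[IPCNO_HIGHEST + 1];

#define IPCNAME(n) do { \
    assert((n) >= 0 && (n) <= IPCNO_HIGHEST); \
    assert(!ipc_call_names[n]); \
    ipc_call_names[n] = #n; \
} while (0)

int main(void)
{
    IPCNAME(SEND);
    IPCNAME(RECEIVE);
    IPCNAME(SENDREC);
    printf("call %d is named %s\n", SENDREC, ipc_call_names[SENDREC]);
    return 0;
}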
Example #3
/*===========================================================================*
 *				main                                         *
 *===========================================================================*/
PUBLIC int main(void)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register int i, j;
  size_t argsz;			/* size of arguments passed to crtso on stack */

  BKL_LOCK();
   /* Global value to test segment sanity. */
   magictest = MAGICTEST;
 
   DEBUGEXTRA(("main()\n"));

   proc_init();

  /* Set up proc table entries for processes in boot image.  The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment.
   */

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc;
	proc_nr_t proc_nr;
	int ipc_to_m, kcalls;
	sys_map_t map;

	ip = &image[i];				/* process' attributes */
	DEBUGEXTRA(("initializing %s... ", ip->proc_name));
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	make_zero64(rp->p_cpu_time_left);
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
	
	reset_proc_accounting(rp);

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run immediately.
	 * All the other system processes are inhibited from running by the
	 * RTS_NO_PRIV flag. They can only be scheduled once the root system
	 * process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
	if(schedulable_proc) {
	    /* Assign privilege structure. Force a static privilege id. */
            (void) get_priv(rp, static_priv_id(proc_nr));

            /* Privileges for kernel tasks. */
            if(iskerneln(proc_nr)) {
                /* Privilege flags. */
                priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
                /* Allowed traps. */
                priv(rp)->s_trap_mask = (proc_nr == CLOCK 
                    || proc_nr == SYSTEM  ? CSK_T : TSK_T);
                ipc_to_m = TSK_M;                  /* allowed targets */
                kcalls = TSK_KC;                   /* allowed kernel calls */
            }
            /* Privileges for the root system process. */
            else if(isrootsysn(proc_nr)) {
                priv(rp)->s_flags= RSYS_F;        /* privilege flags */
                priv(rp)->s_trap_mask= SRV_T;     /* allowed traps */
                ipc_to_m = SRV_M;                 /* allowed targets */
                kcalls = SRV_KC;                  /* allowed kernel calls */
                priv(rp)->s_sig_mgr = SRV_SM;     /* signal manager */
                rp->p_priority = SRV_Q;	          /* priority queue */
                rp->p_quantum_size_ms = SRV_QT;   /* quantum size */
            }
            /* Privileges for an ordinary process. */
            else {
		NOT_REACHABLE;
            }

            /* Fill in target mask. */
            memset(&map, 0, sizeof(map));

            if (ipc_to_m == ALL_M) {
                for(j = 0; j < NR_SYS_PROCS; j++)
                    set_sys_bit(map, j);
            }

            fill_sendto_mask(rp, &map);

            /* Fill in kernel call mask. */
            for(j = 0; j < SYS_CALL_MASK_SIZE; j++) {
                priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
            }
	}
	else {
	    /* Don't let the process run for now. */
            RTS_SET(rp, RTS_NO_PRIV | RTS_NO_QUANTUM);
	}
	rp->p_memmap[T].mem_vir  = ABS2CLICK(ip->memmap.text_vaddr);
	rp->p_memmap[T].mem_phys = ABS2CLICK(ip->memmap.text_paddr);
	rp->p_memmap[T].mem_len  = ABS2CLICK(ip->memmap.text_bytes);
	rp->p_memmap[D].mem_vir  = ABS2CLICK(ip->memmap.data_vaddr);
	rp->p_memmap[D].mem_phys = ABS2CLICK(ip->memmap.data_paddr);
	rp->p_memmap[D].mem_len  = ABS2CLICK(ip->memmap.data_bytes);
	rp->p_memmap[S].mem_phys = ABS2CLICK(ip->memmap.data_paddr +
					     ip->memmap.data_bytes +
					     ip->memmap.stack_bytes);
	rp->p_memmap[S].mem_vir  = ABS2CLICK(ip->memmap.data_vaddr +
					     ip->memmap.data_bytes +
					     ip->memmap.stack_bytes);
	rp->p_memmap[S].mem_len  = 0;

	/* Set initial register values.  The processor status word for tasks 
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes 
	 */
	rp->p_reg.pc = ip->memmap.entry;
	rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down three words
	 * to give crtso.s something to use as "argc", "argv" and "envp".
	 */
	if (isusern(proc_nr)) {		/* user-space process? */ 
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		argsz = 3 * sizeof(reg_t);
		rp->p_reg.sp -= argsz;
		phys_memset(rp->p_reg.sp - 
			(rp->p_memmap[S].mem_vir << CLICK_SHIFT) +
			(rp->p_memmap[S].mem_phys << CLICK_SHIFT), 
			0, argsz);
	}

	/* scheduling functions depend on proc_ptr pointing somewhere. */
	if(!get_cpulocal_var(proc_ptr))
		get_cpulocal_var(proc_ptr) = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it. VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if(ip->flags & PROC_FULLVM)
		rp->p_rts_flags |= RTS_VMINHIBIT;

	rp->p_rts_flags |= RTS_PROC_STOP;
	rp->p_rts_flags &= ~RTS_SLOT_FREE;
	alloc_segments(rp);
	DEBUGEXTRA(("done\n"));
  }

#define IPCNAME(n) { \
	assert((n) >= 0 && (n) <= IPCNO_HIGHEST); \
	assert(!ipc_call_names[n]);	\
	ipc_call_names[n] = #n; \
}

  IPCNAME(SEND);
  IPCNAME(RECEIVE);
  IPCNAME(SENDREC);
  IPCNAME(NOTIFY);
  IPCNAME(SENDNB);
  IPCNAME(SENDA);

  /* Architecture-dependent initialization. */
  DEBUGEXTRA(("arch_init()... "));
  arch_init();
  DEBUGEXTRA(("done\n"));

  /* System and processes initialization */
  DEBUGEXTRA(("system_init()... "));
  system_init();
  DEBUGEXTRA(("done\n"));

#ifdef CONFIG_SMP
  if (config_no_apic) {
	  BOOT_VERBOSE(printf("APIC disabled, disables SMP, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else if (config_no_smp) {
	  BOOT_VERBOSE(printf("SMP disabled, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else {
	  smp_init();
	  /*
	   * If smp_init() returns, it failed; try to finish booting on a
	   * single CPU.
	   */
  }
#else
  /*
   * If configured for a single CPU, we are already on the kernel stack,
   * which we will use every time we execute kernel code. We finish booting
   * and never return here.
   */
#endif

  NOT_REACHABLE;
  return 1;
}
Example #4
/*===========================================================================*
 *		            sef_cb_init_fresh                                *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the reincarnation server. */
  struct sigaction sa;
  struct boot_image *ip;
  int s,i,j;
  int nr_image_srvs, nr_image_priv_srvs, nr_uncaught_init_srvs;
  struct rproc *rp;
  struct rprocpub *rpub;
  struct boot_image image[NR_BOOT_PROCS];
  struct mproc mproc[NR_PROCS];
  struct exec header;
  struct boot_image_priv *boot_image_priv;
  struct boot_image_sys *boot_image_sys;
  struct boot_image_dev *boot_image_dev;

  /* See if we run in verbose mode. */
  env_parse("rs_verbose", "d", 0, &rs_verbose, 0, 1);

  /* Initialize the global init descriptor. */
  rinit.rproctab_gid = cpf_grant_direct(ANY, (vir_bytes) rprocpub,
      sizeof(rprocpub), CPF_READ);
  if(!GRANT_VALID(rinit.rproctab_gid)) {
      panic("RS", "unable to create rprocpub table grant", rinit.rproctab_gid);
  }

  /* Initialize the global update descriptor. */
  rupdate.flags = 0;

  /* Get a copy of the boot image table. */
  if ((s = sys_getimage(image)) != OK) {
      panic("RS", "unable to get copy of boot image table", s);
  }

  /* Determine the number of system services in the boot image table and
   * compute the size required for the boot image buffer.
   */
  nr_image_srvs = 0;
  boot_image_buffer_size = 0;
  for(i=0;i<NR_BOOT_PROCS;i++) {
      ip = &image[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(ip->endpoint))) {
          continue;
      }
      nr_image_srvs++;

      /* Look up the corresponding entry in the boot image sys table. */
      boot_image_info_lookup(ip->endpoint, image,
          NULL, NULL, &boot_image_sys, NULL);

      /* If we must keep a copy of this system service, read the header
       * and increase the size of the boot image buffer.
       */
      if(boot_image_sys->flags & SF_USE_COPY) {
          if((s = sys_getaoutheader(&header, i)) != OK) {
              panic("RS", "unable to get copy of a.out header", s);
          }
          boot_image_buffer_size += header.a_hdrlen
              + header.a_text + header.a_data;
      }
  }

  /* Determine the number of entries in the boot image priv table and make sure
   * it matches the number of system services in the boot image table.
   */
  nr_image_priv_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }
      nr_image_priv_srvs++;
  }
  if(nr_image_srvs != nr_image_priv_srvs) {
      panic("RS", "boot image table and boot image priv table mismatch",
          NO_NUM);
  }

  /* Allocate boot image buffer. */
  if(boot_image_buffer_size > 0) {
      boot_image_buffer = rs_startup_sbrk(boot_image_buffer_size);
      if(boot_image_buffer == (char *) -1) {
          panic("RS", "unable to allocate boot image buffer", NO_NUM);
      }
  }

  /* Reset the system process table. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
      rp->r_flags = 0;
      rp->r_pub = &rprocpub[rp - rproc];
      rp->r_pub->in_use = FALSE;
  }

  /* Initialize the system process table in 4 steps, each of them following
   * the appearance of system services in the boot image priv table.
   * - Step 1: get a copy of the executable image of every system service that
   * requires it while it is not yet running.
   * In addition, set privileges, sys properties, and dev properties (if any)
   * for every system service.
   */
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }

      /* Look up the corresponding entries in other tables. */
      boot_image_info_lookup(boot_image_priv->endpoint, image,
          &ip, NULL, &boot_image_sys, &boot_image_dev);
      rp = &rproc[boot_image_priv - boot_image_priv_table];
      rpub = rp->r_pub;

      /*
       * Get a copy of the executable image if required.
       */
      rp->r_exec_len = 0;
      rp->r_exec = NULL;
      if(boot_image_sys->flags & SF_USE_COPY) {
          exec_image_copy(ip - image, ip, rp);
      }

      /*
       * Set privileges.
       */
      /* Get label. */
      strcpy(rpub->label, boot_image_priv->label);

      if(boot_image_priv->endpoint != RS_PROC_NR) {
          /* Force a static priv id for system services in the boot image. */
          rp->r_priv.s_id = static_priv_id(
              _ENDPOINT_P(boot_image_priv->endpoint));

          /* Initialize privilege bitmaps. */
          rp->r_priv.s_flags = boot_image_priv->flags;         /* priv flags */
          rp->r_priv.s_trap_mask = boot_image_priv->trap_mask; /* traps */
          memcpy(&rp->r_priv.s_ipc_to, &boot_image_priv->ipc_to,
                            sizeof(rp->r_priv.s_ipc_to));      /* targets */

          /* Initialize kernel call mask bitmap from unordered set. */
          fill_call_mask(boot_image_priv->k_calls, NR_SYS_CALLS,
              rp->r_priv.s_k_call_mask, KERNEL_CALL, TRUE);

          /* Set the privilege structure. */
          if ((s = sys_privctl(ip->endpoint, SYS_PRIV_SET_SYS, &(rp->r_priv)))
              != OK) {
              panic("RS", "unable to set privilege structure", s);
          }
      }

      /* Synch the privilege structure with the kernel. */
      if ((s = sys_getpriv(&(rp->r_priv), ip->endpoint)) != OK) {
          panic("RS", "unable to synch privilege structure", s);
      }

      /*
       * Set sys properties.
       */
      rpub->sys_flags = boot_image_sys->flags;        /* sys flags */

      /*
       * Set dev properties.
       */
      rpub->dev_nr = boot_image_dev->dev_nr;          /* major device number */
      rpub->dev_style = boot_image_dev->dev_style;    /* device style */
      rpub->period = boot_image_dev->period;          /* heartbeat period */

      /* Get process name. */
      strcpy(rpub->proc_name, ip->proc_name);

      /* Get command settings. */
      rp->r_cmd[0]= '\0';
      rp->r_argv[0] = rp->r_cmd;
      rp->r_argv[1] = NULL;
      rp->r_argc = 1;
      rp->r_script[0]= '\0';

      /* Initialize vm call mask bitmap from unordered set. */
      fill_call_mask(boot_image_priv->vm_calls, NR_VM_CALLS,
          rpub->vm_call_mask, VM_RQ_BASE, TRUE);

      /* Get some settings from the boot image table. */
      rp->r_nice = ip->priority;
      rpub->endpoint = ip->endpoint;

      /* Set some defaults. */
      rp->r_uid = 0;                           /* root */
      rp->r_check_tm = 0;                      /* not checked yet */
      getuptime(&rp->r_alive_tm);              /* currently alive */
      rp->r_stop_tm = 0;                       /* not exiting yet */
      rp->r_restarts = 0;                      /* no restarts so far */
      rp->r_set_resources = 0;                 /* don't set resources */

      /* Mark as in use. */
      rp->r_flags = RS_IN_USE;
      rproc_ptr[_ENDPOINT_P(rpub->endpoint)]= rp;
      rpub->in_use = TRUE;
  }

  /* - Step 2: allow every system service in the boot image to run.
   */
  nr_uncaught_init_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }

      /* Ignore RS. */
      if(boot_image_priv->endpoint == RS_PROC_NR) {
          continue;
      }

      /* Look up the corresponding slot in the system process table. */
      rp = &rproc[boot_image_priv - boot_image_priv_table];
      rpub = rp->r_pub;

      /* Allow the service to run. */
      if ((s = sys_privctl(rpub->endpoint, SYS_PRIV_ALLOW, NULL)) != OK) {
          panic("RS", "unable to initialize privileges", s);
      }

      /* Initialize service. We assume every service will always get
       * back to us here at boot time.
       */
      if(boot_image_priv->flags & SYS_PROC) {
          if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
              panic("RS", "unable to initialize service", s);
          }
          if(rpub->sys_flags & SF_SYNCH_BOOT) {
              /* Catch init ready message now to synchronize. */
              catch_boot_init_ready(rpub->endpoint);
          }
          else {
              /* Catch init ready message later. */
              nr_uncaught_init_srvs++;
          }
      }
  }

  /* - Step 3: let every system service complete initialization by
   * catching all the init ready messages left.
   */
  while(nr_uncaught_init_srvs) {
      catch_boot_init_ready(ANY);
      nr_uncaught_init_srvs--;
  }

  /* - Step 4: all the system services in the boot image are now running.
   * Complete the initialization of the system process table in collaboration
   * with other system processes.
   */
  if ((s = getsysinfo(PM_PROC_NR, SI_PROC_TAB, mproc)) != OK) {
      panic("RS", "unable to get copy of PM process table", s);
  }
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
      boot_image_priv = &boot_image_priv_table[i];

      /* System services only. */
      if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
          continue;
      }

      /* Look up the corresponding slot in the system process table. */
      rp = &rproc[boot_image_priv - boot_image_priv_table];
      rpub = rp->r_pub;

      /* Get pid from PM process table. */
      rp->r_pid = NO_PID;
      for (j = 0; j < NR_PROCS; j++) {
          if (mproc[j].mp_endpoint == rpub->endpoint) {
              rp->r_pid = mproc[j].mp_pid;
              break;
          }
      }
      if(j == NR_PROCS) {
          panic("RS", "unable to get pid", NO_NUM);
      }
  }

  /*
   * Now complete RS initialization process in collaboration with other
   * system services.
   */
  /* Let the rest of the system know about our dynamically allocated buffer. */
  if(boot_image_buffer_size > 0) {
      boot_image_buffer = rs_startup_sbrk_synch(boot_image_buffer_size);
      if(boot_image_buffer == (char *) -1) {
          panic("RS", "unable to synch boot image buffer", NO_NUM);
      }
  }

  /* Set alarm to periodically check service status. */
  if (OK != (s=sys_setalarm(RS_DELTA_T, 0)))
      panic("RS", "couldn't set alarm", s);

  /* Install signal handlers. Ask PM to transform signal into message. */
  sa.sa_handler = SIG_MESS;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  if (sigaction(SIGCHLD,&sa,NULL)<0) panic("RS","sigaction failed", errno);
  if (sigaction(SIGTERM,&sa,NULL)<0) panic("RS","sigaction failed", errno);

  /* Initialize the exec pipe. */
  if (pipe(exec_pipe) == -1)
	panic("RS", "pipe failed", errno);
  if (fcntl(exec_pipe[0], F_SETFD,
	fcntl(exec_pipe[0], F_GETFD) | FD_CLOEXEC) == -1)
  {
	panic("RS", "fcntl set FD_CLOEXEC on pipe input failed", errno);
  }
  if (fcntl(exec_pipe[1], F_SETFD,
	fcntl(exec_pipe[1], F_GETFD) | FD_CLOEXEC) == -1)
  {
	panic("RS", "fcntl set FD_CLOEXEC on pipe output failed", errno);
  }
  if (fcntl(exec_pipe[0], F_SETFL,
	fcntl(exec_pipe[0], F_GETFL) | O_NONBLOCK) == -1)
  {
	panic("RS", "fcntl set O_NONBLOCK on pipe input failed", errno);
  }

  /* Map out our own text and data. This is normally done in crtso.o,
   * but RS is an exception: we don't get to talk to VM that early on.
   * That's why we override munmap() and munmap_text() in utility.c.
   *
   * _minix_unmapzero() is the same code in crtso.o that normally does
   * this at startup. It is best kept there, since crtso.o knows exactly
   * what the ranges of the filler data are.
   */
  unmap_ok = 1;
  _minix_unmapzero();

  return(OK);
}
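
In step 4 of this older RS variant, the service pids are found by walking a copy of PM's process table and matching each service's endpoint. The sketch below isolates that lookup; the struct and constants are simplified stand-ins, not the real PM definitions.

/* Sketch of the endpoint-to-pid scan over a PM table copy (stand-ins). */
#include <stdio.h>

#define NR_PROCS 8
#define NO_PID   (-1)

struct pm_slot {                 /* stand-in for struct mproc */
    int mp_endpoint;
    int mp_pid;
};

/* Return the pid registered for an endpoint, or NO_PID if none matches. */
static int pid_for_endpoint(const struct pm_slot *tab, int n, int endpoint)
{
    int j;
    for (j = 0; j < n; j++)
        if (tab[j].mp_endpoint == endpoint)
            return tab[j].mp_pid;
    return NO_PID;
}

int main(void)
{
    struct pm_slot tab[NR_PROCS] = { { 101, 4 }, { 102, 5 }, { 103, 6 } };

    printf("endpoint 102 -> pid %d\n", pid_for_endpoint(tab, NR_PROCS, 102));
    printf("endpoint 999 -> pid %d\n", pid_for_endpoint(tab, NR_PROCS, 999));
    return 0;
}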
Example #5
void main(void)
{
/* Start the ball rolling. */
	struct boot_image *ip;		/* boot image pointer */
	register struct proc *rp;	/* process pointer */
	register struct priv *sp;	/* privilege structure pointer */
	register int i, j;
	int hdrindex;			/* index to array of a.out headers */
	phys_clicks text_base;
	vir_clicks text_clicks, data_clicks, st_clicks;
	reg_t ktsb;			/* kernel task stack base */
	struct exec *e_hdr = 0;		/* for a copy of an a.out header */

	/* Global value to test segment sanity. */
	magictest = MAGICTEST;

	/* Clear the process table. Announce each slot as empty and set up mappings
	 * for proc_addr() and proc_nr() macros. Do the same for the table with 
	 * privilege structures for the system processes.
	 */
	for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
		rp->p_rts_flags = RTS_SLOT_FREE;	/* initialize free slot */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
		rp->p_magic = PMAGIC;
#endif
		rp->p_nr = i;				/* proc number from ptr */
		rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
	}

	for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
		sp->s_proc_nr = ENDPT_NONE;			/* initialize as free */
		sp->s_id = i;				/* priv structure index */
		ppriv_addr[i] = sp;			/* priv ptr from number */
	}

	/* Set up proc table entries for processes in boot image.  The stacks of the
	 * kernel tasks are initialized to an array in data space.  The stacks
	 * of the servers have been added to the data segment by the monitor, so
	 * the stack pointer is set to the end of the data segment.  All the
	 * processes are in low memory on the 8086.  On the 386 only the kernel
	 * is in low memory, the rest is loaded in extended memory.
	 */

	/* Task stacks. */
	ktsb = (reg_t) t_stack;

	for (i=0; i < NR_BOOT_PROCS; ++i) {
		int schedulable_proc, proc_nr;
		int ipc_to_m, kcalls;

		ip = &image[i];				/* process' attributes */
		rp = proc_addr(ip->proc_nr);		/* get process pointer */
		ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
		rp->p_max_priority = ip->priority;	/* max scheduling priority */
		rp->p_priority = ip->priority;		/* current priority */
		rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
		rp->p_ticks_left = ip->quantum;		/* current credit */

		strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
		/* See if this process is immediately schedulable.
		 * In that case, set its privileges now and allow it to run.
		 * Only kernel tasks and the root system process get to run immediately.
		 * All the other system processes are inhibited from running by the
		 * RTS_NO_PRIV flag. They can only be scheduled once the root system
		 * process has set their privileges.
		 */
		proc_nr = proc_nr(rp);
		schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
		if(schedulable_proc) {
			/* Assign privilege structure. Force a static privilege id. */
			(void) get_priv(rp, static_priv_id(proc_nr));

			/* Privileges for kernel tasks. */
			if(iskerneln(proc_nr)) {
				/* Privilege flags. */
				priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
				/* Allowed traps. */
				priv(rp)->s_trap_mask = (proc_nr == CLOCK
					|| proc_nr == SYSTEM  ? CSK_T : TSK_T);
				ipc_to_m = TSK_M;                  /* allowed targets */
				kcalls = TSK_KC;                   /* allowed kernel calls */
			} else if(isrootsysn(proc_nr)) {
				/* Privileges for the root system process. */
				priv(rp)->s_flags= RSYS_F;         /* privilege flags */
				priv(rp)->s_trap_mask= RSYS_T;     /* allowed traps */
				ipc_to_m = RSYS_M;                 /* allowed targets */
				kcalls = RSYS_KC;                  /* allowed kernel calls */
			}

			/* Fill in target mask. */
			for (j=0; j < NR_SYS_PROCS; j++) {
				if (ipc_to_m & (1 << j))
					set_sendto_bit(rp, j);
				else
					unset_sendto_bit(rp, j);
			}

			/* Fill in kernel call mask. */
			for(j = 0; j < CALL_MASK_SIZE; j++) {
				priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
			}
		} else {
			/* Don't let the process run for now. */
			RTS_SET(rp, RTS_NO_PRIV);
		}

		if (iskerneln(proc_nr)) {               /* part of the kernel? */
			if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
				rp->p_priv->s_stack_guard = (reg_t *) ktsb;
				*rp->p_priv->s_stack_guard = STACK_GUARD;
			}

			ktsb += ip->stksize;	/* point to high end of stack */
			rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
			hdrindex = 0;		/* all use the first a.out header */
		} else {
			hdrindex = 1 + i-NR_TASKS;	/* system/user processes */
		}

		/* Architecture-specific way to find out aout header of this
		 * boot process.
		 */
		e_hdr = arch_get_aout_header(hdrindex);

		/* Convert addresses to clicks and build process memory map */
		text_base = e_hdr->a_syms >> CLICK_SHIFT;
		st_clicks= (e_hdr->a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
		data_clicks = (e_hdr->a_text + e_hdr->a_data + e_hdr->a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
		text_clicks = 0;

		rp->p_memmap[T].mem_phys = text_base;
		rp->p_memmap[T].mem_len  = text_clicks;
		rp->p_memmap[D].mem_phys = text_base + text_clicks;
		rp->p_memmap[D].mem_len  = data_clicks;
		rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
		rp->p_memmap[S].mem_vir  = st_clicks;
		rp->p_memmap[S].mem_len  = 0;

		/* Patch (override) the non-kernel processes' entry points in the image
		 * table, which is located in kernel/kernel_syms.c. Kernel processes like
		 * IDLE, SYSTEM, CLOCK and HARDWARE are not changed because they are part
		 * of the kernel and their entry points are set at compile time. For IDLE
		 * and HARDWARE the entry point can be ignored because they never run
		 * (RTS_PROC_STOP is set).
		 */
		if (!iskerneln(proc_nr(rp)))
			ip->initial_pc = (task_t*)e_hdr->a_entry;

		/* Set initial register values.  The processor status word for tasks 
		 * is different from that of other processes because tasks can
		 * access I/O; this is not allowed to less-privileged processes 
		 */
		rp->p_reg.pc = (reg_t) ip->initial_pc;
		rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

		/* Initialize the server stack pointer. Take it down one word
		 * to give crtso.s something to use as "argc","argv" and "envp".
		 */
		if (isusern(proc_nr)) {		/* user-space process? */
			rp->p_reg.sp = (rp->p_memmap[S].mem_vir + rp->p_memmap[S].mem_len)
					<< CLICK_SHIFT;
			rp->p_reg.sp -= 3*sizeof(reg_t);
		}

		/* scheduling functions depend on proc_ptr pointing somewhere. */
		if(!proc_ptr)
			proc_ptr = rp;

		/* If this process has its own page table, VM will set the
		 * PT up and manage it. VM will signal the kernel when it has
		 * done this; until then, don't let it run.
		 */
		if(ip->flags & PROC_FULLVM)
			RTS_SET(rp, RTS_VMINHIBIT);

		/* The IDLE and HARDWARE tasks are never put on a run queue, as
		 * they are never ready to run.
		 */
		if (rp->p_nr == HARDWARE)
			RTS_SET(rp, RTS_PROC_STOP);

		if (rp->p_nr == IDLE)
			RTS_SET(rp, RTS_PROC_STOP);

		RTS_UNSET(rp, RTS_SLOT_FREE); /* remove RTS_SLOT_FREE and schedule */
		alloc_segments(rp);
	} /* for */

	/* Architecture-dependent initialization. */
	arch_init();

#ifdef CONFIG_DEBUG_KERNEL_STATS_PROFILE
	sprofiling = 0;      /* we're not profiling until instructed to */
#endif
	cprof_procs_no = 0;  /* init nr of hash table slots used */

#ifdef CONFIG_IDLE_TSC
	idle_tsc = cvu64(0);
#endif

	vm_running = 0;
	krandom.random_sources = RANDOM_SOURCES;
	krandom.random_elements = RANDOM_ELEMENTS;

	/* Nucleos is now ready. All boot image processes are on the ready queue.
	 * Return to the assembly code to start running the current process. 
	 */
	bill_ptr = proc_addr(IDLE);		/* it has to point somewhere */
	announce();				/* print Nucleos startup banner */

	/*
	 * enable timer interrupts and clock task on the boot CPU
	 */
	if (boot_cpu_init_timer(system_hz)) {
		kernel_panic("FATAL : failed to initialize timer interrupts, "
			    "cannot continue without any clock source!",
			    NO_NUM);
	}

	/* Warnings for sanity checks that take time. These warnings are printed
	 * to make it clear that no full release should be shipped with them
	 * enabled.
	 */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
	FIXME("CONFIG_DEBUG_KERNEL_SCHED_CHECK enabled");
#endif

#ifdef CONFIG_DEBUG_KERNEL_VMASSERT
	FIXME("CONFIG_DEBUG_KERNEL_VMASSERT enabled");
#endif

#ifdef CONFIG_DEBUG_PROC_CHECK
	FIXME("PROC check enabled");
#endif

	restart();
}
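
In the boot loop above, each kernel task's stack is carved out of the static t_stack array: a guard word is written at the low end, ktsb is advanced past the stack, and the task's initial stack pointer is set to the high end. The sketch below reproduces just that carving logic; the sizes and guard value are invented for the example.

/* Sketch of kernel task stack carving with a guard word (assumed sizes). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_GUARD 0xDEADBEEFu                 /* assumed guard pattern */
#define TOTAL_STACK 4096

static uint32_t t_stack[TOTAL_STACK / sizeof(uint32_t)];

/* Carve one stack of stksize bytes: write the guard at the low end,
 * advance *ktsb past it, and return the initial (high-end) stack pointer.
 */
static uintptr_t carve_stack(uintptr_t *ktsb, size_t stksize)
{
    if (stksize > 0)
        *(uint32_t *) *ktsb = STACK_GUARD;      /* overflow canary */
    *ktsb += stksize;
    return *ktsb;
}

int main(void)
{
    uintptr_t ktsb = (uintptr_t) t_stack;
    uintptr_t sp1 = carve_stack(&ktsb, 1024);
    uintptr_t sp2 = carve_stack(&ktsb, 1024);

    assert(sp2 - sp1 == 1024);
    printf("stacks carved: sp1=%p sp2=%p\n", (void *) sp1, (void *) sp2);
    return 0;
}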