Example #1
/*===========================================================================*
 *				select_request_async			     *
 *===========================================================================*/
static int select_request_async(struct filp *f, int *ops, int block)
{
  int r, rops, major;
  struct dmap *dp;

  rops = *ops;

  /* By default, nothing to do */
  *ops = 0;

  if (!block && (f->filp_select_flags & FSF_BLOCKED)) {
	/* This filp is blocked waiting for a reply, but we don't want to
	 * block ourselves. Unless we're awaiting the initial reply, these
	 * operations won't be ready */
	if (!(f->filp_select_flags & FSF_BUSY)) {
		if ((rops & SEL_RD) && (f->filp_select_flags & FSF_RD_BLOCK))
			rops &= ~SEL_RD;
		if ((rops & SEL_WR) && (f->filp_select_flags & FSF_WR_BLOCK))
			rops &= ~SEL_WR;
		if ((rops & SEL_ERR) && (f->filp_select_flags & FSF_ERR_BLOCK))
			rops &= ~SEL_ERR;
		if (!(rops & (SEL_RD|SEL_WR|SEL_ERR)))
			return(OK);
	}
  }

  f->filp_select_flags |= FSF_UPDATE;
  if (block) {
	rops |= SEL_NOTIFY;
	if (rops & SEL_RD)	f->filp_select_flags |= FSF_RD_BLOCK;
	if (rops & SEL_WR)	f->filp_select_flags |= FSF_WR_BLOCK;
	if (rops & SEL_ERR)	f->filp_select_flags |= FSF_ERR_BLOCK;
  }

  if (f->filp_select_flags & FSF_BUSY)
	return(SUSPEND);

  major = major(f->filp_vno->v_sdev);
  if (major < 0 || major >= NR_DEVICES) return(ENXIO);
  dp = &dmap[major];
  if (dp->dmap_sel_filp)
	return(SUSPEND);

  f->filp_select_flags &= ~FSF_UPDATE;
  r = dev_io(VFS_DEV_SELECT, f->filp_vno->v_sdev, rops, NULL,
	     cvu64(0), 0, 0, FALSE);
  if (r < 0 && r != SUSPEND)
	return(r);

  if (r != SUSPEND)
	panic("select_request_asynch: expected SUSPEND got: %d", r);

  dp->dmap_sel_filp = f;
  f->filp_select_flags |= FSF_BUSY;

  return(SUSPEND);
}
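The masking step at the top of select_request_async is what keeps VFS from re-issuing a select query for operations that are already blocked on an outstanding driver reply. Below is a minimal user-space sketch of just that step; the SEL_*/FSF_* values are local stand-ins, not the real VFS definitions.

#include <stdio.h>

/* Stand-in constants; the real SEL_* / FSF_* flags live in the VFS headers. */
#define SEL_RD        0x1
#define SEL_WR        0x2
#define SEL_ERR       0x4
#define FSF_RD_BLOCK  0x10
#define FSF_WR_BLOCK  0x20
#define FSF_ERR_BLOCK 0x40

/* Drop requested operations that are already blocked on a pending reply. */
static int strip_blocked_ops(int rops, int select_flags)
{
	if ((rops & SEL_RD)  && (select_flags & FSF_RD_BLOCK))  rops &= ~SEL_RD;
	if ((rops & SEL_WR)  && (select_flags & FSF_WR_BLOCK))  rops &= ~SEL_WR;
	if ((rops & SEL_ERR) && (select_flags & FSF_ERR_BLOCK)) rops &= ~SEL_ERR;
	return rops;
}

int main(void)
{
	/* Read and write were requested, but a read query is already pending. */
	int rops = strip_blocked_ops(SEL_RD | SEL_WR, FSF_RD_BLOCK);

	printf("remaining ops: %#x\n", rops);	/* prints 0x2, i.e. SEL_WR */
	return 0;
}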
Example #2
/*===========================================================================*
 *				do_lseek				     *
 *===========================================================================*/
int do_lseek()
{
/* Perform the lseek(ls_fd, offset, whence) system call. */
  register struct filp *rfilp;
  int r = OK, seekfd, seekwhence;
  off_t offset;
  u64_t pos, newpos;

  seekfd = job_m_in.ls_fd;
  seekwhence = job_m_in.whence;
  offset = (off_t) job_m_in.offset_lo;

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp(seekfd, VNODE_READ)) == NULL) return(err_code);

  /* No lseek on pipes. */
  if (S_ISFIFO(rfilp->filp_vno->v_mode)) {
	unlock_filp(rfilp);
	return(ESPIPE);
  }

  /* The value of 'whence' determines the start position to use. */
  switch(seekwhence) {
    case SEEK_SET: pos = cvu64(0);	break;
    case SEEK_CUR: pos = rfilp->filp_pos;	break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size);	break;
    default: unlock_filp(rfilp); return(EINVAL);
  }

  if (offset >= 0)
	newpos = add64ul(pos, offset);
  else
	newpos = sub64ul(pos, -offset);

  /* Check for overflow. */
  if (ex64hi(newpos) != 0) {
	r = EOVERFLOW;
  } else if ((off_t) ex64lo(newpos) < 0) { /* no negative file size */
	r = EOVERFLOW;
  } else {
	/* insert the new position into the output message */
	m_out.reply_l1 = ex64lo(newpos);

	if (cmp64(newpos, rfilp->filp_pos) != 0) {
		rfilp->filp_pos = newpos;

		/* Inhibit read ahead request */
		r = req_inhibread(rfilp->filp_vno->v_fs_e,
				  rfilp->filp_vno->v_inode_nr);
	}
  }

  unlock_filp(rfilp);
  return(r);
}
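The parts of do_lseek that are easiest to get wrong are the whence dispatch and the overflow test on the 64-bit result. The following self-contained sketch redoes that computation with plain stdint arithmetic, assuming a 32-bit non-negative off_t as in the message field above; XSEEK_* and seek_newpos are illustrative names, not VFS symbols.

#include <stdint.h>
#include <stdio.h>

enum { XSEEK_SET, XSEEK_CUR, XSEEK_END };	/* local stand-ins for SEEK_* */

/* Compute a new file position; returns 0 on success, -1 on EINVAL/EOVERFLOW. */
static int seek_newpos(int whence, uint64_t cur, uint64_t size,
	int32_t offset, uint64_t *newpos)
{
	uint64_t base;

	switch (whence) {
	case XSEEK_SET: base = 0; break;
	case XSEEK_CUR: base = cur; break;
	case XSEEK_END: base = size; break;
	default: return -1;
	}

	if (offset >= 0)
		*newpos = base + (uint64_t) offset;
	else
		*newpos = base - (uint64_t) -(int64_t) offset;

	/* Mirror the ex64hi()/sign checks above: the result must fit in a
	 * non-negative 32-bit off_t. A wrap below zero ends up huge and is
	 * caught here as well. */
	if (*newpos > (uint64_t) INT32_MAX)
		return -1;
	return 0;
}

int main(void)
{
	uint64_t np;

	if (seek_newpos(XSEEK_END, 100, 4096, -8, &np) == 0)
		printf("new position: %llu\n", (unsigned long long) np);
	return 0;
}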
Example #3
/*===========================================================================*
 *				scall_lseek				     *
 *===========================================================================*/
int scall_lseek()
{
	/* Perform the lseek(ls_fd, offset, whence) system call. */
	register struct filp *rfilp;
	int r;
	long offset;
	u64_t pos, newpos;

	/* Check to see if the file descriptor is valid. */
	if ( (rfilp = get_filp(m_in.ls_fd)) == NIL_FILP)
		return(err_code);

	/* No lseek on pipes. */
	if (rfilp->filp_vno->v_pipe == I_PIPE)
		return -ESPIPE;

	/* The value of 'whence' determines the start position to use. */
	switch(m_in.whence) {
	case SEEK_SET:
		pos = cvu64(0);
		break;

	case SEEK_CUR:
		pos = rfilp->filp_pos;
		break;

	case SEEK_END:
		pos = cvul64(rfilp->filp_vno->v_size);
		break;

	default:
		return(-EINVAL);
	}

	offset = m_in.offset_lo;
	if (offset >= 0)
		newpos = add64ul(pos, offset);
	else
		newpos = sub64ul(pos, -offset);

	/* Check for overflow. */
	if (ex64hi(newpos) != 0)
		return -EINVAL;

	if (cmp64(newpos, rfilp->filp_pos) != 0) { /* Inhibit read ahead request */
		r = req_inhibread(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr);

		if (r != 0)
			return r;
	}

	rfilp->filp_pos = newpos;

	return ex64lo(newpos);
}
Example #4
/*===========================================================================*
 *				actual_llseek				     *
 *===========================================================================*/
int actual_llseek(struct fproc *rfp, message *m_out, int seekfd, int seekwhence,
	u64_t offset)
{
/* Perform the llseek(ls_fd, offset, whence) system call. */
  register struct filp *rfilp;
  u64_t pos, newpos;
  int r = OK;
  long off_hi = ex64hi(offset);

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp2(rfp, seekfd, VNODE_READ)) == NULL) {
	return(err_code);
  }

  /* No lseek on pipes. */
  if (S_ISFIFO(rfilp->filp_vno->v_mode)) {
	unlock_filp(rfilp);
	return(ESPIPE);
  }

  /* The value of 'whence' determines the start position to use. */
  switch(seekwhence) {
    case SEEK_SET: pos = cvu64(0);	break;
    case SEEK_CUR: pos = rfilp->filp_pos;	break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size);	break;
    default: unlock_filp(rfilp); return(EINVAL);
  }

  newpos = pos + offset;

  /* Check for overflow. */
  if ((off_hi > 0) && cmp64(newpos, pos) < 0)
      r = EINVAL;
  else if ((off_hi < 0) && cmp64(newpos, pos) > 0)
      r = EINVAL;
  else {
	/* insert the new position into the output message */
	m_out->reply_l1 = ex64lo(newpos);
	m_out->reply_l2 = ex64hi(newpos);

	if (cmp64(newpos, rfilp->filp_pos) != 0) {
		rfilp->filp_pos = newpos;

		/* Inhibit read ahead request */
		r = req_inhibread(rfilp->filp_vno->v_fs_e,
				  rfilp->filp_vno->v_inode_nr);
	}
  }

  unlock_filp(rfilp);
  return(r);
}
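The overflow check in actual_llseek relies on the direction of wraparound: adding a positive offset must never make the position smaller, and adding a negative offset must never make it larger. A stand-alone version of that test using plain uint64_t/int64_t instead of the add64/cmp64 helpers:

#include <stdint.h>
#include <stdio.h>

/* Add a signed offset to an unsigned position, rejecting wraparound the same
 * way the checks above do. */
static int add_offset_checked(uint64_t pos, int64_t offset, uint64_t *out)
{
	uint64_t newpos = pos + (uint64_t) offset;	/* wraps modulo 2^64 */

	if (offset > 0 && newpos < pos) return -1;	/* wrapped past the top */
	if (offset < 0 && newpos > pos) return -1;	/* went below zero */

	*out = newpos;
	return 0;
}

int main(void)
{
	uint64_t np;

	if (add_offset_checked(10, -20, &np) != 0)
		printf("rejected: would seek before position 0\n");
	return 0;
}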
Example #5
/*===========================================================================*
 *				select_request_sync			     *
 *===========================================================================*/
static int select_request_sync(struct filp *f, int *ops, int block)
{
  int rops;

  rops = *ops;
  if (block) rops |= SEL_NOTIFY;
  *ops = dev_io(VFS_DEV_SELECT, f->filp_vno->v_sdev, rops, NULL,
		cvu64(0), 0, 0, FALSE);
  if (*ops < 0)
	return(*ops);

  return(OK);
}
Example #6
/*===========================================================================*
 *				do_llseek				     *
 *===========================================================================*/
PUBLIC int do_llseek()
{
/* Perform the llseek(ls_fd, offset, whence) system call. */
  register struct filp *rfilp;
  u64_t pos, newpos;
  int r = OK;

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp(m_in.ls_fd, VNODE_READ)) == NULL) return(err_code);

  /* No lseek on pipes. */
  if (rfilp->filp_vno->v_pipe == I_PIPE) {
	unlock_filp(rfilp);
	return(ESPIPE);
  }

  /* The value of 'whence' determines the start position to use. */
  switch(m_in.whence) {
    case SEEK_SET: pos = cvu64(0);	break;
    case SEEK_CUR: pos = rfilp->filp_pos;	break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size);	break;
    default: unlock_filp(rfilp); return(EINVAL);
  }

  newpos = add64(pos, make64(m_in.offset_lo, m_in.offset_high));

  /* Check for overflow. */
  if (( (long) m_in.offset_high > 0) && cmp64(newpos, pos) < 0)
      r = EINVAL;
  else if (( (long) m_in.offset_high < 0) && cmp64(newpos, pos) > 0)
      r = EINVAL;
  else {
	/* insert the new position into the output message */
	m_out.reply_l1 = ex64lo(newpos);
	m_out.reply_l2 = ex64hi(newpos);

	if (cmp64(newpos, rfilp->filp_pos) != 0) {
		rfilp->filp_pos = newpos;

		/* Inhibit read ahead request */
		r = req_inhibread(rfilp->filp_vno->v_fs_e,
				  rfilp->filp_vno->v_inode_nr);
	}
  }

  unlock_filp(rfilp);
  return(r);
}
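Here the offset arrives split over two 32-bit message words and is reassembled with make64(offset_lo, offset_high). A hypothetical user-space equivalent, assuming the usual two's-complement representation; the field names are only illustrative:

#include <stdint.h>
#include <stdio.h>

/* Local analogue of make64(lo, hi): rebuild a signed 64-bit offset from two
 * 32-bit message words (assumes two's-complement int64_t). */
static int64_t make_offset64(uint32_t offset_lo, uint32_t offset_high)
{
	return (int64_t) (((uint64_t) offset_high << 32) | offset_lo);
}

int main(void)
{
	/* An offset of -1 is carried as high = 0xFFFFFFFF, low = 0xFFFFFFFF. */
	printf("%lld\n", (long long) make_offset64(0xFFFFFFFFu, 0xFFFFFFFFu));
	return 0;
}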
Example #7
/*===========================================================================*
 *				get_fd					     *
 *===========================================================================*/
PUBLIC int get_fd(int start, mode_t bits, int *k, struct filp **fpt)
{
/* Look for a free file descriptor and a free filp slot.  Fill in the mode word
 * in the latter, but don't claim either one yet, since the open() or creat()
 * may yet fail.
 */

  register struct filp *f;
  register int i;

  /* Search the fproc fp_filp table for a free file descriptor. */
  for (i = start; i < OPEN_MAX; i++) {
	if (fp->fp_filp[i] == NULL && !FD_ISSET(i, &fp->fp_filp_inuse)) {
		/* A file descriptor has been located. */
		*k = i;
		break;
	}
  }

  /* Check to see if a file descriptor has been found. */
  if (i >= OPEN_MAX) return(EMFILE);

  /* If we don't care about a filp, return now */
  if (fpt == NULL) return(OK);

  /* Now that a file descriptor has been found, look for a free filp slot. */
  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	assert(f->filp_count >= 0);
	if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
		if (verbose) printf("get_fd: locking filp=%p\n", f);
		f->filp_mode = bits;
		f->filp_pos = cvu64(0);
		f->filp_selectors = 0;
		f->filp_select_ops = 0;
		f->filp_pipe_select_ops = 0;
		f->filp_flags = 0;
		f->filp_state = FS_NORMAL;
		f->filp_select_flags = 0;
		f->filp_softlock = NULL;
		*fpt = f;
		return(OK);
	}
  }

  /* If control passes here, the filp table must be full.  Report that back. */
  return(ENFILE);
}
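The filp scan pairs a free-slot test with a non-blocking trylock, so a slot that another thread is in the middle of setting up is simply skipped rather than waited on. A minimal pthread-based analogue of that pattern; the slot layout is illustrative and not the real filp structure:

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 8

struct slot {
	int count;			/* 0 means free, like filp_count above */
	pthread_mutex_t lock;
};

static struct slot table[NSLOTS];

/* Find an unused slot and claim it with a non-blocking trylock. */
static struct slot *claim_slot(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++) {
		if (table[i].count == 0 &&
		    pthread_mutex_trylock(&table[i].lock) == 0) {
			/* The slot stays locked; the caller initializes it and
			 * bumps the count only once the open succeeds. */
			return &table[i];
		}
	}
	return NULL;			/* table full: the ENFILE case above */
}

int main(void)
{
	int i;
	struct slot *s;

	for (i = 0; i < NSLOTS; i++)
		pthread_mutex_init(&table[i].lock, NULL);

	s = claim_slot();
	printf("claimed slot %ld\n", s != NULL ? (long) (s - table) : -1L);
	return 0;
}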
Example #8
/*===========================================================================*
 *				get_range				     *
 *===========================================================================*/
static size_t get_range(struct fbd_rule *rule, u64_t pos, size_t *size,
	u64_t *skip)
{
	/* Compute the range within the given request range that is affected
	 * by the given rule, and optionally the number of bytes preceding
	 * the range that are also affected by the rule.
	 */
	u64_t delta;
	size_t off;
	int to_eof;

	to_eof = cmp64(rule->start, rule->end) >= 0;

	if (cmp64(pos, rule->start) > 0) {
		if (skip != NULL) *skip = sub64(pos, rule->start);

		off = 0;
	}
	else {
		if (skip != NULL) *skip = cvu64(0);

		delta = sub64(rule->start, pos);

		assert(ex64hi(delta) == 0);

		off = ex64lo(delta);
	}

	if (!to_eof) {
		assert(cmp64(pos, rule->end) < 0);

		delta = sub64(rule->end, pos);

		if (cmp64u(delta, *size) < 0)
			*size = ex64lo(delta);
	}

	assert(*size > off);

	*size -= off;

	return off;
}
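Because the u64_t helper calls obscure the arithmetic, the same clipping logic is shown below with plain uint64_t, keeping the convention above that start >= end means the rule runs to end of file; clip_range is a made-up name for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Intersect a request [pos, pos+*size) with a rule [start, end). Returns the
 * offset of the affected part inside the request and shrinks *size to the
 * length of that part; *skip gets the rule bytes that precede the request. */
static size_t clip_range(uint64_t start, uint64_t end, uint64_t pos,
	size_t *size, uint64_t *skip)
{
	int to_eof = (start >= end);
	size_t off;

	if (pos > start) {
		if (skip != NULL) *skip = pos - start;
		off = 0;
	} else {
		if (skip != NULL) *skip = 0;
		off = (size_t) (start - pos);
	}

	if (!to_eof) {
		assert(pos < end);
		if (end - pos < *size)
			*size = (size_t) (end - pos);
	}

	assert(*size > off);
	*size -= off;
	return off;
}

int main(void)
{
	size_t size = 100;
	uint64_t skip;
	size_t off = clip_range(30, 80, 0, &size, &skip);

	/* The request [0, 100) overlaps the rule [30, 80) at offset 30 for 50 bytes. */
	printf("off=%zu size=%zu skip=%llu\n", off, size, (unsigned long long) skip);
	return 0;
}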
Example #9
void print_procs(int maxlines,
	struct proc *proc1, struct proc *proc2,
	struct mproc *mproc)
{
	int p, nprocs, tot=0;
	u64_t idleticks = cvu64(0);
	u64_t kernelticks = cvu64(0);
	u64_t systemticks = cvu64(0);
	u64_t userticks = cvu64(0);
	u64_t total_ticks = cvu64(0);
	unsigned long tcyc;
	unsigned long tmp;
	int blockedseen = 0;
	struct tp tick_procs[PROCS];

	for(p = nprocs = 0; p < PROCS; p++) {
		if(isemptyp(&proc2[p]))
			continue;
		tick_procs[nprocs].p = proc2 + p;
		if(proc1[p].p_endpoint == proc2[p].p_endpoint) {
			tick_procs[nprocs].ticks =
				sub64(proc2[p].p_cycles, proc1[p].p_cycles);
		} else {
			tick_procs[nprocs].ticks =
				proc2[p].p_cycles;
		}
		total_ticks = add64(total_ticks, tick_procs[nprocs].ticks);
		if(p-NR_TASKS == IDLE) {
			idleticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(p-NR_TASKS == KERNEL) {
			kernelticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(mproc[proc2[p].p_nr].mp_procgrp == 0)
			systemticks = add64(systemticks, tick_procs[nprocs].ticks);
		else if (p > NR_TASKS)
			userticks = add64(userticks, tick_procs[nprocs].ticks);

		nprocs++;
	}

	if (!cmp64u(total_ticks, 0))
		return;

	qsort(tick_procs, nprocs, sizeof(tick_procs[0]), cmp_ticks);

	tcyc = div64u(total_ticks, SCALE);

	tmp = div64u(userticks, SCALE);
	printf("CPU states: %6.2f%% user, ", 100.0*(tmp)/tcyc);

	tmp = div64u(systemticks, SCALE);
	printf("%6.2f%% system, ", 100.0*tmp/tcyc);

	tmp = div64u(kernelticks, SCALE);
	printf("%6.2f%% kernel, ", 100.0*tmp/tcyc);

	tmp = div64u(idleticks, SCALE);
	printf("%6.2f%% idle", 100.0*tmp/tcyc);

#define NEWLINE do { printf("\n"); if(--maxlines <= 0) { return; } } while(0) 
	NEWLINE;
	NEWLINE;

	printf("  PID USERNAME PRI NICE   SIZE STATE   TIME     CPU COMMAND");
	NEWLINE;
	for(p = 0; p < nprocs; p++) {
		struct proc *pr;
		int pnr;
		int level = 0;

		pnr = tick_procs[p].p->p_nr;

		if(pnr < 0) {
			/* skip old kernel tasks as they don't run anymore */
			continue;
		}

		pr = tick_procs[p].p;

		/* If we're in blocked verbose mode, indicate start of
		 * blocked processes.
		 */
		if(blockedverbose && pr->p_rts_flags && !blockedseen) {
			NEWLINE;
			printf("Blocked processes:");
			NEWLINE;
			blockedseen = 1;
		}

		print_proc(&tick_procs[p], &mproc[pnr], tcyc);
		NEWLINE;

		if(!blockedverbose)
			continue;

		/* Traverse dependency chain if blocked. */
		while(pr->p_rts_flags) {
			endpoint_t dep = NONE;
			struct tp *tpdep;
			level += 5;

			if((dep = P_BLOCKEDON(pr)) == NONE) {
				printf("not blocked on a process");
				NEWLINE;
				break;
			}

			if(dep == ANY)
				break;

			tpdep = lookup(dep, tick_procs, nprocs);
			pr = tpdep->p;
			printf("%*s> ", level, "");
			print_proc(tpdep, &mproc[pr->p_nr], tcyc);
			NEWLINE;
		}
	}
}
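The CPU-state line above scales each cycle counter down by SCALE before dividing, which keeps the intermediate values inside unsigned long while the ratios stay accurate enough for a two-decimal percentage. A small sketch of that calculation; the SCALE value here is an assumption, the real constant is defined elsewhere in top's source.

#include <stdint.h>
#include <stdio.h>

#define SCALE 1024	/* assumed; stands in for top's real SCALE constant */

/* Percentage of total cycles spent in one class (user/system/kernel/idle). */
static double class_percent(uint64_t class_cycles, uint64_t total_cycles)
{
	unsigned long tcyc = (unsigned long) (total_cycles / SCALE);
	unsigned long tmp  = (unsigned long) (class_cycles / SCALE);

	if (tcyc == 0) return 0.0;	/* nothing measured yet */
	return 100.0 * tmp / tcyc;
}

int main(void)
{
	uint64_t user = 750000, kernel = 125000, idle = 125000;
	uint64_t total = user + kernel + idle;

	printf("user %6.2f%%, kernel %6.2f%%, idle %6.2f%%\n",
		class_percent(user, total),
		class_percent(kernel, total),
		class_percent(idle, total));
	return 0;
}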
Example #10
void main(void)
{
/* Start the ball rolling. */
	struct boot_image *ip;		/* boot image pointer */
	register struct proc *rp;	/* process pointer */
	register struct priv *sp;	/* privilege structure pointer */
	register int i, j;
	int hdrindex;			/* index to array of a.out headers */
	phys_clicks text_base;
	vir_clicks text_clicks, data_clicks, st_clicks;
	reg_t ktsb;			/* kernel task stack base */
	struct exec *e_hdr = 0;		/* for a copy of an a.out header */

	/* Global value to test segment sanity. */
	magictest = MAGICTEST;

	/* Clear the process table. Announce each slot as empty and set up mappings
	 * for proc_addr() and proc_nr() macros. Do the same for the table with 
	 * privilege structures for the system processes.
	 */
	for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
		rp->p_rts_flags = RTS_SLOT_FREE;	/* initialize free slot */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
		rp->p_magic = PMAGIC;
#endif
		rp->p_nr = i;				/* proc number from ptr */
		rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
	}

	for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
		sp->s_proc_nr = ENDPT_NONE;			/* initialize as free */
		sp->s_id = i;				/* priv structure index */
		ppriv_addr[i] = sp;			/* priv ptr from number */
	}

	/* Set up proc table entries for processes in boot image.  The stacks of the
	 * kernel tasks are initialized to an array in data space.  The stacks
	 * of the servers have been added to the data segment by the monitor, so
	 * the stack pointer is set to the end of the data segment.  All the
	 * processes are in low memory on the 8086.  On the 386 only the kernel
	 * is in low memory, the rest is loaded in extended memory.
	 */

	/* Task stacks. */
	ktsb = (reg_t) t_stack;

	for (i=0; i < NR_BOOT_PROCS; ++i) {
		int schedulable_proc, proc_nr;
		int ipc_to_m, kcalls;

		ip = &image[i];				/* process' attributes */
		rp = proc_addr(ip->proc_nr);		/* get process pointer */
		ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
		rp->p_max_priority = ip->priority;	/* max scheduling priority */
		rp->p_priority = ip->priority;		/* current priority */
		rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
		rp->p_ticks_left = ip->quantum;		/* current credit */

		strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
		/* See if this process is immediately schedulable.
		 * In that case, set its privileges now and allow it to run.
		 * Only kernel tasks and the root system process get to run immediately.
		 * All the other system processes are inhibited from running by the
		 * RTS_NO_PRIV flag. They can only be scheduled once the root system
		 * process has set their privileges.
		 */
		proc_nr = proc_nr(rp);
		schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
		if(schedulable_proc) {
			/* Assign privilege structure. Force a static privilege id. */
			(void) get_priv(rp, static_priv_id(proc_nr));

			/* Privileges for kernel tasks. */
			if(iskerneln(proc_nr)) {
				/* Privilege flags. */
				priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
				/* Allowed traps. */
				priv(rp)->s_trap_mask = (proc_nr == CLOCK
					|| proc_nr == SYSTEM  ? CSK_T : TSK_T);
				ipc_to_m = TSK_M;                  /* allowed targets */
				kcalls = TSK_KC;                   /* allowed kernel calls */
			} else if(isrootsysn(proc_nr)) {
			/* Privileges for the root system process. */
				priv(rp)->s_flags= RSYS_F;         /* privilege flags */
				priv(rp)->s_trap_mask= RSYS_T;     /* allowed traps */
				ipc_to_m = RSYS_M;                 /* allowed targets */
				kcalls = RSYS_KC;                  /* allowed kernel calls */
			}

			/* Fill in target mask. */
			for (j=0; j < NR_SYS_PROCS; j++) {
				if (ipc_to_m & (1 << j))
					set_sendto_bit(rp, j);
				else
					unset_sendto_bit(rp, j);
			}

			/* Fill in kernel call mask. */
			for(j = 0; j < CALL_MASK_SIZE; j++) {
				priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
			}
		} else {
			/* Don't let the process run for now. */
			RTS_SET(rp, RTS_NO_PRIV);
		}

		if (iskerneln(proc_nr)) {               /* part of the kernel? */
			if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
				rp->p_priv->s_stack_guard = (reg_t *) ktsb;
				*rp->p_priv->s_stack_guard = STACK_GUARD;
			}

			ktsb += ip->stksize;	/* point to high end of stack */
			rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
			hdrindex = 0;		/* all use the first a.out header */
		} else {
			hdrindex = 1 + i-NR_TASKS;	/* system/user processes */
		}

		/* Architecture-specific way to find out aout header of this
		 * boot process.
		 */
		e_hdr = arch_get_aout_header(hdrindex);

		/* Convert addresses to clicks and build process memory map */
		text_base = e_hdr->a_syms >> CLICK_SHIFT;
		st_clicks= (e_hdr->a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
		data_clicks = (e_hdr->a_text + e_hdr->a_data + e_hdr->a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
		text_clicks = 0;

		rp->p_memmap[T].mem_phys = text_base;
		rp->p_memmap[T].mem_len  = text_clicks;
		rp->p_memmap[D].mem_phys = text_base + text_clicks;
		rp->p_memmap[D].mem_len  = data_clicks;
		rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
		rp->p_memmap[S].mem_vir  = st_clicks;
		rp->p_memmap[S].mem_len  = 0;

		/* Patch (override) the non-kernel process' entry points in image table. The
		 * image table is located in kernel/kernel_syms.c. The kernel processes like
		 * IDLE, SYSTEM, CLOCK, HARDWARE are not changed because they are part of kernel
		 * and the entry points are set at compilation time. In case of IDLE or HARDWARE
		 * the entry point can be ignored because they never run (set RTS_PROC_STOP).
		 */
		if (!iskerneln(proc_nr(rp)))
			ip->initial_pc = (task_t*)e_hdr->a_entry;

		/* Set initial register values.  The processor status word for tasks 
		 * is different from that of other processes because tasks can
		 * access I/O; this is not allowed to less-privileged processes 
		 */
		rp->p_reg.pc = (reg_t) ip->initial_pc;
		rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

		/* Initialize the server stack pointer. Take it down one word
		 * to give crtso.s something to use as "argc","argv" and "envp".
		 */
		if (isusern(proc_nr)) {		/* user-space process? */
			rp->p_reg.sp = (rp->p_memmap[S].mem_vir + rp->p_memmap[S].mem_len)
					<< CLICK_SHIFT;
			rp->p_reg.sp -= 3*sizeof(reg_t);
		}

		/* scheduling functions depend on proc_ptr pointing somewhere. */
		if(!proc_ptr)
			proc_ptr = rp;

		/* If this process has its own page table, VM will set the
		 * PT up and manage it. VM will signal the kernel when it has
		 * done this; until then, don't let it run.
		 */
		if(ip->flags & PROC_FULLVM)
			RTS_SET(rp, RTS_VMINHIBIT);

		/* IDLE & HARDWARE task is never put on a run queue as it is
		 * never ready to run.
		 */
		if (rp->p_nr == HARDWARE)
			RTS_SET(rp, RTS_PROC_STOP);

		if (rp->p_nr == IDLE)
			RTS_SET(rp, RTS_PROC_STOP);

		RTS_UNSET(rp, RTS_SLOT_FREE); /* remove RTS_SLOT_FREE and schedule */
		alloc_segments(rp);
	} /* for */

	/* Architecture-dependent initialization. */
	arch_init();

#ifdef CONFIG_DEBUG_KERNEL_STATS_PROFILE
	sprofiling = 0;      /* we're not profiling until instructed to */
#endif
	cprof_procs_no = 0;  /* init nr of hash table slots used */

#ifdef CONFIG_IDLE_TSC
	idle_tsc = cvu64(0);
#endif

	vm_running = 0;
	krandom.random_sources = RANDOM_SOURCES;
	krandom.random_elements = RANDOM_ELEMENTS;

	/* Nucleos is now ready. All boot image processes are on the ready queue.
	 * Return to the assembly code to start running the current process. 
	 */
	bill_ptr = proc_addr(IDLE);		/* it has to point somewhere */
	announce();				/* print Nucleos startup banner */

	/*
	 * enable timer interrupts and clock task on the boot CPU
	 */
	if (boot_cpu_init_timer(system_hz)) {
		kernel_panic("FATAL : failed to initialize timer interrupts, "
			    "cannot continue without any clock source!",
			    NO_NUM);
	}

	/* Warnings for sanity checks that take time. These warnings are printed
	 * so it's a clear warning no full release should be done with them
	 * enabled.
	 */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
	FIXME("CONFIG_DEBUG_KERNEL_SCHED_CHECK enabled");
#endif

#ifdef CONFIG_DEBUG_KERNEL_VMASSERT
	FIXME("CONFIG_DEBUG_KERNEL_VMASSERT enabled");
#endif

#ifdef CONFIG_DEBUG_PROC_CHECK
	FIXME("PROC check enabled");
#endif

	restart();
}
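The memory-map setup in the loop above repeatedly rounds byte counts up to whole clicks before storing them. A tiny stand-alone illustration of that rounding; CLICK_SHIFT here is an assumed value, the real one is a machine parameter.

#include <stdio.h>

#define CLICK_SHIFT 12			/* assumed click size of 4096 bytes */
#define CLICK_SIZE  (1 << CLICK_SHIFT)

/* Round a byte count up to whole clicks, as done for data and stack above. */
static unsigned long bytes_to_clicks(unsigned long bytes)
{
	return (bytes + CLICK_SIZE - 1) >> CLICK_SHIFT;
}

int main(void)
{
	/* 10000 bytes of text+data+bss occupy 3 clicks of 4096 bytes each. */
	printf("%lu clicks\n", bytes_to_clicks(10000));
	return 0;
}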