Example no. 1
0
/*===========================================================================*
 *				newmap					     *
 *===========================================================================*/
int newmap(struct proc *caller, struct proc *rp, struct mem_map *map_ptr)
{
  int r;
/* Fetch the memory map. */
  if((r=data_copy(caller->p_endpoint, (vir_bytes) map_ptr,
	KERNEL, (vir_bytes) rp->p_memmap, sizeof(rp->p_memmap))) != OK) {
	printf("newmap: data_copy failed! (%d)\n", r);
	return r;
  }

  alloc_segments(rp);

  return(OK);
}
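
The error handling above hinges on data_copy() returning OK or a negative error code that is passed straight back to the caller. Below is a minimal, self-contained sketch (not MINIX code) of that pattern; data_copy_sim(), the simplified mem_map fields, and the forced-failure flag are hypothetical stand-ins.

#include <stdio.h>
#include <string.h>

#define OK          0
#define EFAULT_SIM  (-14)        /* made-up error code for the sketch */

struct mem_map { unsigned long mem_vir, mem_phys, mem_len; };

/* Stand-in for the kernel's data_copy(): copy, or fail on request. */
static int data_copy_sim(const void *src, void *dst, size_t len, int fail)
{
	if (fail) return EFAULT_SIM;
	memcpy(dst, src, len);
	return OK;
}

static int newmap_sim(const struct mem_map *user_map, struct mem_map *kmap, int fail)
{
	int r;
	if ((r = data_copy_sim(user_map, kmap, sizeof(*kmap), fail)) != OK) {
		printf("newmap: data_copy failed! (%d)\n", r);
		return r;                /* propagate the error unchanged */
	}
	return OK;
}

int main(void)
{
	struct mem_map src = { 0, 0x100, 16 }, dst;
	printf("ok path:   %d\n", newmap_sim(&src, &dst, 0));
	printf("fail path: %d\n", newmap_sim(&src, &dst, 1));
	return 0;
}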
Example no. 2
0
/*===========================================================================*
 *                                   main                                    *
 *===========================================================================*/
PUBLIC void main()
{
/* Start the ball rolling. */

  register struct proc *rp;
  register int t;
  int hdrindex;
  phys_clicks text_base;
  vir_clicks text_clicks;
  vir_clicks data_clicks;
  phys_bytes phys_b;
  reg_t ktsb;			/* kernel task stack base */
  struct memory *memp;
  struct tasktab *ttp;
  struct exec e_hdr;

  licznik_elementow = 0;

  /* Initialize the interrupt controller. */
  intr_init(1);

  /* Interpret memory sizes. */
  mem_init();

  /* Clear the process table.
   * Set up mappings for proc_addr() and proc_number() macros.
   */
  for (rp = BEG_PROC_ADDR, t = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++t) {
	rp->p_nr = t;		/* proc number from ptr */
        (pproc_addr + NR_TASKS)[t] = rp;        /* proc ptr from number */
  }

  /* Resolve driver selections in the task table. */
  mapdrivers();

  /* Set up proc table entries for tasks and servers.  The stacks of the
   * kernel tasks are initialized to an array in data space.  The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment.  All the
   * processes are in low memory on the 8086.  On the 386 only the kernel
   * is in low memory, the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;

  for (t = -NR_TASKS; t <= LOW_USER; ++t) {
	rp = proc_addr(t);			/* t's process slot */
	ttp = &tasktab[t + NR_TASKS];		/* t's task attributes */
	strcpy(rp->p_name, ttp->name);
	if (t < 0) {
		if (ttp->stksize > 0) {
			rp->p_stguard = (reg_t *) ktsb;
			*rp->p_stguard = STACK_GUARD;
		}
		ktsb += ttp->stksize;
		rp->p_reg.sp = ktsb;
		text_base = code_base >> CLICK_SHIFT;
					/* tasks are all in the kernel */
		hdrindex = 0;		/* and use the first a.out header */
		rp->p_priority = PPRI_TASK;
	} else {
		hdrindex = 1 + t;	/* MM, FS, INIT follow the kernel */
		rp->p_priority = t < LOW_USER ? PPRI_SERVER : PPRI_USER;
	}

	/* The bootstrap loader has created an array of the a.out headers at
	 * absolute address 'aout'.
	 */
	phys_copy(aout + hdrindex * A_MINHDR, vir2phys(&e_hdr),
							(phys_bytes) A_MINHDR);
	text_base = e_hdr.a_syms >> CLICK_SHIFT;
	text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
	if (!(e_hdr.a_flags & A_SEP)) text_clicks = 0;	/* Common I&D */
	data_clicks = (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	rp->p_map[T].mem_phys = text_base;
	rp->p_map[T].mem_len  = text_clicks;
	rp->p_map[D].mem_phys = text_base + text_clicks;
	rp->p_map[D].mem_len  = data_clicks;
	rp->p_map[S].mem_phys = text_base + text_clicks + data_clicks;
	rp->p_map[S].mem_vir  = data_clicks;	/* empty - stack is in data */

	/* Remove server memory from the free memory list.  The boot monitor
	 * promises to put processes at the start of memory chunks.
	 */
	for (memp = mem; memp < &mem[NR_MEMS]; memp++) {
		if (memp->base == text_base) {
			memp->base += text_clicks + data_clicks;
			memp->size -= text_clicks + data_clicks;
		}
	}

	/* Set initial register values. */
	rp->p_reg.pc = (reg_t) ttp->initial_pc;
	rp->p_reg.psw = istaskp(rp) ? INIT_TASK_PSW : INIT_PSW;

	if (t >= 0) {
		/* Initialize the server stack pointer.  Take it down one word
		 * to give crtso.s something to use as "argc".
		 */
		rp->p_reg.sp = (rp->p_map[S].mem_vir +
				rp->p_map[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= sizeof(reg_t);
	}

	if (!isidlehardware(t)) lock_ready(rp);	/* IDLE, HARDWARE never ready */
	rp->p_flags = 0;

	alloc_segments(rp);
  }
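
The segment sizes above are converted from bytes to clicks with a round-up shift. Here is a small standalone sketch of that arithmetic, assuming the common i386 values CLICK_SHIFT == 12 and CLICK_SIZE == 4096 (the real values come from the MINIX headers).

#include <stdio.h>

#define CLICK_SHIFT 12
#define CLICK_SIZE  (1 << CLICK_SHIFT)

int main(void)
{
	unsigned long a_text = 5000;	/* hypothetical text size in bytes */
	unsigned long text_clicks = (a_text + CLICK_SIZE - 1) >> CLICK_SHIFT;

	/* 5000 bytes round up to 2 clicks of 4096 bytes each. */
	printf("%lu bytes -> %lu clicks\n", a_text, text_clicks);
	return 0;
}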
Example no. 3
0
/*===========================================================================*
 *				do_update				     *
 *===========================================================================*/
PUBLIC int do_update(struct proc * caller, message * m_ptr)
{
/* Handle sys_update(). Update a process into another by swapping their process
 * slots.
 */
  endpoint_t src_e, dst_e;
  int src_p, dst_p;
  struct proc *src_rp, *dst_rp;
  struct priv *src_privp, *dst_privp;
  struct proc orig_src_proc;
  struct proc orig_dst_proc;
  struct priv orig_src_priv;
  struct priv orig_dst_priv;
  int i;

  /* Lookup slots for source and destination process. */
  src_e = m_ptr->SYS_UPD_SRC_ENDPT;
  if(!isokendpt(src_e, &src_p)) {
      return EINVAL;
  }
  src_rp = proc_addr(src_p);
  src_privp = priv(src_rp);
  if(!(src_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  dst_e = m_ptr->SYS_UPD_DST_ENDPT;
  if(!isokendpt(dst_e, &dst_p)) {
      return EINVAL;
  }
  dst_rp = proc_addr(dst_p);
  dst_privp = priv(dst_rp);
  if(!(dst_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  /* Check if processes are updatable. */
  if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {
      return EBUSY;
  }

#if DEBUG
  printf("do_update: updating %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

  /* Let destination inherit the target mask from source. */
  for (i=0; i < NR_SYS_PROCS; i++) {
      if (get_sys_bit(priv(src_rp)->s_ipc_to, i)) {
          set_sendto_bit(dst_rp, i);
      }
  }

  /* Save existing data. */
  orig_src_proc = *src_rp;
  orig_src_priv = *(priv(src_rp));
  orig_dst_proc = *dst_rp;
  orig_dst_priv = *(priv(dst_rp));

  /* Swap slots. */
  *src_rp = orig_dst_proc;
  *src_privp = orig_dst_priv;
  *dst_rp = orig_src_proc;
  *dst_privp = orig_src_priv;

  /* Adjust process slots. */
  adjust_proc_slot(src_rp, &orig_src_proc);
  adjust_proc_slot(dst_rp, &orig_dst_proc);

  /* Adjust privilege slots. */
  adjust_priv_slot(priv(src_rp), &orig_src_priv);
  adjust_priv_slot(priv(dst_rp), &orig_dst_priv);

  /* Swap global process slot addresses. */
  swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);

  /* Fix segments. */
  alloc_segments(src_rp);
  alloc_segments(dst_rp);
  prot_init();

#if DEBUG
  printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
      dst_rp->p_endpoint, dst_rp->p_name, dst_rp->p_nr, priv(dst_rp)->s_proc_nr);

  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif

  return OK;
}
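
The core of do_update() is a wholesale swap of two slot images followed by patching back the fields that must stay with the slot, which is what adjust_proc_slot() and adjust_priv_slot() do for p_nr, the scheduling links, and so on. Below is a self-contained sketch of that idea with made-up field names.

#include <stdio.h>

struct slot { int nr; const char *name; };	/* illustrative fields only */

/* Keep slot-bound identity (here: the slot number) in place after a swap. */
static void adjust_slot(struct slot *s, const struct slot *orig) { s->nr = orig->nr; }

int main(void)
{
	struct slot a = { 3, "rs" }, b = { 7, "rs_new" };
	struct slot orig_a = a, orig_b = b;

	a = orig_b;			/* swap the images wholesale */
	b = orig_a;
	adjust_slot(&a, &orig_a);	/* patch identity back into each slot */
	adjust_slot(&b, &orig_b);

	printf("slot %d now runs %s, slot %d now runs %s\n", a.nr, a.name, b.nr, b.name);
	return 0;
}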
Example no. 4
0
/*===========================================================================*
 *				main                                         *
 *===========================================================================*/
PUBLIC void main()
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register struct priv *sp;	/* privilege structure pointer */
  register int i, s;
  int hdrindex;			/* index to array of a.out headers */
  phys_clicks text_base;
  vir_clicks text_clicks, data_clicks;
  reg_t ktsb;			/* kernel task stack base */
  struct exec e_hdr;		/* for a copy of an a.out header */

  /* Initialize the interrupt controller. */
  intr_init(1);

  /* Clear the process table. Announce each slot as empty and set up mappings
   * for proc_addr() and proc_nr() macros. Do the same for the table with 
   * privilege structures for the system processes. 
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
  	rp->p_rts_flags = SLOT_FREE;		/* initialize free slot */
	rp->p_nr = i;				/* proc number from ptr */
        (pproc_addr + NR_TASKS)[i] = rp;        /* proc ptr from number */
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = NONE;			/* initialize as free */
	sp->s_id = i;				/* priv structure index */
	ppriv_addr[i] = sp;			/* priv ptr from number */
  }

  /* Set up proc table entries for processes in boot image.  The stacks of the
   * kernel tasks are initialized to an array in data space.  The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment.  All the
   * processes are in low memory on the 8086.  On the 386 only the kernel
   * is in low memory, the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	ip = &image[i];				/* process' attributes */
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	rp->p_max_priority = ip->priority;	/* max scheduling priority */
	rp->p_priority = ip->priority;		/* current priority */
	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
	rp->p_ticks_left = ip->quantum;		/* current credit */
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
	(void) get_priv(rp, (ip->flags & SYS_PROC));    /* assign structure */
	priv(rp)->s_flags = ip->flags;			/* process flags */
	priv(rp)->s_trap_mask = ip->trap_mask;		/* allowed traps */
	priv(rp)->s_call_mask = ip->call_mask;		/* kernel call mask */
	priv(rp)->s_ipc_to.chunk[0] = ip->ipc_to;	/* restrict targets */
	if (iskerneln(proc_nr(rp))) {		/* part of the kernel? */ 
		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
			*rp->p_priv->s_stack_guard = STACK_GUARD;
		}
		ktsb += ip->stksize;	/* point to high end of stack */
		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
		text_base = kinfo.code_base >> CLICK_SHIFT;
					/* processes that are in the kernel */
		hdrindex = 0;		/* all use the first a.out header */
	} else {
		hdrindex = 1 + i-NR_TASKS;	/* servers, drivers, INIT */
	}

	/* The bootstrap loader created an array of the a.out headers at
	 * absolute address 'aout'. Get one element to e_hdr.
	 */
	phys_copy(aout + hdrindex * A_MINHDR, vir2phys(&e_hdr),
						(phys_bytes) A_MINHDR);
	/* Convert addresses to clicks and build process memory map */
	text_base = e_hdr.a_syms >> CLICK_SHIFT;
	text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
	if (!(e_hdr.a_flags & A_SEP)) text_clicks = 0;	   /* common I&D */
	data_clicks = (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	rp->p_memmap[T].mem_phys = text_base;
	rp->p_memmap[T].mem_len  = text_clicks;
	rp->p_memmap[D].mem_phys = text_base + text_clicks;
	rp->p_memmap[D].mem_len  = data_clicks;
	rp->p_memmap[S].mem_phys = text_base + text_clicks + data_clicks;
	rp->p_memmap[S].mem_vir  = data_clicks;	/* empty - stack is in data */

	/* Set initial register values.  The processor status word for tasks 
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes 
	 */
	rp->p_reg.pc = (reg_t) ip->initial_pc;
	rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down one word
	 * to give crtso.s something to use as "argc".
	 */
	if (isusern(proc_nr(rp))) {		/* user-space process? */ 
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= sizeof(reg_t);
	}
	
	/* Set ready. The HARDWARE task is never ready. */
	if (rp->p_nr != HARDWARE) {
		rp->p_rts_flags = 0;		/* runnable if no flags */
		lock_enqueue(rp);		/* add to scheduling queues */
	} else {
		rp->p_rts_flags = NO_MAP;	/* prevent from running */
	}

	/* Code and data segments must be allocated in protected mode. */
	alloc_segments(rp);
  }
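
The stack guard set up above writes a magic word at the low end of each kernel task stack; if a task ever overflows its stack, the word is overwritten and the damage can be detected. A minimal sketch of that idea follows, with a made-up STACK_GUARD value.

#include <stdio.h>

#define STACK_GUARD 0xDEADBEEFu		/* illustrative value only */
#define STACK_WORDS 64

static unsigned t_stack[STACK_WORDS];

int main(void)
{
	unsigned *guard = &t_stack[0];	/* low end of this task's stack */
	*guard = STACK_GUARD;		/* written once at boot, as main() does */

	/* ... the task runs, its stack growing toward low addresses ... */

	if (*guard != STACK_GUARD)
		printf("stack overflow detected\n");
	else
		printf("guard word intact\n");
	return 0;
}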
Example no. 5
0
/*===========================================================================*
 *				main                                         *
 *===========================================================================*/
PUBLIC int main(void)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register int i, j;
  size_t argsz;			/* size of arguments passed to crtso on stack */

  BKL_LOCK();

  /* Global value to test segment sanity. */
  magictest = MAGICTEST;

  DEBUGEXTRA(("main()\n"));

  proc_init();

  /* Set up proc table entries for processes in boot image.  The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment.
   */

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc;
	proc_nr_t proc_nr;
	int ipc_to_m, kcalls;
	sys_map_t map;

	ip = &image[i];				/* process' attributes */
	DEBUGEXTRA(("initializing %s... ", ip->proc_name));
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	make_zero64(rp->p_cpu_time_left);
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
	
	reset_proc_accounting(rp);

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run immediately.
	 * All the other system processes are inhibited from running by the
	 * RTS_NO_PRIV flag. They can only be scheduled once the root system
	 * process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
	if(schedulable_proc) {
	    /* Assign privilege structure. Force a static privilege id. */
            (void) get_priv(rp, static_priv_id(proc_nr));

            /* Privileges for kernel tasks. */
            if(iskerneln(proc_nr)) {
                /* Privilege flags. */
                priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
                /* Allowed traps. */
                priv(rp)->s_trap_mask = (proc_nr == CLOCK 
                    || proc_nr == SYSTEM  ? CSK_T : TSK_T);
                ipc_to_m = TSK_M;                  /* allowed targets */
                kcalls = TSK_KC;                   /* allowed kernel calls */
            }
            /* Privileges for the root system process. */
            else if(isrootsysn(proc_nr)) {
                priv(rp)->s_flags= RSYS_F;        /* privilege flags */
                priv(rp)->s_trap_mask= SRV_T;     /* allowed traps */
                ipc_to_m = SRV_M;                 /* allowed targets */
                kcalls = SRV_KC;                  /* allowed kernel calls */
                priv(rp)->s_sig_mgr = SRV_SM;     /* signal manager */
                rp->p_priority = SRV_Q;	          /* priority queue */
                rp->p_quantum_size_ms = SRV_QT;   /* quantum size */
            }
            /* Privileges for an ordinary process. */
            else {
		NOT_REACHABLE;
            }

            /* Fill in target mask. */
            memset(&map, 0, sizeof(map));

            if (ipc_to_m == ALL_M) {
                for(j = 0; j < NR_SYS_PROCS; j++)
                    set_sys_bit(map, j);
            }

            fill_sendto_mask(rp, &map);

            /* Fill in kernel call mask. */
            for(j = 0; j < SYS_CALL_MASK_SIZE; j++) {
                priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
            }
	}
	else {
	    /* Don't let the process run for now. */
            RTS_SET(rp, RTS_NO_PRIV | RTS_NO_QUANTUM);
	}
	rp->p_memmap[T].mem_vir  = ABS2CLICK(ip->memmap.text_vaddr);
	rp->p_memmap[T].mem_phys = ABS2CLICK(ip->memmap.text_paddr);
	rp->p_memmap[T].mem_len  = ABS2CLICK(ip->memmap.text_bytes);
	rp->p_memmap[D].mem_vir  = ABS2CLICK(ip->memmap.data_vaddr);
	rp->p_memmap[D].mem_phys = ABS2CLICK(ip->memmap.data_paddr);
	rp->p_memmap[D].mem_len  = ABS2CLICK(ip->memmap.data_bytes);
	rp->p_memmap[S].mem_phys = ABS2CLICK(ip->memmap.data_paddr +
					     ip->memmap.data_bytes +
					     ip->memmap.stack_bytes);
	rp->p_memmap[S].mem_vir  = ABS2CLICK(ip->memmap.data_vaddr +
					     ip->memmap.data_bytes +
					     ip->memmap.stack_bytes);
	rp->p_memmap[S].mem_len  = 0;

	/* Set initial register values.  The processor status word for tasks 
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes 
	 */
	rp->p_reg.pc = ip->memmap.entry;
	rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down three words
	 * to give crtso.s something to use as "argc", "argv" and "envp".
	 */
	if (isusern(proc_nr)) {		/* user-space process? */ 
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		argsz = 3 * sizeof(reg_t);
		rp->p_reg.sp -= argsz;
		phys_memset(rp->p_reg.sp - 
			(rp->p_memmap[S].mem_vir << CLICK_SHIFT) +
			(rp->p_memmap[S].mem_phys << CLICK_SHIFT), 
			0, argsz);
	}

	/* scheduling functions depend on proc_ptr pointing somewhere. */
	if(!get_cpulocal_var(proc_ptr))
		get_cpulocal_var(proc_ptr) = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it. VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if(ip->flags & PROC_FULLVM)
		rp->p_rts_flags |= RTS_VMINHIBIT;

	rp->p_rts_flags |= RTS_PROC_STOP;
	rp->p_rts_flags &= ~RTS_SLOT_FREE;
	alloc_segments(rp);
	DEBUGEXTRA(("done\n"));
  }

#define IPCNAME(n) { \
	assert((n) >= 0 && (n) <= IPCNO_HIGHEST); \
	assert(!ipc_call_names[n]);	\
	ipc_call_names[n] = #n; \
}
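  /* Note: #n in the macro body is the preprocessor's stringizing operator, so
   * IPCNAME(SEND) stores the literal string "SEND" in ipc_call_names[SEND];
   * the asserts catch an out-of-range call number or a duplicate registration.
   */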

  IPCNAME(SEND);
  IPCNAME(RECEIVE);
  IPCNAME(SENDREC);
  IPCNAME(NOTIFY);
  IPCNAME(SENDNB);
  IPCNAME(SENDA);

  /* Architecture-dependent initialization. */
  DEBUGEXTRA(("arch_init()... "));
  arch_init();
  DEBUGEXTRA(("done\n"));

  /* System and processes initialization */
  DEBUGEXTRA(("system_init()... "));
  system_init();
  DEBUGEXTRA(("done\n"));

#ifdef CONFIG_SMP
  if (config_no_apic) {
	  BOOT_VERBOSE(printf("APIC disabled, disables SMP, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else if (config_no_smp) {
	  BOOT_VERBOSE(printf("SMP disabled, using legacy PIC\n"));
	  smp_single_cpu_fallback();
  } else {
	  smp_init();
	  /*
	   * If smp_init() returns, it failed, and we fall back to finishing
	   * the boot on a single CPU.
	   */
	  bsp_finish_booting();
  }
#else
  /*
   * If configured for a single CPU, we are already on the kernel stack, which we
   * will use every time we execute kernel code. We finish booting and never
   * return here.
   */
  bsp_finish_booting();
#endif

  NOT_REACHABLE;
  return 1;
}
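
For the ALL_M case above, every one of the NR_SYS_PROCS target bits is set before fill_sendto_mask() is applied. Here is a standalone sketch of that fill using a plain word-array bitmap instead of MINIX's sys_map_t; the sizes are assumptions for the sketch.

#include <stdio.h>
#include <string.h>

#define NR_SYS_PROCS  32		/* assumption for the sketch */
#define BITS_PER_WORD 32

static unsigned map[(NR_SYS_PROCS + BITS_PER_WORD - 1) / BITS_PER_WORD];

static void set_sys_bit_sim(unsigned *m, int bit)
{
	m[bit / BITS_PER_WORD] |= 1u << (bit % BITS_PER_WORD);
}

int main(void)
{
	int j;

	memset(map, 0, sizeof(map));
	for (j = 0; j < NR_SYS_PROCS; j++)	/* mirror the ALL_M loop */
		set_sys_bit_sim(map, j);

	printf("first chunk: 0x%08x\n", map[0]);	/* all 32 bits set */
	return 0;
}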
Example no. 6
0
/*===========================================================================*
 *				main                                         *
 *===========================================================================*/
PUBLIC void main()
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register struct priv *sp;	/* privilege structure pointer */
  register int i, s;
  // Index into the array of a.out headers.
  int hdrindex;			/* index to array of a.out headers */
  phys_clicks text_base;
  vir_clicks text_clicks, data_clicks;
  // Base (low end) of the kernel task stacks.
  reg_t ktsb;			/* kernel task stack base */
  // Holds a copy of an a.out header.
  struct exec e_hdr;		/* for a copy of an a.out header */

  /* Initialize the interrupt controller. */
  // Initialize the 8259 interrupt controller chip.
  intr_init(1);

  /* Clear the process table. Announce each slot as empty and set up mappings
   * for proc_addr() and proc_nr() macros. Do the same for the table with
   * privilege structures for the system processes.
   */
  // Initialize the process table and the process pointer table.
  // BEG_PROC_ADDR: address of the process table.
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
	  // Mark every process table slot as free.
  	rp->p_rts_flags = SLOT_FREE;		/* initialize free slot */
	// Process number; i starts at -NR_TASKS, so kernel tasks get negative process numbers.
	rp->p_nr = i;				/* proc number from ptr */
	// Set up the mapping between the process array and the process pointer array.
        (pproc_addr + NR_TASKS)[i] = rp;        /* proc ptr from number */
  }
  // Initialize the privilege table.
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = NONE;			/* initialize as free */
	sp->s_id = i;				/* priv structure index */
	// Set up the mapping between the privilege table and the privilege pointer table.
	ppriv_addr[i] = sp;			/* priv ptr from number */
  }

  /* Set up proc table entries for tasks and servers.  The stacks of the
   * kernel tasks are initialized to an array in data space.  The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment.  All the
   * processes are in low memory on the 8086.  On the 386 only the kernel
   * is in low memory, the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;
  // Allocate a process table entry for each program in the boot image.
  for (i=0; i < NR_BOOT_PROCS; ++i) {
	ip = &image[i];				/* process' attributes */
	// Get the process pointer.
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	// Maximum scheduling priority.
	rp->p_max_priority = ip->priority;	/* max scheduling priority */
	// Current scheduling priority.
	rp->p_priority = ip->priority;		/* current priority */
	// Quantum size.
	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
	// Remaining time slice.
	rp->p_ticks_left = ip->quantum;		/* current credit */
	// Copy the program name into the process table entry.
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
	// Allocate a privilege structure for the process, i.e. take one entry
	// from the system privilege table.
	(void) get_priv(rp, (ip->flags & SYS_PROC));    /* assign structure */
	// Initialize the privilege structure's flags.
	priv(rp)->s_flags = ip->flags;			/* process flags */
	// Initialize the allowed trap mask in the privilege structure.
	priv(rp)->s_trap_mask = ip->trap_mask;		/* allowed traps */
	priv(rp)->s_call_mask = ip->call_mask;		/* kernel call mask */
	// Initialize the process's IPC send bitmap.
	priv(rp)->s_ipc_to.chunk[0] = ip->ipc_to;	/* restrict targets */
	// If the process is a kernel task:
	if (iskerneln(proc_nr(rp))) {		/* part of the kernel? */ 
		// If the task's stack size is greater than 0, set up its stack guard word.
		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
			// Set the kernel task's stack guard pointer.
			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
			// The arrow operator (->) binds tighter than the dereference
			// operator (*), so the next line is equivalent to:
			// *(rp->p_priv->s_stack_guard) = STACK_GUARD
			// The effect is to place a special value, the stack guard word,
			// at the very top of the stack (the lowest address).
			*rp->p_priv->s_stack_guard = STACK_GUARD;
		}
		ktsb += ip->stksize;	/* point to high end of stack */
		// This task's initial stack pointer.
		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
		// kinfo ???
		// Shift the kernel code base right by CLICK_SHIFT and store it in text_base.
		text_base = kinfo.code_base >> CLICK_SHIFT;
					/* processes that are in the kernel */
		// All kernel tasks share the same a.out header.
		hdrindex = 0;		/* all use the first a.out header */
	} else {
		// Not a kernel task: compute its index into the a.out header array;
		// entry 0 is reserved for the kernel, hence the +1.
		hdrindex = 1 + i-NR_TASKS;	/* servers, drivers, INIT */
	}

	/* The bootstrap loader created an array of the a.out headers at
	 * absolute address 'aout'. Get one element to e_hdr.
	 */
	phys_copy(aout + hdrindex * A_MINHDR, vir2phys(&e_hdr),
						(phys_bytes) A_MINHDR);
	/* Convert addresses to clicks and build process memory map */
	// Since text_base gets set here, isn't the earlier
	// text_base = kinfo.code_base >> CLICK_SHIFT;
	// (around line 146) redundant??
	// Shift the symbol-table size from the a.out header right by CLICK_SHIFT
	// and store it in text_base.
	text_base = e_hdr.a_syms >> CLICK_SHIFT;
	// Text segment size in clicks, rounded up.
	text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
	// If the a.out header says its I&D spaces are combined ???
	if (!(e_hdr.a_flags & A_SEP)) text_clicks = 0;	   /* common I&D */
	// Total memory used by the program, in clicks, rounded up.
	data_clicks = (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	// Initialize the process's memory map structures.
	rp->p_memmap[T].mem_phys = text_base;
	rp->p_memmap[T].mem_len  = text_clicks;
	rp->p_memmap[D].mem_phys = text_base + text_clicks;
	rp->p_memmap[D].mem_len  = data_clicks;
	rp->p_memmap[S].mem_phys = text_base + text_clicks + data_clicks;
	rp->p_memmap[S].mem_vir  = data_clicks;	/* empty - stack is in data */

	/* Set initial register values.  The processor status word for tasks 
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes 
	 */
	// Initialize the process's PC and processor status word.
	rp->p_reg.pc = (reg_t) ip->initial_pc;
	rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down one word
	 * to give crtso.s something to use as "argc".
	 */
	if (isusern(proc_nr(rp))) {		/* user-space process? */ 
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= sizeof(reg_t);
	}
	
	/* Set ready. The HARDWARE task is never ready. */
	if (rp->p_nr != HARDWARE) {
		// Not HARDWARE: clear its flags and add it to the scheduling queues.
		rp->p_rts_flags = 0;		/* runnable if no flags */
		lock_enqueue(rp);		/* add to scheduling queues */
	} else {
		// For the HARDWARE task, prevent it from running. ???
		rp->p_rts_flags = NO_MAP;	/* prevent from running */
	}

	/* Code and data segments must be allocated in protected mode. */
	alloc_segments(rp);
  }
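
For a user-space boot process the initial stack pointer starts at the top of the stack segment (mem_vir + mem_len, in clicks) and is then lowered by one machine word so crtso finds a slot to treat as "argc". Here is a small sketch of that computation with made-up numbers.

#include <stdio.h>

#define CLICK_SHIFT 12
typedef unsigned long reg_t;

int main(void)
{
	unsigned long mem_vir = 16, mem_len = 0;	/* stack segment, in clicks */
	reg_t sp;

	sp = (mem_vir + mem_len) << CLICK_SHIFT;	/* top of the stack segment */
	sp -= sizeof(reg_t);				/* leave room for "argc" */
	printf("initial sp = 0x%lx\n", sp);
	return 0;
}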
Example no. 7
0
void main(void)
{
/* Start the ball rolling. */
	struct boot_image *ip;		/* boot image pointer */
	register struct proc *rp;	/* process pointer */
	register struct priv *sp;	/* privilege structure pointer */
	register int i, j;
	int hdrindex;			/* index to array of a.out headers */
	phys_clicks text_base;
	vir_clicks text_clicks, data_clicks, st_clicks;
	reg_t ktsb;			/* kernel task stack base */
	struct exec *e_hdr = 0;		/* for a copy of an a.out header */

	/* Global value to test segment sanity. */
	magictest = MAGICTEST;

	/* Clear the process table. Announce each slot as empty and set up mappings
	 * for proc_addr() and proc_nr() macros. Do the same for the table with 
	 * privilege structures for the system processes.
	 */
	for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
		rp->p_rts_flags = RTS_SLOT_FREE;	/* initialize free slot */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
		rp->p_magic = PMAGIC;
#endif
		rp->p_nr = i;				/* proc number from ptr */
		rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
	}

	for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
		sp->s_proc_nr = ENDPT_NONE;			/* initialize as free */
		sp->s_id = i;				/* priv structure index */
		ppriv_addr[i] = sp;			/* priv ptr from number */
	}

	/* Set up proc table entries for processes in boot image.  The stacks of the
	 * kernel tasks are initialized to an array in data space.  The stacks
	 * of the servers have been added to the data segment by the monitor, so
	 * the stack pointer is set to the end of the data segment.  All the
	 * processes are in low memory on the 8086.  On the 386 only the kernel
	 * is in low memory, the rest is loaded in extended memory.
	 */

	/* Task stacks. */
	ktsb = (reg_t) t_stack;

	for (i=0; i < NR_BOOT_PROCS; ++i) {
		int schedulable_proc, proc_nr;
		int ipc_to_m, kcalls;

		ip = &image[i];				/* process' attributes */
		rp = proc_addr(ip->proc_nr);		/* get process pointer */
		ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
		rp->p_max_priority = ip->priority;	/* max scheduling priority */
		rp->p_priority = ip->priority;		/* current priority */
		rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
		rp->p_ticks_left = ip->quantum;		/* current credit */

		strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
		/* See if this process is immediately schedulable.
		 * In that case, set its privileges now and allow it to run.
		 * Only kernel tasks and the root system process get to run immediately.
		 * All the other system processes are inhibited from running by the
		 * RTS_NO_PRIV flag. They can only be scheduled once the root system
		 * process has set their privileges.
		 */
		proc_nr = proc_nr(rp);
		schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
		if(schedulable_proc) {
			/* Assign privilege structure. Force a static privilege id. */
			(void) get_priv(rp, static_priv_id(proc_nr));

			/* Privileges for kernel tasks. */
			if(iskerneln(proc_nr)) {
				/* Privilege flags. */
				priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
				/* Allowed traps. */
				priv(rp)->s_trap_mask = (proc_nr == CLOCK
					|| proc_nr == SYSTEM  ? CSK_T : TSK_T);
				ipc_to_m = TSK_M;                  /* allowed targets */
				kcalls = TSK_KC;                   /* allowed kernel calls */
			} else if(isrootsysn(proc_nr)) {
			/* Privileges for the root system process. */
				priv(rp)->s_flags= RSYS_F;         /* privilege flags */
				priv(rp)->s_trap_mask= RSYS_T;     /* allowed traps */
				ipc_to_m = RSYS_M;                 /* allowed targets */
				kcalls = RSYS_KC;                  /* allowed kernel calls */
			}

			/* Fill in target mask. */
			for (j=0; j < NR_SYS_PROCS; j++) {
				if (ipc_to_m & (1 << j))
					set_sendto_bit(rp, j);
				else
					unset_sendto_bit(rp, j);
			}

			/* Fill in kernel call mask. */
			for(j = 0; j < CALL_MASK_SIZE; j++) {
				priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
			}
		} else {
			/* Don't let the process run for now. */
			RTS_SET(rp, RTS_NO_PRIV);
		}

		if (iskerneln(proc_nr)) {               /* part of the kernel? */
			if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
				rp->p_priv->s_stack_guard = (reg_t *) ktsb;
				*rp->p_priv->s_stack_guard = STACK_GUARD;
			}

			ktsb += ip->stksize;	/* point to high end of stack */
			rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
			hdrindex = 0;		/* all use the first a.out header */
		} else {
			hdrindex = 1 + i-NR_TASKS;	/* system/user processes */
		}

		/* Architecture-specific way to find out aout header of this
		 * boot process.
		 */
		e_hdr = arch_get_aout_header(hdrindex);

		/* Convert addresses to clicks and build process memory map */
		text_base = e_hdr->a_syms >> CLICK_SHIFT;
		st_clicks= (e_hdr->a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
		data_clicks = (e_hdr->a_text + e_hdr->a_data + e_hdr->a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
		text_clicks = 0;

		rp->p_memmap[T].mem_phys = text_base;
		rp->p_memmap[T].mem_len  = text_clicks;
		rp->p_memmap[D].mem_phys = text_base + text_clicks;
		rp->p_memmap[D].mem_len  = data_clicks;
		rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
		rp->p_memmap[S].mem_vir  = st_clicks;
		rp->p_memmap[S].mem_len  = 0;

		/* Patch (override) the non-kernel processes' entry points in the image table.
		 * The image table is located in kernel/kernel_syms.c. Kernel processes such as
		 * IDLE, SYSTEM, CLOCK and HARDWARE are not changed because they are part of the
		 * kernel and their entry points are set at compilation time. For IDLE and
		 * HARDWARE the entry point can be ignored because they never run
		 * (RTS_PROC_STOP is set).
		 */
		if (!iskerneln(proc_nr(rp)))
			ip->initial_pc = (task_t*)e_hdr->a_entry;

		/* Set initial register values.  The processor status word for tasks 
		 * is different from that of other processes because tasks can
		 * access I/O; this is not allowed to less-privileged processes 
		 */
		rp->p_reg.pc = (reg_t) ip->initial_pc;
		rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

		/* Initialize the server stack pointer. Take it down one word
		 * to give crtso.s something to use as "argc","argv" and "envp".
		 */
		if (isusern(proc_nr)) {		/* user-space process? */
			rp->p_reg.sp = (rp->p_memmap[S].mem_vir + rp->p_memmap[S].mem_len)
					<< CLICK_SHIFT;
			rp->p_reg.sp -= 3*sizeof(reg_t);
		}

		/* scheduling functions depend on proc_ptr pointing somewhere. */
		if(!proc_ptr)
			proc_ptr = rp;

		/* If this process has its own page table, VM will set the
		 * PT up and manage it. VM will signal the kernel when it has
		 * done this; until then, don't let it run.
		 */
		if(ip->flags & PROC_FULLVM)
			RTS_SET(rp, RTS_VMINHIBIT);

		/* The IDLE and HARDWARE tasks are never put on a run queue as
		 * they are never ready to run.
		 */
		if (rp->p_nr == HARDWARE)
			RTS_SET(rp, RTS_PROC_STOP);

		if (rp->p_nr == IDLE)
			RTS_SET(rp, RTS_PROC_STOP);

		RTS_UNSET(rp, RTS_SLOT_FREE); /* remove RTS_SLOT_FREE and schedule */
		alloc_segments(rp);
	} /* for */

	/* Architecture-dependent initialization. */
	arch_init();

#ifdef CONFIG_DEBUG_KERNEL_STATS_PROFILE
	sprofiling = 0;      /* we're not profiling until instructed to */
#endif
	cprof_procs_no = 0;  /* init nr of hash table slots used */

#ifdef CONFIG_IDLE_TSC
	idle_tsc = cvu64(0);
#endif

	vm_running = 0;
	krandom.random_sources = RANDOM_SOURCES;
	krandom.random_elements = RANDOM_ELEMENTS;

	/* Nucleos is now ready. All boot image processes are on the ready queue.
	 * Return to the assembly code to start running the current process. 
	 */
	bill_ptr = proc_addr(IDLE);		/* it has to point somewhere */
	announce();				/* print Nucleos startup banner */

	/*
	 * Enable timer interrupts and the clock task on the boot CPU.
	 */
	if (boot_cpu_init_timer(system_hz)) {
		kernel_panic("FATAL : failed to initialize timer interrupts, "
			    "cannot continue without any clock source!",
			    NO_NUM);
	}

	/* Warnings for sanity checks that take time. These warnings are printed
	 * to make it clear that no full release should be made with them
	 * enabled.
	 */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
	FIXME("CONFIG_DEBUG_KERNEL_SCHED_CHECK enabled");
#endif

#ifdef CONFIG_DEBUG_KERNEL_VMASSERT
	FIXME("CONFIG_DEBUG_KERNEL_VMASSERT enabled");
#endif

#ifdef CONFIG_DEBUG_PROC_CHECK
	FIXME("PROC check enabled");
#endif

	restart();
}
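
The target-mask loop above expands the compact ipc_to_m word into one send-permission bit per possible system process. Below is a self-contained sketch of that expansion using a plain flag array instead of the kernel's per-process bitmap helpers; NR_SYS_PROCS and the mask value are assumptions.

#include <stdio.h>

#define NR_SYS_PROCS 32			/* assumption for the sketch */

int main(void)
{
	unsigned ipc_to_m = 0x5;	/* hypothetical: may send to targets 0 and 2 */
	int allowed[NR_SYS_PROCS];
	int j;

	for (j = 0; j < NR_SYS_PROCS; j++)	/* mirror the set/unset loop */
		allowed[j] = (ipc_to_m & (1u << j)) ? 1 : 0;

	for (j = 0; j < 4; j++)
		printf("target %d: %s\n", j, allowed[j] ? "allowed" : "denied");
	return 0;
}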
Example no. 8
0
/*===========================================================================*
 *				main                                         *
 *===========================================================================*/
PUBLIC void main()
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register struct priv *sp;	/* privilege structure pointer */
  register int i, j, s;
  int hdrindex;			/* index to array of a.out headers */
  phys_clicks text_base;
  vir_clicks text_clicks, data_clicks, st_clicks;
  reg_t ktsb;			/* kernel task stack base */
  struct exec e_hdr;		/* for a copy of an a.out header */

  /* Architecture-dependent initialization. */
  arch_init();

  /* Global value to test segment sanity. */
  magictest = MAGICTEST;

  /* Clear the process table. Announce each slot as empty and set up mappings
   * for proc_addr() and proc_nr() macros. Do the same for the table with 
   * privilege structures for the system processes. 
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
  	rp->p_rts_flags = SLOT_FREE;		/* initialize free slot */
#if DEBUG_SCHED_CHECK
	rp->p_magic = PMAGIC;
#endif
	rp->p_nr = i;				/* proc number from ptr */
	rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = NONE;			/* initialize as free */
	sp->s_id = i;				/* priv structure index */
	ppriv_addr[i] = sp;			/* priv ptr from number */
  }

  /* Set up proc table entries for processes in boot image.  The stacks of the
   * kernel tasks are initialized to an array in data space.  The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment.  All the
   * processes are in low memory on the 8086.  On the 386 only the kernel
   * is in low memory, the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int ci;
	bitchunk_t fv;

	ip = &image[i];				/* process' attributes */
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	rp->p_max_priority = ip->priority;	/* max scheduling priority */
	rp->p_priority = ip->priority;		/* current priority */
	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
	rp->p_ticks_left = ip->quantum;		/* current credit */
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
	(void) get_priv(rp, (ip->flags & SYS_PROC));    /* assign structure */
	priv(rp)->s_flags = ip->flags;			/* process flags */
	priv(rp)->s_trap_mask = ip->trap_mask;		/* allowed traps */

	/* Warn about violations of the boot image table order consistency. */
	if (priv_id(rp) != s_nr_to_id(ip->proc_nr) && (ip->flags & SYS_PROC))
		kprintf("Warning: boot image table has wrong process order\n");

	/* Initialize call mask bitmap from unordered set.
	 * A single SYS_ALL_CALLS is a special case - it
	 * means all calls are allowed.
	 */
	if(ip->nr_k_calls == 1 && ip->k_calls[0] == SYS_ALL_CALLS)
		fv = ~0;		/* fill call mask */
	else
		fv = 0;			/* clear call mask */

	for(ci = 0; ci < CALL_MASK_SIZE; ci++) 	/* fill or clear call mask */
		priv(rp)->s_k_call_mask[ci] = fv;
	if(!fv)			/* not all allowed? enter calls bit by bit */
		for(ci = 0; ci < ip->nr_k_calls; ci++)
			SET_BIT(priv(rp)->s_k_call_mask,
				ip->k_calls[ci]-KERNEL_CALL);

	for (j = 0; j < NR_SYS_PROCS && j < BITCHUNK_BITS; j++)
		if (ip->ipc_to & (1 << j))
			set_sendto_bit(rp, j);	/* restrict targets */

	if (iskerneln(proc_nr(rp))) {		/* part of the kernel? */ 
		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
			*rp->p_priv->s_stack_guard = STACK_GUARD;
		}
		ktsb += ip->stksize;	/* point to high end of stack */
		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
		hdrindex = 0;		/* all use the first a.out header */
	} else {
		hdrindex = 1 + i-NR_TASKS;	/* servers, drivers, INIT */
	}

	/* Architecture-specific way to find out aout header of this
	 * boot process.
	 */
	arch_get_aout_headers(hdrindex, &e_hdr);

	/* Convert addresses to clicks and build process memory map */
	text_base = e_hdr.a_syms >> CLICK_SHIFT;
	text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
	data_clicks = (e_hdr.a_data+e_hdr.a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
	st_clicks= (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	if (!(e_hdr.a_flags & A_SEP))
	{
		data_clicks= (e_hdr.a_text+e_hdr.a_data+e_hdr.a_bss +
			CLICK_SIZE-1) >> CLICK_SHIFT;
		text_clicks = 0;	   /* common I&D */
	}
	rp->p_memmap[T].mem_phys = text_base;
	rp->p_memmap[T].mem_len  = text_clicks;
	rp->p_memmap[D].mem_phys = text_base + text_clicks;
	rp->p_memmap[D].mem_len  = data_clicks;
	rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
	rp->p_memmap[S].mem_vir  = st_clicks;
	rp->p_memmap[S].mem_len  = 0;

	/* Set initial register values.  The processor status word for tasks 
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes 
	 */
	rp->p_reg.pc = (reg_t) ip->initial_pc;
	rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down one word
	 * to give crtso.s something to use as "argc".
	 */
	if (isusern(proc_nr(rp))) {		/* user-space process? */ 
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= sizeof(reg_t);
	}

	/* scheduling functions depend on proc_ptr pointing somewhere. */
	if(!proc_ptr) proc_ptr = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it. VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if(priv(rp)->s_flags & PROC_FULLVM)
		RTS_SET(rp, VMINHIBIT);
	
	/* Set ready. The HARDWARE task is never ready. */
	if (rp->p_nr == HARDWARE) RTS_SET(rp, PROC_STOP);
	RTS_UNSET(rp, SLOT_FREE); /* remove SLOT_FREE and schedule */
	alloc_segments(rp);
  }
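
The A_SEP test above decides whether text gets its own segment: with separate I&D the text bytes are counted on their own, otherwise text is folded into the data segment and text_clicks is forced to zero. Here is a standalone sketch of that sizing; the A_SEP value and the header numbers are illustrative assumptions.

#include <stdio.h>

#define CLICK_SHIFT 12
#define CLICK_SIZE  (1 << CLICK_SHIFT)
#define A_SEP       0x20		/* assumed flag value for separate I&D */

int main(void)
{
	unsigned long a_text = 8192, a_data = 4096, a_bss = 1024;
	int flags = 0;			/* try 0 (common I&D) or A_SEP */
	unsigned long text_clicks, data_clicks;

	text_clicks = (a_text + CLICK_SIZE - 1) >> CLICK_SHIFT;
	data_clicks = (a_data + a_bss + CLICK_SIZE - 1) >> CLICK_SHIFT;
	if (!(flags & A_SEP)) {
		data_clicks = (a_text + a_data + a_bss + CLICK_SIZE - 1) >> CLICK_SHIFT;
		text_clicks = 0;	/* common I&D: text lives in the data segment */
	}
	printf("text=%lu clicks, data=%lu clicks\n", text_clicks, data_clicks);
	return 0;
}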