void test39a()
{
	int g, p;

	subtest = 1;

	for (g = 0; g <= _ENDPOINT_MAX_GENERATION; g++) {
		for (p = -MAX_NR_TASKS; p < MAX_NR_PROCS; p++) {
			endpoint_t ept;
			int mg, mp;

			ept = _ENDPOINT(g, p);
			mg = _ENDPOINT_G(ept);
			mp = _ENDPOINT_P(ept);

			/* Packing then unpacking must round-trip. */
			if (mg != g || mp != p) e(1);

			/* Generation 0 endpoints equal the slot number. */
			if (g == 0 && ept != p) e(2);

			/* No endpoint may collide with the magic values. */
			if (ept == ANY || ept == SELF || ept == NONE) e(3);
		}
	}
}
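/* The test above pins down three properties of the endpoint encoding:
 * (generation, slot) packs and unpacks losslessly, generation 0 is the
 * identity on slot numbers, and no endpoint collides with the magic values
 * ANY/SELF/NONE. Below is a minimal, self-contained sketch of a packing
 * with those properties; the SKETCH_* names and constants are hypothetical
 * illustrations, not the literal <minix/endpoint.h> definitions.
 */
#include <assert.h>

#define SKETCH_MAX_NR_TASKS	5
#define SKETCH_MAX_NR_PROCS	256
#define SKETCH_GEN_SIZE		(SKETCH_MAX_NR_PROCS + SKETCH_MAX_NR_TASKS)

/* Shift slot numbers up by MAX_NR_TASKS so kernel tasks (negative slots)
 * fit, then lay the generations out end to end. Generation 0 maps each
 * slot to itself, matching the e(2) check above.
 */
#define SKETCH_ENDPOINT(g, p)	((g) * SKETCH_GEN_SIZE + (p))
#define SKETCH_ENDPOINT_G(e)	(((e) + SKETCH_MAX_NR_TASKS) / SKETCH_GEN_SIZE)
#define SKETCH_ENDPOINT_P(e)	\
	((((e) + SKETCH_MAX_NR_TASKS) % SKETCH_GEN_SIZE) - SKETCH_MAX_NR_TASKS)

static void sketch_endpoint_demo(void)
{
	int e = SKETCH_ENDPOINT(2, -3);		/* generation 2, task slot -3 */

	assert(SKETCH_ENDPOINT_G(e) == 2);	/* round-trips, like e(1) */
	assert(SKETCH_ENDPOINT_P(e) == -3);
	assert(SKETCH_ENDPOINT(0, 7) == 7);	/* generation 0 identity, like e(2) */
}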
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
int do_fork(struct proc * caller, message * m_ptr)
{
/* Handle sys_fork().
 * m_lsys_krn_sys_fork.endpt has forked.
 * The child is m_lsys_krn_sys_fork.slot.
 */
#if defined(__i386__)
  char *old_fpu_save_area_p;
#endif
  register struct proc *rpc;		/* child process pointer */
  struct proc *rpp;			/* parent process pointer */
  int gen;
  int p_proc;
  int namelen;

  if(!isokendpt(m_ptr->m_lsys_krn_sys_fork.endpt, &p_proc))
	return EINVAL;

  rpp = proc_addr(p_proc);
  rpc = proc_addr(m_ptr->m_lsys_krn_sys_fork.slot);
  if (isemptyp(rpp) || ! isemptyp(rpc)) return(EINVAL);

  assert(!(rpp->p_misc_flags & MF_DELIVERMSG));

  /* needs to be receiving so we know where the message buffer is */
  if(!RTS_ISSET(rpp, RTS_RECEIVING)) {
	printf("kernel: fork not done synchronously?\n");
	return EINVAL;
  }

  /* make sure that the FPU context is saved in parent before copy */
  save_fpu(rpp);

  /* Copy parent 'proc' struct to child. And reinitialize some fields. */
  gen = _ENDPOINT_G(rpc->p_endpoint);
#if defined(__i386__)
  old_fpu_save_area_p = rpc->p_seg.fpu_state;
#endif
  *rpc = *rpp;				/* copy 'proc' struct */
#if defined(__i386__)
  rpc->p_seg.fpu_state = old_fpu_save_area_p;
  if(proc_used_fpu(rpp))
	memcpy(rpc->p_seg.fpu_state, rpp->p_seg.fpu_state, FPU_XFP_SIZE);
#endif
  if(++gen >= _ENDPOINT_MAX_GENERATION)	/* increase generation */
	gen = 1;			/* generation number wraparound */
  rpc->p_nr = m_ptr->m_lsys_krn_sys_fork.slot;	/* this was obliterated by copy */
  rpc->p_endpoint = _ENDPOINT(gen, rpc->p_nr);	/* new endpoint of slot */

  rpc->p_reg.retreg = 0;	/* child sees pid = 0 to know it is child */
  rpc->p_user_time = 0;		/* set all the accounting times to 0 */
  rpc->p_sys_time = 0;

  rpc->p_misc_flags &=
	~(MF_VIRT_TIMER | MF_PROF_TIMER | MF_SC_TRACE | MF_SPROF_SEEN | MF_STEP);
  rpc->p_virt_left = 0;		/* disable, clear the process-virtual timers */
  rpc->p_prof_left = 0;

  /* Mark process name as being a forked copy */
  namelen = strlen(rpc->p_name);
#define FORKSTR "*F"
  if(namelen+strlen(FORKSTR) < sizeof(rpc->p_name))
	strcat(rpc->p_name, FORKSTR);

  /* the child process is not runnable until it's scheduled. */
  RTS_SET(rpc, RTS_NO_QUANTUM);
  reset_proc_accounting(rpc);

  rpc->p_cpu_time_left = 0;
  rpc->p_cycles = 0;
  rpc->p_kcall_cycles = 0;
  rpc->p_kipc_cycles = 0;
  rpc->p_signal_received = 0;

  /* If the parent is a privileged process, take away the privileges from the
   * child process and inhibit it from running by setting the NO_PRIV flag.
   * The caller should explicitly set the new privileges before executing.
   */
  if (priv(rpp)->s_flags & SYS_PROC) {
	rpc->p_priv = priv_addr(USER_PRIV_ID);
	rpc->p_rts_flags |= RTS_NO_PRIV;
  }

  /* Calculate endpoint identifier, so caller knows what it is. */
  m_ptr->m_krn_lsys_sys_fork.endpt = rpc->p_endpoint;
  m_ptr->m_krn_lsys_sys_fork.msgaddr = rpp->p_delivermsg_vir;

  /* Don't schedule process in VM mode until it has a new pagetable. */
  if(m_ptr->m_lsys_krn_sys_fork.flags & PFF_VMINHIBIT) {
	RTS_SET(rpc, RTS_VMINHIBIT);
  }

  /*
   * Only one in group should have RTS_SIGNALED, child doesn't inherit tracing.
   */
  RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
  (void) sigemptyset(&rpc->p_pending);

#if defined(__i386__)
  rpc->p_seg.p_cr3 = 0;
  rpc->p_seg.p_cr3_v = NULL;
#elif defined(__arm__)
  rpc->p_seg.p_ttbr = 0;
  rpc->p_seg.p_ttbr_v = NULL;
#endif

  return OK;
}
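/* Why do_fork() bumps the generation: slot numbers are recycled, so an
 * endpoint made of the slot number alone would let stale IPC partners
 * address whatever process later lands in that slot. Bumping the
 * generation on reuse makes the new endpoint compare unequal to the old
 * one. A minimal sketch of the wraparound logic above;
 * sketch_next_generation is a hypothetical helper, not a function in
 * this source.
 */
static int sketch_next_generation(int gen, int max_generation)
{
	if (++gen >= max_generation)
		gen = 1;	/* wrap around, skipping 0: main() hands out
				 * generation 0 at boot, so presumably a
				 * recycled endpoint must never look like a
				 * boot-time one for the same slot */
	return gen;
}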
void main(void)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register struct priv *sp;	/* privilege structure pointer */
  register int i, j;
  int hdrindex;			/* index to array of a.out headers */
  phys_clicks text_base;
  vir_clicks text_clicks, data_clicks, st_clicks;
  reg_t ktsb;			/* kernel task stack base */
  struct exec *e_hdr = 0;	/* for a copy of an a.out header */

  /* Global value to test segment sanity. */
  magictest = MAGICTEST;

  /* Clear the process table. Announce each slot as empty and set up mappings
   * for proc_addr() and proc_nr() macros. Do the same for the table with
   * privilege structures for the system processes.
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
	rp->p_rts_flags = RTS_SLOT_FREE;	/* initialize free slot */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
	rp->p_magic = PMAGIC;
#endif
	rp->p_nr = i;				/* proc number from ptr */
	rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = ENDPT_NONE;		/* initialize as free */
	sp->s_id = i;				/* priv structure index */
	ppriv_addr[i] = sp;			/* priv ptr from number */
  }

  /* Set up proc table entries for processes in boot image. The stacks of the
   * kernel tasks are initialized to an array in data space. The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment. All the
   * processes are in low memory on the 8086. On the 386 only the kernel
   * is in low memory, the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc, proc_nr;
	int ipc_to_m, kcalls;

	ip = &image[i];				/* process' attributes */
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	rp->p_max_priority = ip->priority;	/* max scheduling priority */
	rp->p_priority = ip->priority;		/* current priority */
	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
	rp->p_ticks_left = ip->quantum;		/* current credit */

	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run immediately.
	 * All the other system processes are inhibited from running by the
	 * RTS_NO_PRIV flag. They can only be scheduled once the root system
	 * process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
	if(schedulable_proc) {
	    /* Assign privilege structure. Force a static privilege id. */
	    (void) get_priv(rp, static_priv_id(proc_nr));

	    /* Privileges for kernel tasks. */
	    if(iskerneln(proc_nr)) {
		/* Privilege flags. */
		priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
		/* Allowed traps. */
		priv(rp)->s_trap_mask = (proc_nr == CLOCK || proc_nr == SYSTEM ?
		    CSK_T : TSK_T);
		ipc_to_m = TSK_M;		/* allowed targets */
		kcalls = TSK_KC;		/* allowed kernel calls */
	    }
	    /* Privileges for the root system process. */
	    else if(isrootsysn(proc_nr)) {
		priv(rp)->s_flags = RSYS_F;	/* privilege flags */
		priv(rp)->s_trap_mask = RSYS_T;	/* allowed traps */
		ipc_to_m = RSYS_M;		/* allowed targets */
		kcalls = RSYS_KC;		/* allowed kernel calls */
	    }

	    /* Fill in target mask. */
	    for (j=0; j < NR_SYS_PROCS; j++) {
		if (ipc_to_m & (1 << j))
			set_sendto_bit(rp, j);
		else
			unset_sendto_bit(rp, j);
	    }

	    /* Fill in kernel call mask. */
	    for(j = 0; j < CALL_MASK_SIZE; j++) {
		priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
	    }
	}
	else {
	    /* Don't let the process run for now. */
	    RTS_SET(rp, RTS_NO_PRIV);
	}

	if (iskerneln(proc_nr)) {		/* part of the kernel? */
		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
			*rp->p_priv->s_stack_guard = STACK_GUARD;
		}
		ktsb += ip->stksize;	/* point to high end of stack */
		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
		hdrindex = 0;		/* all use the first a.out header */
	} else {
		hdrindex = 1 + i-NR_TASKS;	/* system/user processes */
	}

	/* Architecture-specific way to find out aout header of this
	 * boot process.
	 */
	e_hdr = arch_get_aout_header(hdrindex);

	/* Convert addresses to clicks and build process memory map */
	text_base = e_hdr->a_syms >> CLICK_SHIFT;
	st_clicks = (e_hdr->a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	data_clicks = (e_hdr->a_text + e_hdr->a_data + e_hdr->a_bss +
		CLICK_SIZE-1) >> CLICK_SHIFT;
	text_clicks = 0;

	rp->p_memmap[T].mem_phys = text_base;
	rp->p_memmap[T].mem_len = text_clicks;
	rp->p_memmap[D].mem_phys = text_base + text_clicks;
	rp->p_memmap[D].mem_len = data_clicks;
	rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
	rp->p_memmap[S].mem_vir = st_clicks;
	rp->p_memmap[S].mem_len = 0;

	/* Patch (override) the non-kernel process' entry points in the image
	 * table. The image table is located in kernel/kernel_syms.c. The
	 * kernel processes like IDLE, SYSTEM, CLOCK, HARDWARE are not changed
	 * because they are part of the kernel and their entry points are set
	 * at compilation time. In case of IDLE or HARDWARE the entry point
	 * can be ignored because they never run (set RTS_PROC_STOP).
	 */
	if (!iskerneln(proc_nr(rp)))
		ip->initial_pc = (task_t*)e_hdr->a_entry;

	/* Set initial register values. The processor status word for tasks
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes.
	 */
	rp->p_reg.pc = (reg_t) ip->initial_pc;
	rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down three words
	 * to give crtso.s something to use as "argc", "argv" and "envp".
	 */
	if (isusern(proc_nr)) {		/* user-space process? */
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= 3*sizeof(reg_t);
	}

	/* scheduling functions depend on proc_ptr pointing somewhere. */
	if(!proc_ptr) proc_ptr = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it. VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if(ip->flags & PROC_FULLVM)
		RTS_SET(rp, RTS_VMINHIBIT);

	/* The IDLE and HARDWARE tasks are never put on a run queue as they
	 * are never ready to run.
	 */
	if (rp->p_nr == HARDWARE) RTS_SET(rp, RTS_PROC_STOP);
	if (rp->p_nr == IDLE) RTS_SET(rp, RTS_PROC_STOP);
	RTS_UNSET(rp, RTS_SLOT_FREE);	/* remove RTS_SLOT_FREE and schedule */
	alloc_segments(rp);
  } /* for */

  /* Architecture-dependent initialization. */
  arch_init();

#ifdef CONFIG_DEBUG_KERNEL_STATS_PROFILE
  sprofiling = 0;	/* we're not profiling until instructed to */
#endif
  cprof_procs_no = 0;	/* init nr of hash table slots used */

#ifdef CONFIG_IDLE_TSC
  idle_tsc = cvu64(0);
#endif

  vm_running = 0;
  krandom.random_sources = RANDOM_SOURCES;
  krandom.random_elements = RANDOM_ELEMENTS;

  /* Nucleos is now ready. All boot image processes are on the ready queue.
   * Return to the assembly code to start running the current process.
   */
  bill_ptr = proc_addr(IDLE);	/* it has to point somewhere */
  announce();			/* print Nucleos startup banner */

  /*
   * Enable timer interrupts and clock task on the boot CPU.
   */
  if (boot_cpu_init_timer(system_hz)) {
	kernel_panic("FATAL : failed to initialize timer interrupts, "
		     "cannot continue without any clock source!", NO_NUM);
  }

  /* Warnings for sanity checks that take time. These warnings are printed
   * so that it is clear no full release should be shipped with them enabled.
   */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
  FIXME("CONFIG_DEBUG_KERNEL_SCHED_CHECK enabled");
#endif
#ifdef CONFIG_DEBUG_KERNEL_VMASSERT
  FIXME("CONFIG_DEBUG_KERNEL_VMASSERT enabled");
#endif
#ifdef CONFIG_DEBUG_PROC_CHECK
  FIXME("PROC check enabled");
#endif

  restart();
}
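/* main() above plants STACK_GUARD at the low end of every kernel task
 * stack; since these stacks grow downward, the guard word sits exactly
 * where an overflow would scribble first. A minimal sketch of the
 * corresponding check a debug build could run, e.g. at context-switch
 * time; sketch_check_stack_guard is hypothetical, not a function in this
 * source.
 */
static void sketch_check_stack_guard(struct proc *rp)
{
	reg_t *guard = rp->p_priv->s_stack_guard;

	/* A clobbered guard word means the task ran past its stack. */
	if (guard != NULL && *guard != STACK_GUARD)
		kernel_panic("kernel task stack overwrote its guard word",
			     NO_NUM);
}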
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
PUBLIC void main()
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register struct priv *sp;	/* privilege structure pointer */
  register int i, j, s;
  int hdrindex;			/* index to array of a.out headers */
  phys_clicks text_base;
  vir_clicks text_clicks, data_clicks, st_clicks;
  reg_t ktsb;			/* kernel task stack base */
  struct exec e_hdr;		/* for a copy of an a.out header */

  /* Architecture-dependent initialization. */
  arch_init();

  /* Global value to test segment sanity. */
  magictest = MAGICTEST;

  /* Clear the process table. Announce each slot as empty and set up mappings
   * for proc_addr() and proc_nr() macros. Do the same for the table with
   * privilege structures for the system processes.
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
	rp->p_rts_flags = SLOT_FREE;		/* initialize free slot */
#if DEBUG_SCHED_CHECK
	rp->p_magic = PMAGIC;
#endif
	rp->p_nr = i;				/* proc number from ptr */
	rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = NONE;			/* initialize as free */
	sp->s_id = i;				/* priv structure index */
	ppriv_addr[i] = sp;			/* priv ptr from number */
  }

  /* Set up proc table entries for processes in boot image. The stacks of the
   * kernel tasks are initialized to an array in data space. The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment. All the
   * processes are in low memory on the 8086. On the 386 only the kernel
   * is in low memory, the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int ci;
	bitchunk_t fv;

	ip = &image[i];				/* process' attributes */
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	rp->p_max_priority = ip->priority;	/* max scheduling priority */
	rp->p_priority = ip->priority;		/* current priority */
	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
	rp->p_ticks_left = ip->quantum;		/* current credit */
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
	(void) get_priv(rp, (ip->flags & SYS_PROC));	/* assign structure */
	priv(rp)->s_flags = ip->flags;			/* process flags */
	priv(rp)->s_trap_mask = ip->trap_mask;		/* allowed traps */

	/* Warn about violations of the boot image table order consistency. */
	if (priv_id(rp) != s_nr_to_id(ip->proc_nr) && (ip->flags & SYS_PROC))
		kprintf("Warning: boot image table has wrong process order\n");

	/* Initialize call mask bitmap from unordered set.
	 * A single SYS_ALL_CALLS is a special case - it
	 * means all calls are allowed.
	 */
	if(ip->nr_k_calls == 1 && ip->k_calls[0] == SYS_ALL_CALLS)
		fv = ~0;		/* fill call mask */
	else
		fv = 0;			/* clear call mask */

	for(ci = 0; ci < CALL_MASK_SIZE; ci++)	/* fill or clear call mask */
		priv(rp)->s_k_call_mask[ci] = fv;
	if(!fv)			/* not all full? enter calls bit by bit */
		for(ci = 0; ci < ip->nr_k_calls; ci++)
			SET_BIT(priv(rp)->s_k_call_mask,
				ip->k_calls[ci]-KERNEL_CALL);

	for (j = 0; j < NR_SYS_PROCS && j < BITCHUNK_BITS; j++)
		if (ip->ipc_to & (1 << j))
			set_sendto_bit(rp, j);	/* restrict targets */

	if (iskerneln(proc_nr(rp))) {		/* part of the kernel? */
		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
			*rp->p_priv->s_stack_guard = STACK_GUARD;
		}
		ktsb += ip->stksize;	/* point to high end of stack */
		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
		hdrindex = 0;		/* all use the first a.out header */
	} else {
		hdrindex = 1 + i-NR_TASKS;	/* servers, drivers, INIT */
	}

	/* Architecture-specific way to find out aout header of this
	 * boot process.
	 */
	arch_get_aout_headers(hdrindex, &e_hdr);

	/* Convert addresses to clicks and build process memory map */
	text_base = e_hdr.a_syms >> CLICK_SHIFT;
	text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
	data_clicks = (e_hdr.a_data+e_hdr.a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
	st_clicks= (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	if (!(e_hdr.a_flags & A_SEP)) {		/* common I&D */
		data_clicks = (e_hdr.a_text+e_hdr.a_data+e_hdr.a_bss +
			CLICK_SIZE-1) >> CLICK_SHIFT;
		text_clicks = 0;
	}
	rp->p_memmap[T].mem_phys = text_base;
	rp->p_memmap[T].mem_len = text_clicks;
	rp->p_memmap[D].mem_phys = text_base + text_clicks;
	rp->p_memmap[D].mem_len = data_clicks;
	rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
	rp->p_memmap[S].mem_vir = st_clicks;
	rp->p_memmap[S].mem_len = 0;

	/* Set initial register values. The processor status word for tasks
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes.
	 */
	rp->p_reg.pc = (reg_t) ip->initial_pc;
	rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down one word
	 * to give crtso.s something to use as "argc".
	 */
	if (isusern(proc_nr(rp))) {		/* user-space process? */
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= sizeof(reg_t);
	}

	/* scheduling functions depend on proc_ptr pointing somewhere. */
	if(!proc_ptr) proc_ptr = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it. VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if(priv(rp)->s_flags & PROC_FULLVM)
		RTS_SET(rp, VMINHIBIT);

	/* Set ready. The HARDWARE task is never ready. */
	if (rp->p_nr == HARDWARE) RTS_SET(rp, PROC_STOP);
	RTS_UNSET(rp, SLOT_FREE);	/* remove SLOT_FREE and schedule */
	alloc_segments(rp);
  }
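/* The loop above enters each allowed kernel call into the per-process
 * bitmap with SET_BIT. A minimal sketch of the permission check the
 * kernel can then perform on an incoming call, assuming GET_BIT from
 * <minix/bitmap.h> as the counterpart of SET_BIT; sketch_kcall_allowed
 * and its bounds check are illustrative, not the actual dispatch code.
 */
static int sketch_kcall_allowed(struct proc *caller, int call_nr)
{
	int bit = call_nr - KERNEL_CALL;	/* calls are numbered upward
						 * from KERNEL_CALL, mirroring
						 * the SET_BIT offset above */

	if (bit < 0 || bit >= NR_SYS_CALLS)
		return 0;			/* out of range: deny */
	return GET_BIT(priv(caller)->s_k_call_mask, bit);
}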