/*===========================================================================*
 *				do_nice					     *
 *===========================================================================*/
PUBLIC int do_nice(message *m_ptr)
{
/* Change the priority of a process. */
  int proc_nr, pri, new_q;
  register struct proc *rp;

  /* Extract the message parameters and do sanity checking. */
  if (!isokendpt(m_ptr->PR_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  pri = m_ptr->PR_PRIORITY;
  rp = proc_addr(proc_nr);

  /* The value passed in is currently between PRIO_MIN and PRIO_MAX.
   * We have to scale this between MAX_USER_Q and MIN_USER_Q to match
   * the kernel's scheduling queues.
   */
  if (pri < PRIO_MIN || pri > PRIO_MAX) return(EINVAL);

  new_q = MAX_USER_Q + (pri-PRIO_MIN) * (MIN_USER_Q-MAX_USER_Q+1) /
      (PRIO_MAX-PRIO_MIN+1);
  if (new_q < MAX_USER_Q) new_q = MAX_USER_Q;	/* shouldn't happen */
  if (new_q > MIN_USER_Q) new_q = MIN_USER_Q;	/* shouldn't happen */

  /* Make sure the process is not running while changing its priority.
   * Put the process back in its new queue if it is runnable.
   */
  RTS_LOCK_SET(rp, SYS_LOCK);
  rp->p_max_priority = rp->p_priority = new_q;
  RTS_LOCK_UNSET(rp, SYS_LOCK);

  return(OK);
}
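/* Editor's sketch (not in the original source): the nice-to-queue scaling is
 * easier to see with concrete values. The constants below are assumptions
 * based on the classic MINIX defaults (PRIO_MIN = -20, PRIO_MAX = 20,
 * MAX_USER_Q = 0, MIN_USER_Q = 14); the arithmetic is copied verbatim from
 * do_nice() above.
 */
#include <stdio.h>

#define PRIO_MIN   (-20)	/* assumed */
#define PRIO_MAX     20		/* assumed */
#define MAX_USER_Q    0		/* assumed: highest-priority user queue */
#define MIN_USER_Q   14		/* assumed: lowest-priority user queue */

int main(void)
{
	int pri;

	for (pri = PRIO_MIN; pri <= PRIO_MAX; pri += 10) {
		int new_q = MAX_USER_Q + (pri - PRIO_MIN) *
		    (MIN_USER_Q - MAX_USER_Q + 1) / (PRIO_MAX - PRIO_MIN + 1);
		printf("nice %3d -> queue %2d\n", pri, new_q);
	}
	/* With these constants, nice 0 lands in queue 7, the default user
	 * queue; -20 maps to queue 0 and +20 to queue 14.
	 */
	return 0;
}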
/*===========================================================================*
 *				do_runctl				     *
 *===========================================================================*/
int do_runctl(struct proc * caller, message * m_ptr)
{
/* Control a process's RTS_PROC_STOP flag. Used for process management.
 * If the process is queued sending a message or stopped for system call
 * tracing, and the RC_DELAY request flag is given, set MF_SIG_DELAY instead
 * of RTS_PROC_STOP, and send a SIGSNDELAY signal later when the process is
 * done sending (ending the delay). Used by PM for safe signal delivery.
 */
  int proc_nr, action, flags;
  register struct proc *rp;

  /* Extract the message parameters and do sanity checking. */
  if (!isokendpt(m_ptr->RC_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  action = m_ptr->RC_ACTION;
  flags = m_ptr->RC_FLAGS;

  /* Is the target sending or syscall-traced? Then set MF_SIG_DELAY instead.
   * Do this only when the RC_DELAY flag is set in the request flags field.
   * The process will not become runnable before PM has called SYS_ENDKSIG.
   * Note that asynchronous messages are not covered: a process using SENDA
   * should not also install signal handlers *and* expect POSIX compliance.
   */
  if (action == RC_STOP && (flags & RC_DELAY)) {
	if (RTS_ISSET(rp, RTS_SENDING) || (rp->p_misc_flags & MF_SC_DEFER))
		rp->p_misc_flags |= MF_SIG_DELAY;

	if (rp->p_misc_flags & MF_SIG_DELAY)
		return (EBUSY);
  }

  /* Either set or clear the stop flag. */
  switch (action) {
  case RC_STOP:
#if CONFIG_SMP
	/* Check if we must stop a process on a different CPU. */
	if (rp->p_cpu != cpuid) {
		smp_schedule_stop_proc(rp);
		break;
	}
#endif
	RTS_SET(rp, RTS_PROC_STOP);
	break;
  case RC_RESUME:
	assert(RTS_ISSET(rp, RTS_PROC_STOP));
	RTS_UNSET(rp, RTS_PROC_STOP);
	break;
  default:
	return(EINVAL);
  }

  return(OK);
}
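/* Editor's sketch (not in the original source): how a signal manager such as
 * PM might drive this handler from user space. sys_runctl() is the syslib
 * wrapper for this kernel call, assumed here to have its usual prototype
 * sys_runctl(endpoint, action, flags); the EBUSY handling policy is an
 * illustrative assumption, not PM's literal code.
 */
static int stop_proc_for_signal(endpoint_t proc_ep)
{
	int r;

	r = sys_runctl(proc_ep, RC_STOP, RC_DELAY);
	if (r == EBUSY) {
		/* The target was sending or deferring a system call, so the
		 * kernel set MF_SIG_DELAY instead of stopping it. SIGSNDELAY
		 * will arrive once the send completes; the caller should wait
		 * for that signal rather than poll.
		 */
		return EBUSY;
	}
	return r;	/* OK, or a genuine error such as EINVAL/EPERM */
}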
/*===========================================================================*
 *			      do_sigreturn				     *
 *===========================================================================*/
PUBLIC int do_sigreturn(struct proc * caller, message * m_ptr)
{
/* POSIX style signals require sys_sigreturn to put things in order before
 * the signalled process can resume execution.
 */
  struct sigcontext sc;
  register struct proc *rp;
  int proc_nr, r;

  if (!isokendpt(m_ptr->SIG_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  /* Copy in the sigcontext structure. */
  if ((r = data_copy(m_ptr->SIG_ENDPT, (vir_bytes) m_ptr->SIG_CTXT_PTR,
	KERNEL, (vir_bytes) &sc, sizeof(struct sigcontext))) != OK)
	return r;

  /* Restore user bits of psw from sc, maintain system bits from proc. */
  sc.sc_psw = (sc.sc_psw & X86_FLAGS_USER) |
	(rp->p_reg.psw & ~X86_FLAGS_USER);

#if (_MINIX_CHIP == _CHIP_INTEL)
  /* Don't panic kernel if user gave bad selectors. */
  sc.sc_cs = rp->p_reg.cs;
  sc.sc_ds = rp->p_reg.ds;
  sc.sc_es = rp->p_reg.es;
  sc.sc_ss = rp->p_reg.ss;
#if _WORD_SIZE == 4
  sc.sc_fs = rp->p_reg.fs;
  sc.sc_gs = rp->p_reg.gs;
#endif
#endif

  /* Restore the registers. */
  memcpy(&rp->p_reg, &sc.sc_regs, sizeof(sigregs));
#if (_MINIX_CHIP == _CHIP_INTEL)
  if (sc.sc_flags & MF_FPU_INITIALIZED) {
	memcpy(rp->p_fpu_state.fpu_save_area_p, &sc.sc_fpu_state,
		FPU_XFP_SIZE);
	rp->p_misc_flags |= MF_FPU_INITIALIZED; /* restore math usage flag */
	/* Force reloading of the FPU. */
	if (fpu_owner == rp) release_fpu();
  }
#endif

  rp->p_misc_flags |= MF_CONTEXT_SET;

  return(OK);
}
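/* Editor's note (not in the original source): a minimal sketch of the PSW
 * merge rule used above. Only the user-controllable flag bits come from the
 * handler-supplied sigcontext; everything kernel-managed (interrupt enable,
 * I/O privilege level) is kept from the process's current PSW. X86_FLAGS_USER
 * is the kernel's mask for the user-settable bits; this helper is purely
 * illustrative.
 */
static reg_t merge_psw(reg_t user_psw, reg_t current_psw)
{
	return (user_psw & X86_FLAGS_USER) | (current_psw & ~X86_FLAGS_USER);
}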
/*===========================================================================*
 *				do_newmap				     *
 *===========================================================================*/
int do_newmap(struct proc * caller, message * m_ptr)
{
/* Handle sys_newmap().  Fetch the memory map. */
  struct proc *rp;		/* process whose map is to be loaded */
  struct mem_map *map_ptr;	/* virtual address of map inside caller */
  int proc_nr;

  map_ptr = (struct mem_map *) m_ptr->PR_MEM_PTR;
  if (!isokendpt(m_ptr->PR_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  return newmap(caller, rp, map_ptr);
}
/*===========================================================================*
 *			     do_getmcontext				     *
 *===========================================================================*/
int do_getmcontext(struct proc * caller, message * m_ptr)
{
/* Retrieve machine context of a process. */
  register struct proc *rp;
  int proc_nr, r;
  mcontext_t mc;

  if (!isokendpt(m_ptr->m_lsys_krn_sys_getmcontext.endpt, &proc_nr))
	return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

#if defined(__i386__)
  if (!proc_used_fpu(rp))
	return(OK);	/* No state to copy. */
#endif

  /* Get the mcontext structure into our address space. */
  if ((r = data_copy(m_ptr->m_lsys_krn_sys_getmcontext.endpt,
		m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr, KERNEL,
		(vir_bytes) &mc, (phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  mc.mc_flags = 0;
#if defined(__i386__)
  /* Copy FPU state. */
  if (proc_used_fpu(rp)) {
	/* Make sure that the FPU context is saved into the proc structure
	 * first.
	 */
	save_fpu(rp);
	mc.mc_flags = (rp->p_misc_flags & MF_FPU_INITIALIZED) ?
		_MC_FPU_SAVED : 0;
	assert(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
	memcpy(&(mc.__fpregs.__fp_reg_set), rp->p_seg.fpu_state,
		FPU_XFP_SIZE);
  }
#endif

  /* Copy the mcontext structure to the user's address space. */
  if ((r = data_copy(KERNEL, (vir_bytes) &mc,
		m_ptr->m_lsys_krn_sys_getmcontext.endpt,
		m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr,
		(phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  return(OK);
}
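/* Editor's sketch (not in the original source): the caller's side of this
 * call. The syslib wrapper sys_getmcontext packages the endpt/ctx_ptr message
 * fields used above; its exact prototype varies across MINIX versions, so the
 * signature used here is an assumption.
 */
static int fetch_mcontext(endpoint_t ep, mcontext_t *mc)
{
	return sys_getmcontext(ep, (vir_bytes) mc);
}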
/*===========================================================================*
 *				do_nice					     *
 *===========================================================================*/
PUBLIC int do_nice(message *m_ptr)
{
/* Change process priority or stop the process. */
  int proc_nr, pri, new_q;
  register struct proc *rp;

  /* Extract the message parameters and do sanity checking. */
  if (!isokendpt(m_ptr->PR_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  pri = m_ptr->PR_PRIORITY;
  rp = proc_addr(proc_nr);

  if (is_rtp(rp)) return(EPERM);	/* don't allow nice for RT processes */

  if (pri == PRIO_STOP) {
	/* Take process off the scheduling queues. */
	lock_dequeue(rp);
	rp->p_rts_flags |= NO_PRIORITY;
	return(OK);
  } else if (pri >= PRIO_MIN && pri <= PRIO_MAX) {
	/* The value passed in is currently between PRIO_MIN and PRIO_MAX.
	 * We have to scale this between MAX_USER_Q and MIN_USER_Q to match
	 * the kernel's scheduling queues.
	 */
	new_q = MAX_USER_Q + (pri-PRIO_MIN) * (MIN_USER_Q-MAX_USER_Q+1) /
	    (PRIO_MAX-PRIO_MIN+1);
	if (new_q < MAX_USER_Q) new_q = MAX_USER_Q;	/* shouldn't happen */
	if (new_q > MIN_USER_Q) new_q = MIN_USER_Q;	/* shouldn't happen */

	if (new_q == RT_Q && !is_rtp(rp))
		return(EINVAL);	/* don't allow other processes in the RT queue */

	/* Make sure the process is not running while changing its priority.
	 * Put the process back in its new queue if it is runnable.
	 */
	lock_dequeue(rp);
	rp->p_max_priority = rp->p_priority = new_q;
	if (!rp->p_rts_flags) lock_enqueue(rp);

	return(OK);
  }
  return(EINVAL);
}
/*===========================================================================*
 *				do_quantum				     *
 *===========================================================================*/
PUBLIC int do_quantum(message *m_ptr)
{
  int proc_nr, quantum;
  register struct proc *rp;

  /* Get the number of the process to be changed. */
  proc_nr = m_ptr->PR_PROC_NR;

  /* The process number must be valid. */
  if (!isokprocn(proc_nr)) return(EINVAL);

  /* Only allow changing the quantum size of user processes. */
  if (iskerneln(proc_nr)) return(EPERM);

  /* The new quantum size. */
  quantum = m_ptr->PR_QUANTUM;

  /* Check that the size respects the allowed limits. */
  if (quantum < MIN_QUANTUM_SIZE) {
	kprintf("WARNING: quantum size is below MIN_QUANTUM_SIZE, "
		"it will be raised to %d ticks\n", MIN_QUANTUM_SIZE);
	quantum = MIN_QUANTUM_SIZE;
  } else if (quantum > MAX_QUANTUM_SIZE) {
	kprintf("WARNING: quantum size exceeds MAX_QUANTUM_SIZE, "
		"it will be lowered to %d ticks\n", MAX_QUANTUM_SIZE);
	quantum = MAX_QUANTUM_SIZE;
  }

  /* Select the process to update. */
  rp = proc_addr(proc_nr);

  /* Remove it from the queue. */
  lock_dequeue(rp);

  /* Update the quantum size. */
  rp->p_quantum_size = quantum;

  /* Put the process back on the queue if it is runnable. */
  if (!rp->p_rts_flags) lock_enqueue(rp);

  return(OK);
}
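/* Editor's sketch (not in the original source): the clamping policy above,
 * extracted as a pure helper so it can be exercised outside the kernel.
 * MIN_QUANTUM_SIZE and MAX_QUANTUM_SIZE come from the kernel headers; the
 * helper itself is illustrative only.
 */
static int clamp_quantum(int ticks)
{
	if (ticks < MIN_QUANTUM_SIZE) return MIN_QUANTUM_SIZE;
	if (ticks > MAX_QUANTUM_SIZE) return MAX_QUANTUM_SIZE;
	return ticks;
}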
void arch_proc_reset(struct proc *pr)
{
  char *v = NULL;
  struct stackframe_s reg;

  assert(pr->p_nr < NR_PROCS);

  if (pr->p_nr >= 0) {
	v = fpu_state[pr->p_nr];
	/* Verify alignment. */
	assert(!((vir_bytes)v % FPUALIGN));
	/* Initialize state. */
	memset(v, 0, FPU_XFP_SIZE);
  }

  /* Clear process state. */
  memset(&reg, 0, sizeof(pr->p_reg));
  if (iskerneln(pr->p_nr))
	reg.psw = INIT_TASK_PSW;
  else
	reg.psw = INIT_PSW;

  pr->p_seg.fpu_state = v;

  /* Initialize the fundamentals that are (initially) the same for all
   * processes - the segment selectors it gets to use.
   */
  pr->p_reg.cs = USER_CS_SELECTOR;
  pr->p_reg.gs =
  pr->p_reg.fs =
  pr->p_reg.ss =
  pr->p_reg.es =
  pr->p_reg.ds = USER_DS_SELECTOR;

  /* Set full context and make sure it gets restored. */
  arch_proc_setcontext(pr, &reg, 0, KTS_FULLCONTEXT);
}
/*==========================================================================*
 *				do_trace				    *
 *==========================================================================*/
PUBLIC int do_trace(struct proc * caller, message * m_ptr)
{
/* Handle the debugging commands supported by the ptrace system call
 * The commands are:
 * T_STOP	stop the process
 * T_OK		enable tracing by parent for this process
 * T_GETINS	return value from instruction space
 * T_GETDATA	return value from data space
 * T_GETUSER	return value from user process table
 * T_SETINS	set value in instruction space
 * T_SETDATA	set value in data space
 * T_SETUSER	set value in user process table
 * T_RESUME	resume execution
 * T_EXIT	exit
 * T_STEP	set trace bit
 * T_SYSCALL	trace system call
 * T_ATTACH	attach to an existing process
 * T_DETACH	detach from a traced process
 * T_SETOPT	set trace options
 * T_GETRANGE	get range of values
 * T_SETRANGE	set range of values
 *
 * The T_OK, T_ATTACH, T_EXIT, and T_SETOPT commands are handled completely by
 * the process manager. T_GETRANGE and T_SETRANGE use sys_vircopy(). All
 * others come here.
 */
  register struct proc *rp;
  vir_bytes tr_addr = (vir_bytes) m_ptr->CTL_ADDRESS;
  long tr_data = m_ptr->CTL_DATA;
  int tr_request = m_ptr->CTL_REQUEST;
  int tr_proc_nr_e = m_ptr->CTL_ENDPT, tr_proc_nr;
  unsigned char ub;
  int i;

#define COPYTOPROC(seg, addr, myaddr, length) {		\
	struct vir_addr fromaddr, toaddr;		\
	int r;						\
	fromaddr.proc_nr_e = KERNEL;			\
	toaddr.proc_nr_e = tr_proc_nr_e;		\
	fromaddr.offset = (myaddr);			\
	toaddr.offset = (addr);				\
	fromaddr.segment = D;				\
	toaddr.segment = (seg);				\
	if ((r = virtual_copy_vmcheck(caller, &fromaddr,	\
			&toaddr, length)) != OK) {	\
		printf("Can't copy in sys_trace: %d\n", r); \
		return r;				\
	}						\
}

#define COPYFROMPROC(seg, addr, myaddr, length) {	\
	struct vir_addr fromaddr, toaddr;		\
	int r;						\
	fromaddr.proc_nr_e = tr_proc_nr_e;		\
	toaddr.proc_nr_e = KERNEL;			\
	fromaddr.offset = (addr);			\
	toaddr.offset = (myaddr);			\
	fromaddr.segment = (seg);			\
	toaddr.segment = D;				\
	if ((r = virtual_copy_vmcheck(caller, &fromaddr,	\
			&toaddr, length)) != OK) {	\
		printf("Can't copy in sys_trace: %d\n", r); \
		return r;				\
	}						\
}

  if (!isokendpt(tr_proc_nr_e, &tr_proc_nr)) return(EINVAL);
  if (iskerneln(tr_proc_nr)) return(EPERM);

  rp = proc_addr(tr_proc_nr);
  if (isemptyp(rp)) return(EINVAL);
  switch (tr_request) {
  case T_STOP:			/* stop process */
	RTS_SET(rp, RTS_P_STOP);
	rp->p_reg.psw &= ~TRACEBIT;		/* clear trace bit */
	rp->p_misc_flags &= ~MF_SC_TRACE;	/* clear syscall trace flag */
	return(OK);

  case T_GETINS:		/* return value from instruction space */
	COPYFROMPROC(T, tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->CTL_DATA = tr_data;
	break;

  case T_GETDATA:		/* return value from data space */
	COPYFROMPROC(D, tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->CTL_DATA = tr_data;
	break;

  case T_GETUSER:		/* return value from process table */
	if ((tr_addr & (sizeof(long) - 1)) != 0) return(EFAULT);

	if (tr_addr <= sizeof(struct proc) - sizeof(long)) {
		m_ptr->CTL_DATA = *(long *) ((char *) rp + (int) tr_addr);
		break;
	}

	/* The process's proc struct is followed by its priv struct.
	 * The alignment here should be unnecessary, but better safe..
	 */
	i = sizeof(long) - 1;
	tr_addr -= (sizeof(struct proc) + i) & ~i;

	if (tr_addr > sizeof(struct priv) - sizeof(long)) return(EFAULT);

	m_ptr->CTL_DATA = *(long *) ((char *) rp->p_priv + (int) tr_addr);
	break;

  case T_SETINS:		/* set value in instruction space */
	COPYTOPROC(T, tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->CTL_DATA = 0;
	break;

  case T_SETDATA:		/* set value in data space */
	COPYTOPROC(D, tr_addr, (vir_bytes) &tr_data, sizeof(long));
	m_ptr->CTL_DATA = 0;
	break;

  case T_SETUSER:		/* set value in process table */
	if ((tr_addr & (sizeof(reg_t) - 1)) != 0 ||
	     tr_addr > sizeof(struct stackframe_s) - sizeof(reg_t))
		return(EFAULT);
	i = (int) tr_addr;
#if (_MINIX_CHIP == _CHIP_INTEL)
	/* Altering segment registers might crash the kernel when it
	 * tries to load them prior to restarting a process, so do
	 * not allow it.
	 */
	if (i == (int) &((struct proc *) 0)->p_reg.cs ||
	    i == (int) &((struct proc *) 0)->p_reg.ds ||
	    i == (int) &((struct proc *) 0)->p_reg.es ||
#if _WORD_SIZE == 4
	    i == (int) &((struct proc *) 0)->p_reg.gs ||
	    i == (int) &((struct proc *) 0)->p_reg.fs ||
#endif
	    i == (int) &((struct proc *) 0)->p_reg.ss)
		return(EFAULT);
#endif
	if (i == (int) &((struct proc *) 0)->p_reg.psw)
		/* only selected bits are changeable */
		SETPSW(rp, tr_data);
	else
		*(reg_t *) ((char *) &rp->p_reg + i) = (reg_t) tr_data;
	m_ptr->CTL_DATA = 0;
	break;

  case T_DETACH:		/* detach tracer */
	rp->p_misc_flags &= ~MF_SC_ACTIVE;

	/* fall through */
  case T_RESUME:		/* resume execution */
	RTS_UNSET(rp, RTS_P_STOP);
	m_ptr->CTL_DATA = 0;
	break;

  case T_STEP:			/* set trace bit */
	rp->p_reg.psw |= TRACEBIT;
	RTS_UNSET(rp, RTS_P_STOP);
	m_ptr->CTL_DATA = 0;
	break;

  case T_SYSCALL:		/* trace system call */
	rp->p_misc_flags |= MF_SC_TRACE;
	RTS_UNSET(rp, RTS_P_STOP);
	m_ptr->CTL_DATA = 0;
	break;

  case T_READB_INS:		/* get value from instruction space */
	COPYFROMPROC(T, tr_addr, (vir_bytes) &ub, 1);
	m_ptr->CTL_DATA = ub;
	break;

  case T_WRITEB_INS:		/* set value in instruction space */
	ub = (unsigned char) (tr_data & 0xff);
	COPYTOPROC(T, tr_addr, (vir_bytes) &ub, 1);
	m_ptr->CTL_DATA = 0;
	break;

  default:
	return(EINVAL);
  }
  return(OK);
}
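/* Editor's sketch (not in the original source): the server-side convenience
 * wrapper for this handler. syslib's sys_trace(req, endpoint, addr, &data)
 * packages the CTL_* message fields used above; treat this exact usage as an
 * assumption rather than PM's literal code.
 */
static int trace_peek_data(endpoint_t proc_ep, long addr, long *val)
{
	/* Read one long from the stopped tracee's data space (T_GETDATA). */
	return sys_trace(T_GETDATA, proc_ep, addr, val);
}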
/*===========================================================================*
 *			    sef_cb_init_fresh				     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the reincarnation server. */
  struct boot_image *ip;
  int s,i;
  int nr_image_srvs, nr_image_priv_srvs, nr_uncaught_init_srvs;
  struct rproc *rp;
  struct rproc *replica_rp;
  struct rprocpub *rpub;
  struct boot_image image[NR_BOOT_PROCS];
  struct boot_image_priv *boot_image_priv;
  struct boot_image_sys *boot_image_sys;
  struct boot_image_dev *boot_image_dev;
  int pid, replica_pid;
  endpoint_t replica_endpoint;
  int ipc_to;
  int *calls;
  int all_c[] = { ALL_C, NULL_C };
  int no_c[] = { NULL_C };

  /* See if we run in verbose mode. */
  env_parse("rs_verbose", "d", 0, &rs_verbose, 0, 1);

  if ((s = sys_getinfo(GET_HZ, &system_hz, sizeof(system_hz), 0, 0)) != OK)
	panic("Cannot get system timer frequency\n");

  /* Initialize the global init descriptor. */
  rinit.rproctab_gid = cpf_grant_direct(ANY, (vir_bytes) rprocpub,
	sizeof(rprocpub), CPF_READ);
  if (!GRANT_VALID(rinit.rproctab_gid)) {
	panic("unable to create rprocpub table grant: %d",
		rinit.rproctab_gid);
  }

  /* Initialize some global variables. */
  rupdate.flags = 0;
  shutting_down = FALSE;

  /* Get a copy of the boot image table. */
  if ((s = sys_getimage(image)) != OK) {
	panic("unable to get copy of boot image table: %d", s);
  }

  /* Determine the number of system services in the boot image table. */
  nr_image_srvs = 0;
  for (i=0; i < NR_BOOT_PROCS; i++) {
	ip = &image[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(ip->endpoint))) {
		continue;
	}
	nr_image_srvs++;
  }

  /* Determine the number of entries in the boot image priv table and make
   * sure it matches the number of system services in the boot image table.
   */
  nr_image_priv_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}
	nr_image_priv_srvs++;
  }
  if (nr_image_srvs != nr_image_priv_srvs) {
	panic("boot image table and boot image priv table mismatch");
  }

  /* Reset the system process table. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
	rp->r_flags = 0;
	rp->r_pub = &rprocpub[rp - rproc];
	rp->r_pub->in_use = FALSE;
  }

  /* Initialize the system process table in 4 steps, each of them following
   * the appearance of system services in the boot image priv table.
   * - Step 1: set privileges, sys properties, and dev properties (if any)
   *   for every system service.
   */
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}

	/* Lookup the corresponding entries in other tables. */
	boot_image_info_lookup(boot_image_priv->endpoint, image,
		&ip, NULL, &boot_image_sys, &boot_image_dev);
	rp = &rproc[boot_image_priv - boot_image_priv_table];
	rpub = rp->r_pub;

	/*
	 * Set privileges.
	 */
	/* Get label. */
	strcpy(rpub->label, boot_image_priv->label);

	/* Force a static priv id for system services in the boot image. */
	rp->r_priv.s_id = static_priv_id(
		_ENDPOINT_P(boot_image_priv->endpoint));

	/* Initialize privilege bitmaps and signal manager. */
	rp->r_priv.s_flags = boot_image_priv->flags;	       /* priv flags */
	rp->r_priv.s_trap_mask = SRV_OR_USR(rp, SRV_T, USR_T); /* traps */
	ipc_to = SRV_OR_USR(rp, SRV_M, USR_M);		       /* targets */
	fill_send_mask(&rp->r_priv.s_ipc_to, ipc_to == ALL_M);
	rp->r_priv.s_sig_mgr = SRV_OR_USR(rp, SRV_SM, USR_SM); /* sig mgr */
	rp->r_priv.s_bak_sig_mgr = NONE;		/* backup sig mgr */

	/* Initialize kernel call mask bitmap. */
	calls = SRV_OR_USR(rp, SRV_KC, USR_KC) == ALL_C ? all_c : no_c;
	fill_call_mask(calls, NR_SYS_CALLS,
		rp->r_priv.s_k_call_mask, KERNEL_CALL, TRUE);

	/* Set the privilege structure. */
	if (boot_image_priv->endpoint != RS_PROC_NR) {
		if ((s = sys_privctl(ip->endpoint, SYS_PRIV_SET_SYS,
			&(rp->r_priv))) != OK) {
			panic("unable to set privilege structure: %d", s);
		}
	}

	/* Synch the privilege structure with the kernel. */
	if ((s = sys_getpriv(&(rp->r_priv), ip->endpoint)) != OK) {
		panic("unable to synch privilege structure: %d", s);
	}

	/*
	 * Set sys properties.
	 */
	rpub->sys_flags = boot_image_sys->flags;	/* sys flags */

	/*
	 * Set dev properties.
	 */
	rpub->dev_flags = boot_image_dev->flags;	/* device flags */
	rpub->dev_nr = boot_image_dev->dev_nr;		/* major device number */
	rpub->dev_style = boot_image_dev->dev_style;	/* device style */
	rpub->dev_style2 = boot_image_dev->dev_style2;	/* device style 2 */

	/* Get process name. */
	strcpy(rpub->proc_name, ip->proc_name);

	/* Build command settings. */
	rp->r_cmd[0]= '\0';
	rp->r_script[0]= '\0';
	build_cmd_dep(rp);

	/* Initialize vm call mask bitmap. */
	calls = SRV_OR_USR(rp, SRV_VC, USR_VC) == ALL_C ? all_c : no_c;
	fill_call_mask(calls, NR_VM_CALLS, rpub->vm_call_mask, VM_RQ_BASE,
		TRUE);

	/* Scheduling parameters. */
	rp->r_scheduler = SRV_OR_USR(rp, SRV_SCH, USR_SCH);
	rp->r_priority = SRV_OR_USR(rp, SRV_Q, USR_Q);
	rp->r_quantum = SRV_OR_USR(rp, SRV_QT, USR_QT);

	/* Get some settings from the boot image table. */
	rpub->endpoint = ip->endpoint;

	/* Set some defaults. */
	rp->r_old_rp = NULL;			/* no old version yet */
	rp->r_new_rp = NULL;			/* no new version yet */
	rp->r_prev_rp = NULL;			/* no prev replica yet */
	rp->r_next_rp = NULL;			/* no next replica yet */
	rp->r_uid = 0;				/* root */
	rp->r_check_tm = 0;			/* not checked yet */
	getuptime(&rp->r_alive_tm);		/* currently alive */
	rp->r_stop_tm = 0;			/* not exiting yet */
	rp->r_restarts = 0;			/* no restarts so far */
	rp->r_period = 0;			/* no period yet */
	rp->r_exec = NULL;			/* no in-memory copy yet */
	rp->r_exec_len = 0;

	/* Mark as in use and active. */
	rp->r_flags = RS_IN_USE | RS_ACTIVE;
	rproc_ptr[_ENDPOINT_P(rpub->endpoint)]= rp;
	rpub->in_use = TRUE;
  }

  /* - Step 2: allow every system service in the boot image to run. */
  nr_uncaught_init_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}

	/* Lookup the corresponding slot in the system process table. */
	rp = &rproc[boot_image_priv - boot_image_priv_table];
	rpub = rp->r_pub;

	/* RS is already running as we speak. */
	if (boot_image_priv->endpoint == RS_PROC_NR) {
		if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
			panic("unable to initialize RS: %d", s);
		}
		continue;
	}

	/* Allow the service to run. */
	if ((s = sched_init_proc(rp)) != OK) {
		panic("unable to initialize scheduling: %d", s);
	}
	if ((s = sys_privctl(rpub->endpoint, SYS_PRIV_ALLOW, NULL)) != OK) {
		panic("unable to initialize privileges: %d", s);
	}

	/* Initialize service. We assume every service will always get
	 * back to us here at boot time.
	 */
	if (boot_image_priv->flags & SYS_PROC) {
		if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
			panic("unable to initialize service: %d", s);
		}
		if (rpub->sys_flags & SF_SYNCH_BOOT) {
			/* Catch init ready message now to synchronize. */
			catch_boot_init_ready(rpub->endpoint);
		} else {
			/* Catch init ready message later. */
			nr_uncaught_init_srvs++;
		}
	}
  }

  /* - Step 3: let every system service complete initialization by
   *   catching all the init ready messages left.
   */
  while (nr_uncaught_init_srvs) {
	catch_boot_init_ready(ANY);
	nr_uncaught_init_srvs--;
  }

  /* - Step 4: all the system services in the boot image are now running.
   *   Complete the initialization of the system process table in
   *   collaboration with other system services.
   */
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}

	/* Lookup the corresponding slot in the system process table. */
	rp = &rproc[boot_image_priv - boot_image_priv_table];
	rpub = rp->r_pub;

	/* Get pid from PM. */
	rp->r_pid = getnpid(rpub->endpoint);
	if (rp->r_pid == -1) {
		panic("unable to get pid");
	}
  }

  /* Set alarm to periodically check service status. */
  if (OK != (s=sys_setalarm(RS_DELTA_T, 0)))
	panic("couldn't set alarm: %d", s);

  /* Now create a new RS instance with a private page table and let the
   * current instance live update into the replica. Clone RS' own slot first.
   */
  rp = rproc_ptr[_ENDPOINT_P(RS_PROC_NR)];
  if ((s = clone_slot(rp, &replica_rp)) != OK) {
	panic("unable to clone current RS instance: %d", s);
  }

  /* Fork a new RS instance. */
  pid = srv_fork();
  if (pid == -1) {
	panic("unable to fork a new RS instance");
  }
  replica_pid = pid ? pid : getpid();
  replica_endpoint = getnprocnr(replica_pid);
  replica_rp->r_pid = replica_pid;
  replica_rp->r_pub->endpoint = replica_endpoint;

  if (pid == 0) {
	/* New RS instance running. */

	/* Live update the old instance into the new one. */
	s = update_service(&rp, &replica_rp, RS_SWAP);
	if (s != OK) {
		panic("unable to live update RS: %d", s);
	}
	cpf_reload();

	/* Clean up the old RS instance, the new instance will take over. */
	cleanup_service(rp);

	/* Map out our own text and data. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Ask VM to pin memory for the new RS instance. */
	if ((s = vm_memctl(RS_PROC_NR, VM_RS_MEM_PIN)) != OK) {
		panic("unable to pin memory for the new RS instance: %d", s);
	}
  } else {
	/* Old RS instance running. */

	/* Set up privileges for the new instance and let it run. */
	s = sys_privctl(replica_endpoint, SYS_PRIV_SET_SYS,
		&(replica_rp->r_priv));
	if (s != OK) {
		panic("unable to set privileges for the new RS instance: %d",
			s);
	}
	if ((s = sched_init_proc(replica_rp)) != OK) {
		panic("unable to initialize RS replica scheduling: %d", s);
	}
	s = sys_privctl(replica_endpoint, SYS_PRIV_YIELD, NULL);
	if (s != OK) {
		panic("unable to yield control to the new RS instance: %d",
			s);
	}
	NOT_REACHABLE;
  }

  return(OK);
}
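/* Editor's note (not in the original source): the self-replication at the end
 * of sef_cb_init_fresh follows the classic fork handshake, summarized here:
 *
 *   pid = srv_fork();
 *   if (pid == 0) {                // new RS: take over via live update
 *       update_service(...);       // swap the old and new instances
 *       cleanup_service(old_rp);   // retire the old instance
 *   } else {                       // old RS: hand control to the replica
 *       sys_privctl(new, SYS_PRIV_SET_SYS, ...);
 *       sys_privctl(new, SYS_PRIV_YIELD, NULL);  // never returns
 *   }
 *
 * Both sides update the replica's slot first, so whichever instance survives
 * has a consistent view of the rproc table.
 */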
/*===========================================================================*
 *				   kmain				     *
 *===========================================================================*/
void kmain(kinfo_t *local_cbi)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register int i, j;

  /* Save a global copy of the boot parameters. */
  memcpy(&kinfo, local_cbi, sizeof(kinfo));
  memcpy(&kmess, kinfo.kmess, sizeof(kmess));

#ifdef __arm__
  /* We want to initialize serial before we do any output. */
  omap3_ser_init();
#endif

  /* We can talk now. */
  printf("MINIX booting\n");

  /* Kernel may use bits of main memory before VM is started. */
  kernel_may_alloc = 1;

  assert(sizeof(kinfo.boot_procs) == sizeof(image));
  memcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs));

  cstart();

  BKL_LOCK();

  DEBUGEXTRA(("main()\n"));

  proc_init();

  if (NR_BOOT_MODULES != kinfo.mbi.mods_count)
	panic("expecting %d boot processes/modules, found %d",
		NR_BOOT_MODULES, kinfo.mbi.mods_count);

  /* Set up proc table entries for processes in boot image. */
  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc;
	proc_nr_t proc_nr;
	int ipc_to_m, kcalls;
	sys_map_t map;

	ip = &image[i];			/* process' attributes */
	DEBUGEXTRA(("initializing %s... ", ip->proc_name));
	rp = proc_addr(ip->proc_nr);	/* get process pointer */
	ip->endpoint = rp->p_endpoint;	/* ipc endpoint */
	make_zero64(rp->p_cpu_time_left);
	if (i < NR_TASKS)		/* name (tasks only) */
		strlcpy(rp->p_name, ip->proc_name, sizeof(rp->p_name));

	if (i >= NR_TASKS) {
		/* Remember this so it can be passed to VM. */
		multiboot_module_t *mb_mod = &kinfo.module_list[i - NR_TASKS];
		ip->start_addr = mb_mod->mod_start;
		ip->len = mb_mod->mod_end - mb_mod->mod_start;
	}

	reset_proc_accounting(rp);

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run
	 * immediately. All the other system processes are inhibited from
	 * running by the RTS_NO_PRIV flag. They can only be scheduled once
	 * the root system process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr) ||
		proc_nr == VM_PROC_NR);
	if (schedulable_proc) {
	    /* Assign privilege structure. Force a static privilege id. */
	    (void) get_priv(rp, static_priv_id(proc_nr));

	    /* Privileges for kernel tasks. */
	    if (proc_nr == VM_PROC_NR) {
		priv(rp)->s_flags = VM_F;
		priv(rp)->s_trap_mask = SRV_T;
		ipc_to_m = SRV_M;
		kcalls = SRV_KC;
		priv(rp)->s_sig_mgr = SELF;
		rp->p_priority = SRV_Q;
		rp->p_quantum_size_ms = SRV_QT;
	    } else if (iskerneln(proc_nr)) {
		/* Privilege flags. */
		priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
		/* Allowed traps. */
		priv(rp)->s_trap_mask = (proc_nr == CLOCK ||
			proc_nr == SYSTEM ? CSK_T : TSK_T);
		ipc_to_m = TSK_M;		/* allowed targets */
		kcalls = TSK_KC;		/* allowed kernel calls */
	    }
	    /* Privileges for the root system process. */
	    else {
		assert(isrootsysn(proc_nr));
		priv(rp)->s_flags = RSYS_F;	/* privilege flags */
		priv(rp)->s_trap_mask = SRV_T;	/* allowed traps */
		ipc_to_m = SRV_M;		/* allowed targets */
		kcalls = SRV_KC;		/* allowed kernel calls */
		priv(rp)->s_sig_mgr = SRV_SM;	/* signal manager */
		rp->p_priority = SRV_Q;		/* priority queue */
		rp->p_quantum_size_ms = SRV_QT;	/* quantum size */
	    }

	    /* Fill in target mask. */
	    memset(&map, 0, sizeof(map));

	    if (ipc_to_m == ALL_M) {
		for (j = 0; j < NR_SYS_PROCS; j++)
			set_sys_bit(map, j);
	    }

	    fill_sendto_mask(rp, &map);

	    /* Fill in kernel call mask. */
	    for (j = 0; j < SYS_CALL_MASK_SIZE; j++) {
		priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
	    }
	} else {
	    /* Don't let the process run for now. */
	    RTS_SET(rp, RTS_NO_PRIV | RTS_NO_QUANTUM);
	}

	/* Arch-specific state initialization. */
	arch_boot_proc(ip, rp);

	/* Scheduling functions depend on proc_ptr pointing somewhere. */
	if (!get_cpulocal_var(proc_ptr))
		get_cpulocal_var(proc_ptr) = rp;

	/* Process isn't scheduled until VM has set up a pagetable for it. */
	if (rp->p_nr != VM_PROC_NR && rp->p_nr >= 0) {
		rp->p_rts_flags |= RTS_VMINHIBIT;
		rp->p_rts_flags |= RTS_BOOTINHIBIT;
	}

	rp->p_rts_flags |= RTS_PROC_STOP;
	rp->p_rts_flags &= ~RTS_SLOT_FREE;
	DEBUGEXTRA(("done\n"));
  }

  /* Update boot procs info for VM. */
  memcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs));

#define IPCNAME(n) { \
	assert((n) >= 0 && (n) <= IPCNO_HIGHEST); \
	assert(!ipc_call_names[n]); \
	ipc_call_names[n] = #n; \
}

  arch_post_init();

  IPCNAME(SEND);
  IPCNAME(RECEIVE);
  IPCNAME(SENDREC);
  IPCNAME(NOTIFY);
  IPCNAME(SENDNB);
  IPCNAME(SENDA);

  /* System and processes initialization. */
  memory_init();
  DEBUGEXTRA(("system_init()... "));
  system_init();
  DEBUGEXTRA(("done\n"));

  /* The bootstrap phase is over, so we can add the physical
   * memory used for it to the free list.
   */
  add_memmap(&kinfo, kinfo.bootstrap_start, kinfo.bootstrap_len);

#ifdef CONFIG_SMP
  if (config_no_apic) {
	BOOT_VERBOSE(printf("APIC disabled, disables SMP, using legacy PIC\n"));
	smp_single_cpu_fallback();
  } else if (config_no_smp) {
	BOOT_VERBOSE(printf("SMP disabled, using legacy PIC\n"));
	smp_single_cpu_fallback();
  } else {
	smp_init();
	/*
	 * If smp_init() returns it means that it failed and we try to finish
	 * single CPU booting.
	 */
	bsp_finish_booting();
  }
#else
  /*
   * If configured for a single CPU, we are already on the kernel stack which
   * we are going to use every time we execute kernel code. We finish booting
   * and we never return here.
   */
  bsp_finish_booting();
#endif

  NOT_REACHABLE;
}
/*===========================================================================*
 *			      do_sigsend				     *
 *===========================================================================*/
int do_sigsend(struct proc * caller, message * m_ptr)
{
/* Handle sys_sigsend, POSIX-style signal handling. */
  struct sigmsg smsg;
  register struct proc *rp;
  struct sigcontext sc, *scp;
  struct sigframe fr, *frp;
  int proc_nr, r;

  if (!isokendpt(m_ptr->SIG_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  /* Get the sigmsg structure into our address space. */
  if ((r = data_copy_vmcheck(caller, caller->p_endpoint,
		(vir_bytes) m_ptr->SIG_CTXT_PTR, KERNEL, (vir_bytes) &smsg,
		(phys_bytes) sizeof(struct sigmsg))) != OK)
	return r;

  /* Compute the user stack pointer where sigcontext will be stored. */
  smsg.sm_stkptr = arch_get_sp(rp);
  scp = (struct sigcontext *) smsg.sm_stkptr - 1;

  /* Copy the registers to the sigcontext structure. */
  memcpy(&sc.sc_regs, (char *) &rp->p_reg, sizeof(sigregs));

#if defined(__i386__)
  sc.trap_style = rp->p_seg.p_kern_trap_style;

  if (sc.trap_style == KTS_NONE) {
	printf("do_sigsend: sigsend an unsaved process\n");
	return EINVAL;
  }

  if (proc_used_fpu(rp)) {
	/* Save the FPU context before copying it to the sig context. */
	save_fpu(rp);
	memcpy(&sc.sc_fpu_state, rp->p_seg.fpu_state, FPU_XFP_SIZE);
  }
#endif

  /* Finish the sigcontext initialization. */
  sc.sc_mask = smsg.sm_mask;
  sc.sc_flags = rp->p_misc_flags & MF_FPU_INITIALIZED;

  /* Copy the sigcontext structure to the user's stack. */
  if ((r = data_copy_vmcheck(caller, KERNEL, (vir_bytes) &sc,
	m_ptr->SIG_ENDPT, (vir_bytes) scp,
	(vir_bytes) sizeof(struct sigcontext))) != OK)
	return r;

  /* Initialize the sigframe structure. */
  frp = (struct sigframe *) scp - 1;
  fr.sf_scpcopy = scp;
  fr.sf_retadr2 = (void (*)()) rp->p_reg.pc;
  fr.sf_fp = rp->p_reg.fp;
  rp->p_reg.fp = (reg_t) &frp->sf_fp;
  fr.sf_scp = scp;

  fpu_sigcontext(rp, &fr, &sc);

  fr.sf_signo = smsg.sm_signo;
  fr.sf_retadr = (void (*)()) smsg.sm_sigreturn;

#if defined(__arm__)
  /* Use the ARM link register to set the return address from the signal
   * handler.
   */
  rp->p_reg.lr = (reg_t) fr.sf_retadr;
  if (rp->p_reg.lr & 1) {
	printf("sigsend: LSB LR makes no sense.\n");
  }

  /* Pass signal handler parameters in registers. */
  rp->p_reg.retreg = (reg_t) fr.sf_signo;
  rp->p_reg.r1 = (reg_t) fr.sf_code;
  rp->p_reg.r2 = (reg_t) fr.sf_scp;
  rp->p_misc_flags |= MF_CONTEXT_SET;
#endif

  /* Copy the sigframe structure to the user's stack. */
  if ((r = data_copy_vmcheck(caller, KERNEL, (vir_bytes) &fr,
	m_ptr->SIG_ENDPT, (vir_bytes) frp,
	(vir_bytes) sizeof(struct sigframe))) != OK)
	return r;

  /* Reset user registers to execute the signal handler. */
  rp->p_reg.sp = (reg_t) frp;
  rp->p_reg.pc = (reg_t) smsg.sm_sighandler;

  /* Signal handler should get clean FPU. */
  rp->p_misc_flags &= ~MF_FPU_INITIALIZED;

  if (!RTS_ISSET(rp, RTS_PROC_STOP)) {
	printf("system: warning: sigsend a running process\n");
	printf("caller stack: ");
	proc_stacktrace(caller);
  }

  return(OK);
}
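/* Editor's note (not in the original source): the user stack layout built by
 * do_sigsend, reconstructed from the pointer arithmetic above
 * (scp = sm_stkptr - 1 sigcontext; frp = scp - 1 sigframe):
 *
 *   smsg.sm_stkptr ->  +---------------------+  <- original user SP
 *              scp ->  |  struct sigcontext  |  saved registers, mask, FPU
 *              frp ->  |  struct sigframe    |  signo, return addrs, scp copy
 *                      +---------------------+  <- new rp->p_reg.sp
 *
 * The handler starts at smsg.sm_sighandler with sp = frp; it returns through
 * sf_retadr (smsg.sm_sigreturn), which invokes sys_sigreturn to restore the
 * saved context.
 */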
/*===========================================================================*
 *			    sef_cb_init_fresh				     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the reincarnation server. */
  struct sigaction sa;
  struct boot_image *ip;
  int s,i,j;
  int nr_image_srvs, nr_image_priv_srvs, nr_uncaught_init_srvs;
  struct rproc *rp;
  struct rprocpub *rpub;
  struct boot_image image[NR_BOOT_PROCS];
  struct mproc mproc[NR_PROCS];
  struct exec header;
  struct boot_image_priv *boot_image_priv;
  struct boot_image_sys *boot_image_sys;
  struct boot_image_dev *boot_image_dev;

  /* See if we run in verbose mode. */
  env_parse("rs_verbose", "d", 0, &rs_verbose, 0, 1);

  /* Initialize the global init descriptor. */
  rinit.rproctab_gid = cpf_grant_direct(ANY, (vir_bytes) rprocpub,
	sizeof(rprocpub), CPF_READ);
  if (!GRANT_VALID(rinit.rproctab_gid)) {
	panic("RS", "unable to create rprocpub table grant",
		rinit.rproctab_gid);
  }

  /* Initialize the global update descriptor. */
  rupdate.flags = 0;

  /* Get a copy of the boot image table. */
  if ((s = sys_getimage(image)) != OK) {
	panic("RS", "unable to get copy of boot image table", s);
  }

  /* Determine the number of system services in the boot image table and
   * compute the size required for the boot image buffer.
   */
  nr_image_srvs = 0;
  boot_image_buffer_size = 0;
  for (i=0; i < NR_BOOT_PROCS; i++) {
	ip = &image[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(ip->endpoint))) {
		continue;
	}
	nr_image_srvs++;

	/* Lookup the corresponding entry in the boot image sys table. */
	boot_image_info_lookup(ip->endpoint, image,
		NULL, NULL, &boot_image_sys, NULL);

	/* If we must keep a copy of this system service, read the header
	 * and increase the size of the boot image buffer.
	 */
	if (boot_image_sys->flags & SF_USE_COPY) {
		if ((s = sys_getaoutheader(&header, i)) != OK) {
			panic("RS", "unable to get copy of a.out header", s);
		}
		boot_image_buffer_size += header.a_hdrlen
			+ header.a_text + header.a_data;
	}
  }

  /* Determine the number of entries in the boot image priv table and make
   * sure it matches the number of system services in the boot image table.
   */
  nr_image_priv_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}
	nr_image_priv_srvs++;
  }
  if (nr_image_srvs != nr_image_priv_srvs) {
	panic("RS", "boot image table and boot image priv table mismatch",
		NO_NUM);
  }

  /* Allocate boot image buffer. */
  if (boot_image_buffer_size > 0) {
	boot_image_buffer = rs_startup_sbrk(boot_image_buffer_size);
	if (boot_image_buffer == (char *) -1) {
		panic("RS", "unable to allocate boot image buffer", NO_NUM);
	}
  }

  /* Reset the system process table. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
	rp->r_flags = 0;
	rp->r_pub = &rprocpub[rp - rproc];
	rp->r_pub->in_use = FALSE;
  }

  /* Initialize the system process table in 4 steps, each of them following
   * the appearance of system services in the boot image priv table.
   * - Step 1: get a copy of the executable image of every system service
   *   that requires it while it is not yet running.
   *   In addition, set privileges, sys properties, and dev properties (if
   *   any) for every system service.
   */
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}

	/* Lookup the corresponding entries in other tables. */
	boot_image_info_lookup(boot_image_priv->endpoint, image,
		&ip, NULL, &boot_image_sys, &boot_image_dev);
	rp = &rproc[boot_image_priv - boot_image_priv_table];
	rpub = rp->r_pub;

	/*
	 * Get a copy of the executable image if required.
	 */
	rp->r_exec_len = 0;
	rp->r_exec = NULL;
	if (boot_image_sys->flags & SF_USE_COPY) {
		exec_image_copy(ip - image, ip, rp);
	}

	/*
	 * Set privileges.
	 */
	/* Get label. */
	strcpy(rpub->label, boot_image_priv->label);

	if (boot_image_priv->endpoint != RS_PROC_NR) {
		/* Force a static priv id for system services in the boot
		 * image.
		 */
		rp->r_priv.s_id = static_priv_id(
			_ENDPOINT_P(boot_image_priv->endpoint));

		/* Initialize privilege bitmaps. */
		rp->r_priv.s_flags = boot_image_priv->flags;   /* priv flags */
		rp->r_priv.s_trap_mask = boot_image_priv->trap_mask; /* traps */
		memcpy(&rp->r_priv.s_ipc_to, &boot_image_priv->ipc_to,
			sizeof(rp->r_priv.s_ipc_to));	       /* targets */

		/* Initialize kernel call mask bitmap from unordered set. */
		fill_call_mask(boot_image_priv->k_calls, NR_SYS_CALLS,
			rp->r_priv.s_k_call_mask, KERNEL_CALL, TRUE);

		/* Set the privilege structure. */
		if ((s = sys_privctl(ip->endpoint, SYS_PRIV_SET_SYS,
			&(rp->r_priv))) != OK) {
			panic("RS", "unable to set privilege structure", s);
		}
	}

	/* Synch the privilege structure with the kernel. */
	if ((s = sys_getpriv(&(rp->r_priv), ip->endpoint)) != OK) {
		panic("RS", "unable to synch privilege structure", s);
	}

	/*
	 * Set sys properties.
	 */
	rpub->sys_flags = boot_image_sys->flags;	/* sys flags */

	/*
	 * Set dev properties.
	 */
	rpub->dev_nr = boot_image_dev->dev_nr;		/* major device number */
	rpub->dev_style = boot_image_dev->dev_style;	/* device style */
	rpub->period = boot_image_dev->period;		/* heartbeat period */

	/* Get process name. */
	strcpy(rpub->proc_name, ip->proc_name);

	/* Get command settings. */
	rp->r_cmd[0]= '\0';
	rp->r_argv[0] = rp->r_cmd;
	rp->r_argv[1] = NULL;
	rp->r_argc = 1;
	rp->r_script[0]= '\0';

	/* Initialize vm call mask bitmap from unordered set. */
	fill_call_mask(boot_image_priv->vm_calls, NR_VM_CALLS,
		rpub->vm_call_mask, VM_RQ_BASE, TRUE);

	/* Get some settings from the boot image table. */
	rp->r_nice = ip->priority;
	rpub->endpoint = ip->endpoint;

	/* Set some defaults. */
	rp->r_uid = 0;			/* root */
	rp->r_check_tm = 0;		/* not checked yet */
	getuptime(&rp->r_alive_tm);	/* currently alive */
	rp->r_stop_tm = 0;		/* not exiting yet */
	rp->r_restarts = 0;		/* no restarts so far */
	rp->r_set_resources = 0;	/* don't set resources */

	/* Mark as in use. */
	rp->r_flags = RS_IN_USE;
	rproc_ptr[_ENDPOINT_P(rpub->endpoint)]= rp;
	rpub->in_use = TRUE;
  }

  /* - Step 2: allow every system service in the boot image to run. */
  nr_uncaught_init_srvs = 0;
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}

	/* Ignore RS. */
	if (boot_image_priv->endpoint == RS_PROC_NR) {
		continue;
	}

	/* Lookup the corresponding slot in the system process table. */
	rp = &rproc[boot_image_priv - boot_image_priv_table];
	rpub = rp->r_pub;

	/* Allow the service to run. */
	if ((s = sys_privctl(rpub->endpoint, SYS_PRIV_ALLOW, NULL)) != OK) {
		panic("RS", "unable to initialize privileges", s);
	}

	/* Initialize service. We assume every service will always get
	 * back to us here at boot time.
	 */
	if (boot_image_priv->flags & SYS_PROC) {
		if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
			panic("RS", "unable to initialize service", s);
		}
		if (rpub->sys_flags & SF_SYNCH_BOOT) {
			/* Catch init ready message now to synchronize. */
			catch_boot_init_ready(rpub->endpoint);
		} else {
			/* Catch init ready message later. */
			nr_uncaught_init_srvs++;
		}
	}
  }

  /* - Step 3: let every system service complete initialization by
   *   catching all the init ready messages left.
   */
  while (nr_uncaught_init_srvs) {
	catch_boot_init_ready(ANY);
	nr_uncaught_init_srvs--;
  }

  /* - Step 4: all the system services in the boot image are now running.
   *   Complete the initialization of the system process table in
   *   collaboration with other system processes.
   */
  if ((s = getsysinfo(PM_PROC_NR, SI_PROC_TAB, mproc)) != OK) {
	panic("RS", "unable to get copy of PM process table", s);
  }
  for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
	boot_image_priv = &boot_image_priv_table[i];

	/* System services only. */
	if (iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
		continue;
	}

	/* Lookup the corresponding slot in the system process table. */
	rp = &rproc[boot_image_priv - boot_image_priv_table];
	rpub = rp->r_pub;

	/* Get pid from PM process table. */
	rp->r_pid = NO_PID;
	for (j = 0; j < NR_PROCS; j++) {
		if (mproc[j].mp_endpoint == rpub->endpoint) {
			rp->r_pid = mproc[j].mp_pid;
			break;
		}
	}
	if (j == NR_PROCS) {
		panic("RS", "unable to get pid", NO_NUM);
	}
  }

  /*
   * Now complete RS initialization process in collaboration with other
   * system services.
   */
  /* Let the rest of the system know about our dynamically allocated
   * buffer.
   */
  if (boot_image_buffer_size > 0) {
	boot_image_buffer = rs_startup_sbrk_synch(boot_image_buffer_size);
	if (boot_image_buffer == (char *) -1) {
		panic("RS", "unable to synch boot image buffer", NO_NUM);
	}
  }

  /* Set alarm to periodically check service status. */
  if (OK != (s=sys_setalarm(RS_DELTA_T, 0)))
	panic("RS", "couldn't set alarm", s);

  /* Install signal handlers. Ask PM to transform signal into message. */
  sa.sa_handler = SIG_MESS;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  if (sigaction(SIGCHLD,&sa,NULL)<0) panic("RS","sigaction failed", errno);
  if (sigaction(SIGTERM,&sa,NULL)<0) panic("RS","sigaction failed", errno);

  /* Initialize the exec pipe. */
  if (pipe(exec_pipe) == -1)
	panic("RS", "pipe failed", errno);
  if (fcntl(exec_pipe[0], F_SETFD,
	fcntl(exec_pipe[0], F_GETFD) | FD_CLOEXEC) == -1) {
	panic("RS", "fcntl set FD_CLOEXEC on pipe input failed", errno);
  }
  if (fcntl(exec_pipe[1], F_SETFD,
	fcntl(exec_pipe[1], F_GETFD) | FD_CLOEXEC) == -1) {
	panic("RS", "fcntl set FD_CLOEXEC on pipe output failed", errno);
  }
  if (fcntl(exec_pipe[0], F_SETFL,
	fcntl(exec_pipe[0], F_GETFL) | O_NONBLOCK) == -1) {
	panic("RS", "fcntl set O_NONBLOCK on pipe input failed", errno);
  }

  /* Map out our own text and data. This is normally done in crtso.o
   * but RS is an exception - we don't get to talk to VM so early on.
   * That's why we override munmap() and munmap_text() in utility.c.
   *
   * _minix_unmapzero() is the same code in crtso.o that normally does
   * it on startup. It's best that it's there as crtso.o knows exactly
   * what the ranges are of the filler data.
   */
  unmap_ok = 1;
  _minix_unmapzero();

  return(OK);
}
/*===========================================================================*
 *				   main					     *
 *===========================================================================*/
PUBLIC int main(void)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register int i, j;
  size_t argsz;			/* size of arguments passed to crtso on stack */

  BKL_LOCK();

  /* Global value to test segment sanity. */
  magictest = MAGICTEST;

  DEBUGEXTRA(("main()\n"));

  proc_init();

  /* Set up proc table entries for processes in boot image. The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment.
   */
  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc;
	proc_nr_t proc_nr;
	int ipc_to_m, kcalls;
	sys_map_t map;

	ip = &image[i];			/* process' attributes */
	DEBUGEXTRA(("initializing %s... ", ip->proc_name));
	rp = proc_addr(ip->proc_nr);	/* get process pointer */
	ip->endpoint = rp->p_endpoint;	/* ipc endpoint */
	make_zero64(rp->p_cpu_time_left);
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */

	reset_proc_accounting(rp);

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run
	 * immediately. All the other system processes are inhibited from
	 * running by the RTS_NO_PRIV flag. They can only be scheduled once
	 * the root system process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
	if (schedulable_proc) {
	    /* Assign privilege structure. Force a static privilege id. */
	    (void) get_priv(rp, static_priv_id(proc_nr));

	    /* Privileges for kernel tasks. */
	    if (iskerneln(proc_nr)) {
		/* Privilege flags. */
		priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
		/* Allowed traps. */
		priv(rp)->s_trap_mask = (proc_nr == CLOCK ||
			proc_nr == SYSTEM ? CSK_T : TSK_T);
		ipc_to_m = TSK_M;		/* allowed targets */
		kcalls = TSK_KC;		/* allowed kernel calls */
	    }
	    /* Privileges for the root system process. */
	    else if (isrootsysn(proc_nr)) {
		priv(rp)->s_flags = RSYS_F;	/* privilege flags */
		priv(rp)->s_trap_mask = SRV_T;	/* allowed traps */
		ipc_to_m = SRV_M;		/* allowed targets */
		kcalls = SRV_KC;		/* allowed kernel calls */
		priv(rp)->s_sig_mgr = SRV_SM;	/* signal manager */
		rp->p_priority = SRV_Q;		/* priority queue */
		rp->p_quantum_size_ms = SRV_QT;	/* quantum size */
	    }
	    /* Privileges for an ordinary process. */
	    else {
		NOT_REACHABLE;
	    }

	    /* Fill in target mask. */
	    memset(&map, 0, sizeof(map));

	    if (ipc_to_m == ALL_M) {
		for (j = 0; j < NR_SYS_PROCS; j++)
			set_sys_bit(map, j);
	    }

	    fill_sendto_mask(rp, &map);

	    /* Fill in kernel call mask. */
	    for (j = 0; j < SYS_CALL_MASK_SIZE; j++) {
		priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
	    }
	} else {
	    /* Don't let the process run for now. */
	    RTS_SET(rp, RTS_NO_PRIV | RTS_NO_QUANTUM);
	}

	rp->p_memmap[T].mem_vir  = ABS2CLICK(ip->memmap.text_vaddr);
	rp->p_memmap[T].mem_phys = ABS2CLICK(ip->memmap.text_paddr);
	rp->p_memmap[T].mem_len  = ABS2CLICK(ip->memmap.text_bytes);
	rp->p_memmap[D].mem_vir  = ABS2CLICK(ip->memmap.data_vaddr);
	rp->p_memmap[D].mem_phys = ABS2CLICK(ip->memmap.data_paddr);
	rp->p_memmap[D].mem_len  = ABS2CLICK(ip->memmap.data_bytes);
	rp->p_memmap[S].mem_phys = ABS2CLICK(ip->memmap.data_paddr +
		ip->memmap.data_bytes + ip->memmap.stack_bytes);
	rp->p_memmap[S].mem_vir  = ABS2CLICK(ip->memmap.data_vaddr +
		ip->memmap.data_bytes + ip->memmap.stack_bytes);
	rp->p_memmap[S].mem_len  = 0;

	/* Set initial register values.  The processor status word for tasks
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes.
	 */
	rp->p_reg.pc = ip->memmap.entry;
	rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down three words
	 * to give crtso.s something to use as "argc", "argv" and "envp".
	 */
	if (isusern(proc_nr)) {		/* user-space process? */
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
			rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		argsz = 3 * sizeof(reg_t);
		rp->p_reg.sp -= argsz;
		phys_memset(rp->p_reg.sp -
			(rp->p_memmap[S].mem_vir << CLICK_SHIFT) +
			(rp->p_memmap[S].mem_phys << CLICK_SHIFT),
			0, argsz);
	}

	/* Scheduling functions depend on proc_ptr pointing somewhere. */
	if (!get_cpulocal_var(proc_ptr))
		get_cpulocal_var(proc_ptr) = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it. VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if (ip->flags & PROC_FULLVM)
		rp->p_rts_flags |= RTS_VMINHIBIT;

	rp->p_rts_flags |= RTS_PROC_STOP;
	rp->p_rts_flags &= ~RTS_SLOT_FREE;
	alloc_segments(rp);
	DEBUGEXTRA(("done\n"));
  }

#define IPCNAME(n) { \
	assert((n) >= 0 && (n) <= IPCNO_HIGHEST); \
	assert(!ipc_call_names[n]); \
	ipc_call_names[n] = #n; \
}

  IPCNAME(SEND);
  IPCNAME(RECEIVE);
  IPCNAME(SENDREC);
  IPCNAME(NOTIFY);
  IPCNAME(SENDNB);
  IPCNAME(SENDA);

  /* Architecture-dependent initialization. */
  DEBUGEXTRA(("arch_init()... "));
  arch_init();
  DEBUGEXTRA(("done\n"));

  /* System and processes initialization. */
  DEBUGEXTRA(("system_init()... "));
  system_init();
  DEBUGEXTRA(("done\n"));

#ifdef CONFIG_SMP
  if (config_no_apic) {
	BOOT_VERBOSE(printf("APIC disabled, disables SMP, using legacy PIC\n"));
	smp_single_cpu_fallback();
  } else if (config_no_smp) {
	BOOT_VERBOSE(printf("SMP disabled, using legacy PIC\n"));
	smp_single_cpu_fallback();
  } else {
	smp_init();
	/*
	 * If smp_init() returns it means that it failed and we try to finish
	 * single CPU booting.
	 */
	bsp_finish_booting();
  }
#else
  /*
   * If configured for a single CPU, we are already on the kernel stack which
   * we are going to use every time we execute kernel code. We finish booting
   * and we never return here.
   */
  bsp_finish_booting();
#endif

  NOT_REACHABLE;
  return 1;
}
/*===========================================================================*
 *				   main					     *
 *===========================================================================*/
PUBLIC void main()
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register struct priv *sp;	/* privilege structure pointer */
  register int i, j, s;
  int hdrindex;			/* index to array of a.out headers */
  phys_clicks text_base;
  vir_clicks text_clicks, data_clicks, st_clicks;
  reg_t ktsb;			/* kernel task stack base */
  struct exec e_hdr;		/* for a copy of an a.out header */

  /* Architecture-dependent initialization. */
  arch_init();

  /* Global value to test segment sanity. */
  magictest = MAGICTEST;

  /* Clear the process table. Announce each slot as empty and set up
   * mappings for proc_addr() and proc_nr() macros. Do the same for the
   * table with privilege structures for the system processes.
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
	rp->p_rts_flags = SLOT_FREE;		/* initialize free slot */
#if DEBUG_SCHED_CHECK
	rp->p_magic = PMAGIC;
#endif
	rp->p_nr = i;				/* proc number from ptr */
	rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = NONE;			/* initialize as free */
	sp->s_id = i;				/* priv structure index */
	ppriv_addr[i] = sp;			/* priv ptr from number */
  }

  /* Set up proc table entries for processes in boot image. The stacks of
   * the kernel tasks are initialized to an array in data space. The stacks
   * of the servers have been added to the data segment by the monitor, so
   * the stack pointer is set to the end of the data segment. All the
   * processes are in low memory on the 8086. On the 386 only the kernel
   * is in low memory, the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int ci;
	bitchunk_t fv;

	ip = &image[i];				/* process' attributes */
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	rp->p_max_priority = ip->priority;	/* max scheduling priority */
	rp->p_priority = ip->priority;		/* current priority */
	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
	rp->p_ticks_left = ip->quantum;		/* current credit */
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
	(void) get_priv(rp, (ip->flags & SYS_PROC)); /* assign structure */
	priv(rp)->s_flags = ip->flags;		/* process flags */
	priv(rp)->s_trap_mask = ip->trap_mask;	/* allowed traps */

	/* Warn about violations of the boot image table order consistency. */
	if (priv_id(rp) != s_nr_to_id(ip->proc_nr) && (ip->flags & SYS_PROC))
		kprintf("Warning: boot image table has wrong process order\n");

	/* Initialize call mask bitmap from unordered set.
	 * A single SYS_ALL_CALLS is a special case - it
	 * means all calls are allowed.
	 */
	if (ip->nr_k_calls == 1 && ip->k_calls[0] == SYS_ALL_CALLS)
		fv = ~0;			/* fill call mask */
	else
		fv = 0;				/* clear call mask */

	for (ci = 0; ci < CALL_MASK_SIZE; ci++)	/* fill or clear call mask */
		priv(rp)->s_k_call_mask[ci] = fv;
	if (!fv)		/* not all full? enter calls bit by bit */
		for (ci = 0; ci < ip->nr_k_calls; ci++)
			SET_BIT(priv(rp)->s_k_call_mask,
				ip->k_calls[ci]-KERNEL_CALL);

	for (j = 0; j < NR_SYS_PROCS && j < BITCHUNK_BITS; j++)
		if (ip->ipc_to & (1 << j))
			set_sendto_bit(rp, j);	/* restrict targets */

	if (iskerneln(proc_nr(rp))) {		/* part of the kernel? */
		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
			*rp->p_priv->s_stack_guard = STACK_GUARD;
		}
		ktsb += ip->stksize;	/* point to high end of stack */
		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
		hdrindex = 0;		/* all use the first a.out header */
	} else {
		hdrindex = 1 + i-NR_TASKS;	/* servers, drivers, INIT */
	}

	/* Architecture-specific way to find out the a.out header of this
	 * boot process.
	 */
	arch_get_aout_headers(hdrindex, &e_hdr);

	/* Convert addresses to clicks and build process memory map. */
	text_base = e_hdr.a_syms >> CLICK_SHIFT;
	text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
	data_clicks = (e_hdr.a_data+e_hdr.a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
	st_clicks= (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	if (!(e_hdr.a_flags & A_SEP)) {
		data_clicks= (e_hdr.a_text+e_hdr.a_data+e_hdr.a_bss +
			CLICK_SIZE-1) >> CLICK_SHIFT;
		text_clicks = 0;	/* common I&D */
	}
	rp->p_memmap[T].mem_phys = text_base;
	rp->p_memmap[T].mem_len = text_clicks;
	rp->p_memmap[D].mem_phys = text_base + text_clicks;
	rp->p_memmap[D].mem_len = data_clicks;
	rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
	rp->p_memmap[S].mem_vir = st_clicks;
	rp->p_memmap[S].mem_len = 0;

	/* Set initial register values.  The processor status word for tasks
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes.
	 */
	rp->p_reg.pc = (reg_t) ip->initial_pc;
	rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer. Take it down one word
	 * to give crtso.s something to use as "argc".
	 */
	if (isusern(proc_nr(rp))) {		/* user-space process? */
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
			rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= sizeof(reg_t);
	}

	/* Scheduling functions depend on proc_ptr pointing somewhere. */
	if (!proc_ptr) proc_ptr = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it. VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if (priv(rp)->s_flags & PROC_FULLVM)
		RTS_SET(rp, VMINHIBIT);

	/* Set ready. The HARDWARE task is never ready. */
	if (rp->p_nr == HARDWARE) RTS_SET(rp, PROC_STOP);
	RTS_UNSET(rp, SLOT_FREE);	/* remove SLOT_FREE and schedule */
	alloc_segments(rp);
  }
/*===========================================================================*
 *			      do_sigsend				     *
 *===========================================================================*/
PUBLIC int do_sigsend(struct proc * caller, message * m_ptr)
{
/* Handle sys_sigsend, POSIX-style signal handling. */
  struct sigmsg smsg;
  register struct proc *rp;
  struct sigcontext sc, *scp;
  struct sigframe fr, *frp;
  int proc_nr, r;

  if (!isokendpt(m_ptr->SIG_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  /* Get the sigmsg structure into our address space. */
  if ((r = data_copy_vmcheck(caller, caller->p_endpoint,
		(vir_bytes) m_ptr->SIG_CTXT_PTR, KERNEL, (vir_bytes) &smsg,
		(phys_bytes) sizeof(struct sigmsg))) != OK)
	return r;

  /* Compute the user stack pointer where sigcontext will be stored. */
  scp = (struct sigcontext *) smsg.sm_stkptr - 1;

  /* Copy the registers to the sigcontext structure. */
  memcpy(&sc.sc_regs, (char *) &rp->p_reg, sizeof(sigregs));

#if (_MINIX_CHIP == _CHIP_INTEL)
  if (proc_used_fpu(rp)) {
	/* Save the FPU context before copying it to the sig context. */
	save_fpu(rp);
	memcpy(&sc.sc_fpu_state, rp->p_fpu_state.fpu_save_area_p,
		FPU_XFP_SIZE);
  }
#endif

  /* Finish the sigcontext initialization. */
  sc.sc_mask = smsg.sm_mask;
  sc.sc_flags = rp->p_misc_flags & MF_FPU_INITIALIZED;

  /* Copy the sigcontext structure to the user's stack. */
  if ((r = data_copy_vmcheck(caller, KERNEL, (vir_bytes) &sc,
	m_ptr->SIG_ENDPT, (vir_bytes) scp,
	(vir_bytes) sizeof(struct sigcontext))) != OK)
	return r;

  /* Initialize the sigframe structure. */
  frp = (struct sigframe *) scp - 1;
  fr.sf_scpcopy = scp;
  fr.sf_retadr2 = (void (*)()) rp->p_reg.pc;
  fr.sf_fp = rp->p_reg.fp;
  rp->p_reg.fp = (reg_t) &frp->sf_fp;
  fr.sf_scp = scp;

  fpu_sigcontext(rp, &fr, &sc);

  fr.sf_signo = smsg.sm_signo;
  fr.sf_retadr = (void (*)()) smsg.sm_sigreturn;

  /* Copy the sigframe structure to the user's stack. */
  if ((r = data_copy_vmcheck(caller, KERNEL, (vir_bytes) &fr,
	m_ptr->SIG_ENDPT, (vir_bytes) frp,
	(vir_bytes) sizeof(struct sigframe))) != OK)
	return r;

  /* Reset user registers to execute the signal handler. */
  rp->p_reg.sp = (reg_t) frp;
  rp->p_reg.pc = (reg_t) smsg.sm_sighandler;

  /* Signal handler should get clean FPU. */
  rp->p_misc_flags &= ~MF_FPU_INITIALIZED;

  if (!RTS_ISSET(rp, RTS_PROC_STOP)) {
	printf("system: warning: sigsend a running process\n");
	printf("caller stack: ");
	proc_stacktrace(caller);
  }

  return(OK);
}
/*===========================================================================*
 *				     main				     *
 *===========================================================================*/
void main(void)
{
/* Start the ball rolling. */
  struct boot_image *ip;	/* boot image pointer */
  register struct proc *rp;	/* process pointer */
  register struct priv *sp;	/* privilege structure pointer */
  register int i, j;
  int hdrindex;			/* index to array of a.out headers */
  phys_clicks text_base;
  vir_clicks text_clicks, data_clicks, st_clicks;
  reg_t ktsb;			/* kernel task stack base */
  struct exec *e_hdr = 0;	/* points to an a.out header */

  /* Global value to test segment sanity. */
  magictest = MAGICTEST;

  /* Clear the process table.  Announce each slot as empty and set up
   * mappings for the proc_addr() and proc_nr() macros.  Do the same for the
   * table with privilege structures for the system processes.
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
	rp->p_rts_flags = RTS_SLOT_FREE;	/* initialize free slot */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
	rp->p_magic = PMAGIC;
#endif
	rp->p_nr = i;				/* proc number from ptr */
	rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = ENDPT_NONE;		/* initialize as free */
	sp->s_id = i;				/* priv structure index */
	ppriv_addr[i] = sp;			/* priv ptr from number */
  }

  /* Set up proc table entries for processes in the boot image.  The stacks
   * of the kernel tasks are initialized to an array in data space.  The
   * stacks of the servers have been added to the data segment by the
   * monitor, so the stack pointer is set to the end of the data segment.
   * All the processes are in low memory on the 8086.  On the 386 only the
   * kernel is in low memory; the rest is loaded in extended memory.
   */

  /* Task stacks. */
  ktsb = (reg_t) t_stack;

  for (i=0; i < NR_BOOT_PROCS; ++i) {
	int schedulable_proc, proc_nr;
	int ipc_to_m, kcalls;

	ip = &image[i];				/* process' attributes */
	rp = proc_addr(ip->proc_nr);		/* get process pointer */
	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
	rp->p_max_priority = ip->priority;	/* max scheduling priority */
	rp->p_priority = ip->priority;		/* current priority */
	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
	rp->p_ticks_left = ip->quantum;		/* current credit */
	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */

	/* See if this process is immediately schedulable.
	 * In that case, set its privileges now and allow it to run.
	 * Only kernel tasks and the root system process get to run
	 * immediately.  All other system processes are inhibited from
	 * running by the RTS_NO_PRIV flag.  They can only be scheduled
	 * once the root system process has set their privileges.
	 */
	proc_nr = proc_nr(rp);
	schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
	if(schedulable_proc) {
		/* Assign privilege structure.  Force a static privilege id. */
		(void) get_priv(rp, static_priv_id(proc_nr));

		/* Privileges for kernel tasks. */
		if(iskerneln(proc_nr)) {
			/* Privilege flags. */
			priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
			/* Allowed traps. */
			priv(rp)->s_trap_mask = (proc_nr == CLOCK ||
				proc_nr == SYSTEM ? CSK_T : TSK_T);
			ipc_to_m = TSK_M;	/* allowed targets */
			kcalls = TSK_KC;	/* allowed kernel calls */
		}
		/* Privileges for the root system process. */
		else if(isrootsysn(proc_nr)) {
			priv(rp)->s_flags = RSYS_F;	/* privilege flags */
			priv(rp)->s_trap_mask = RSYS_T;	/* allowed traps */
			ipc_to_m = RSYS_M;	/* allowed targets */
			kcalls = RSYS_KC;	/* allowed kernel calls */
		}

		/* Fill in target mask. */
		for (j=0; j < NR_SYS_PROCS; j++) {
			if (ipc_to_m & (1 << j))
				set_sendto_bit(rp, j);
			else
				unset_sendto_bit(rp, j);
		}

		/* Fill in kernel call mask. */
		for(j = 0; j < CALL_MASK_SIZE; j++) {
			priv(rp)->s_k_call_mask[j] =
				(kcalls == NO_C ? 0 : (~0));
		}
	}
	else {
		/* Don't let the process run for now. */
		RTS_SET(rp, RTS_NO_PRIV);
	}

	if (iskerneln(proc_nr)) {		/* part of the kernel? */
		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
			*rp->p_priv->s_stack_guard = STACK_GUARD;
		}
		ktsb += ip->stksize;	/* point to high end of stack */
		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
		hdrindex = 0;		/* all use the first a.out header */
	} else {
		hdrindex = 1 + i-NR_TASKS;	/* system/user processes */
	}

	/* Architecture-specific way to find out the a.out header of this
	 * boot process.
	 */
	e_hdr = arch_get_aout_header(hdrindex);

	/* Convert addresses to clicks and build process memory map. */
	text_base = e_hdr->a_syms >> CLICK_SHIFT;
	st_clicks= (e_hdr->a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
	data_clicks = (e_hdr->a_text + e_hdr->a_data + e_hdr->a_bss +
		CLICK_SIZE-1) >> CLICK_SHIFT;
	text_clicks = 0;	/* common I&D */

	rp->p_memmap[T].mem_phys = text_base;
	rp->p_memmap[T].mem_len = text_clicks;
	rp->p_memmap[D].mem_phys = text_base + text_clicks;
	rp->p_memmap[D].mem_len = data_clicks;
	rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
	rp->p_memmap[S].mem_vir = st_clicks;
	rp->p_memmap[S].mem_len = 0;

	/* Patch (override) the non-kernel processes' entry points in the
	 * image table.  The image table is located in kernel/kernel_syms.c.
	 * Kernel processes such as IDLE, SYSTEM, CLOCK and HARDWARE are not
	 * changed because they are part of the kernel and their entry points
	 * are set at compilation time.  For IDLE and HARDWARE the entry
	 * point can be ignored because they never run (RTS_PROC_STOP is set
	 * below).
	 */
	if (!iskerneln(proc_nr(rp)))
		ip->initial_pc = (task_t*)e_hdr->a_entry;

	/* Set initial register values.  The processor status word for tasks
	 * is different from that of other processes because tasks can
	 * access I/O; this is not allowed to less-privileged processes.
	 */
	rp->p_reg.pc = (reg_t) ip->initial_pc;
	rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

	/* Initialize the server stack pointer.  Take it down three words
	 * to give crtso.s something to use as "argc", "argv" and "envp".
	 */
	if (isusern(proc_nr)) {			/* user-space process? */
		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
			rp->p_memmap[S].mem_len) << CLICK_SHIFT;
		rp->p_reg.sp -= 3*sizeof(reg_t);
	}

	/* Scheduling functions depend on proc_ptr pointing somewhere. */
	if(!proc_ptr) proc_ptr = rp;

	/* If this process has its own page table, VM will set the
	 * PT up and manage it.  VM will signal the kernel when it has
	 * done this; until then, don't let it run.
	 */
	if(ip->flags & PROC_FULLVM)
		RTS_SET(rp, RTS_VMINHIBIT);

	/* The IDLE and HARDWARE tasks are never put on a run queue, as
	 * they are never ready to run.
	 */
	if (rp->p_nr == HARDWARE) RTS_SET(rp, RTS_PROC_STOP);
	if (rp->p_nr == IDLE) RTS_SET(rp, RTS_PROC_STOP);
	RTS_UNSET(rp, RTS_SLOT_FREE);	/* remove RTS_SLOT_FREE and schedule */

	alloc_segments(rp);
  } /* for */

  /* Architecture-dependent initialization. */
  arch_init();

#ifdef CONFIG_DEBUG_KERNEL_STATS_PROFILE
  sprofiling = 0;	/* we're not profiling until instructed to */
#endif
  cprof_procs_no = 0;	/* init nr of hash table slots used */

#ifdef CONFIG_IDLE_TSC
  idle_tsc = cvu64(0);
#endif

  vm_running = 0;
  krandom.random_sources = RANDOM_SOURCES;
  krandom.random_elements = RANDOM_ELEMENTS;
  /* Nucleos is now ready.  All boot image processes are on the ready queue.
   * Return to the assembly code to start running the current process.
   */
  bill_ptr = proc_addr(IDLE);	/* it has to point somewhere */
  announce();			/* print Nucleos startup banner */

  /* Enable timer interrupts and the clock task on the boot CPU. */
  if (boot_cpu_init_timer(system_hz)) {
	kernel_panic("FATAL: failed to initialize timer interrupts, "
		"cannot continue without any clock source!", NO_NUM);
  }

  /* Warnings for sanity checks that take time.  These warnings are printed
   * to make it clear that no full release should be done with them enabled.
   */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
  FIXME("CONFIG_DEBUG_KERNEL_SCHED_CHECK enabled");
#endif
#ifdef CONFIG_DEBUG_KERNEL_VMASSERT
  FIXME("CONFIG_DEBUG_KERNEL_VMASSERT enabled");
#endif
#ifdef CONFIG_DEBUG_PROC_CHECK
  FIXME("PROC check enabled");
#endif

  restart();
}
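/* Illustrative sketch, not part of the kernel: how the ipc_to_m bit pattern
 * in main() turns into per-target send permissions.  Each bit j grants the
 * process the right to send to the system process with privilege id j.
 * set_sendto_bit()/unset_sendto_bit() are modeled here with a plain bitmap,
 * and the NR_SYS_PROCS and ipc_to_m values are assumptions for the demo.
 */
#include <stdio.h>

#define NR_SYS_PROCS 32			/* assumed table size */

static unsigned int send_mask;		/* stand-in for the priv send mask */

static void set_sendto_bit(int j)   { send_mask |=  (1u << j); }
static void unset_sendto_bit(int j) { send_mask &= ~(1u << j); }

int main(void)
{
	unsigned int ipc_to_m = 0x0000000F;	/* may send to priv ids 0..3 */
	int j;

	/* Same loop shape as the target-mask fill in main() above. */
	for (j = 0; j < NR_SYS_PROCS; j++) {
		if (ipc_to_m & (1 << j))
			set_sendto_bit(j);
		else
			unset_sendto_bit(j);
	}
	printf("send mask: 0x%08x\n", send_mask);
	return 0;
}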
/*===========================================================================*
 *			       do_sdevio				     *
 *===========================================================================*/
PUBLIC int do_sdevio(struct proc * caller, message *m_ptr)
{
  vir_bytes newoffset;
  endpoint_t newep;
  int proc_nr;
  endpoint_t proc_nr_e = m_ptr->DIO_VEC_ENDPT;
  vir_bytes count = m_ptr->DIO_VEC_SIZE;
  long port = m_ptr->DIO_PORT;
  phys_bytes phys_buf;
  int i, req_type, req_dir, size, nr_io_range;
  struct priv *privp;
  struct io_range *iorp;
  struct proc *destproc;
  int retval;

  /* Allow safe copies and accesses to SELF. */
  if ((m_ptr->DIO_REQUEST & _DIO_SAFEMASK) != _DIO_SAFE &&
	proc_nr_e != SELF) {
	static int first = 1;
	if (first) {
		first = 0;
		printf("do_sdevio: for %d, req %d\n",
			m_ptr->m_source, m_ptr->DIO_REQUEST);
	}
  }

  /* Check if the process endpoint is OK.
   * A driver may directly provide a pointer to a buffer at the user process
   * that initiated the device I/O.  Kernel processes, of course, are denied.
   */
  if (proc_nr_e == SELF)
	proc_nr = _ENDPOINT_P(caller->p_endpoint);
  else if(!isokendpt(proc_nr_e, &proc_nr))
	return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);

  /* Extract direction (in or out) and type (size). */
  req_dir = m_ptr->DIO_REQUEST & _DIO_DIRMASK;
  req_type = m_ptr->DIO_REQUEST & _DIO_TYPEMASK;

  /* Check for 'safe' variants. */
  if((m_ptr->DIO_REQUEST & _DIO_SAFEMASK) == _DIO_SAFE) {
	/* Map grant address to physical address. */
	if(verify_grant(proc_nr_e, caller->p_endpoint,
		(cp_grant_id_t) m_ptr->DIO_VEC_ADDR, count,
		req_dir == _DIO_INPUT ? CPF_WRITE : CPF_READ,
		(vir_bytes) m_ptr->DIO_OFFSET, &newoffset, &newep) != OK) {
		printf("do_sdevio: verify_grant failed\n");
		return EPERM;
	}
	if(!isokendpt(newep, &proc_nr))
		return(EINVAL);
	destproc = proc_addr(proc_nr);
	if ((phys_buf = umap_local(destproc, D,
		(vir_bytes) newoffset, count)) == 0) {
		printf("do_sdevio: umap_local failed\n");
		return(EFAULT);
	}
  } else {
	if(proc_nr != _ENDPOINT_P(caller->p_endpoint)) {
		printf("do_sdevio: unsafe sdevio by %d in %d denied\n",
			caller->p_endpoint, proc_nr_e);
		return EPERM;
	}
	/* Get and check the physical address. */
	if ((phys_buf = umap_local(proc_addr(proc_nr), D,
		(vir_bytes) m_ptr->DIO_VEC_ADDR, count)) == 0)
		return(EFAULT);
	destproc = proc_addr(proc_nr);
  }
  /* The current process must be the target for phys_* to be OK. */
  switch_address_space(destproc);

  switch (req_type) {
  case _DIO_BYTE: size = 1; break;
  case _DIO_WORD: size = 2; break;
  case _DIO_LONG: size = 4; break;
  default: size = 4; break;	/* be conservative */
  }

  privp = priv(caller);
  if (privp && (privp->s_flags & CHECK_IO_PORT)) {
	port = m_ptr->DIO_PORT;
	nr_io_range = privp->s_nr_io_range;
	for (i = 0, iorp = privp->s_io_tab; i < nr_io_range; i++, iorp++) {
		if (port >= iorp->ior_base && port+size-1 <= iorp->ior_limit)
			break;
	}
	if (i >= nr_io_range) {
		printf("do_sdevio: I/O port check failed for proc %d, port 0x%lx\n",
			m_ptr->m_source, port);
		retval = EPERM;
		goto return_error;
	}
  }

  if (port & (size-1)) {
	printf("do_sdevio: unaligned port 0x%lx (size %d)\n", port, size);
	retval = EPERM;
	goto return_error;
  }

  /* Perform device I/O for bytes and words.  Longs are not supported. */
  if (req_dir == _DIO_INPUT) {
	switch (req_type) {
	case _DIO_BYTE: phys_insb(port, phys_buf, count); break;
	case _DIO_WORD: phys_insw(port, phys_buf, count); break;
	default: retval = EINVAL; goto return_error;
	}
  } else if (req_dir == _DIO_OUTPUT) {
	switch (req_type) {
	case _DIO_BYTE: phys_outsb(port, phys_buf, count); break;
	case _DIO_WORD: phys_outsw(port, phys_buf, count); break;
	default: retval = EINVAL; goto return_error;
	}
  } else {
	retval = EINVAL;
	goto return_error;
  }
  retval = OK;

return_error:
  /* Switch back to the address space of the process that made the call. */
  switch_address_space(caller);

  return retval;
}
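/* Illustrative sketch, not part of the kernel: the alignment test used by
 * do_sdevio above.  For a power-of-two access size, (port & (size-1)) is
 * non-zero exactly when the port is not a multiple of the size, which
 * checks alignment without a division.
 */
#include <stdio.h>

static int port_is_aligned(long port, int size)
{
	/* size must be a power of two (1, 2 or 4 here). */
	return (port & (size - 1)) == 0;
}

int main(void)
{
	printf("%d\n", port_is_aligned(0x64, 1));	/* 1: bytes always fit */
	printf("%d\n", port_is_aligned(0x64, 2));	/* 1: even port, word OK */
	printf("%d\n", port_is_aligned(0x65, 2));	/* 0: odd port, word bad */
	printf("%d\n", port_is_aligned(0x66, 4));	/* 0: not 4-aligned */
	return 0;
}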
/*===========================================================================*
 *			       do_getinfo				     *
 *===========================================================================*/
PUBLIC int do_getinfo(struct proc * caller, message * m_ptr)
{
/* Request system information to be copied to the caller's address space.
 * This call simply copies entire data structures to the caller.
 */
  size_t length;
  vir_bytes src_vir;
  int nr_e, nr, r;
  int wipe_rnd_bin = -1;
  struct proc *p;
#if !defined(__ELF__)
  struct exec e_hdr;
#endif

  /* Set source address and length based on request type. */
  switch (m_ptr->I_REQUEST) {
  case GET_MACHINE: {
	length = sizeof(struct machine);
	src_vir = (vir_bytes) &machine;
	break;
  }
  case GET_KINFO: {
	length = sizeof(struct kinfo);
	src_vir = (vir_bytes) &kinfo;
	break;
  }
  case GET_LOADINFO: {
	length = sizeof(struct loadinfo);
	src_vir = (vir_bytes) &kloadinfo;
	break;
  }
  case GET_CPUINFO: {
	length = sizeof(cpu_info);
	src_vir = (vir_bytes) &cpu_info;
	break;
  }
  case GET_HZ: {
	length = sizeof(system_hz);
	src_vir = (vir_bytes) &system_hz;
	break;
  }
  case GET_IMAGE: {
	length = sizeof(struct boot_image) * NR_BOOT_PROCS;
	src_vir = (vir_bytes) image;
	break;
  }
  case GET_IRQHOOKS: {
	length = sizeof(struct irq_hook) * NR_IRQ_HOOKS;
	src_vir = (vir_bytes) irq_hooks;
	break;
  }
  case GET_PROCTAB: {
	update_idle_time();
	length = sizeof(struct proc) * (NR_PROCS + NR_TASKS);
	src_vir = (vir_bytes) proc;
	break;
  }
  case GET_PRIVTAB: {
	length = sizeof(struct priv) * (NR_SYS_PROCS);
	src_vir = (vir_bytes) priv;
	break;
  }
  case GET_PROC: {
	nr_e = (m_ptr->I_VAL_LEN2_E == SELF) ?
		caller->p_endpoint : m_ptr->I_VAL_LEN2_E;
	if(!isokendpt(nr_e, &nr)) return EINVAL;	/* validate request */
	length = sizeof(struct proc);
	src_vir = (vir_bytes) proc_addr(nr);
	break;
  }
  case GET_PRIV: {
	nr_e = (m_ptr->I_VAL_LEN2_E == SELF) ?
		caller->p_endpoint : m_ptr->I_VAL_LEN2_E;
	if(!isokendpt(nr_e, &nr)) return EINVAL;	/* validate request */
	length = sizeof(struct priv);
	src_vir = (vir_bytes) priv_addr(nr_to_id(nr));
	break;
  }
  case GET_REGS: {
	nr_e = (m_ptr->I_VAL_LEN2_E == SELF) ?
		caller->p_endpoint : m_ptr->I_VAL_LEN2_E;
	if(!isokendpt(nr_e, &nr)) return EINVAL;	/* validate request */
	p = proc_addr(nr);
	length = sizeof(p->p_reg);
	src_vir = (vir_bytes) &p->p_reg;
	break;
  }
  case GET_WHOAMI: {
	int len;
	/* GET_WHOAMI uses m3 and returns the info in the message itself. */
	m_ptr->GIWHO_EP = caller->p_endpoint;
	len = MIN(sizeof(m_ptr->GIWHO_NAME), sizeof(caller->p_name))-1;
	strncpy(m_ptr->GIWHO_NAME, caller->p_name, len);
	m_ptr->GIWHO_NAME[len] = '\0';
	m_ptr->GIWHO_PRIVFLAGS = priv(caller)->s_flags;
	return OK;
  }
  case GET_MONPARAMS: {
	src_vir = (vir_bytes) params_buffer;
	length = sizeof(params_buffer);
	break;
  }
  case GET_RANDOMNESS: {
	static struct k_randomness copy;	/* copy to keep counters */
	int i;

	copy = krandom;
	for (i = 0; i < RANDOM_SOURCES; i++) {
		krandom.bin[i].r_size = 0;	/* invalidate random data */
		krandom.bin[i].r_next = 0;
	}
	length = sizeof(copy);
	src_vir = (vir_bytes) &copy;
	break;
  }
  case GET_RANDOMNESS_BIN: {
	int bin = m_ptr->I_VAL_LEN2_E;

	if(bin < 0 || bin >= RANDOM_SOURCES) {
		printf("SYSTEM: GET_RANDOMNESS_BIN: %d out of range\n", bin);
		return EINVAL;
	}
	if(krandom.bin[bin].r_size < RANDOM_ELEMENTS)
		return ENOENT;
	length = sizeof(krandom.bin[bin]);
	src_vir = (vir_bytes) &krandom.bin[bin];
	wipe_rnd_bin = bin;
	break;
  }
  case GET_KMESSAGES: {
	length = sizeof(struct kmessages);
	src_vir = (vir_bytes) &kmess;
	break;
  }
  case GET_IRQACTIDS: {
	length = sizeof(irq_actids);
	src_vir = (vir_bytes) irq_actids;
	break;
  }
  case GET_IDLETSC: {
	struct proc * idl;

	update_idle_time();
	idl = proc_addr(IDLE);
	length = sizeof(idl->p_cycles);
	src_vir = (vir_bytes) &idl->p_cycles;
	break;
  }
#if !defined(__ELF__)
  case GET_AOUTHEADER: {
	int hdrindex, index = m_ptr->I_VAL_LEN2_E;
	if(index < 0 || index >= NR_BOOT_PROCS) {
		return EINVAL;
	}
	if (iskerneln(_ENDPOINT_P(image[index].endpoint))) {
		hdrindex = 0;
	} else {
		hdrindex = 1 + index-NR_TASKS;
	}
	arch_get_aout_headers(hdrindex, &e_hdr);
	length = sizeof(e_hdr);
	src_vir = (vir_bytes) &e_hdr;
	break;
  }
#endif
  default:
	printf("do_getinfo: invalid request %d\n", m_ptr->I_REQUEST);
	return(EINVAL);
  }

  /* Try to make the actual copy for the requested data. */
  if (m_ptr->I_VAL_LEN > 0 && length > m_ptr->I_VAL_LEN)
	return (E2BIG);
  r = data_copy_vmcheck(caller, KERNEL, src_vir, caller->p_endpoint,
	(vir_bytes) m_ptr->I_VAL_PTR, length);
  if(r != OK) return r;

  if(wipe_rnd_bin >= 0 && wipe_rnd_bin < RANDOM_SOURCES) {
	krandom.bin[wipe_rnd_bin].r_size = 0;
	krandom.bin[wipe_rnd_bin].r_next = 0;
  }

  return(OK);
}
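/* Illustrative sketch, not part of the kernel: the buffer-size convention
 * do_getinfo applies before copying.  A caller-supplied length of 0 means
 * no limit (old callers); otherwise the requested structure must fit in
 * the caller's buffer or the call fails with E2BIG.  The OK constant is a
 * stand-in for the kernel's definition.
 */
#include <stdio.h>
#include <errno.h>

#define OK 0	/* assumed stand-in for the kernel's OK */

static int check_len(size_t length, size_t caller_len)
{
	if (caller_len > 0 && length > caller_len)
		return E2BIG;	/* struct would overflow caller's buffer */
	return OK;
}

int main(void)
{
	printf("%d\n", check_len(128, 0));		/* OK: no limit given */
	printf("%d\n", check_len(128, 256));		/* OK: fits */
	printf("%d\n", check_len(128, 64) == E2BIG);	/* 1: too big */
	return 0;
}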