/*===========================================================================* * map_service * *===========================================================================*/ int map_service(struct rprocpub *rpub) { /* Map a new service by storing its device driver properties. */ int r, slot; struct fproc *rfp; if (IS_RPUB_BOOT_USR(rpub)) return(OK); /* Process is a service */ if (isokendpt(rpub->endpoint, &slot) != OK) { printf("VFS: can't map service with unknown endpoint %d\n", rpub->endpoint); return(EINVAL); } rfp = &fproc[slot]; rfp->fp_flags |= FP_SRV_PROC; /* Not a driver, nothing more to do. */ if (rpub->dev_nr == NO_DEV) return(OK); /* Map driver. */ r = map_driver(rpub->label, rpub->dev_nr, rpub->endpoint); if(r != OK) return(r); return(OK); }
/*===========================================================================*
 *				generic_handler				     *
 *===========================================================================*/
static int generic_handler(irq_hook_t * hook)
{
/* This function handles hardware interrupt in a simple and generic way. All
 * interrupts are transformed into messages to a driver. The IRQ line will be
 * reenabled if the policy says so.
 * Returns nonzero iff the hook's policy requests the IRQ to be reenabled.
 */
  int proc_nr;

  /* As a side-effect, the interrupt handler gathers random information by
   * timestamping the interrupt events. This is used for /dev/random.
   */
  get_randomness(&krandom, hook->irq);

  /* Check if the handler is still alive.
   * If it's dead, this should never happen, as processes that die
   * automatically get their interrupt hooks unhooked.
   */
  if(!isokendpt(hook->proc_nr_e, &proc_nr))
	panic("invalid interrupt handler: %d", hook->proc_nr_e);

  /* Add a bit for this interrupt to the process' pending interrupts. When
   * sending the notification message, this bit map will be magically set
   * as an argument.
   */
  priv(proc_addr(proc_nr))->s_int_pending |= (1 << hook->notify_id);

  /* Build notification message and return. */
  mini_notify(proc_addr(HARDWARE), hook->proc_nr_e);
  return(hook->policy & IRQ_REENABLE);
}
/*===========================================================================*
 *				do_profbuf				     *
 *===========================================================================*/
int do_profbuf(struct proc * caller, message * m_ptr)
{
/* This kernel call is used by profiled system processes when Call
 * Profiling is enabled. It is called on the first execution of procentry.
 * By means of this kernel call, the profiled processes inform the kernel
 * about the location of their profiling table and the control structure
 * which is used to enable the kernel to have the tables cleared.
 * Returns OK, EDEADSRCDST on a bad caller endpoint, or ENOSPC when the
 * registration table is full.
 */
  int proc_nr;
  struct proc *rp;

  /* Store process name, control struct, table locations. */
  if(!isokendpt(caller->p_endpoint, &proc_nr))
	return EDEADSRCDST;

  /* No room to register yet another profiled process. */
  if(cprof_procs_no >= NR_SYS_PROCS)
	return ENOSPC;

  rp = proc_addr(proc_nr);

  /* Record the caller's endpoint and name, plus the caller-space virtual
   * addresses of its profiling control structure and buffer.
   */
  cprof_proc_info[cprof_procs_no].endpt = caller->p_endpoint;
  cprof_proc_info[cprof_procs_no].name = rp->p_name;

  cprof_proc_info[cprof_procs_no].ctl_v = (vir_bytes) m_ptr->PROF_CTL_PTR;
  cprof_proc_info[cprof_procs_no].buf_v = (vir_bytes) m_ptr->PROF_MEM_PTR;

  cprof_procs_no++;

  return OK;
}
/* Print a process, its stack trace, and recursively the chain of processes
 * it is blocked on, indenting one "> " per dependency level.
 */
static void print_proc_depends(struct proc *pp, const int level)
{
	struct proc *depproc = NULL;
	endpoint_t dep;
	/* Emit 'level' markers so the dependency chain reads as a tree. */
#define COL { int i; for(i = 0; i < level; i++) printf("> "); }

	/* A chain longer than NR_PROCS necessarily contains a cycle. */
	if(level >= NR_PROCS) {
		printf("loop??\n");
		return;
	}

	COL

	print_proc(pp);

	COL
	proc_stacktrace(pp);

	/* Find the process pp is blocked on, if any, and recurse into it.
	 * NONE/ANY are not real endpoints, and an empty slot means the
	 * dependency died; in those cases stop here.
	 */
	dep = P_BLOCKEDON(pp);
	if(dep != NONE && dep != ANY) {
		int procno;
		if(isokendpt(dep, &procno)) {
			depproc = proc_addr(procno);
			if(isemptyp(depproc))
				depproc = NULL;
		}
		if (depproc)
			print_proc_depends(depproc, level+1);
	}
}
/* Print a human-readable representation of an endpoint. */
static void print_endpoint(endpoint_t ep)
{
	int slot;
	struct proc *pp;

	/* Special pseudo-endpoints first. */
	if (ep == ANY) {
		printf("ANY");
		return;
	}
	if (ep == SELF) {
		printf("SELF");
		return;
	}
	if (ep == NONE) {
		printf("NONE");
		return;
	}

	/* A regular endpoint: resolve it to a process slot. */
	if (!isokendpt(ep, &slot)) {
		printf("??? %d\n", ep);
		return;
	}

	pp = proc_addr(slot);
	if (isemptyp(pp))
		printf("??? empty slot %d\n", slot);
	else
		print_proc_name(pp);
}
/*===========================================================================*
 *				do_nice					     *
 *===========================================================================*/
PUBLIC int do_nice(message *m_ptr)
{
/* Change process priority or stop the process. */
  int proc_nr, pri, new_q ;
  register struct proc *rp;

  /* Extract the message parameters and do sanity checking. */
  if(!isokendpt(m_ptr->PR_ENDPT, &proc_nr)) return EINVAL;
  if (iskerneln(proc_nr)) return(EPERM);	/* kernel tasks may not be reniced */
  pri = m_ptr->PR_PRIORITY;
  rp = proc_addr(proc_nr);

  /* The value passed in is currently between PRIO_MIN and PRIO_MAX.
   * We have to scale this between MIN_USER_Q and MAX_USER_Q to match
   * the kernel's scheduling queues.
   */
  if (pri < PRIO_MIN || pri > PRIO_MAX) return(EINVAL);

  new_q = MAX_USER_Q + (pri-PRIO_MIN) * (MIN_USER_Q-MAX_USER_Q+1) /
	(PRIO_MAX-PRIO_MIN+1);
  if (new_q < MAX_USER_Q) new_q = MAX_USER_Q;	/* shouldn't happen */
  if (new_q > MIN_USER_Q) new_q = MIN_USER_Q;	/* shouldn't happen */

  /* Make sure the process is not running while changing its priority.
   * Put the process back in its new queue if it is runnable.
   */
  RTS_LOCK_SET(rp, SYS_LOCK);
  rp->p_max_priority = rp->p_priority = new_q;
  RTS_LOCK_UNSET(rp, SYS_LOCK);

  return(OK);
}
/*===========================================================================*
 *			      do_setmcontext				     *
 *===========================================================================*/
int do_setmcontext(struct proc * caller, message * m_ptr)
{
/* Set machine context of a process.
 * The context is copied in from the requesting endpoint's address space;
 * on i386 the FPU state is restored only when the context says it was saved.
 */

  register struct proc *rp;
  int proc_nr, r;
  mcontext_t mc;

  /* The target endpoint must be valid. */
  if (!isokendpt(m_ptr->m_lsys_krn_sys_setmcontext.endpt, &proc_nr)) return(EINVAL);
  rp = proc_addr(proc_nr);

  /* Get the mcontext structure into our address space.  */
  if ((r = data_copy(m_ptr->m_lsys_krn_sys_setmcontext.endpt,
		m_ptr->m_lsys_krn_sys_setmcontext.ctx_ptr, KERNEL,
		(vir_bytes) &mc, (phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

#if defined(__i386__)
  /* Copy FPU state */
  if (mc.mc_flags & _MC_FPU_SAVED) {
	rp->p_misc_flags |= MF_FPU_INITIALIZED;
	assert(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
	memcpy(rp->p_seg.fpu_state, &(mc.__fpregs.__fp_reg_set),
		FPU_XFP_SIZE);
  } else
	rp->p_misc_flags &= ~MF_FPU_INITIALIZED;
  /* force reloading FPU in either case */
  release_fpu(rp);
#endif

  return(OK);
}
/*===========================================================================*
 *				do_runctl				     *
 *===========================================================================*/
int do_runctl(struct proc * caller, message * m_ptr)
{
/* Control a process's RTS_PROC_STOP flag. Used for process management.
 * If the process is queued sending a message or stopped for system call
 * tracing, and the RC_DELAY request flag is given, set MF_SIG_DELAY instead
 * of RTS_PROC_STOP, and send a SIGSNDELAY signal later when the process is done
 * sending (ending the delay). Used by PM for safe signal delivery.
 */
  int proc_nr, action, flags;
  register struct proc *rp;

  /* Extract the message parameters and do sanity checking. */
  if (!isokendpt(m_ptr->RC_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);	/* kernel tasks can't be stopped */
  rp = proc_addr(proc_nr);

  action = m_ptr->RC_ACTION;
  flags = m_ptr->RC_FLAGS;

  /* Is the target sending or syscall-traced? Then set MF_SIG_DELAY instead.
   * Do this only when the RC_DELAY flag is set in the request flags field.
   * The process will not become runnable before PM has called SYS_ENDKSIG.
   * Note that asynchronous messages are not covered: a process using SENDA
   * should not also install signal handlers *and* expect POSIX compliance.
   */
  if (action == RC_STOP && (flags & RC_DELAY)) {
	if (RTS_ISSET(rp, RTS_SENDING) || (rp->p_misc_flags & MF_SC_DEFER))
		rp->p_misc_flags |= MF_SIG_DELAY;

	/* EBUSY tells the caller the stop has been deferred, not applied. */
	if (rp->p_misc_flags & MF_SIG_DELAY)
		return (EBUSY);
  }

  /* Either set or clear the stop flag. */
  switch (action) {
  case RC_STOP:
#if CONFIG_SMP
	  /* check if we must stop a process on a different CPU */
	  if (rp->p_cpu != cpuid) {
		  smp_schedule_stop_proc(rp);
		  break;
	  }
#endif
	  RTS_SET(rp, RTS_PROC_STOP);
	  break;
  case RC_RESUME:
	  assert(RTS_ISSET(rp, RTS_PROC_STOP));
	  RTS_UNSET(rp, RTS_PROC_STOP);
	  break;
  default:
	  return(EINVAL);
  }

  return(OK);
}
/*===========================================================================*
 *				do_vtimer				     *
 *===========================================================================*/
int do_vtimer(struct proc * caller, message * m_ptr)
{
/* Set and/or retrieve the value of one of a process' virtual timers. */
  struct proc *rp;		/* pointer to process the timer belongs to */
  register int pt_flag;		/* the misc on/off flag for the req.d timer */
  register clock_t *pt_left;	/* pointer to the process' ticks-left field */
  clock_t old_value;		/* the previous number of ticks left */
  int proc_nr, proc_nr_e;

  /* The requesting process must be privileged. */
  if (! (priv(caller)->s_flags & SYS_PROC)) return(EPERM);

  if (m_ptr->VT_WHICH != VT_VIRTUAL && m_ptr->VT_WHICH != VT_PROF)
	return(EINVAL);

  /* The target process must be valid. SELF means the caller itself. */
  proc_nr_e = (m_ptr->VT_ENDPT == SELF) ? caller->p_endpoint : m_ptr->VT_ENDPT;
  if (!isokendpt(proc_nr_e, &proc_nr)) return(EINVAL);
  rp = proc_addr(proc_nr);

  /* Determine which flag and which field in the proc structure we want to
   * retrieve and/or modify. This saves us having to differentiate between
   * VT_VIRTUAL and VT_PROF multiple times below.
   */
  if (m_ptr->VT_WHICH == VT_VIRTUAL) {
	pt_flag = MF_VIRT_TIMER;
	pt_left = &rp->p_virt_left;
  } else { /* VT_PROF */
	pt_flag = MF_PROF_TIMER;
	pt_left = &rp->p_prof_left;
  }

  /* Retrieve the old value. A disabled timer reads as zero; a running
   * timer that has underrun is clamped to zero as well.
   */
  if (rp->p_misc_flags & pt_flag) {
	old_value = *pt_left;

	if (old_value < 0) old_value = 0;
  } else {
	old_value = 0;
  }

  if (m_ptr->VT_SET) {
	rp->p_misc_flags &= ~pt_flag;	/* disable virtual timer */

	if (m_ptr->VT_VALUE > 0) {
		*pt_left = m_ptr->VT_VALUE;	/* set new timer value */
		rp->p_misc_flags |= pt_flag;	/* (re)enable virtual timer */
	} else {
		*pt_left = 0;			/* clear timer value */
	}
  }

  /* Hand the previous value back to the caller in the same message. */
  m_ptr->VT_VALUE = old_value;

  return(OK);
}
/*===========================================================================*
 *                              do_mount                                     *
 *===========================================================================*/
PUBLIC int do_mount()
{
/* Perform the mount(name, mfile, mount_flags) system call. */
  endpoint_t fs_e;
  int r, slot, rdonly, nodev;
  char fullpath[PATH_MAX];
  char mount_label[LABEL_MAX];
  dev_t dev;

  /* Only the super-user may do MOUNT. */
  if (!super_user) return(EPERM);

  /* FS process' endpoint number */
  if (m_in.mount_flags & MS_LABEL16) {
	/* Get the label from the caller, and ask DS for the endpoint. */
	r = sys_datacopy(who_e, (vir_bytes) m_in.fs_label, SELF,
		(vir_bytes) mount_label, (phys_bytes) sizeof(mount_label));
	if (r != OK) return(r);

	/* Ensure NUL termination regardless of what the caller sent. */
	mount_label[sizeof(mount_label)-1] = 0;

	r = ds_retrieve_label_endpt(mount_label, &fs_e);
	if (r != OK) return(r);
  } else {
	/* Legacy support: get the endpoint from the request itself. */
	fs_e = (endpoint_t) m_in.fs_label;
	mount_label[0] = 0;
  }

  /* Sanity check on process number. */
  if (isokendpt(fs_e, &slot) != OK) return(EINVAL);

  /* Should the file system be mounted read-only? */
  rdonly = (m_in.mount_flags & MS_RDONLY);

  /* A null string for block special device means don't use a device at all. */
  nodev = (m_in.name1_length == 0);
  if (!nodev) {
	/* If 'name' is not for a block special file, return error. */
	if (fetch_name(m_in.name1, m_in.name1_length, M1, fullpath) != OK)
		return(err_code);
	if ((dev = name_to_dev(FALSE /*allow_mountpt*/, fullpath)) == NO_DEV)
		return(err_code);
  } else {
	/* Find a free pseudo-device as substitute for an actual device. */
	if ((dev = find_free_nonedev()) == NO_DEV)
		return(err_code);
  }

  /* Fetch the name of the mountpoint */
  if (fetch_name(m_in.name2, m_in.name2_length, M1, fullpath) != OK)
	return(err_code);

  /* Do the actual job */
  return mount_fs(dev, fullpath, fs_e, rdonly, mount_label);
}
/*===========================================================================* * do_mapdriver * *===========================================================================*/ int do_mapdriver(void) { /* Create a device->driver mapping. RS will tell us which major is driven by * this driver, what type of device it is (regular, TTY, asynchronous, clone, * etc), and its label. This label is registered with DS, and allows us to * retrieve the driver's endpoint. */ int r, slot; devmajor_t major; endpoint_t endpoint; vir_bytes label_vir; size_t label_len; char label[LABEL_MAX]; struct fproc *rfp; /* Only RS can map drivers. */ if (who_e != RS_PROC_NR) return(EPERM); label_vir = job_m_in.m_lsys_vfs_mapdriver.label; label_len = job_m_in.m_lsys_vfs_mapdriver.labellen; major = job_m_in.m_lsys_vfs_mapdriver.major; /* Get the label */ if (label_len > sizeof(label)) { /* Can we store this label? */ printf("VFS: do_mapdriver: label too long\n"); return(EINVAL); } r = sys_vircopy(who_e, label_vir, SELF, (vir_bytes) label, label_len, CP_FLAG_TRY); if (r != OK) { printf("VFS: do_mapdriver: sys_vircopy failed: %d\n", r); return(EINVAL); } if (label[label_len-1] != '\0') { printf("VFS: do_mapdriver: label not null-terminated\n"); return(EINVAL); } /* Now we know how the driver is called, fetch its endpoint */ r = ds_retrieve_label_endpt(label, &endpoint); if (r != OK) { printf("VFS: do_mapdriver: label '%s' unknown\n", label); return(EINVAL); } /* Process is a service */ if (isokendpt(endpoint, &slot) != OK) { printf("VFS: can't map driver to unknown endpoint %d\n", endpoint); return(EINVAL); } rfp = &fproc[slot]; rfp->fp_flags |= FP_SRV_PROC; /* Try to update device mapping. */ return map_driver(label, major, endpoint); }
/*===========================================================================*
 *				do_rt_set				     *
 *===========================================================================*/
PUBLIC int do_rt_set(message *m_ptr)
{
/* Transform a normal user process into a real-time process */
  struct proc *rp;	/* pointer to process that wants to be real-time */
  int proc_nr;		/* process number of process that wants to be real-time */

  /* if scheduler is undefined we cannot
   * make processes real-time
   */
  if (rt_sched == SCHED_UNDEFINED) {
	return (EPERM);
  }

  /* if rt_sched is not equal to the
   * scheduler defined in the message
   * we cannot make this process real-time
   */
  if (rt_sched != m_ptr->RT_SCHED) {
	return (EINVAL);
  }

  /* check if endpoint is valid and convert endpoint
   * to process number stored in proc_nr
   */
  if (! isokendpt(m_ptr->RT_ENDPT, &proc_nr)) {
	return (EINVAL);
  }

  /* get pointer to process from process number */
  rp = proc_addr(proc_nr);

  /* a process that is already real-time may
   * not call this function.
   */
  if (is_rtp(rp)) {
	return (EPERM);
  }

  /* Dispatch to the right function.
   * Each scheduler type has its own function.
   */
  switch (rt_sched) {
	case SCHED_RM:
		return do_rt_set_rm(m_ptr, rp);
	case SCHED_EDF:
		return do_rt_set_edf(m_ptr, rp);
	default:
		return (EINVAL);
  }

  /* should not be reached; kept so the compiler sees a return on
   * every path through the function.
   */
  return (EPERM);
}
/*===========================================================================*
 *			      do_sigreturn				     *
 *===========================================================================*/
PUBLIC int do_sigreturn(struct proc * caller, message * m_ptr)
{
/* POSIX style signals require sys_sigreturn to put things in order before
 * the signalled process can resume execution
 */
  struct sigcontext sc;
  register struct proc *rp;
  int proc_nr, r;

  if (! isokendpt(m_ptr->SIG_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);	/* kernel tasks have no signals */
  rp = proc_addr(proc_nr);

  /* Copy in the sigcontext structure. */
  if((r=data_copy(m_ptr->SIG_ENDPT, (vir_bytes) m_ptr->SIG_CTXT_PTR,
	KERNEL, (vir_bytes) &sc, sizeof(struct sigcontext))) != OK)
	return r;

  /* Restore user bits of psw from sc, maintain system bits from proc. */
  sc.sc_psw  =  (sc.sc_psw & X86_FLAGS_USER) |
	(rp->p_reg.psw & ~X86_FLAGS_USER);

#if (_MINIX_CHIP == _CHIP_INTEL)
  /* Don't panic kernel if user gave bad selectors: overwrite the
   * user-supplied segment registers with the process' current ones.
   */
  sc.sc_cs = rp->p_reg.cs;
  sc.sc_ds = rp->p_reg.ds;
  sc.sc_es = rp->p_reg.es;
  sc.sc_ss = rp->p_reg.ss;
#if _WORD_SIZE == 4
  sc.sc_fs = rp->p_reg.fs;
  sc.sc_gs = rp->p_reg.gs;
#endif
#endif

  /* Restore the registers. */
  memcpy(&rp->p_reg, &sc.sc_regs, sizeof(sigregs));
#if (_MINIX_CHIP == _CHIP_INTEL)
  if(sc.sc_flags & MF_FPU_INITIALIZED)
  {
	memcpy(rp->p_fpu_state.fpu_save_area_p, &sc.sc_fpu_state,
		FPU_XFP_SIZE);
	rp->p_misc_flags |=  MF_FPU_INITIALIZED; /* Restore math usage flag. */
	/* force reloading FPU */
	if (fpu_owner == rp)
		release_fpu();
  }
#endif

  rp->p_misc_flags |= MF_CONTEXT_SET;

  return(OK);
}
/**
 * Get the size of a string specified by linear address.
 * @proc process address
 * @s The string to measure (linear address).
 * @maxlen The maximum valid length
 *
 * Get the size of a NUL-terminated string.
 *
 * Returns the size of the string _including_ the terminating NULL.
 * On kernel exception, returns 0.
 * If the string is too long, returns a value greater than @maxlen.
 */
long do_strnlen(kipc_msg_t *m_ptr)
{
	int slot;

	/* All three parameters must be sane before touching user memory. */
	if (!m_ptr->STRNLEN_STR)
		return -EINVAL;
	if (m_ptr->STRNLEN_MAXLEN <= 0)
		return -EINVAL;
	if (!isokendpt(m_ptr->STRNLEN_PROC_E, &slot))
		return -EINVAL;

	return strnlen_user(proc_addr(slot), m_ptr->STRNLEN_STR,
			m_ptr->STRNLEN_MAXLEN);
}
/*===========================================================================*
 *				do_newmap				     *
 *===========================================================================*/
int do_newmap(struct proc * caller, message * m_ptr)
{
/* Handle sys_newmap().  Fetch the memory map. */
  struct mem_map *map_ptr;	/* virtual address of map inside caller */
  int slot;

  /* Validate the target: it must be a live endpoint and not a kernel task. */
  if (! isokendpt(m_ptr->PR_ENDPT, &slot))
	return(EINVAL);
  if (iskerneln(slot))
	return(EPERM);

  map_ptr = (struct mem_map *) m_ptr->PR_MEM_PTR;
  return newmap(caller, proc_addr(slot), map_ptr);
}
/*===========================================================================*
 *			        do_iopenable				     *
 *===========================================================================*/
int do_iopenable(struct proc * caller, message * m_ptr)
{
  int slot;

#if 1 /* ENABLE_USERPRIV && ENABLE_USERIOPL */
  /* SELF refers to the caller; anything else must be a valid endpoint. */
  if (m_ptr->IOP_ENDPT == SELF) {
	slot = _ENDPOINT_P(caller->p_endpoint);
  } else if (!isokendpt(m_ptr->IOP_ENDPT, &slot)) {
	return(EINVAL);
  }

  /* Grant the target process I/O privileges. */
  enable_iop(proc_addr(slot));
  return(OK);
#else
  return(EPERM);
#endif
}
/*===========================================================================*
 *			      do_getmcontext				     *
 *===========================================================================*/
int do_getmcontext(struct proc * caller, message * m_ptr)
{
/* Retrieve machine context of a process.
 * The context structure is round-tripped through the requester's address
 * space; on i386 the FPU state is included when the process has used the FPU.
 */

  register struct proc *rp;
  int proc_nr, r;
  mcontext_t mc;

  if (!isokendpt(m_ptr->m_lsys_krn_sys_getmcontext.endpt, &proc_nr))
	return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

#if defined(__i386__)
  if (!proc_used_fpu(rp))
	return(OK);	/* No state to copy */
#endif

  /* Get the mcontext structure into our address space.  */
  if ((r = data_copy(m_ptr->m_lsys_krn_sys_getmcontext.endpt,
		m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr, KERNEL,
		(vir_bytes) &mc, (phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  mc.mc_flags = 0;
#if defined(__i386__)
  /* Copy FPU state */
  if (proc_used_fpu(rp)) {
	/* make sure that the FPU context is saved into proc structure first */
	save_fpu(rp);
	mc.mc_flags = (rp->p_misc_flags & MF_FPU_INITIALIZED) ? _MC_FPU_SAVED : 0;
	assert(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
	memcpy(&(mc.__fpregs.__fp_reg_set), rp->p_seg.fpu_state,
		FPU_XFP_SIZE);
  }
#endif

  /* Copy the mcontext structure to the user's address space. */
  if ((r = data_copy(KERNEL, (vir_bytes) &mc,
	m_ptr->m_lsys_krn_sys_getmcontext.endpt,
	m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr,
	(phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  return(OK);
}
/*===========================================================================*
 *				do_nice					     *
 *===========================================================================*/
PUBLIC int do_nice(message *m_ptr)
{
/* Change process priority or stop the process. */
  int proc_nr, pri, new_q ;
  register struct proc *rp;

  /* Extract the message parameters and do sanity checking. */
  if(!isokendpt(m_ptr->PR_ENDPT, &proc_nr)) return EINVAL;
  if (iskerneln(proc_nr)) return(EPERM);
  pri = m_ptr->PR_PRIORITY;
  rp = proc_addr(proc_nr);

  if (is_rtp(rp)) return (EPERM); /* don't allow nice for RT processes */

  if (pri == PRIO_STOP) {
	/* Take process off the scheduling queues. */
	lock_dequeue(rp);
	rp->p_rts_flags |= NO_PRIORITY;
	return(OK);
  }
  else if (pri >= PRIO_MIN && pri <= PRIO_MAX) {

	/* The value passed in is currently between PRIO_MIN and PRIO_MAX.
	 * We have to scale this between MIN_USER_Q and MAX_USER_Q to match
	 * the kernel's scheduling queues.
	 */
	new_q = MAX_USER_Q + (pri-PRIO_MIN) * (MIN_USER_Q-MAX_USER_Q+1) /
		(PRIO_MAX-PRIO_MIN+1);
	if (new_q < MAX_USER_Q) new_q = MAX_USER_Q;	/* shouldn't happen */
	if (new_q > MIN_USER_Q) new_q = MIN_USER_Q;	/* shouldn't happen */

	if (new_q == RT_Q && !is_rtp(rp))
		return (EINVAL); /* don't allow other processes in the RT queue */

	/* Make sure the process is not running while changing its priority.
	 * Put the process back in its new queue if it is runnable.
	 */
	lock_dequeue(rp);
	rp->p_max_priority = rp->p_priority = new_q;
	if (! rp->p_rts_flags) lock_enqueue(rp);

	return(OK);
  }
  return(EINVAL);
}
/*===========================================================================*
 *			      do_schedctl				     *
 *===========================================================================*/
PUBLIC int do_schedctl(struct proc * caller, message * m_ptr)
{
/* Assign a scheduler to a process. With SCHEDCTL_FLAG_KERNEL the kernel
 * itself schedules the target (using the supplied priority/quantum/cpu);
 * otherwise the caller becomes the target's scheduler.
 */
	struct proc *p;
	unsigned flags;
	int priority, quantum, cpu;
	int proc_nr;
	int r;

	/* check parameter validity */
	flags = (unsigned) m_ptr->SCHEDCTL_FLAGS;
	if (flags & ~SCHEDCTL_FLAG_KERNEL) {
		printf("do_schedctl: flags 0x%x invalid, caller=%d\n",
			flags, caller - proc);
		return EINVAL;
	}

	if (!isokendpt(m_ptr->SCHEDCTL_ENDPOINT, &proc_nr))
		return EINVAL;
	p = proc_addr(proc_nr);

	if ((flags & SCHEDCTL_FLAG_KERNEL) == SCHEDCTL_FLAG_KERNEL) {
		/* the kernel becomes the scheduler and starts
		 * scheduling the process.
		 */
		priority = (int) m_ptr->SCHEDCTL_PRIORITY;
		quantum = (int) m_ptr->SCHEDCTL_QUANTUM;
		cpu = (int) m_ptr->SCHEDCTL_CPU;

		/* Try to schedule the process.
		 * BUG FIX: this used to read "(r = sched_proc(...) != OK)".
		 * Since '!=' binds tighter than '=', r was assigned the
		 * boolean result of the comparison (0 or 1), and the actual
		 * error code from sched_proc() was lost. Parenthesize the
		 * assignment so r holds the real return value.
		 */
		if ((r = sched_proc(p, priority, quantum, cpu)) != OK)
			return r;
		p->p_scheduler = NULL;
	} else {
		/* the caller becomes the scheduler */
		p->p_scheduler = caller;
	}

	return(OK);
}
/*===========================================================================*
 *			        do_sysctl				     *
 *===========================================================================*/
PUBLIC int do_sysctl(struct proc * caller, message * m_ptr)
{
/* Handle miscellaneous kernel control requests: copy-and-print a
 * diagnostics buffer, or dump a process' stack trace and registers.
 */
  vir_bytes len, buf;
  static char mybuf[DIAG_BUFSIZE];
  int s, i, proc_nr;

  switch (m_ptr->SYSCTL_CODE) {
    case SYSCTL_CODE_DIAG:
	buf = (vir_bytes) m_ptr->SYSCTL_ARG1;
	len = (vir_bytes) m_ptr->SYSCTL_ARG2;
	if(len < 1 || len > DIAG_BUFSIZE) {
		/* BUG FIX: 'len' is vir_bytes (an unsigned long on most
		 * ports); printing it with %d is a format/type mismatch and
		 * undefined behavior. Cast to unsigned long and use %lu.
		 */
		printf("do_sysctl: diag for %d: len %lu out of range\n",
			caller->p_endpoint, (unsigned long) len);
		return EINVAL;
	}
	if((s=data_copy_vmcheck(caller, caller->p_endpoint, buf, KERNEL,
		(vir_bytes) mybuf, len)) != OK) {
		printf("do_sysctl: diag for %d: len %lu: copy failed: %d\n",
			caller->p_endpoint, (unsigned long) len, s);
		return s;
	}
	for(i = 0; i < len; i++)
		kputc(mybuf[i]);
	kputc(END_OF_KMESS);
	return OK;
    case SYSCTL_CODE_STACKTRACE:
	if(!isokendpt(m_ptr->SYSCTL_ARG2, &proc_nr))
		return EINVAL;
	proc_stacktrace(proc_addr(proc_nr));
	proc_dump_regs(proc_addr(proc_nr));
	return OK;
    default:
	printf("do_sysctl: invalid request %d\n", m_ptr->SYSCTL_CODE);
	return(EINVAL);
  }

  /* Unreachable: all switch cases return. Kept to satisfy compilers that
   * cannot prove every path returns a value.
   */
  panic("do_sysctl: can't happen");

  return(OK);
}
/*===========================================================================*
 *				do_schedule				     *
 *===========================================================================*/
int do_schedule(struct proc * caller, message * m_ptr)
{
	struct proc *target;
	int slot;
	int prio, quant, cpu_nr;

	if (!isokendpt(m_ptr->SCHEDULING_ENDPOINT, &slot))
		return EINVAL;
	target = proc_addr(slot);

	/* Only this process' scheduler can schedule it */
	if (target->p_scheduler != caller)
		return(EPERM);

	/* Try to schedule the process with the requested parameters. */
	prio = (int) m_ptr->SCHEDULING_PRIORITY;
	quant = (int) m_ptr->SCHEDULING_QUANTUM;
	cpu_nr = (int) m_ptr->SCHEDULING_CPU;

	return sched_proc(target, prio, quant, cpu_nr);
}
/*===========================================================================*
 *				do_schedule				     *
 *===========================================================================*/
int do_schedule(struct proc * caller, message * m_ptr)
{
	struct proc *target;
	int slot;
	int prio, quant, cpu_nr, niced;

	if (!isokendpt(m_ptr->m_lsys_krn_schedule.endpoint, &slot))
		return EINVAL;
	target = proc_addr(slot);

	/* Only this process' scheduler can schedule it */
	if (target->p_scheduler != caller)
		return(EPERM);

	/* Try to schedule the process with the requested parameters. */
	prio = m_ptr->m_lsys_krn_schedule.priority;
	quant = m_ptr->m_lsys_krn_schedule.quantum;
	cpu_nr = m_ptr->m_lsys_krn_schedule.cpu;
	niced = !!(m_ptr->m_lsys_krn_schedule.niced);

	return sched_proc(target, prio, quant, cpu_nr, niced);
}
/*===========================================================================*
 *				do_times				     *
 *===========================================================================*/
int do_times(struct proc * caller, message * m_ptr)
{
/* Handle sys_times().  Retrieve the accounting information. */
  register const struct proc *rp;
  int slot;
  endpoint_t target;

  /* Insert the times needed by the SYS_TIMES kernel call in the message.
   * The clock's interrupt handler may run to update the user or system time
   * while in this code, but that cannot do any harm.
   */
  target = m_ptr->m_lsys_krn_sys_times.endpt;
  if (target == SELF)
	target = caller->p_endpoint;

  /* Per-process times are filled in only for a valid, non-NONE endpoint. */
  if (target != NONE && isokendpt(target, &slot)) {
	rp = proc_addr(slot);
	m_ptr->m_krn_lsys_sys_times.user_time = rp->p_user_time;
	m_ptr->m_krn_lsys_sys_times.system_time = rp->p_sys_time;
  }

  /* System-wide times are always reported. */
  m_ptr->m_krn_lsys_sys_times.boot_ticks = get_monotonic();
  m_ptr->m_krn_lsys_sys_times.real_ticks = get_realtime();
  m_ptr->m_krn_lsys_sys_times.boot_time = boottime;
  return(OK);
}
/*===========================================================================*
 *				do_exec					     *
 *===========================================================================*/
int do_exec(struct proc * caller, message * m_ptr)
{
/* Handle sys_exec().  A process has done a successful EXEC. Patch it up. */
  register struct proc *rp;
  int proc_nr;
  char name[PROC_NAME_LEN];

  if(!isokendpt(m_ptr->PR_ENDPT, &proc_nr))
	return EINVAL;

  rp = proc_addr(proc_nr);

  /* Any message still pending delivery is obsolete after exec: drop it. */
  if(rp->p_misc_flags & MF_DELIVERMSG) {
	rp->p_misc_flags &= ~MF_DELIVERMSG;
  }

  /* Save command name for debugging, ps(1) output, etc. If the copy from
   * the caller fails, fall back to a placeholder name.
   */
  if(data_copy(caller->p_endpoint, (vir_bytes) m_ptr->PR_NAME_PTR,
	KERNEL, (vir_bytes) name,
	(phys_bytes) sizeof(name) - 1) != OK)
	strncpy(name, "<unset>", PROC_NAME_LEN);

  name[sizeof(name)-1] = '\0';

  /* Set process state. */
  arch_proc_init(rp, (u32_t) m_ptr->PR_IP_PTR, (u32_t) m_ptr->PR_STACK_PTR, name);

  /* No reply to EXEC call */
  RTS_UNSET(rp, RTS_RECEIVING);

  /* Mark fpu_regs contents as not significant, so fpu
   * will be initialized, when it's used next time. */
  rp->p_misc_flags &= ~MF_FPU_INITIALIZED;
  /* force reloading FPU if the current process is the owner */
  release_fpu(rp);
  return(OK);
}
/*===========================================================================*
 *				QueueMess				     *
 *===========================================================================*/
PRIVATE int QueueMess(endpoint_t ep, vir_bytes msg_lin, struct proc *dst)
{
	int k;
	phys_bytes addr;
	NOREC_ENTER(queuemess);
	/* Queue a message from the src process (in memory) to the dst
	 * process (using dst process table entry). Do actual copy to
	 * kernel here; it's an error if the copy fails into kernel.
	 * Returns OK or EFAULT if the physical copy failed.
	 */
	vmassert(!(dst->p_misc_flags & MF_DELIVERMSG));
	vmassert(dst->p_delivermsg_lin);
	vmassert(isokendpt(ep, &k));

#if 0
	if(INMEMORY(dst)) {
		PHYS_COPY_CATCH(msg_lin, dst->p_delivermsg_lin,
			sizeof(message), addr);
		if(!addr) {
			PHYS_COPY_CATCH(vir2phys(&ep), dst->p_delivermsg_lin,
				sizeof(ep), addr);
			if(!addr) {
				NOREC_RETURN(queuemess, OK);
			}
		}
	}
#endif

	/* Copy the message into the kernel-held delivery buffer of dst.
	 * PHYS_COPY_CATCH leaves a nonzero addr on failure.
	 */
	PHYS_COPY_CATCH(msg_lin, vir2phys(&dst->p_delivermsg),
		sizeof(message), addr);
	if(addr) {
		NOREC_RETURN(queuemess, EFAULT);
	}

	/* Stamp the source endpoint and mark dst as having a message
	 * waiting to be delivered.
	 */
	dst->p_delivermsg.m_source = ep;
	dst->p_misc_flags |= MF_DELIVERMSG;

	NOREC_RETURN(queuemess, OK);
}
/*===========================================================================*
 *				do_chdir				     *
 *===========================================================================*/
PUBLIC int do_chdir()
{
/* Change directory.  This function is also called by MM to simulate a chdir
 * in order to do EXEC, etc. It also changes the root directory, the uids and
 * gids, and the umask.
 */
  int r;
  register struct fproc *rfp;

  if (who_e == PM_PROC_NR) {
	int slot;
	if(isokendpt(m_in.endpt1, &slot) != OK)
		return EINVAL;
	rfp = &fproc[slot];

	/* Adopt the target process' root and working directories; release
	 * the references currently held and take new ones.
	 */
	put_inode(fp->fp_rootdir);
	dup_inode(fp->fp_rootdir = rfp->fp_rootdir);
	put_inode(fp->fp_workdir);
	dup_inode(fp->fp_workdir = rfp->fp_workdir);

	/* MM uses access() to check permissions.  To make this work, pretend
	 * that the user's real ids are the same as the user's effective ids.
	 * FS calls other than access() do not use the real ids, so are not
	 * affected.
	 */
	fp->fp_realuid = fp->fp_effuid = rfp->fp_effuid;
	fp->fp_realgid = fp->fp_effgid = rfp->fp_effgid;
	fp->fp_umask = rfp->fp_umask;
	return(OK);
  }

  /* Perform the chdir(name) system call. */
  r = change(&fp->fp_workdir, m_in.name, m_in.name_length);
  return(r);
}
/*===========================================================================*
 *			      do_endksig				     *
 *===========================================================================*/
int do_endksig(struct proc * caller, message * m_ptr)
{
/* Finish up after a kernel type signal, caused by a SYS_KILL message or a
 * call to cause_sig by a task. This is called by a signal manager after
 * processing a signal it got with SYS_GETKSIG.
 */
  register struct proc *rp;
  int proc_nr;

  /* Get process pointer and verify that it had signals pending. If the
   * process is already dead its flags will be reset.
   */
  if(!isokendpt(m_ptr->m_sigcalls.endpt, &proc_nr))
	return EINVAL;

  rp = proc_addr(proc_nr);
  /* Only the registered signal manager of the target may end its signal. */
  if (caller->p_endpoint != priv(rp)->s_sig_mgr) return(EPERM);
  if (!RTS_ISSET(rp, RTS_SIG_PENDING)) return(EINVAL);

  /* The signal manager has finished one kernel signal. Is the process ready? */
  if (!RTS_ISSET(rp, RTS_SIGNALED)) 		/* new signal arrived */
	RTS_UNSET(rp, RTS_SIG_PENDING);	/* remove pending flag */
  return(OK);
}
/*===========================================================================*
 *				do_vmctl				     *
 *===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
/* Handle a VM control request for the process identified by SVMCTL_WHO
 * (SELF refers to the caller). Dispatches on SVMCTL_PARAM; unknown
 * parameters fall through to the architecture-specific handler.
 */
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, **rpp, *target;

  if(ep == SELF) { ep = caller->p_endpoint; }

  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
	case VMCTL_CLEAR_PAGEFAULT:
		assert(RTS_ISSET(p,RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request. We can
		 * not simply send the first request on the list, because IPC
		 * filters may forbid VM from getting requests for particular
		 * sources. However, IPC filters are used only in rare cases.
		 */
		for (rpp = &vmrequest; *rpp != NULL;
		    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
			rp = *rpp;

			assert(RTS_ISSET(rp, RTS_VMREQUEST));

			okendpt(rp->p_vmrequest.target, &proc_nr);
			target = proc_addr(proc_nr);

			/* Check against IPC filters. */
			if (!allow_ipc_filtered_memreq(rp, target))
				continue;

			/* Reply with request fields. */
			if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
				panic("VMREQUEST wrong type");

			m_ptr->SVMCTL_MRG_TARGET =
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR =
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH =
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG =
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR =
				(void *) rp->p_endpoint;

			/* Mark the request as handed to VM. */
			rp->p_vmrequest.vmresult = VMSUSPEND;

			/* Remove from request chain. */
			*rpp = rp->p_vmrequest.nextrequestor;

			return rp->p_vmrequest.req_type;
		}

		return ENOENT;

	case VMCTL_MEMREQ_REPLY:
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		assert(p->p_vmrequest.vmresult != VMSUSPEND);

		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * we will have to resume execution of the kernel call
			 * as soon the scheduler picks up this process again
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d",p->p_vmrequest.type);
		}

		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;

	case VMCTL_KERN_PHYSMAP:
	{
		int i = m_ptr->SVMCTL_VALUE;
		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
	case VMCTL_KERN_MAP_REPLY:
	{
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
	case VMCTL_VMINHIBIT_SET:
		/* check if we must stop a process on a different CPU */
#if CONFIG_SMP
		if (p->p_cpu != cpuid) {
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * the processes is certainly not runnable, no need to tell its
		 * cpu
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			struct priv *privp;
			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
				privp->s_asynsize);
		}
		/*
		 * We don't know whether kernel has the changed mapping
		 * installed to access userspace memory. And if so, on what CPU.
		 * More over we don't know what mapping has changed and how and
		 * therefore we must invalidate all mappings we have anywhere.
		 * Next time we map memory, we map it fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
	case VMCTL_BOOTINHIBIT_CLEAR:
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
int do_fork(struct proc * caller, message * m_ptr)
{
/* Handle sys_fork().
 * m_lsys_krn_sys_fork.endpt has forked.
 * The child is m_lsys_krn_sys_fork.slot.
 *
 * Copies the parent's process slot into the child's, then reinitializes
 * the fields that must not be inherited (endpoint, accounting, timers,
 * pending signals, page-table root). Returns OK or EINVAL; on success the
 * reply message carries the child's new endpoint and the parent's message
 * buffer address.
 */
#if defined(__i386__)
  char *old_fpu_save_area_p;
#endif
  register struct proc *rpc;		/* child process pointer */
  struct proc *rpp;			/* parent process pointer */
  int gen;
  int p_proc;
  int namelen;

  if(!isokendpt(m_ptr->m_lsys_krn_sys_fork.endpt, &p_proc))
	return EINVAL;

  rpp = proc_addr(p_proc);
  rpc = proc_addr(m_ptr->m_lsys_krn_sys_fork.slot);
  /* Parent slot must be in use and the child slot must be free. */
  if (isemptyp(rpp) || ! isemptyp(rpc)) return(EINVAL);

  assert(!(rpp->p_misc_flags & MF_DELIVERMSG));

  /* needs to be receiving so we know where the message buffer is */
  if(!RTS_ISSET(rpp, RTS_RECEIVING)) {
	printf("kernel: fork not done synchronously?\n");
	return EINVAL;
  }

  /* make sure that the FPU context is saved in parent before copy */
  save_fpu(rpp);

  /* Copy parent 'proc' struct to child. And reinitialize some fields.
   */
  gen = _ENDPOINT_G(rpc->p_endpoint);
#if defined(__i386__)
  /* The child owns its own FPU save area; preserve that pointer across the
   * struct copy below, then copy the FPU contents explicitly.
   */
  old_fpu_save_area_p = rpc->p_seg.fpu_state;
#endif
  *rpc = *rpp;				/* copy 'proc' struct */
#if defined(__i386__)
  rpc->p_seg.fpu_state = old_fpu_save_area_p;
  if(proc_used_fpu(rpp))
	memcpy(rpc->p_seg.fpu_state, rpp->p_seg.fpu_state, FPU_XFP_SIZE);
#endif
  if(++gen >= _ENDPOINT_MAX_GENERATION)	/* increase generation */
	gen = 1;			/* generation number wraparound */
  rpc->p_nr = m_ptr->m_lsys_krn_sys_fork.slot;	/* this was obliterated by copy */
  rpc->p_endpoint = _ENDPOINT(gen, rpc->p_nr);	/* new endpoint of slot */

  rpc->p_reg.retreg = 0;	/* child sees pid = 0 to know it is child */
  rpc->p_user_time = 0;		/* set all the accounting times to 0 */
  rpc->p_sys_time = 0;

  rpc->p_misc_flags &=
	~(MF_VIRT_TIMER | MF_PROF_TIMER | MF_SC_TRACE | MF_SPROF_SEEN | MF_STEP);
  rpc->p_virt_left = 0;		/* disable, clear the process-virtual timers */
  rpc->p_prof_left = 0;

  /* Mark process name as being a forked copy */
  namelen = strlen(rpc->p_name);
#define FORKSTR "*F"
  if(namelen+strlen(FORKSTR) < sizeof(rpc->p_name))
	strcat(rpc->p_name, FORKSTR);

  /* the child process is not runnable until it's scheduled. */
  RTS_SET(rpc, RTS_NO_QUANTUM);
  reset_proc_accounting(rpc);

  rpc->p_cpu_time_left = 0;
  rpc->p_cycles = 0;
  rpc->p_kcall_cycles = 0;
  rpc->p_kipc_cycles = 0;
  rpc->p_signal_received = 0;

  /* If the parent is a privileged process, take away the privileges from the
   * child process and inhibit it from running by setting the NO_PRIV flag.
   * The caller should explicitly set the new privileges before executing.
   */
  if (priv(rpp)->s_flags & SYS_PROC) {
	rpc->p_priv = priv_addr(USER_PRIV_ID);
	rpc->p_rts_flags |= RTS_NO_PRIV;
  }

  /* Calculate endpoint identifier, so caller knows what it is. */
  m_ptr->m_krn_lsys_sys_fork.endpt = rpc->p_endpoint;
  m_ptr->m_krn_lsys_sys_fork.msgaddr = rpp->p_delivermsg_vir;

  /* Don't schedule process in VM mode until it has a new pagetable.
   */
  if(m_ptr->m_lsys_krn_sys_fork.flags & PFF_VMINHIBIT) {
  	RTS_SET(rpc, RTS_VMINHIBIT);
  }

  /*
   * Only one in group should have RTS_SIGNALED, child doesn't inherit tracing.
   */
  RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
  (void) sigemptyset(&rpc->p_pending);

  /* The child gets no page-table root yet; VM will install one. */
#if defined(__i386__)
  rpc->p_seg.p_cr3 = 0;
  rpc->p_seg.p_cr3_v = NULL;
#elif defined(__arm__)
  rpc->p_seg.p_ttbr = 0;
  rpc->p_seg.p_ttbr_v = NULL;
#endif

  return OK;
}
/*===========================================================================*
 *				do_sprofile				     *
 *===========================================================================*/
PUBLIC int do_sprofile(struct proc * caller, message * m_ptr)
{
/* Handle the statistical profiling kernel call.
 *
 * PROF_ACTION selects the sub-operation:
 *   PROF_START - validate the target endpoint, record the user buffer
 *                addresses/size, reset sample counters, start the profile
 *                clock and enable sampling.
 *   PROF_STOP  - disable sampling, stop the profile clock and copy the
 *                sample-info struct back to the requesting process.
 *
 * Returns OK on success, EBUSY when starting while already started (or
 * stopping while not started), EINVAL for a bad endpoint or unknown action,
 * or the data_copy() error when the info struct cannot be copied out.
 */
  int proc_nr;
  int r;

  switch(m_ptr->PROF_ACTION) {

  case PROF_START:
	/* Starting profiling.
	 *
	 * Check if profiling is not already running. Calculate physical
	 * addresses of user pointers. Reset counters. Start CMOS timer.
	 * Turn on profiling.
	 */
	if (sprofiling) {
		printf("SYSTEM: start s-profiling: already started\n");
		return EBUSY;
	}

	/* Test endpoint number. */
	if(!isokendpt(m_ptr->PROF_ENDPT, &proc_nr))
		return EINVAL;

	/* Set parameters for statistical profiler. */
	sprof_ep = m_ptr->PROF_ENDPT;
	sprof_info_addr_vir = (vir_bytes) m_ptr->PROF_CTL_PTR;
	sprof_data_addr_vir = (vir_bytes) m_ptr->PROF_MEM_PTR;

	sprof_info.mem_used = 0;
	sprof_info.total_samples = 0;
	sprof_info.idle_samples = 0;
	sprof_info.system_samples = 0;
	sprof_info.user_samples = 0;

	sprof_mem_size = m_ptr->PROF_MEM_SIZE;

	init_profile_clock(m_ptr->PROF_FREQ);

	sprofiling = 1;

	return OK;

  case PROF_STOP:
	/* Stopping profiling.
	 *
	 * Check if profiling is indeed running. Turn off profiling.
	 * Stop CMOS timer. Copy info struct to user process.
	 */
	if (!sprofiling) {
		printf("SYSTEM: stop s-profiling: not started\n");
		return EBUSY;
	}

	sprofiling = 0;

	stop_profile_clock();

	/* Propagate a copy failure to the caller instead of silently
	 * reporting success with an unwritten info struct.
	 */
	r = data_copy(KERNEL, (vir_bytes) &sprof_info,
		sprof_ep, sprof_info_addr_vir, sizeof(sprof_info));
	if (r != OK)
		return r;

	return OK;

  default:
	return EINVAL;
  }
}