static void
pagezero_start(void __unused *arg)
{
	int error;

	error = kthread_create(vm_pagezero, NULL, &pagezero_proc,
	    RFSTOPPED, 0, "pagezero");
	if (error)
		panic("pagezero_start: error %d\n", error);
	/*
	 * We're an idle task, don't count us in the load.
	 */
	PROC_LOCK(pagezero_proc);
	pagezero_proc->p_flag |= P_NOLOAD;
	PROC_UNLOCK(pagezero_proc);
	mtx_lock_spin(&sched_lock);
	setrunqueue(FIRST_THREAD_IN_PROC(pagezero_proc), SRQ_BORING);
	mtx_unlock_spin(&sched_lock);
}
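/*
 * A minimal sketch of how pagezero_start() would be hooked into boot on
 * FreeBSD: one-shot initializers of this kind are normally registered
 * with SYSINIT() so they run once the kthread facility is available.
 * The subsystem/order constants below are illustrative assumptions, not
 * taken from this file.
 */
SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL);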
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 */
static inline void
userret(struct proc *p, int pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;
	if (want_ast) {
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we put ourselves on the run queue
		 * but before we switched, we might not be on the queue
		 * indicated by our priority.
		 */
		(void) splstatclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		(void) spl0();
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - oticks));

	curpriority = p->p_priority;
}
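/*
 * Illustrative sketch (not from this file) of how a trap handler would
 * use userret(): it samples p->p_sticks on entry, and just before
 * returning to user mode it lets userret() deliver pending signals,
 * honor reschedule requests, and charge profiling time.  The trap-frame
 * type and its tf_pc field are assumptions for illustration.
 */
static void
trap_return_sketch(struct proc *p, struct trapframe *tf)
{
	u_quad_t sticks;

	sticks = p->p_sticks;		/* system time at trap entry */
	/* ... actual trap processing would happen here ... */
	userret(p, tf->tf_pc, sticks);	/* signals, AST, profiling */
}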
/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}
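/*
 * A hypothetical caller, shown for illustration only: a priority change
 * must go through adjustrunqueue() when the thread is already queued so
 * that it migrates to the run queue matching its new priority; for a
 * thread that is not on a run queue, rewriting td_priority suffices.
 */
static void
change_priority_sketch(struct thread *td, int newpri)
{
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td))
		adjustrunqueue(td, newpri);
	else
		td->td_priority = newpri;
}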
int
fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc *p2;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	extern void endtsleep(void *);
	extern void realitexpire(void *);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes to root. The variable nprocs is the current
	 * number of processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 5 && uid != 0) || nprocs >= maxproc) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nprocs++;

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocs--;
		return (EAGAIN);
	}

	uaddr = uvm_km_alloc1(kernel_map, USPACE, USPACE_ALIGN, 1);
	if (uaddr == 0) {
		chgproccnt(uid, -1);
		nprocs--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p2 = pool_get(&proc_pool, PR_WAITOK);

	p2->p_stat = SIDL;			/* protect against others */
	p2->p_exitsig = exitsig;
	p2->p_forw = p2->p_back = NULL;

#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p2->p_flag, P_THREAD);
		p2->p_p = p1->p_p;
		TAILQ_INSERT_TAIL(&p2->p_p->ps_threads, p2, p_thr_link);
	} else {
		process_new(p2, p1);
	}
#else
	process_new(p2, p1);
#endif

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p2->p_sleep_to, endtsleep, p2);
	timeout_set(&p2->p_realit_to, realitexpire, p2);

#if defined(__HAVE_CPUINFO)
	p2->p_cpu = p1->p_cpu;
#endif

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = 0;
	p2->p_emul = p1->p_emul;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	atomic_setbits_int(&p2->p_flag, p1->p_flag & (P_SUGID | P_SUGIDEXEC));
	if (flags & FORK_PTRACE)
		atomic_setbits_int(&p2->p_flag, p1->p_flag & P_TRACED);
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		p2->p_p->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
		bcopy(p1->p_p->ps_cred, p2->p_p->ps_cred,
		    sizeof(*p2->p_p->ps_cred));
		p2->p_p->ps_cred->p_refcnt = 1;
		crhold(p1->p_ucred);
	}

	TAILQ_INIT(&p2->p_selects);

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & FORK_CLEANFILES)
		p2->p_fd = fdinit(p1);
	else if (flags & FORK_SHAREFILES)
		p2->p_fd = fdshare(p1);
	else
		p2->p_fd = fdcopy(p1);

	/*
	 * If ps_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		if (p1->p_p->ps_limit->p_lflags & PL_SHAREMOD)
			p2->p_p->ps_limit = limcopy(p1->p_p->ps_limit);
		else {
			p2->p_p->ps_limit = p1->p_p->ps_limit;
			p2->p_p->ps_limit->p_refcnt++;
		}
	}

	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		atomic_setbits_int(&p2->p_flag, P_CONTROLT);
	if (flags & FORK_PPWAIT)
		atomic_setbits_int(&p2->p_flag, P_PPWAIT);
	p2->p_pptr = p1;
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p2->p_flag, P_NOZOMBIE);
	LIST_INIT(&p2->p_children);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * Set priority of child to be that of parent.
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(p1, p2);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		sigactsshare(p1, p2);
	else
		p2->p_sigacts = sigactsinit(p1);

	/*
	 * If emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, p1);

	p2->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(p1, p2, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    stacksize, func ? func : child_return, arg ? arg : p2);

	timeout_set(&p2->p_stats->p_virt_to, virttimer_trampoline, p2);
	timeout_set(&p2->p_stats->p_prof_to, proftimer_trampoline, p2);

	vm = p2->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_RFORK) {
		forkstat.cntrfork++;
		forkstat.sizrfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p2->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	if (p2->p_flag & P_TRACED) {
		p2->p_oppid = p1->p_pid;
		if (p2->p_pptr != p1->p_pptr)
			proc_reparent(p2, p1->p_pptr);

		/*
		 * Set ptrace status.
		 */
		if (flags & FORK_FORK) {
			p2->p_ptstat = malloc(sizeof(*p2->p_ptstat),
			    M_SUBPROC, M_WAITOK);
			p1->p_ptstat->pe_report_event = PTRACE_FORK;
			p2->p_ptstat->pe_report_event = PTRACE_FORK;
			p1->p_ptstat->pe_other_pid = p2->p_pid;
			p2->p_ptstat->pe_other_pid = p1->p_pid;
		}
	}

#if NSYSTRACE > 0
	if (ISSET(p1->p_flag, P_SYSTRACE))
		systrace_fork(p1, p2);
#endif

	/*
	 * Make child runnable, set start time, and add to run queue.
	 */
	SCHED_LOCK(s);
	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	p2->p_stat = SRUN;
	setrunqueue(p2);
	SCHED_UNLOCK(s);

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	/*
	 * Preserve synchronization semantics of vfork.
	 * If waiting for the child to exec or exit, set P_PPWAIT on the
	 * child, and sleep on our proc (in case of exit).
	 */
	if (flags & FORK_PPWAIT)
		while (p2->p_flag & P_PPWAIT)
			tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (p1->p_flag & P_TRACED))
		psignal(p1, SIGTRAP);

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	return (0);
}
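/*
 * A minimal sketch of a fork1() caller, assuming the conventional
 * fork(2) entry point passes SIGCHLD as the exit signal and the
 * FORK_FORK accounting flag, with everything else defaulted so the
 * child returns through child_return().  This is illustrative, not the
 * verbatim syscall stub, and the function name is hypothetical.
 */
int
sys_fork_sketch(struct proc *p, void *v, register_t *retval)
{
	return (fork1(p, SIGCHLD, FORK_FORK, NULL, 0, NULL, NULL,
	    retval, NULL));
}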