static void racctd(void) { struct thread *td; struct proc *p; struct timeval wallclock; uint64_t runtime; for (;;) { sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_state != PRS_NORMAL) continue; if (p->p_flag & P_SYSTEM) continue; microuptime(&wallclock); timevalsub(&wallclock, &p->p_stats->p_start); PROC_LOCK(p); PROC_SLOCK(p); FOREACH_THREAD_IN_PROC(p, td) { ruxagg(p, td); thread_lock(td); thread_unlock(td); } runtime = cputick2usec(p->p_rux.rux_runtime); PROC_SUNLOCK(p); #ifdef notyet KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime")); #else if (runtime < p->p_prev_runtime) runtime = p->p_prev_runtime; #endif p->p_prev_runtime = runtime; mtx_lock(&racct_lock); racct_set_locked(p, RACCT_CPU, runtime); racct_set_locked(p, RACCT_WALLCLOCK, wallclock.tv_sec * 1000000 + wallclock.tv_usec); mtx_unlock(&racct_lock); PROC_UNLOCK(p); } sx_sunlock(&allproc_lock); pause("-", hz); }
/*
 * Request a graceful shutdown: record the requested howto flags and ask
 * init(8) to bring the system down by sending it SIGINT.  If there is no
 * init process, reboot immediately without syncing.
 *
 * Fix: dropped the redundant trailing "return;" in a void function.
 */
void
shutdown_nice(int howto)
{
	shutdown_howto = howto;

	/* Send a signal to init(8) and have it shutdown the world */
	if (initproc != NULL) {
		PROC_LOCK(initproc);
		kern_psignal(initproc, SIGINT);
		PROC_UNLOCK(initproc);
	} else {
		/* No init(8) running, so simply reboot */
		kern_reboot(RB_NOSYNC);
	}
}
int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE); } return (ret); }
/*
 * Get machine context.
 *
 * Snapshot the 32-bit (ia32) register state of thread 'td' into *mcp,
 * reading from the thread's trap frame and PCB.  If GET_MC_CLEAR_RET is
 * set in 'flags', the syscall return registers (eax/edx) and the carry
 * flag are cleared so a resumed context observes a zero return.
 * Always returns 0.
 */
static int
ia32_get_mcontext(struct thread *td, struct ia32_mcontext *mcp, int flags)
{
	struct pcb *pcb;
	struct trapframe *tp;

	pcb = td->td_pcb;
	tp = td->td_frame;
	/* sigonstack() state is protected by the current proc's lock. */
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_rsp);
	PROC_UNLOCK(curthread->td_proc);
	/* Entry into kernel always sets TF_HASSEGS */
	mcp->mc_gs = tp->tf_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	/* 64-bit trapframe registers are narrowed into the ia32 fields. */
	mcp->mc_edi = tp->tf_rdi;
	mcp->mc_esi = tp->tf_rsi;
	mcp->mc_ebp = tp->tf_rbp;
	mcp->mc_isp = tp->tf_rsp;
	mcp->mc_eflags = tp->tf_rflags;
	if (flags & GET_MC_CLEAR_RET) {
		/* Present a zero syscall return to the restored context. */
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_rax;
		mcp->mc_edx = tp->tf_rdx;
	}
	mcp->mc_ebx = tp->tf_rbx;
	mcp->mc_ecx = tp->tf_rcx;
	mcp->mc_eip = tp->tf_rip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_rsp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	mcp->mc_flags = tp->tf_flags;
	/* FPU state goes into the context inline; no extended area here. */
	ia32_get_fpcontext(td, mcp, NULL, 0);
	mcp->mc_fsbase = pcb->pcb_fsbase;
	mcp->mc_gsbase = pcb->pcb_gsbase;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
	return (0);
}
/*
 * Iterate through directory entries
 *
 * Advance the (*pn, *p) cursor to the next visible entry under directory
 * node 'pd'.  A pfstype_procdir node expands into one entry per process,
 * so *p walks allproc while *pn stays on that node; otherwise *p is NULL
 * and *pn walks the sibling list.  Entries hidden from 'td' (per
 * pfs_visible_proc()) are skipped via the 'again' loop.  On return with 0,
 * *p (if non-NULL) has been locked and unlocked here — the caller gets an
 * unlocked process pointer.  Returns -1 when the entries are exhausted.
 *
 * Caller must hold allproc_lock shared and own 'pd' (asserted below).
 */
static int
pfs_iterate(struct thread *td, struct proc *proc, struct pfs_node *pd,
    struct pfs_node **pn, struct proc **p)
{
	int visible;

	sx_assert(&allproc_lock, SX_SLOCKED);
	pfs_assert_owned(pd);
 again:
	if (*pn == NULL) {
		/* first node */
		*pn = pd->pn_nodes;
	} else if ((*pn)->pn_type != pfstype_procdir) {
		/* next node */
		*pn = (*pn)->pn_next;
	}
	if (*pn != NULL && (*pn)->pn_type == pfstype_procdir) {
		/* next process */
		if (*p == NULL)
			*p = LIST_FIRST(&allproc);
		else
			*p = LIST_NEXT(*p, p_list);
		/* out of processes: next node */
		if (*p == NULL)
			*pn = (*pn)->pn_next;
		else
			PROC_LOCK(*p);
	}
	if ((*pn) == NULL)
		return (-1);
	if (*p != NULL) {
		/* Visibility check needs the proc locked; drop it after. */
		visible = pfs_visible_proc(td, *pn, *p);
		PROC_UNLOCK(*p);
	} else if (proc != NULL) {
		visible = pfs_visible_proc(td, *pn, proc);
	} else {
		visible = 1;
	}
	/* Hidden entry: keep advancing until a visible one or the end. */
	if (!visible)
		goto again;
	return (0);
}
int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { struct ia32_ucontext uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { ia32_get_mcontext(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); bzero(&uc.__spare__, sizeof(uc.__spare__)); ret = copyout(&uc, uap->ucp, UC_COPY_SIZE); } return (ret); }
/*
 * System call to enter capability mode for the process.
 *
 * Marks the calling process's credentials with CRED_FLAG_CAPMODE.
 * Idempotent: a process already in capability mode gets 0 back with no
 * other effect.  Always succeeds.
 */
int
sys_cap_enter(struct thread *td, struct cap_enter_args *uap)
{
	struct ucred *cred, *old;
	struct proc *p;

	if (IN_CAPABILITY_MODE(td))
		return (0);

	/* Allocate the replacement credential before taking the lock. */
	cred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	old = crcopysafe(p, cred);
	cred->cr_flags |= CRED_FLAG_CAPMODE;
	proc_set_cred(p, cred);
	PROC_UNLOCK(p);
	crfree(old);
	return (0);
}
/*
 * Bootstrap the page-zeroing kernel thread: create it stopped
 * (RFSTOPPED), exempt it from the load average, then place its first
 * thread on the run queue.  Panics if the kthread cannot be created.
 */
static void
pagezero_start(void __unused *arg)
{
	int error;

	error = kthread_create(vm_pagezero, NULL, &pagezero_proc, RFSTOPPED, 0,
	    "pagezero");
	if (error)
		panic("pagezero_start: error %d\n", error);
	/*
	 * We're an idle task, don't count us in the load.
	 */
	PROC_LOCK(pagezero_proc);
	pagezero_proc->p_flag |= P_NOLOAD;
	PROC_UNLOCK(pagezero_proc);
	/* setrunqueue() requires sched_lock on this (old) scheduler API. */
	mtx_lock_spin(&sched_lock);
	setrunqueue(FIRST_THREAD_IN_PROC(pagezero_proc), SRQ_BORING);
	mtx_unlock_spin(&sched_lock);
}
/* * Called by events that want to shut down.. e.g <CTL><ALT><DEL> on a PC */ void shutdown_nice(int howto) { if (initproc != NULL) { /* Send a signal to init(8) and have it shutdown the world. */ PROC_LOCK(initproc); if (howto & RB_POWEROFF) kern_psignal(initproc, SIGUSR2); else if (howto & RB_HALT) kern_psignal(initproc, SIGUSR1); else kern_psignal(initproc, SIGINT); PROC_UNLOCK(initproc); } else { /* No init(8) running, so simply reboot. */ kern_reboot(howto | RB_NOSYNC); } }
/*
 * Handle a debugger "write memory" request message.
 *
 * Validates the request payload, resolves the target process by PID, and
 * writes request->dataLength bytes from request->data into the target's
 * address space via proc_rw_mem() (last argument 1 = write).  The message
 * reference is always released via __dec() before returning.
 *
 * NOTE(review): pfind() is expected to return the process locked — the
 * PROC_UNLOCK() below relies on that; confirm against the kdlsym'd pfind.
 * NOTE(review): _mtx_unlock_flags is resolved but not called directly
 * here — presumably the PROC_UNLOCK macro expands to it; verify.
 */
void
debugger_writemem_callback(struct allocation_t* ref)
{
	struct message_t* message = __get(ref);
	if (!message)
		return;

	/* Only service request messages (header.request == 1). */
	if (message->header.request != 1)
		goto cleanup;

	if (message->socket < 0)
		goto cleanup;

	if (!message->payload)
		goto cleanup;

	struct debugger_writemem_t* request =
	    (struct debugger_writemem_t*)message->payload;

	/* Reject obviously invalid target parameters. */
	if (request->process_id < 0)
		goto cleanup;

	if (request->address == 0)
		goto cleanup;

	if (request->dataLength == 0)
		goto cleanup;

	/* Kernel symbols are resolved at runtime via kdlsym. */
	struct proc* (*pfind)(pid_t) = kdlsym(pfind);

	struct proc* process = pfind(request->process_id);
	if (process == 0)
		goto cleanup;

	void(*_mtx_unlock_flags)(struct mtx *m, int opts, const char *file,
	    int line) = kdlsym(_mtx_unlock_flags);

	int result = proc_rw_mem(process, (void*)request->address,
	    request->dataLength, request->data, &request->dataLength, 1);

	// You need to unlock the process, or the kernel will assert and hang
	PROC_UNLOCK(process);

	WriteLog(LL_Debug, "proc_rw_mem returned %d", result);

cleanup:
	__dec(ref);
}
/*
 * ioctl handler for the filemon device.
 *
 * FILEMON_SET_FD: attach the write side of the given descriptor as the
 * event output file and emit the log header.  FILEMON_SET_PID: record
 * the PID to monitor; pget() requires debug rights and a live process.
 * Returns 0 or an errno.
 *
 * NOTE(review): the devfs_get_cdevpriv() return value is ignored here —
 * on failure 'filemon' would be used uninitialized; confirm callers
 * guarantee cdevpriv is always set before ioctl.
 */
static int
filemon_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag __unused,
    struct thread *td)
{
	int error = 0;
	struct filemon *filemon;
	struct proc *p;
#if __FreeBSD_version >= 900041
	cap_rights_t rights;
#endif

	devfs_get_cdevpriv((void **) &filemon);

	switch (cmd) {
	/* Set the output file descriptor. */
	case FILEMON_SET_FD:
		/* Capability systems (>= 9.0) need CAP_PWRITE on the fd. */
		error = fget_write(td, *(int *)data,
#if __FreeBSD_version >= 900041
		    cap_rights_init(&rights, CAP_PWRITE),
#endif
		    &filemon->fp);
		if (error == 0)
			/* Write the file header. */
			filemon_comment(filemon);
		break;

	/* Set the monitored process ID. */
	case FILEMON_SET_PID:
		/* pget() returns the process locked on success. */
		error = pget(*((pid_t *)data), PGET_CANDEBUG | PGET_NOTWEXIT,
		    &p);
		if (error == 0) {
			filemon->pid = p->p_pid;
			PROC_UNLOCK(p);
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}
/*
 * USB interrupt-transfer completion handler for the uhid driver.
 *
 * On a normal completion, queue the received report bytes, wake any
 * reader sleeping on the input queue, notify selectors, and deliver
 * SIGIO to the async-I/O owner if one is registered.  A cancelled
 * transfer is ignored; a stall is flagged for later clearing.
 */
void
uhid_intr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct uhid_softc *sc = addr;

#ifdef USB_DEBUG
	if (uhiddebug > 5) {
		u_int32_t cc, i;

		usbd_get_xfer_status(xfer, NULL, NULL, &cc, NULL);
		DPRINTF(("uhid_intr: status=%d cc=%d\n", status, cc));
		DPRINTF(("uhid_intr: data ="));
		for (i = 0; i < cc; i++)
			DPRINTF((" %02x", sc->sc_ibuf[i]));
		DPRINTF(("\n"));
	}
#endif

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("uhid_intr: status=%d\n", status));
		/* Remember the stall so the read path can clear it. */
		if (status == USBD_STALLED)
			sc->sc_state |= UHID_NEEDCLEAR;
		return;
	}

	/* Append the report to the clist consumed by uhid read(). */
	(void) b_to_q(sc->sc_ibuf, sc->sc_isize, &sc->sc_q);

	if (sc->sc_state & UHID_ASLP) {
		sc->sc_state &= ~UHID_ASLP;
		DPRINTFN(5, ("uhid_intr: waking %p\n", &sc->sc_q));
		wakeup(&sc->sc_q);
	}
	selwakeup(&sc->sc_rsel);
	if (sc->sc_async != NULL) {
		DPRINTFN(3, ("uhid_intr: sending SIGIO %p\n", sc->sc_async));
		/* psignal() requires the target proc locked. */
		PROC_LOCK(sc->sc_async);
		psignal(sc->sc_async, SIGIO);
		PROC_UNLOCK(sc->sc_async);
	}
}
int pre_execve(struct thread *td, struct vmspace **oldvmspace) { struct proc *p; int error; KASSERT(td == curthread, ("non-current thread %p", td)); error = 0; p = td->td_proc; if ((p->p_flag & P_HADTHREADS) != 0) { PROC_LOCK(p); if (thread_single(p, SINGLE_BOUNDARY) != 0) error = ERESTART; PROC_UNLOCK(p); } KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0, ("nested execve")); *oldvmspace = p->p_vmspace; return (error); }
/*
 * Acquire the proc's p_filemon reference and lock the filemon.
 * The proc's p_filemon may not match this filemon on return.
 *
 * Returns the filemon locked (sx exclusive) with a new reference held,
 * or NULL if the process has none.  The unlocked pre-check is a cheap
 * fast path; the authoritative read happens under PROC_LOCK.
 */
static struct filemon *
filemon_proc_get(struct proc *p)
{
	struct filemon *filemon;

	/* Unlocked fast path: most processes are not monitored. */
	if (p->p_filemon == NULL)
		return (NULL);
	PROC_LOCK(p);
	filemon = filemon_acquire(p->p_filemon);
	PROC_UNLOCK(p);

	if (filemon == NULL)
		return (NULL);
	/*
	 * The p->p_filemon may have changed by now.  That case is handled
	 * by the exit and fork hooks and filemon_attach_proc specially.
	 */
	sx_xlock(&filemon->lock);
	return (filemon);
}
static int soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct socket *so = fp->f_data; int error; #ifdef MAC error = mac_socket_check_send(active_cred, so); if (error) return (error); #endif error = sosend(so, 0, uio, 0, 0, 0, uio->uio_td); if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) { PROC_LOCK(uio->uio_td->td_proc); tdsignal(uio->uio_td, SIGPIPE); PROC_UNLOCK(uio->uio_td->td_proc); } return (error); }
/* * Filler function for proc/pid/self */ int procfs_doprocfile(PFS_FILL_ARGS) { char *fullpath; char *freepath; struct vnode *textvp; int error; freepath = NULL; PROC_LOCK(p); textvp = p->p_textvp; vhold(textvp); PROC_UNLOCK(p); error = vn_fullpath(td, textvp, &fullpath, &freepath); vdrop(textvp); if (error == 0) sbuf_printf(sb, "%s", fullpath); if (freepath != NULL) free(freepath, M_TEMP); return (error); }
//2. Define syscall(struct thread *td, struct syscall_args *arg){...} static int getProcessTickets(struct thread *td, struct getProcessTickets_args *arg) { //getProcessTickets logic here //if PID exists, get tickets from such process //if PID does not exists, return error code (-1) struct proc *process1; int procTickets; process1 = pfind(arg->pid); if(process1 == NULL){ td->td_retval[0] = -1; return 0; } else{ procTickets = process1->tickets; td->td_retval[0] = procTickets; PROC_UNLOCK(process1); return 0; } }
//2. Define syscall(struct thread *td, struct syscall_args *arg){...} static int setSocialInfo(struct thread *td, struct setSocialInfo_args *arg) { //setSocialInfo logic: //if PID exists, set social_info to such process //if PID does not exists, return error code (-1) struct proc *process2; process2 = pfind(arg->pid); if(process2 == NULL){ td->td_retval[0] = -1; return 0; } else{ process2->social_info = arg->social_info; //set process's social_info with arg's social_info PROC_UNLOCK(process2); return 0; } }
/* Remove and release the filemon on the given process. */
/*
 * Caller must hold the filemon's sx lock exclusively (asserted); the
 * lock remains held on return, so this must not drop the last reference.
 */
static void
filemon_proc_drop(struct proc *p)
{
	struct filemon *filemon;

	KASSERT(p->p_filemon != NULL, ("%s: proc %p NULL p_filemon",
	    __func__, p));
	sx_assert(&p->p_filemon->lock, SA_XLOCKED);
	PROC_LOCK(p);
	/* Detach under the proc lock and account the lost process. */
	filemon = p->p_filemon;
	p->p_filemon = NULL;
	--filemon->proccnt;
	PROC_UNLOCK(p);

	/*
	 * This should not be the last reference yet.  filemon_release()
	 * cannot be called with filemon locked, which the caller expects
	 * will stay locked.
	 */
	KASSERT(filemon->refcnt > 1, ("%s: proc %p dropping filemon %p "
	    "with last reference", __func__, p, filemon));
	filemon_release(filemon);
}
int setcontext(struct thread *td, struct setcontext_args *uap) { ucontext_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { ret = copyin(uap->ucp, &uc, UC_COPY_SIZE); if (ret == 0) { ret = set_mcontext(td, &uc.uc_mcontext); if (ret == 0) { SIG_CANTMASK(uc.uc_sigmask); PROC_LOCK(td->td_proc); td->td_sigmask = uc.uc_sigmask; PROC_UNLOCK(td->td_proc); } } } return (ret == 0 ? EJUSTRETURN : ret); }
/* ARGSUSED */
/*
 * setloginclass(2): change the calling process's login class.  Requires
 * PRIV_PROC_SETLOGINCLASS.  Swaps in a new credential referencing the
 * class, then notifies the resource accounting (RACCT) and resource
 * limit (RCTL) subsystems of the credential change.
 *
 * NOTE(review): the crhold(newcred) under #ifdef RACCT is paired with
 * the crfree(newcred) under #ifdef RCTL.  If a kernel were built with
 * RACCT but without RCTL this would leak a credential reference —
 * confirm the build system enforces RCTL-implies-RACCT coupling.
 */
int
sys_setloginclass(struct thread *td, struct setloginclass_args *uap)
{
	struct proc *p = td->td_proc;
	int error;
	char lcname[MAXLOGNAME];
	struct loginclass *newlc;
	struct ucred *newcred, *oldcred;

	error = priv_check(td, PRIV_PROC_SETLOGINCLASS);
	if (error != 0)
		return (error);
	error = copyinstr(uap->namebuf, lcname, sizeof(lcname), NULL);
	if (error != 0)
		return (error);
	/* Finds or creates the class; holds a reference on it. */
	newlc = loginclass_find(lcname);
	if (newlc == NULL)
		return (EINVAL);
	newcred = crget();
	PROC_LOCK(p);
	oldcred = crcopysafe(p, newcred);
	newcred->cr_loginclass = newlc;
	proc_set_cred(p, newcred);
#ifdef RACCT
	racct_proc_ucred_changed(p, oldcred, newcred);
	/* Keep newcred alive for the RCTL notification after unlock. */
	crhold(newcred);
#endif
	PROC_UNLOCK(p);
#ifdef RCTL
	/* Must be called without the proc lock held. */
	rctl_proc_ucred_changed(p, newcred);
	crfree(newcred);
#endif
	loginclass_free(oldcred->cr_loginclass);
	crfree(oldcred);
	return (0);
}
/*
 * thr_exit(2): terminate the calling thread.
 *
 * If 'state' is non-NULL, write 1 through it and wake any umtx waiters —
 * this tells userland (libthr) the thread's stack may be reclaimed.  For
 * a multi-threaded process the thread is torn down and thread_exit() is
 * called, which does not return (it is entered holding the proc lock and
 * proc slock — presumably it consumes them; confirm against kern_thread.c).
 * For the last thread the request is ignored and 0 is returned.
 */
int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
	struct proc *p;

	p = td->td_proc;

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	/* tidhash write lock before proc lock: removal from the TID hash
	 * and thread exit must appear atomic to lookups. */
	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);
	if (p->p_numthreads != 1) {
		racct_sub(p, RACCT_NTHR, 1);
		LIST_REMOVE(td, td_hash);
		rw_wunlock(&tidhash_lock);
		tdsigcleanup(td);
		PROC_SLOCK(p);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * Ignore attempts to shut down last thread in the proc.  This
	 * will actually call _exit(2) in the usermode trampoline when
	 * it returns.
	 */
	PROC_UNLOCK(p);
	rw_wunlock(&tidhash_lock);
	return (0);
}
//2. Define syscall(struct thread *td, struct syscall_args *arg){...} static u_int64_t getSocialInfo(struct thread *td, struct getSocialInfo_args *arg) { //getSocialInfo logic: //if PID exists, get social_info from such process //if PID does not exists, return error code (-1) struct proc *process0; u_int64_t social_info; process0 = pfind(arg->pid); if(process0 == NULL){ td->td_retval[0] = -1; return 0; } else{ social_info = process0->social_info; //get process's social_info td->td_retval[0] = social_info; //will just truncate PROC_UNLOCK(process0); return 0; } }
/*
 * Resume a thread previously put asleep with kthread_suspend().
 *
 * Clears TDF_KTH_SUSP under the thread lock and wakes the suspended
 * kthread, which sleeps on &td->td_flags in kthread_suspend_check().
 * Returns EINVAL if 'td' is not a kernel thread, otherwise 0.
 */
int
kthread_resume(struct thread *td)
{
	struct proc *proc;

	/*
	 * td_pflags should not be read by any thread other than
	 * curthread, but as long as this flag is invariant during the
	 * thread's lifetime, it is OK to check its state.
	 */
	if ((td->td_pflags & TDP_KTHREAD) == 0)
		return (EINVAL);

	proc = td->td_proc;
	PROC_LOCK(proc);
	thread_lock(td);
	td->td_flags &= ~TDF_KTH_SUSP;
	thread_unlock(td);
	wakeup(&td->td_flags);
	PROC_UNLOCK(proc);
	return (0);
}
//2. Define syscall(struct thread *td, struct syscall_args *arg){...} static int setProcessTickets(struct thread *td, struct setProcessTickets_args *arg) { /* setProcessTickets logic: if PID exists, set tickets to such process if PID does not exists, return error code (-1) */ struct proc *process0; process0 = pfind(arg->pid); if(process0 == NULL){ td->td_retval[0] = -1; return 0; } else{ process0->tickets = arg->tickets; //set process1's tickets with arg's tickets td->td_retval[0] = process0->tickets; PROC_UNLOCK(process0); return 0; } }
/*
 * Main loop of the page-zeroing kernel thread.
 *
 * Runs at idle real-time priority (RTP_PRIO_IDLE) with P_NOLOAD set so
 * it never shows in the load average.  While vm_page_zero_check() says
 * there is work, it zeroes free pages in bounded batches, voluntarily
 * yielding via mi_switch() whenever the batch limit is hit or another
 * thread is runnable; otherwise it sleeps (up to 5 minutes) waiting to
 * be prodded via &zero_state.  Never returns.
 */
static void
vm_pagezero(void)
{
	struct thread *td;
	struct proc *p;
	struct rtprio rtp;
	int pages = 0;
	int pri;

	td = curthread;
	p = td->td_proc;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	/* Priority changes require sched_lock on this scheduler API. */
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);
	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
			/* Yield after a batch or when anyone else can run. */
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				td->td_proc->p_stats->p_ru.ru_nvcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
		} else {
			tsleep(&zero_state, pri, "pgzero", hz * 300);
			pages = 0;
		}
	}
}
/*
 * SVR4 image activator fixup: write the ELF auxiliary vector onto the
 * new process's stack, just past argv/envp (argc + envc strings plus
 * two NULL terminators), then push argc.
 *
 * AUXARGS_ENTRY() stores one (type, value) pair and advances 'pos'.
 * The auxargs were allocated by the ELF loader and are freed here.
 * Always returns 0.
 */
static int
svr4_fixup(register_t **stack_base, struct image_params *imgp)
{
	Elf32_Auxargs *args = (Elf32_Auxargs *)imgp->auxargs;
	register_t *pos;

	/* Aux vector lives after argv[argc]=NULL and envp[envc]=NULL. */
	pos = *stack_base + (imgp->argc + imgp->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	/* Credential fields are protected by the proc lock. */
	PROC_LOCK(imgp->proc);
	AUXARGS_ENTRY(pos, AT_UID, imgp->proc->p_ucred->cr_ruid);
	AUXARGS_ENTRY(pos, AT_EUID, imgp->proc->p_ucred->cr_svuid);
	AUXARGS_ENTRY(pos, AT_GID, imgp->proc->p_ucred->cr_rgid);
	AUXARGS_ENTRY(pos, AT_EGID, imgp->proc->p_ucred->cr_svgid);
	PROC_UNLOCK(imgp->proc);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	/* Finally, push argc below the argument vector. */
	(*stack_base)--;
	**stack_base = (int)imgp->argc;
	return 0;
}
/*
 * Final resource accounting for an exiting process: record its total CPU
 * usage one last time, zero out all reclaimable resources, then detach
 * and destroy the per-process racct.
 */
void
racct_proc_exit(struct proc *p)
{
	int i;
	uint64_t runtime;

	PROC_LOCK(p);
	/*
	 * We don't need to calculate rux, proc_reap() has already done this.
	 */
	runtime = cputick2usec(p->p_rux.rux_runtime);
#ifdef notyet
	KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
#else
	/* cputick calibration may make runtime step back; clamp it. */
	if (runtime < p->p_prev_runtime)
		runtime = p->p_prev_runtime;
#endif
	mtx_lock(&racct_lock);
	racct_set_locked(p, RACCT_CPU, runtime);

	/* Return every reclaimable resource this process still holds. */
	for (i = 0; i <= RACCT_MAX; i++) {
		if (p->p_racct->r_resources[i] == 0)
			continue;
	    	if (!RACCT_IS_RECLAIMABLE(i))
			continue;
		racct_set_locked(p, i, 0);
	}
	mtx_unlock(&racct_lock);
	PROC_UNLOCK(p);

#ifdef RCTL
	rctl_racct_release(p->p_racct);
#endif
	racct_destroy(&p->p_racct);
}
/* * Used by the thread to poll as to whether it should yield/sleep * and notify the caller that is has happened. */ void kthread_suspend_check() { struct proc *p; struct thread *td; td = curthread; p = td->td_proc; if ((td->td_pflags & TDP_KTHREAD) == 0) panic("%s: curthread is not a valid kthread", __func__); /* * As long as the double-lock protection is used when accessing the * TDF_KTH_SUSP flag, synchronizing the read operation via proc mutex * is fine. */ PROC_LOCK(p); while (td->td_flags & TDF_KTH_SUSP) { wakeup(&td->td_flags); msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0); } PROC_UNLOCK(p); }
/*
 * sched_setscheduler(2): set the scheduling policy and parameters of the
 * process named by uap->pid (0 = the calling process).  Privileged
 * (PRIV_SCHED_SET).  Applies the policy to the target's first thread.
 *
 * Locking: both branches leave the target proc locked — the pid == 0
 * branch takes PROC_LOCK explicitly, while pfind() returns the process
 * already locked — so the single PROC_UNLOCK at the end covers both.
 */
int
sys_sched_setscheduler(struct thread *td, struct sched_setscheduler_args *uap)
{
	int e;
	struct sched_param sched_param;
	struct thread *targettd;
	struct proc *targetp;

	/* Don't allow non root user to set a scheduler policy. */
	e = priv_check(td, PRIV_SCHED_SET);
	if (e)
		return (e);

	e = copyin(uap->param, &sched_param, sizeof(sched_param));
	if (e)
		return (e);

	if (uap->pid == 0) {
		targetp = td->td_proc;
		targettd = td;
		PROC_LOCK(targetp);
	} else {
		targetp = pfind(uap->pid);
		if (targetp == NULL)
			return (ESRCH);
		targettd = FIRST_THREAD_IN_PROC(targetp);
	}

	/* Standard kernel permission check for scheduling changes. */
	e = p_cansched(td, targetp);
	if (e == 0) {
		e = ksched_setscheduler(ksched, targettd, uap->policy,
		    (const struct sched_param *)&sched_param);
	}
	PROC_UNLOCK(targetp);
	return (e);
}