void
userret(struct proc *p, u_int32_t pc, quad_t oticks)
{
	int sig;

	/* Take pending signals. */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;

	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	curpriority = p->p_priority;
}
static __inline void
userret(struct lwp *l, register_t pc, u_quad_t oticks)
{
	struct proc *p = l->l_proc;
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(l)) != 0)
		postsig(sig);
	l->l_priority = l->l_usrpri;

	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(0);
		while ((sig = CURSIG(l)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (l->l_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority;
}
/*
 * Same as above, but also handles writeback completion on 68040.
 */
void
wb_userret(struct proc *p, struct frame *fp)
{
	int sig;
	union sigval sv;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;

	/*
	 * Deal with user mode writebacks (from trap, or from sigreturn).
	 * If any writeback fails, go back and attempt signal delivery,
	 * unless we have already been here and attempted the writeback
	 * (e.g. bad address with user ignoring SIGSEGV).  In that case
	 * we just return to the user without successfully completing
	 * the writebacks.  Maybe we should just drop the sucker?
	 */
	if (mmutype == MMU_68040 && fp->f_format == FMT7) {
		if ((sig = writeback(fp)) != 0) {
			sv.sival_int = fp->f_fmt7.f_fa;
			trapsignal(p, sig, T_MMUFLT, SEGV_MAPERR, sv);

			while ((sig = CURSIG(p)) != 0)
				postsig(sig);
			p->p_priority = p->p_usrpri;
		}
	}

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}
static inline void
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;

	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, frame->tf_sxip & XIP_ADDR,
		    (int)(p->p_sticks - oticks) * psratio);
	}

	curpriority = p->p_priority;
}
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	caddr_t base;
	int sleepreturn = 0;

	base = mfsp->mfs_baseoff;
	while (mfsp->mfs_buflist != (struct buf *)-1) {
		while ((bp = mfsp->mfs_buflist) != NULL) {
			mfsp->mfs_buflist = bp->b_actf;
			mfs_doio(bp, base);
			wakeup((caddr_t)bp);
		}

		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp, 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
/*
 * Trap and syscall both need the following work done before returning
 * to user mode.
 */
void
userret(struct proc *p)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 */
static inline void
userret(struct proc *p, int pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;

	if (want_ast) {
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we put ourselves on the run queue
		 * but before we switched, we might not be on the queue
		 * indicated by our priority.
		 */
		(void) splstatclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		(void) spl0();
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - oticks));

	curpriority = p->p_priority;
}
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 */
static inline void
userret(struct proc *p, int pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;

	if (want_ast) {
		want_ast = 0;
		if (p->p_flag & SOWEUPC) {
			p->p_flag &= ~SOWEUPC;
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * Since we are curproc, a clock interrupt could
		 * change our priority without changing run queues
		 * (the running process is not kept on a run queue).
		 * If this happened after we setrq ourselves but
		 * before we swtch()'ed, we might not be on the queue
		 * indicated by our priority.
		 */
		(void) splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		(void) spl0();
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - oticks));

	curpri = p->p_pri;
}
/*
 * userret:
 *
 *	Common code used by various exception handlers to
 *	return to usermode.
 */
static __inline void
userret(struct proc *p)
{
	int sig;

	/* Take pending signals. */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	p->p_cpu->ci_schedstate.spc_curpriority =
	    p->p_priority = p->p_usrpri;
}
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int sleepreturn = 0;

	while (1) {
		while (1) {
			if (mfsp->mfs_shutdown == 1)
				break;
			bp = bufq_dequeue(&mfsp->mfs_bufq);
			if (bp == NULL)
				break;
			mfs_doio(mfsp, bp);
			wakeup(bp);
		}
		if (mfsp->mfs_shutdown == 1)
			break;

		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp,
			    (CURSIG(p) == SIGKILL) ? MNT_FORCE : 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
int
hammer2_signal_check(time_t *timep)
{
	int error = 0;

	lwkt_user_yield();
	if (*timep != time_second) {
		*timep = time_second;
		if (CURSIG(curthread->td_lwp) != 0)
			error = EINTR;
	}
	return error;
}
/*
 * Check for a user signal interrupting a long operation
 *
 * MPSAFE
 */
int
hammer_signal_check(hammer_mount_t hmp)
{
	int sig;

	lwkt_user_yield();
	if (++hmp->check_interrupt < 100)
		return(0);
	hmp->check_interrupt = 0;
	if ((sig = CURSIG(curthread->td_lwp)) != 0)
		return(EINTR);
	return(0);
}
/*
 * Define the code needed before returning to user mode, for
 * trap and syscall.
 */
void
userret(struct proc *p)
{
	int sig;

	/* Do any deferred user pmap operations. */
	PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
void
userret(struct proc *p, register_t pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;

	if (astpending) {
		astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority;
}
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 */
static __inline void
userret(struct proc *p)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

#ifdef notyet
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
#else
	curpriority = p->p_priority = p->p_usrpri;
#endif
}
/*
 * trap and syscall both need the following work done before returning
 * to user mode.
 */
void
userret(struct proc *p)
{
	int sig;

#ifdef MAC
	if (p->p_flag & P_MACPEND)
		mac_proc_userret(p);
#endif

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE -
			    ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return EFAULT;

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
			    (int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 && seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf,
					    c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, write are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}

		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}