/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}
/*
 * pcu_load: load/initialize the PCU state of current LWP on current CPU.
 */
void
pcu_load(const pcu_ops_t *pcu)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci, *curci;
	lwp_t * const l = curlwp;
	uint64_t where;
	int s;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	s = splsoftclock();
	curci = curcpu();
	ci = l->l_pcu_cpu[id];

	/* Does this CPU already have our PCU state loaded? */
	if (ci == curci) {
		KASSERT(curci->ci_pcu_curlwp[id] == l);
		pcu->pcu_state_load(l, PCU_ENABLE);	/* Re-enable */
		splx(s);
		return;
	}

	/* If PCU state of this LWP is on the remote CPU - save it there. */
	if (ci) {
		splx(s);
		/* Note: there is a race; see the description at the top. */
		where = xc_unicast(XC_HIGHPRI, (xcfunc_t)pcu_cpu_op,
		    __UNCONST(pcu), (void *)(PCU_SAVE | PCU_RELEASE), ci);
		xc_wait(where);

		/* Enter IPL_SOFTCLOCK and re-fetch the current CPU. */
		s = splsoftclock();
		curci = curcpu();
	}
	KASSERT(l->l_pcu_cpu[id] == NULL);

	/* Save the PCU state on the current CPU, if there is any. */
	pcu_cpu_op(pcu, PCU_SAVE | PCU_RELEASE);
	KASSERT(curci->ci_pcu_curlwp[id] == NULL);

	/*
	 * Finally, load the state for this LWP on this CPU.  Indicate to
	 * the load function whether the PCU was used before.  Note the usage.
	 */
	pcu_do_op(pcu, l, PCU_CLAIM | PCU_ENABLE | PCU_RELOAD);
	splx(s);
}
/*
 * does not implement security features of kern_time.c:settime()
 */
void
afs_osi_SetTime(osi_timeval_t * atv)
{
#ifdef AFS_FBSD50_ENV
    printf("afs attempted to set clock; use \"afsd -nosettime\"\n");
#else
    struct timespec ts;
    struct timeval tv, delta;
    int s;

    AFS_GUNLOCK();
    s = splclock();
    microtime(&tv);
    delta = *atv;
    timevalsub(&delta, &tv);
    ts.tv_sec = atv->tv_sec;
    ts.tv_nsec = atv->tv_usec * 1000;
    set_timecounter(&ts);
    (void)splsoftclock();
    lease_updatetime(delta.tv_sec);
    splx(s);
    resettodr();
    AFS_GLOCK();
#endif
}
int
satlinkkqfilter(dev_t dev, struct knote *kn)
{
	struct satlink_softc *sc;
	struct klist *klist;
	int s;

	sc = device_lookup_private(&satlink_cd, minor(dev));

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_selq.sel_klist;
		kn->kn_fop = &satlinkread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &sc->sc_selq.sel_klist;
		kn->kn_fop = &satlink_seltrue_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	s = splsoftclock();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}
void
pcu_discard_all(lwp_t *l)
{
	const uint32_t pcu_inuse = l->l_pcu_used[PCU_USER];

	KASSERT(l == curlwp || ((l->l_flag & LW_SYSTEM) && pcu_inuse == 0));
	KASSERT(l->l_pcu_used[PCU_KERNEL] == 0);

	if (__predict_true(pcu_inuse == 0)) {
		/* PCUs are not in use. */
		return;
	}
	const int s = splsoftclock();
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		if ((pcu_inuse & (1 << id)) == 0) {
			continue;
		}
		if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
			continue;
		}
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		/*
		 * Discard the state: release the PCU without saving it,
		 * since this LWP is giving it up.
		 */
		pcu_lwp_op(pcu, l, PCU_RELEASE);
	}
	l->l_pcu_used[PCU_USER] = 0;
	splx(s);
}
static void
kdstart(struct tty *tp)
{
	int s1, s2;

	s1 = splsoftclock();
	s2 = spltty();
	if (tp->t_state & (TS_BUSY|TS_TTSTOP|TS_TIMEOUT))
		goto out;

	if (ttypull(tp)) {
		tp->t_state |= TS_BUSY;
		if ((s1 & PSR_PIL) == 0) {
			/* called at level zero - update screen now. */
			splx(s2);
			kd_putfb(tp);
			s2 = spltty();
			tp->t_state &= ~TS_BUSY;
		} else {
			/* called at interrupt level - do it later */
			callout_schedule(&tp->t_rstrt_ch, 0);
		}
	}
out:
	splx(s2);
	splx(s1);
}
static void
filt_satlinkrdetach(struct knote *kn)
{
	struct satlink_softc *sc = kn->kn_hook;
	int s;

	s = splsoftclock();
	SLIST_REMOVE(&sc->sc_selq.sel_klist, kn, knote, kn_selnext);
	splx(s);
}
static void
dmio_usrreq_fini1(struct work *wk, void *dummy)
{
	struct dmio_usrreq_state *dus = (void *)wk;
	int s;

	KASSERT(wk == &dus->dus_work);

	uvmspace_free(dus->dus_vmspace);
	s = splsoftclock();
	pool_put(&dmio_usrreq_state_pool, dus);
	splx(s);
}
static int
pmtimer_suspend(device_t dev)
{
	int pl;

	pl = splsoftclock();
	microtime(&diff_time);
	inittodr(0);
	microtime(&suspend_time);
	timevalsub(&diff_time, &suspend_time);
	splx(pl);
	return (0);
}
/*
 * pcu_lwp_op: perform PCU state save, release or both operations on LWP.
 */
static void
pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *l, const int flags)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci;
	uint64_t where;
	int s;

	/*
	 * Caller should have re-checked if there is any state to manage.
	 * Block the interrupts and inspect again, since a cross-call sent
	 * by a remote CPU could have changed the state.
	 */
	s = splsoftclock();
	ci = l->l_pcu_cpu[id];
	if (ci == curcpu()) {
		/*
		 * State is on the current CPU - just perform the operations.
		 */
		KASSERT((flags & PCU_CLAIM) == 0);
		KASSERTMSG(ci->ci_pcu_curlwp[id] == l,
		    "%s: cpu%u: pcu_curlwp[%u] (%p) != l (%p)",
		    __func__, cpu_index(ci), id, ci->ci_pcu_curlwp[id], l);
		pcu_do_op(pcu, l, flags);
		splx(s);
		return;
	}

	if (__predict_false(ci == NULL)) {
		if (flags & PCU_CLAIM) {
			pcu_do_op(pcu, l, flags);
		}
		/* Cross-call has won the race - no state to manage. */
		splx(s);
		return;
	}
	splx(s);

	/*
	 * State is on the remote CPU - perform the operations there.
	 * Note: there is a race condition; see the description at the top.
	 */
	where = xc_unicast(XC_HIGHPRI, (xcfunc_t)pcu_cpu_op,
	    __UNCONST(pcu), (void *)(uintptr_t)flags, ci);
	xc_wait(where);

	KASSERT((flags & PCU_RELEASE) == 0 || l->l_pcu_cpu[id] == NULL);
}
int
satlinkclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct satlink_softc *sc;
	int s;

	sc = device_lookup_private(&satlink_cd, minor(dev));

	s = splsoftclock();
	sc->sc_flags &= ~SATF_ISOPEN;
	splx(s);

	isa_dmaabort(sc->sc_ic, sc->sc_drq);
	callout_stop(&sc->sc_ch);

	return (0);
}
/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		seldestroy(&ds->ds_selq);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}
static int
pmtimer_resume(device_t dev)
{
	int pl;
	u_int second, minute, hour;
	struct timeval resume_time, tmp_time;

	/* modified for adjkerntz */
	pl = splsoftclock();
	timer_restore();		/* restore all the timers */
	inittodr(0);			/* adjust time to RTC */
	microtime(&resume_time);
	getmicrotime(&tmp_time);
	timevaladd(&tmp_time, &diff_time);

#ifdef FIXME
	/* XXX THIS DOESN'T WORK!!! */
	time = tmp_time;
#endif

#ifdef PMTIMER_FIXUP_CALLTODO
	/* Calculate the delta time suspended */
	timevalsub(&resume_time, &suspend_time);
	/* Fixup the calltodo list with the delta time. */
	adjust_timeout_calltodo(&resume_time);
#endif /* PMTIMER_FIXUP_CALLTODO */
	splx(pl);

#ifndef PMTIMER_FIXUP_CALLTODO
	second = resume_time.tv_sec - suspend_time.tv_sec;
#else /* PMTIMER_FIXUP_CALLTODO */
	/*
	 * We've already calculated resume_time to be the delta between
	 * the suspend and the resume.
	 */
	second = resume_time.tv_sec;
#endif /* PMTIMER_FIXUP_CALLTODO */

	hour = second / 3600;
	second %= 3600;
	minute = second / 60;
	second %= 60;
	log(LOG_NOTICE, "wakeup from sleeping state (slept %02d:%02d:%02d)\n",
	    hour, minute, second);
	return (0);
}
/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if there are fewer than DMIO_NREQS_MAX requests
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(curlwp, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}
/*
 * apm_thread() calls this routine approximately once per second.
 */
int
zapm_get_event(struct pxa2x0_apm_softc *pxa_sc, u_int *typep)
{
	struct zapm_softc *sc = (struct zapm_softc *)pxa_sc;
	int s;

	s = splsoftclock();

	/* Don't interfere with discharging. */
	if (sc->sc_discharging)
		*typep = sc->sc_event;
	else if (sc->sc_event == APM_NOEVENT) {
		zapm_poll(sc);
		*typep = sc->sc_event;
	}

	sc->sc_event = APM_NOEVENT;

	splx(s);

	return (*typep == APM_NOEVENT);
}
void
pcu_save_all(lwp_t *l)
{
	const uint32_t pcu_inuse = l->l_pcu_used[PCU_USER];
	/*
	 * Unless LW_WCORE, we aren't releasing since this LWP isn't giving
	 * up PCU, just saving it.
	 */
	const int flags = PCU_SAVE | (l->l_flag & LW_WCORE ? PCU_RELEASE : 0);

	/*
	 * Normally we save for the current LWP, but sometimes we get called
	 * with a different LWP (forking a system LWP or doing a coredump of
	 * a process with multiple threads) and we need to deal with that.
	 */
	KASSERT(l == curlwp || (((l->l_flag & LW_SYSTEM) ||
	    (curlwp->l_proc == l->l_proc && l->l_stat == LSSUSPENDED)) &&
	    pcu_inuse == 0));
	KASSERT(l->l_pcu_used[PCU_KERNEL] == 0);

	if (__predict_true(pcu_inuse == 0)) {
		/* PCUs are not in use. */
		return;
	}
	const int s = splsoftclock();
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		if ((pcu_inuse & (1 << id)) == 0) {
			continue;
		}
		if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
			continue;
		}
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		pcu_lwp_op(pcu, l, flags);
	}
	splx(s);
}
void
hypcnstart(struct tty *tp)
{
	spl_t o_pri;
	int ch;
	unsigned char c;

	if (tp->t_state & TS_TTSTOP)
		return;
	while (1) {
		tp->t_state &= ~TS_BUSY;
		if (tp->t_state & TS_TTSTOP)
			break;
		if ((tp->t_outq.c_cc <= 0) || (ch = getc(&tp->t_outq)) == -1)
			break;
		c = ch;
		o_pri = splsoftclock();
		hypputc(c);
		splx(o_pri);
	}
	if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
		tt_write_wakeup(tp);
	}
}
/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* falloc() will use the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	simple_lock_init(&ds->ds_slock);
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);
	selinit(&ds->ds_selq);

	return fd_clone(fp, fd, flag, &dmio_fileops, ds);
}
/*
 * Return power status to the generic APM driver.
 */
void
zapm_power_info(struct pxa2x0_apm_softc *pxa_sc,
    struct apm_power_info *power)
{
	struct zapm_softc *sc = (struct zapm_softc *)pxa_sc;
	int s;
	int ac_on;
	int volt;
	int charging;

	s = splsoftclock();
	ac_on = sc->sc_ac_on;
	volt = sc->sc_batt_volt;
	charging = sc->sc_charging;
	splx(s);

	power->ac_state = ac_on ? APM_AC_ON : APM_AC_OFF;
	if (charging)
		power->battery_state = APM_BATT_CHARGING;
	else
		power->battery_state = zapm_batt_state(volt);

	power->battery_life = zapm_batt_life(volt);
	power->minutes_left = zapm_batt_minutes(power->battery_life);
}
int
satlinkpoll(dev_t dev, int events, struct lwp *l)
{
	struct satlink_softc *sc;
	int s, revents;

	sc = device_lookup_private(&satlink_cd, minor(dev));

	revents = events & (POLLOUT | POLLWRNORM);

	/* Attempt to save some work. */
	if ((events & (POLLIN | POLLRDNORM)) == 0)
		return (revents);

	/* We're timeout-driven, so must block the clock. */
	s = splsoftclock();
	if (sc->sc_uptr != sc->sc_sptr)
		revents |= events & (POLLIN | POLLRDNORM);
	else
		selrecord(l, &sc->sc_selq);
	splx(s);

	return (revents);
}
static void
slugled_shutdown(void *arg)
{
	struct slugled_softc *sc = arg;
	uint32_t reg;
	int s;

	ixp425_intr_disestablish(sc->sc_usb0_ih);
	ixp425_intr_disestablish(sc->sc_usb1_ih);
	ixp425_intr_disestablish(sc->sc_tmr_ih);

	/* Cancel the callouts */
	s = splsoftclock();
	callout_stop(&sc->sc_usb0);
	callout_stop(&sc->sc_usb1);
	splx(s);

	/* Turn off the disk LEDs, and set Ready/Status to amber */
	s = splhigh();
	reg = GPIO_CONF_READ_4(ixp425_softc, IXP425_GPIO_GPOUTR);
	reg |= LEDBITS_USB0 | LEDBITS_USB1 | LEDBITS_STATUS | LEDBITS_READY;
	GPIO_CONF_WRITE_4(ixp425_softc, IXP425_GPIO_GPOUTR, reg);
	splx(s);
}
int
parrw(dev_t dev, struct uio *uio)
{
	int unit = UNIT(dev);
	struct par_softc *sc = device_lookup_private(&par_cd, unit);
	int len = 0xdeadbeef;	/* XXX: shutup gcc */
	int s, cnt = 0;
	char *cp;
	int error = 0;
	int buflen;
	char *buf;

	if (!!(sc->sc_flags & PARF_OREAD) ^ (uio->uio_rw == UIO_READ))
		return EINVAL;

	if (uio->uio_resid == 0)
		return (0);

	buflen = min(sc->sc_burst, uio->uio_resid);
	buf = (char *)malloc(buflen, M_DEVBUF, M_WAITOK);
	sc->sc_flags |= PARF_UIO;
	if (sc->sc_timo > 0) {
		sc->sc_flags |= PARF_TIMO;
		callout_reset(&sc->sc_timo_ch, sc->sc_timo, partimo, sc);
	}
	while (uio->uio_resid > 0) {
		len = min(buflen, uio->uio_resid);
		cp = buf;
		if (uio->uio_rw == UIO_WRITE) {
			error = uiomove(cp, len, uio);
			if (error)
				break;
		}
again:
		s = splsoftclock();
		/*
		 * Check if we timed out during sleep or uiomove
		 */
		if ((sc->sc_flags & PARF_UIO) == 0) {
#ifdef DEBUG
			if (pardebug & PDB_IO)
				printf("parrw: uiomove/sleep timo, flags %x\n",
				    sc->sc_flags);
#endif
			if (sc->sc_flags & PARF_TIMO) {
				callout_stop(&sc->sc_timo_ch);
				sc->sc_flags &= ~PARF_TIMO;
			}
			splx(s);
			break;
		}
		splx(s);
		/*
		 * Perform the operation
		 */
		cnt = parsend(sc, cp, len);
		if (cnt < 0) {
			error = -cnt;
			break;
		}

		s = splsoftclock();
		/*
		 * Operation timeout (or non-blocking), quit now.
		 */
		if ((sc->sc_flags & PARF_UIO) == 0) {
#ifdef DEBUG
			if (pardebug & PDB_IO)
				printf("parrw: timeout/done\n");
#endif
			splx(s);
			break;
		}
		/*
		 * Implement inter-read delay
		 */
		if (sc->sc_delay > 0) {
			sc->sc_flags |= PARF_DELAY;
			callout_reset(&sc->sc_start_ch, sc->sc_delay,
			    parstart, sc);
			error = tsleep(sc, PCATCH|(PZERO-1), "par-cdelay", 0);
			if (error) {
				splx(s);
				break;
			}
		}
		splx(s);
		/*
		 * Must not call uiomove again til we've used all data
		 * that we already grabbed.
		 */
		if (uio->uio_rw == UIO_WRITE && cnt != len) {
			cp += cnt;
			len -= cnt;
			cnt = 0;
			goto again;
		}
	}
	s = splsoftclock();
	if (sc->sc_flags & PARF_TIMO) {
		callout_stop(&sc->sc_timo_ch);
		sc->sc_flags &= ~PARF_TIMO;
	}
	if (sc->sc_flags & PARF_DELAY) {
		callout_stop(&sc->sc_start_ch);
		sc->sc_flags &= ~PARF_DELAY;
	}
	splx(s);
	/*
	 * Adjust for those chars that we uiomove'ed but never wrote
	 */
	/*
	 * XXXjdolecek: this len usage is wrong, this will be incorrect
	 * if the transfer size is longer than sc_burst
	 */
	if (uio->uio_rw == UIO_WRITE && cnt != len) {
		uio->uio_resid += (len - cnt);
#ifdef DEBUG
		if (pardebug & PDB_IO)
			printf("parrw: short write, adjust by %d\n",
			    len - cnt);
#endif
	}
	free(buf, M_DEVBUF);
#ifdef DEBUG
	if (pardebug & (PDB_FOLLOW|PDB_IO))
		printf("parrw: return %d, resid %d\n", error, uio->uio_resid);
#endif
	return (error);
}
int
mbpp_rw(dev_t dev, struct uio *uio, int flag)
{
	int card = MAGMA_CARD(dev);
	int port = MAGMA_PORT(dev);
	struct mbpp_softc *ms = device_lookup_private(&mbpp_cd, card);
	struct mbpp_port *mp = &ms->ms_port[port];
	char *buffer, *ptr;
	int buflen, cnt, len;
	int s, error = 0;
	int gotdata = 0;

	if( uio->uio_resid == 0 )
		return(0);

	buflen = min(uio->uio_resid, mp->mp_burst);
	buffer = malloc(buflen, M_DEVBUF, M_WAITOK);
	if( buffer == NULL )
		return(ENOMEM);

	SET(mp->mp_flags, MBPPF_UIO);

	/*
	 * start timeout, if needed
	 */
	if( mp->mp_timeout > 0 ) {
		SET(mp->mp_flags, MBPPF_TIMEOUT);
		callout_reset(&mp->mp_timeout_ch, mp->mp_timeout,
		    mbpp_timeout, mp);
	}

	len = cnt = 0;
	while( uio->uio_resid > 0 ) {
		len = min(buflen, uio->uio_resid);
		ptr = buffer;

		if( uio->uio_rw == UIO_WRITE ) {
			error = uiomove(ptr, len, uio);
			if( error )
				break;
		}
again:		/* goto bad */
		/* timed out? */
		if( !ISSET(mp->mp_flags, MBPPF_UIO) )
			break;

		/*
		 * perform the operation
		 */
		if( uio->uio_rw == UIO_WRITE ) {
			cnt = mbpp_send(mp, ptr, len);
		} else {
			cnt = mbpp_recv(mp, ptr, len);
		}

		if( uio->uio_rw == UIO_READ ) {
			if( cnt ) {
				error = uiomove(ptr, cnt, uio);
				if( error )
					break;
				gotdata++;
			}
			else if( gotdata )	/* consider us done */
				break;
		}

		/* timed out? */
		if( !ISSET(mp->mp_flags, MBPPF_UIO) )
			break;

		/*
		 * poll delay?
		 */
		if( mp->mp_delay > 0 ) {
			s = splsoftclock();
			SET(mp->mp_flags, MBPPF_DELAY);
			callout_reset(&mp->mp_start_ch, mp->mp_delay,
			    mbpp_start, mp);
			error = tsleep(mp, PCATCH | PZERO, "mbppdelay", 0);
			splx(s);
			if( error )
				break;
		}

		/*
		 * don't call uiomove again until we used all the data we grabbed
		 */
		if( uio->uio_rw == UIO_WRITE && cnt != len ) {
			ptr += cnt;
			len -= cnt;
			cnt = 0;
			goto again;
		}
	}

	/*
	 * clear timeouts
	 */
	s = splsoftclock();
	if( ISSET(mp->mp_flags, MBPPF_TIMEOUT) ) {
		callout_stop(&mp->mp_timeout_ch);
		CLR(mp->mp_flags, MBPPF_TIMEOUT);
	}
	if( ISSET(mp->mp_flags, MBPPF_DELAY) ) {
		callout_stop(&mp->mp_start_ch);
		CLR(mp->mp_flags, MBPPF_DELAY);
	}
	splx(s);

	/*
	 * adjust for those chars that we uiomoved but never actually wrote
	 */
	if( uio->uio_rw == UIO_WRITE && cnt != len ) {
		uio->uio_resid += (len - cnt);
	}

	free(buffer, M_DEVBUF);
	return(error);
}
/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}
int
parrw(dev_t dev, register struct uio *uio)
{
	int unit = UNIT(dev);
	register struct par_softc *sc = getparsp(unit);
	register int s, len, cnt;
	register char *cp;
	int error = 0, gotdata = 0;
	int buflen;
	char *buf;

	len = 0;
	cnt = 0;
	if (!!(sc->sc_flags & PARF_OREAD) ^ (uio->uio_rw == UIO_READ))
		return EINVAL;

	if (uio->uio_resid == 0)
		return (0);

#ifdef DEBUG
	if (pardebug & (PDB_FOLLOW|PDB_IO))
		printf("parrw(%llx, %p, %c): burst %d, timo %d, resid %x\n",
		    dev, uio, uio->uio_rw == UIO_READ ? 'R' : 'W',
		    sc->sc_burst, sc->sc_timo, uio->uio_resid);
#endif
	buflen = min(sc->sc_burst, uio->uio_resid);
	buf = (char *)malloc(buflen, M_DEVBUF, M_WAITOK);
	sc->sc_flags |= PARF_UIO;
	if (sc->sc_timo > 0) {
		sc->sc_flags |= PARF_TIMO;
		callout_reset(&sc->sc_timo_ch, sc->sc_timo, partimo, sc);
	}
	while (uio->uio_resid > 0) {
		len = min(buflen, uio->uio_resid);
		cp = buf;
		if (uio->uio_rw == UIO_WRITE) {
			error = uiomove(cp, len, uio);
			if (error)
				break;
		}
again:
#if 0
		if ((sc->sc_flags & PARF_UIO) && hpibreq(&sc->sc_dq) == 0)
			sleep(sc, PRIBIO+1);
#endif
		/*
		 * Check if we timed out during sleep or uiomove
		 */
		s = splsoftclock();
		if ((sc->sc_flags & PARF_UIO) == 0) {
#ifdef DEBUG
			if (pardebug & PDB_IO)
				printf("parrw: uiomove/sleep timo, flags %x\n",
				    sc->sc_flags);
#endif
			if (sc->sc_flags & PARF_TIMO) {
				callout_stop(&sc->sc_timo_ch);
				sc->sc_flags &= ~PARF_TIMO;
			}
			splx(s);
			break;
		}
		splx(s);
		/*
		 * Perform the operation
		 */
		if (uio->uio_rw == UIO_WRITE)
			cnt = parsend(cp, len);
		else
			cnt = parreceive(cp, len);

		if (cnt < 0) {
			error = -cnt;
			break;
		}

		s = splbio();
#if 0
		hpibfree(&sc->sc_dq);
#endif
#ifdef DEBUG
		if (pardebug & PDB_IO)
			printf("parrw: %s(%p, %d) -> %d\n",
			    uio->uio_rw == UIO_READ ? "recv" : "send",
			    cp, len, cnt);
#endif
		splx(s);
		if (uio->uio_rw == UIO_READ) {
			if (cnt) {
				error = uiomove(cp, cnt, uio);
				if (error)
					break;
				gotdata++;
			}
			/*
			 * Didn't get anything this time, but did in the past.
			 * Consider us done.
			 */
			else if (gotdata)
				break;
		}
		s = splsoftclock();
		/*
		 * Operation timeout (or non-blocking), quit now.
		 */
		if ((sc->sc_flags & PARF_UIO) == 0) {
#ifdef DEBUG
			if (pardebug & PDB_IO)
				printf("parrw: timeout/done\n");
#endif
			splx(s);
			break;
		}
		/*
		 * Implement inter-read delay
		 */
		if (sc->sc_delay > 0) {
			sc->sc_flags |= PARF_DELAY;
			callout_reset(&sc->sc_start_ch, sc->sc_delay,
			    parstart, sc);
			error = tsleep(sc, PCATCH | (PZERO - 1),
			    "par-cdelay", 0);
			if (error) {
				splx(s);
				break;
			}
		}
		splx(s);
		/*
		 * Must not call uiomove again til we've used all data
		 * that we already grabbed.
		 */
		if (uio->uio_rw == UIO_WRITE && cnt != len) {
			cp += cnt;
			len -= cnt;
			cnt = 0;
			goto again;
		}
	}
	s = splsoftclock();
	if (sc->sc_flags & PARF_TIMO) {
		callout_stop(&sc->sc_timo_ch);
		sc->sc_flags &= ~PARF_TIMO;
	}
	if (sc->sc_flags & PARF_DELAY) {
		callout_stop(&sc->sc_start_ch);
		sc->sc_flags &= ~PARF_DELAY;
	}
	splx(s);
	/*
	 * Adjust for those chars that we uiomove'ed but never wrote
	 */
	if (uio->uio_rw == UIO_WRITE && cnt != len) {
		uio->uio_resid += (len - cnt);
#ifdef DEBUG
		if (pardebug & PDB_IO)
			printf("parrw: short write, adjust by %d\n",
			    len - cnt);
#endif
	}
	free(buf, M_DEVBUF);
#ifdef DEBUG
	if (pardebug & (PDB_FOLLOW|PDB_IO))
		printf("parrw: return %d, resid %d\n", error, uio->uio_resid);
#endif
	return (error);
}
int
satlinkread(dev_t dev, struct uio *uio, int flags)
{
	struct satlink_softc *sc;
	int error, s, count, sptr;
	int wrapcnt, oresid;

	sc = device_lookup_private(&satlink_cd, minor(dev));

	s = splsoftclock();

	/* Wait for data to be available. */
	while (sc->sc_sptr == sc->sc_uptr) {
		if (flags & O_NONBLOCK) {
			splx(s);
			return (EWOULDBLOCK);
		}
		sc->sc_flags |= SATF_DATA;
		if ((error = tsleep(sc, TTIPRI | PCATCH, "satio", 0)) != 0) {
			splx(s);
			return (error);
		}
	}

	sptr = sc->sc_sptr;
	splx(s);

	/* Compute number of readable bytes. */
	if (sptr > sc->sc_uptr)
		count = sptr - sc->sc_uptr;
	else
		count = sc->sc_bufsize - sc->sc_uptr + sptr;

	if (count > uio->uio_resid)
		count = uio->uio_resid;

	/* Send data out to user. */
	if (sptr > sc->sc_uptr) {
		/*
		 * Easy case - no wrap-around.
		 */
		error = uiomove((char *)sc->sc_buf + sc->sc_uptr, count, uio);
		if (error == 0) {
			sc->sc_uptr += count;
			if (sc->sc_uptr == sc->sc_bufsize)
				sc->sc_uptr = 0;
		}
		return (error);
	}

	/*
	 * We wrap around.  Copy to the end of the ring...
	 */
	wrapcnt = sc->sc_bufsize - sc->sc_uptr;
	oresid = uio->uio_resid;
	if (wrapcnt > uio->uio_resid)
		wrapcnt = uio->uio_resid;

	error = uiomove((char *)sc->sc_buf + sc->sc_uptr, wrapcnt, uio);
	sc->sc_uptr = 0;
	if (error != 0 || wrapcnt == oresid)
		return (error);

	/* ...and the rest. */
	count -= wrapcnt;
	error = uiomove(sc->sc_buf, count, uio);
	sc->sc_uptr += count;
	if (sc->sc_uptr == sc->sc_bufsize)
		sc->sc_uptr = 0;

	return (error);
}
/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}

			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selnotify(&ds->ds_selq,
				    POLLIN | POLLRDNORM, 0);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}