int
am7930hwintr(void *v)
{
	struct audioamd_softc *sc;
	struct auio *au;
	uint8_t *d, *e;
	int k;

	sc = v;
	au = &sc->sc_au;
	mutex_spin_enter(&sc->sc_lock);

	/* clear interrupt */
	k = audioamd_codec_dread(sc, AM7930_DREG_IR);
	if ((k & (AM7930_IR_DTTHRSH|AM7930_IR_DRTHRSH|AM7930_IR_DSRI|
	    AM7930_IR_DERI|AM7930_IR_BBUFF)) == 0) {
		mutex_spin_exit(&sc->sc_lock);
		return 0;
	}

	/* receive incoming data */
	d = au->au_rdata;
	e = au->au_rend;
	if (d && d <= e) {
		*d = audioamd_codec_dread(sc, AM7930_DREG_BBRB);
		au->au_rdata++;
		if (d == e) {
			DPRINTFN(1, ("am7930hwintr: swintr(r) requested"));
			softint_schedule(sc->sc_sicookie);
		}
	}

	/* send outgoing data */
	d = au->au_pdata;
	e = au->au_pend;
	if (d && d <= e) {
		audioamd_codec_dwrite(sc, AM7930_DREG_BBTB, *d);
		au->au_pdata++;
		if (d == e) {
			DPRINTFN(1, ("am7930hwintr: swintr(p) requested"));
			softint_schedule(sc->sc_sicookie);
		}
	}

	au->au_intrcnt.ev_count++;
	mutex_spin_exit(&sc->sc_lock);
	return 1;
}
/*ARGSUSED*/
static void
wsintr(void *arg)
{
	struct ws_softc *sc = arg;
	struct sioreg *sio = sc->sc_ctl;
	uint8_t code;
	int rr;

	rr = getsiocsr(sio);
	if ((rr & RR_RXRDY) != 0) {
		do {
			code = sio->sio_data;
			if (rr & (RR_FRAMING | RR_OVERRUN | RR_PARITY)) {
				sio->sio_cmd = WR0_ERRRST;
				continue;
			}
			sc->sc_rxq[sc->sc_rxqtail] = code;
			sc->sc_rxqtail = OMKBD_NEXTRXQ(sc->sc_rxqtail);
		} while (((rr = getsiocsr(sio)) & RR_RXRDY) != 0);
		softint_schedule(sc->sc_si);
	}
	if ((rr & RR_TXRDY) != 0)
		sio->sio_cmd = WR0_RSTPEND;	/* not capable of transmit, yet */
}
static void
vaudio_intr(void *opaque)
{
	struct vaudio_stream *st = opaque;

	softint_schedule(st->st_sih);
}
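/*
 * For context, a minimal sketch of how a cookie like st_sih above is
 * typically paired with its handler via softint(9): the hard interrupt
 * only schedules, and the established handler does the real work at
 * soft interrupt level.  The names vaudio_softintr and
 * vaudio_establish_sih, and the SOFTINT_SERIAL level, are assumptions
 * for illustration, not taken from this driver.
 */
static void
vaudio_softintr(void *opaque)
{
	struct vaudio_stream *st = opaque;

	/* ... process the completed block, wake readers/writers ... */
}

static int
vaudio_establish_sih(struct vaudio_stream *st)
{

	st->st_sih = softint_establish(SOFTINT_SERIAL | SOFTINT_MPSAFE,
	    vaudio_softintr, st);
	return (st->st_sih == NULL) ? ENOMEM : 0;
}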
/*
 * Modem change interrupt routine
 */
int
clmpcc_mdintr(void *arg)
{
	struct clmpcc_softc *sc = (struct clmpcc_softc *)arg;
	u_char mir;

	/* Modem status interrupt active? */
	mir = clmpcc_rdreg(sc, CLMPCC_REG_MIR);

	/*
	 * If we're using auto-vectored interrupts, we have to
	 * verify if the chip is generating the interrupt.
	 */
	if (sc->sc_vector_base == 0 && (mir & CLMPCC_MIR_MACT) == 0)
		return 0;

	/* Dummy read of the interrupt status register */
	(void) clmpcc_rdreg(sc, CLMPCC_REG_MISR);

	/* Retrieve current status of modem lines. */
	sc->sc_chans[mir & CLMPCC_MIR_MCN_MASK].ch_control |=
	    clmpcc_rd_msvr(sc) & CLMPCC_MSVR_CD;

	clmpcc_wrreg(sc, CLMPCC_REG_MEOIR, 0);
	softint_schedule(sc->sc_softintr_cookie);

	return 1;
}
/*
 * Our ZS chips all share a common, autovectored interrupt,
 * so we have to look at all of them on each interrupt.
 */
static int
zshard(void *arg)
{
	register struct zsc_softc *zsc;
	register int rr3, unit, rval, softreq;

	rval = 0;
	for (unit = 0; unit < zsc_cd.cd_ndevs; unit++) {
		zsc = device_lookup_private(&zsc_cd, unit);
		if (zsc == NULL)
			continue;
		zsc->zsc_intrcnt.ev_count++;
		while ((rr3 = zsc_intr_hard(zsc))) {
			rval |= rr3;
		}
		softreq = zsc->zsc_cs[0]->cs_softreq;
		softreq |= zsc->zsc_cs[1]->cs_softreq;
		if (softreq && (zssoftpending == 0)) {
			zssoftpending = 1;
			softint_schedule(zsc->sc_si);
		}
	}
	return rval;
}
/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(cc->cc_lock);

	ticks = ++cc->cc_ticks;

	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}
/*
 * dmover_done:	[back-end interface function]
 *
 *	Back-end notification that the dmover is done.
 */
void
dmover_done(struct dmover_request *dreq)
{
	struct dmover_session *dses = dreq->dreq_session;
	int s;

	s = splbio();		/* XXXLOCK */

	/* backend has removed it from its queue */
	dmover_session_remque(dses, dreq);

	/* XXXUNLOCK */

	dreq->dreq_flags |= DMOVER_REQ_DONE;
	dreq->dreq_flags &= ~DMOVER_REQ_RUNNING;
	dreq->dreq_assignment = NULL;

	if (dreq->dreq_callback != NULL) {
		mutex_enter(&dmover_completed_q_lock);
		TAILQ_INSERT_TAIL(&dmover_completed_q, dreq, dreq_dmbq);
		mutex_exit(&dmover_completed_q_lock);
		softint_schedule(dmover_completed_si);
	} else if (dreq->dreq_flags & DMOVER_REQ_WAIT)
		wakeup(dreq);

	splx(s);
}
static void
midi_rcv_asense(void *arg)
{
	struct midi_softc *sc;

	sc = arg;

	mutex_enter(sc->lock);
	if (sc->dying || !sc->isopen) {
		mutex_exit(sc->lock);
		return;
	}

	if (sc->rcv_quiescent) {
		sc->rcv_eof = 1;
		sc->rcv_quiescent = 0;
		sc->rcv_expect_asense = 0;
		cv_broadcast(&sc->rchan);
		selnotify(&sc->rsel, 0, NOTE_SUBMIT);
		if (sc->async)
			softint_schedule(sc->sih);
		mutex_exit(sc->lock);
		return;
	}

	sc->rcv_quiescent = 1;
	callout_schedule(&sc->rcv_asense_co, MIDI_RCV_ASENSE_PERIOD);
	mutex_exit(sc->lock);
}
int
spif_stcintr(void *vsc)
{
	struct spif_softc *sc = (struct spif_softc *)vsc;
	int needsoft = 0, r = 0, i;
	uint8_t ar;

	for (i = 0; i < 8; i++) {
		ar = ISTC_READ(sc, STC_RRAR) & CD180_GSVR_IMASK;
		if (ar == CD180_GSVR_RXGOOD)
			r |= spif_stcintr_rx(sc, &needsoft);
		else if (ar == CD180_GSVR_RXEXCEPTION)
			r |= spif_stcintr_rxexception(sc, &needsoft);
	}

	for (i = 0; i < 8; i++) {
		ar = ISTC_READ(sc, STC_TRAR) & CD180_GSVR_IMASK;
		if (ar == CD180_GSVR_TXDATA)
			r |= spif_stcintr_tx(sc, &needsoft);
	}

	for (i = 0; i < 8; i++) {
		ar = ISTC_READ(sc, STC_MRAR) & CD180_GSVR_IMASK;
		if (ar == CD180_GSVR_STATCHG)
			r |= spif_stcintr_mx(sc, &needsoft);
	}

	if (needsoft)
		softint_schedule(sc->sc_softih);

	return (r);
}
/*
 * This is the function where we SEND packets.
 *
 * There is no 'receive' equivalent.  A typical driver will get
 * interrupts from the hardware, and from there will inject new packets
 * into the network stack.
 *
 * Once handled, a packet must be freed.  A real driver might not be able
 * to fit all the pending packets into the hardware, and is allowed to
 * return before having sent all the packets.  It should then use the
 * if_flags flag IFF_OACTIVE to notify the upper layer.
 *
 * There are also other flags one should check, such as IFF_PAUSE.
 *
 * It is our duty to make packets available to BPF listeners.
 *
 * You should be aware that this function is called by the Ethernet layer
 * at splnet().
 *
 * When the device is opened, we have to pass the packet(s) to the
 * userland.  For that we stay in OACTIVE mode while the userland gets
 * the packets, and we send a signal to the processes waiting to read.
 *
 * wakeup(sc) is the counterpart to the tsleep call in
 * tap_dev_read, while selnotify() is used for kevent(2) and
 * poll(2) (which includes select(2)) listeners.
 */
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				return;

			ifp->if_opackets++;
			bpf_mtap(ifp, m0);

			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags |= IFF_OACTIVE;
		wakeup(sc);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			softint_schedule(sc->sc_sih);
	}
}
int
mgnsc_dmaintr(void *arg)
{
	struct siop_softc *sc = arg;
	siop_regmap_p rp;
	u_char istat;

	if (sc->sc_flags & SIOP_INTSOFF)
		return (0);	/* interrupts are not active */
	rp = sc->sc_siopp;
	istat = rp->siop_istat;
	if ((istat & (SIOP_ISTAT_SIP | SIOP_ISTAT_DIP)) == 0)
		return (0);
	/*
	 * save interrupt status, DMA status, and SCSI status 0
	 * (may need to deal with stacked interrupts?)
	 */
	sc->sc_sstat0 = rp->siop_sstat0;
	sc->sc_istat = istat;
	sc->sc_dstat = rp->siop_dstat;
	/*
	 * disable interrupts until the callback can process this
	 * interrupt.
	 */
	rp->siop_sien = 0;
	rp->siop_dien = 0;
	sc->sc_flags |= SIOP_INTDEFER | SIOP_INTSOFF;
	softint_schedule(sc->sc_siop_si);
	return (1);
}
int
bppintr(void *arg)
{
	struct bpp_softc *sc = arg;
	struct lsi64854_softc *lsi = &sc->sc_lsi64854;
	uint16_t irq;

	/* First handle any possible DMA interrupts */
	if (lsi64854_pp_intr((void *)lsi) == -1)
		sc->sc_error = 1;

	irq = bus_space_read_2(lsi->sc_bustag, lsi->sc_regs, L64854_REG_ICR);
	/* Ack all interrupts */
	bus_space_write_2(lsi->sc_bustag, lsi->sc_regs, L64854_REG_ICR,
	    irq | BPP_ALLIRQ);

	DPRINTF(("%s: %x\n", __func__, irq));
	/* Did our device interrupt? */
	if ((irq & BPP_ALLIRQ) == 0)
		return 0;

	if ((sc->sc_flags & BPP_LOCKED) != 0)
		wakeup(sc);
	else if ((sc->sc_flags & BPP_WANT) != 0) {
		sc->sc_flags &= ~BPP_WANT;
		wakeup(sc->sc_buf);
	} else {
		selnotify(&sc->sc_wsel, 0, 0);
		if (sc->sc_asyncproc != NULL)
			softint_schedule(sc->sc_sih);
	}
	return 1;
}
static int
at91usart_intr(void *arg)
{
	struct at91usart_softc *sc = arg;
	u_int csr, imr;

	// get out if interrupts are not enabled
	imr = at91usart_readreg(sc, US_IMR);
	if (!imr)
		return 0;
	// get out if pending interrupt is not enabled
	csr = at91usart_readreg(sc, US_CSR);
	DPRINTFN(6, ("%s: csr=%08X imr=%08X\n",
	    device_xname(sc->sc_dev), csr, imr));
	if (!ISSET(csr, imr))
		return 0;

	// ok, we DO have some interrupts to serve! let softint do it
	sc->sc_csr = csr;
	at91usart_writereg(sc, US_IDR, -1);

	/* Wake up the poller. */
	softint_schedule(sc->sc_si);

	// we're done for now
	return (1);
}
/*
 * The interrupt flavor acquires spl and lock once and releases at the end,
 * as it expects to write only one byte or message.  The interface convention
 * is that if hw_if->output returns 0, it has initiated transmission and the
 * completion interrupt WILL be forthcoming; if it has not returned 0, NO
 * interrupt will be forthcoming, and if it returns EINPROGRESS it wants
 * another byte right away.
 */
static int
midi_intr_out(struct midi_softc *sc)
{
	struct midi_buffer *mb;
	int error, msglen;
	MIDI_BUF_DECLARE(idx);
	MIDI_BUF_DECLARE(buf);
	int armed = 0;

	KASSERT(mutex_owned(sc->lock));

	error = 0;
	mb = &sc->outbuf;

	MIDI_BUF_CONSUMER_INIT(mb, idx);
	MIDI_BUF_CONSUMER_INIT(mb, buf);

	while (idx_cur != idx_lim) {
		if (sc->hw_if_ext) {
			error = midi_msg_out(sc, &idx_cur, &idx_lim,
			    &buf_cur, &buf_lim);
			if (!error)	/* no EINPROGRESS from extended hw_if */
				armed = 1;
			break;
		}
		/* or, lacking hw_if_ext ... */
		msglen = MB_IDX_LEN(*idx_cur);

		error = sc->hw_if->output(sc->hw_hdl, *buf_cur);
		if (error && error != EINPROGRESS)
			break;

		++buf_cur;
		MIDI_BUF_WRAP(buf);

		--msglen;
		if (msglen)
			*idx_cur = PACK_MB_IDX(MB_IDX_CAT(*idx_cur), msglen);
		else {
			++idx_cur;
			MIDI_BUF_WRAP(idx);
		}
		if (!error) {
			armed = 1;
			break;
		}
	}

	MIDI_BUF_CONSUMER_WBACK(mb, idx);
	MIDI_BUF_CONSUMER_WBACK(mb, buf);

	if (!armed) {
		sc->pbus = 0;
		callout_schedule(&sc->xmt_asense_co, MIDI_XMT_ASENSE_PERIOD);
	}

	cv_broadcast(&sc->wchan);
	selnotify(&sc->wsel, 0, NOTE_SUBMIT);
	if (sc->async)
		softint_schedule(sc->sih);

	if (error)
		DPRINTF(("midi_intr_output error %d\n", error));

	return error;
}
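/*
 * A hedged sketch of an output method that obeys the contract described
 * in the comment above midi_intr_out: return 0 once transmission is
 * started (a completion interrupt will follow), EINPROGRESS when the
 * device can absorb another byte immediately (no interrupt coming), or
 * another error when nothing was queued.  mydev_softc and the
 * mydev_tx_* helpers are hypothetical names for illustration, not part
 * of any real hw_if implementation.
 */
static int
mydev_midi_output(void *hdl, int byte)
{
	struct mydev_softc *sc = hdl;

	if (!mydev_tx_ready(sc))
		return EAGAIN;		/* nothing sent, no interrupt coming */

	mydev_tx_put(sc, (uint8_t)byte);

	if (mydev_tx_fifo_space(sc) > 0)
		return EINPROGRESS;	/* wants another byte right away */

	mydev_tx_enable_intr(sc);
	return 0;			/* completion interrupt WILL follow */
}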
static inline int
gemini_ipm_dispatch(gemini_ipm_softc_t *sc)
{
	ipm_dispatch_entry_t *disp;
	ipm_desc_t desc;
	ipmqindex_t ix_read;
	ipmqindex_t ix_write;
	int rv = 0;

	ix_read = sc->sc_rxqueue->ix_read;
	ix_write = sc->sc_rxqueue->ix_write;

	if (!ipmqisempty(ix_read, ix_write)) {
		rv = 1;
		do {
			gemini_ipm_desc_read(&desc,
			    &sc->sc_rxqueue->ipm_desc[ix_read]);
			ix_read = ipmqnext(ix_read);
			KASSERT(desc.tag != IPM_TAG_NONE);
			disp = &sc->sc_dispatch_tab[desc.tag];
#ifdef NOTYET
			softint_schedule(disp->sih);
#else
			(*disp->consume)(disp->arg, &desc);
#endif
			ix_write = sc->sc_rxqueue->ix_write;
			sc->sc_rxqueue->ix_read = ix_read;
			sc->sc_rxcount++;
		} while (!ipmqisempty(ix_read, ix_write));
	} else {
		DPRINTFN(1, ("%s: ipmqisempty %d %d\n",
		    __FUNCTION__, ix_read, ix_write));
	}

	return rv;
}
static void
ucomstart(struct tty *tp)
{
	struct ucom_softc *sc =
	    device_lookup_private(&ucom_cd, UCOMUNIT(tp->t_dev));
	struct ucom_buffer *ub;
	int s;
	u_char *data;
	int cnt;

	if (sc->sc_dying)
		return;

	s = spltty();
	if (ISSET(tp->t_state, TS_BUSY | TS_TIMEOUT | TS_TTSTOP)) {
		DPRINTFN(4, ("ucomstart: no go, state=0x%x\n", tp->t_state));
		goto out;
	}
	if (sc->sc_tx_stopped)
		goto out;

	if (!ttypull(tp))
		goto out;

	/* Grab the first contiguous region of buffer space. */
	data = tp->t_outq.c_cf;
	cnt = ndqb(&tp->t_outq, 0);

	if (cnt == 0) {
		DPRINTF(("ucomstart: cnt==0\n"));
		goto out;
	}

	ub = SIMPLEQ_FIRST(&sc->sc_obuff_free);
	KASSERT(ub != NULL);

	SIMPLEQ_REMOVE_HEAD(&sc->sc_obuff_free, ub_link);

	if (SIMPLEQ_FIRST(&sc->sc_obuff_free) == NULL)
		SET(tp->t_state, TS_BUSY);

	if (cnt > sc->sc_obufsize)
		cnt = sc->sc_obufsize;

	if (sc->sc_methods->ucom_write != NULL)
		sc->sc_methods->ucom_write(sc->sc_parent, sc->sc_portno,
		    ub->ub_data, data, &cnt);
	else
		memcpy(ub->ub_data, data, cnt);

	ub->ub_len = cnt;
	ub->ub_index = 0;

	SIMPLEQ_INSERT_TAIL(&sc->sc_obuff_full, ub, ub_link);

	softint_schedule(sc->sc_si);

out:
	splx(s);
}
static int
ld_ataraid_start_span(struct ld_softc *ld, struct buf *bp)
{
	struct ld_ataraid_softc *sc = (void *)ld;
	struct ataraid_array_info *aai = sc->sc_aai;
	struct ataraid_disk_info *adi;
	struct cbuf *cbp;
	char *addr;
	daddr_t bn;
	long bcount, rcount;
	u_int comp;

	/* Allocate component buffers. */
	addr = bp->b_data;

	/* Find the first component. */
	comp = 0;
	adi = &aai->aai_disks[comp];
	bn = bp->b_rawblkno;
	while (bn >= adi->adi_compsize) {
		bn -= adi->adi_compsize;
		adi = &aai->aai_disks[++comp];
	}

	bp->b_resid = bp->b_bcount;

	for (bcount = bp->b_bcount; bcount > 0; bcount -= rcount) {
		/*
		 * Start from the bytes still to transfer, not the
		 * original total, then clip to what fits on this
		 * component.
		 */
		rcount = bcount;
		if ((adi->adi_compsize - bn) < btodb(rcount))
			rcount = dbtob(adi->adi_compsize - bn);

		cbp = ld_ataraid_make_cbuf(sc, bp, comp, bn, addr, rcount);
		if (cbp == NULL) {
			/* Free the already allocated component buffers. */
			while ((cbp = SIMPLEQ_FIRST(&sc->sc_cbufq)) != NULL) {
				SIMPLEQ_REMOVE_HEAD(&sc->sc_cbufq, cb_q);
				CBUF_PUT(cbp);
			}
			return EAGAIN;
		}

		/*
		 * For a span, we always know we advance to the next disk,
		 * and always start at offset 0 on that disk.
		 */
		adi = &aai->aai_disks[++comp];
		bn = 0;

		SIMPLEQ_INSERT_TAIL(&sc->sc_cbufq, cbp, cb_q);
		addr += rcount;
	}

	/* Now fire off the requests. */
	softint_schedule(sc->sc_sih_cookie);

	return 0;
}
static inline void
sacom_schedrx(struct sacom_softc *sc)
{

	sc->sc_rx_ready = 1;

	/* Wake up the poller. */
	softint_schedule(sc->sc_si);
}
integrate void
sscom_schedrx(struct sscom_softc *sc)
{

	sc->sc_rx_ready = 1;

	/* Wake up the poller. */
	softint_schedule(sc->sc_si);
}
int
kbdintr(void *arg)
{
	u_char c, st;
	struct kbd_softc *sc = arg;
	struct firm_event *fe;
	int put;

	/* clear receiver error if any */
	st = mfp_get_rsr();

	c = mfp_get_udr();

	if ((st & MFP_RSR_BF) == 0)
		return 0;	/* intr caused by an err -- no char received */

	/* if not in event mode, deliver straight to ite to process key stroke */
	if (!sc->sc_event_mode) {
		kbdbuf[kbdputoff++ & KBDBUFMASK] = c;
		softint_schedule(sc->sc_softintr_cookie);
		return 0;
	}

	/*
	 * Keyboard is generating events.  Turn this keystroke into an
	 * event and put it in the queue.  If the queue is full, the
	 * keystroke is lost (sorry!).
	 */
	put = sc->sc_events.ev_put;
	fe = &sc->sc_events.ev_q[put];
	put = (put + 1) % EV_QSIZE;
	if (put == sc->sc_events.ev_get) {
		log(LOG_WARNING, "keyboard event queue overflow\n"); /* ??? */
		return 0;
	}
	fe->id = KEY_CODE(c);
	fe->value = KEY_UP(c) ? VKEY_UP : VKEY_DOWN;
	firm_gettime(fe);
	sc->sc_events.ev_put = put;
	softint_schedule(sc->sc_softintr_cookie);

	return 0;
}
/*
 * _stop() is called when an interface goes down.  It is our
 * responsibility to validate that state by clearing the
 * IFF_RUNNING flag.
 *
 * We have to wake up all the sleeping processes to have the pending
 * read requests cancelled.
 */
static void
tap_stop(struct ifnet *ifp, int disable)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	ifp->if_flags &= ~IFF_RUNNING;
	wakeup(sc);
	selnotify(&sc->sc_rsel, 0, 1);
	if (sc->sc_flags & TAP_ASYNCIO)
		softint_schedule(sc->sc_sih);
}
int
zshard(void *arg)
{
	struct zsc_softc *zsc;
	int rval;

	zsc = arg;
	rval = zsc_intr_hard(zsc);
	if (zsc->zsc_cs[0]->cs_softreq || zsc->zsc_cs[1]->cs_softreq)
		softint_schedule(zsc->zsc_si);

	return rval;
}
STATIC __inline void
gtmpsc_intr_tx(struct gtmpsc_softc *sc)
{
	gtmpsc_polltx_t *vtxp;
	uint32_t csr;
	int ix;

	/*
	 * If we've delayed a parameter change, do it now,
	 * and restart output.
	 */
	if (sc->sc_heldchange) {
		gtmpsc_loadchannelregs(sc);
		sc->sc_heldchange = 0;
		sc->sc_tbc = sc->sc_heldtbc;
		sc->sc_heldtbc = 0;
	}

	/* Clean-up TX descriptors and buffers */
	ix = sc->sc_lasttx;
	while (ix != sc->sc_nexttx) {
		vtxp = &sc->sc_poll_sdmapage->tx[ix];
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		csr = vtxp->txdesc.sdma_csr;
		if (csr & SDMA_CSR_TX_OWN) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
			    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
		    sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
		ix = (ix + 1) % GTMPSC_NTXDESC;
	}
	sc->sc_lasttx = ix;

	/* Output the next chunk of the contiguous buffer */
	gtmpsc_write(sc);
	if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
		sc->sc_tx_busy = 0;
		sc->sc_tx_done = 1;
		softint_schedule(sc->sc_si);
		sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
	}
}
/*
 * For news68k-port, we don't use autovectored interrupt.
 * We do not need to look at all of the zs chips.
 */
static int
zshard(void *arg)
{
	struct zsc_softc *zsc = arg;
	int rval;

	rval = zsc_intr_hard(zsc);

	/* We are at splzs here, so no need to lock. */
	if (zsc->zsc_cs[0]->cs_softreq || zsc->zsc_cs[1]->cs_softreq) {
		softint_schedule(zsc->zsc_softintr_cookie);
	}

	return rval;
}
static int
jsc_pckbdintr(void *vsc)
{
	struct pckbc_js_softc *jsc = vsc;

	softint_schedule(jsc->jsc_int_cookie);
	pckbcintr_hard(&jsc->jsc_pckbc);

	/*
	 * This interrupt is not shared on javastations, avoid "stray"
	 * warnings.  XXX - why do "stray interrupt" warnings happen if
	 * we don't claim the interrupt always?
	 */
	return 1;
}
void
test_callout(void *cookie)
{
	int s;

	/* Trigger soft interrupt. */
	s = splhigh();
	softint_schedule(test_sih);
	splx(s);

	mutex_enter(&test_mutex);
	test_done = 1;
	cv_broadcast(&test_cv);
	mutex_exit(&test_mutex);
}
static void
ingenic_rng_get_cb(size_t bytes_wanted, void *priv)
{
	struct ingenic_rng_softc * const sc = priv;

	mutex_spin_enter(&sc->sc_intr_lock);
	if (sc->sc_bytes_wanted == 0) {
		softint_schedule(sc->sc_sih);
	}
	if (bytes_wanted > (UINT_MAX - sc->sc_bytes_wanted)) {
		sc->sc_bytes_wanted = UINT_MAX;
	} else {
		sc->sc_bytes_wanted += bytes_wanted;
	}
	mutex_spin_exit(&sc->sc_intr_lock);
}
int
dt_intr(void *cookie)
{
	struct dt_softc *sc;
	struct dt_msg *msg, *pend;

	sc = cookie;

	switch (dt_msg_get(&sc->sc_msg, 1)) {
	case DT_GET_ERROR:
		/*
		 * Ugh! The most common occurrence of a data overrun is upon
		 * a key press and the result is a software generated "stuck
		 * key".  All I can think to do is fake an "all keys up"
		 * whenever a data overrun occurs.
		 */
		sc->sc_msg.src = dt_kbd_addr;
		sc->sc_msg.ctl = DT_CTL(1, 0, 0);
		sc->sc_msg.body[0] = DT_KBD_EMPTY;
#ifdef DIAGNOSTIC
		printf("%s: data overrun or stray interrupt\n",
		    device_xname(sc->sc_dev));
#endif
		break;

	case DT_GET_DONE:
		break;

	case DT_GET_NOTYET:
		return (1);
	}

	if ((msg = SLIST_FIRST(&sc->sc_free)) == NULL) {
		printf("%s: input overflow\n", device_xname(sc->sc_dev));
		return (1);
	}
	SLIST_REMOVE_HEAD(&sc->sc_free, chain.slist);
	memcpy(msg, &sc->sc_msg, sizeof(*msg));

	pend = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, msg, chain.simpleq);
	if (pend == NULL)
		softint_schedule(sc->sc_sih);

	return (1);
}
void
drsc_handler(void)
{
	struct siop_softc *sc = drsc_softc;
	siop_regmap_p rp;
	int istat;

	if (sc->sc_flags & SIOP_INTSOFF)
		return;		/* interrupts are not active */

	rp = sc->sc_siopp;
	istat = rp->siop_istat;
	if ((istat & (SIOP_ISTAT_SIP | SIOP_ISTAT_DIP)) == 0)
		return;

	/*
	 * save interrupt status, DMA status, and SCSI status 0
	 * (may need to deal with stacked interrupts?)
	 */
	sc->sc_sstat0 = rp->siop_sstat0;
	sc->sc_istat = istat;
	sc->sc_dstat = rp->siop_dstat;

	/*
	 * disable interrupts until the callback can process this
	 * interrupt.
	 */
#ifdef DRSC_NOCALLBACK
	(void)spl1();
	siopintr(sc);
#else
	rp->siop_sien = 0;
	rp->siop_dien = 0;
	sc->sc_flags |= SIOP_INTDEFER | SIOP_INTSOFF;
	single_inst_bclr_b(*draco_intpen, DRIRQ_SCSI);
#ifdef DEBUG
	if (*draco_intpen & DRIRQ_SCSI)
		printf("%s: intpen still 0x%x\n", device_xname(sc->sc_dev),
		    *draco_intpen);
#endif
	softint_schedule(sc->sc_siop_si);
#endif
	return;
}
static void
bcmrng_get_cb(size_t bytes_wanted, void *arg)
{
	struct bcm2835rng_softc *sc = arg;

	/*
	 * Deferring to a softint is necessary until the rnd(9) locking
	 * is fixed.
	 */
	mutex_spin_enter(&sc->sc_intr_lock);
	if (sc->sc_bytes_wanted == 0)
		softint_schedule(sc->sc_sih);
	if (bytes_wanted > (UINT_MAX - sc->sc_bytes_wanted))
		sc->sc_bytes_wanted = UINT_MAX;
	else
		sc->sc_bytes_wanted += bytes_wanted;
	mutex_spin_exit(&sc->sc_intr_lock);
}
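/*
 * For completeness, a sketch of the softint handler that callbacks like
 * the two rnd(9) examples above defer to: it drains the hardware FIFO
 * under the same spin lock and credits the bytes to the random source.
 * This is a minimal sketch, not the drivers' actual code; the helper
 * rng_read_fifo and the full-entropy accounting are assumptions for
 * illustration.
 */
static void
rng_softint(void *arg)
{
	struct bcm2835rng_softc *sc = arg;
	uint32_t buf[32];
	size_t n;

	mutex_spin_enter(&sc->sc_intr_lock);
	while (sc->sc_bytes_wanted > 0) {
		/* hypothetical helper: returns bytes actually read */
		n = rng_read_fifo(sc, buf, sizeof(buf));
		if (n == 0)
			break;
		rnd_add_data(&sc->sc_rndsource, buf, n, NBBY * n);
		sc->sc_bytes_wanted -= MIN(sc->sc_bytes_wanted, n);
	}
	explicit_memset(buf, 0, sizeof(buf));
	mutex_spin_exit(&sc->sc_intr_lock);
}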