Example #1
/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			seldestroy(&ds->ds_selq);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM, 0);
		}
	}
	simple_unlock(&ds->ds_slock);
}
static void
midi_rcv_asense(void *arg)
{
	struct midi_softc *sc;

	sc = arg;

	mutex_enter(sc->lock);
	if (sc->dying || !sc->isopen) {
		mutex_exit(sc->lock);
		return;
	}
	if (sc->rcv_quiescent) {
		sc->rcv_eof = 1;
		sc->rcv_quiescent = 0;
		sc->rcv_expect_asense = 0;
		cv_broadcast(&sc->rchan);
		selnotify(&sc->rsel, 0, NOTE_SUBMIT);
		if (sc->async)
			softint_schedule(sc->sih);
		mutex_exit(sc->lock);
		return;
	}	
	sc->rcv_quiescent = 1;
	callout_schedule(&sc->rcv_asense_co, MIDI_RCV_ASENSE_PERIOD);
	mutex_exit(sc->lock);
}
/*---------------------------------------------------------------------------*
 *	this routine is called from the L4 handler at connect time
 *---------------------------------------------------------------------------*/
static void
rbch_connect(void *softc, void *cdp)
{
	call_desc_t *cd = (call_desc_t *)cdp;
	struct rbch_softc *sc = softc;

	sc->sc_bprot = cd->bprot;

#if I4BRBCHACCT
	if(sc->sc_bprot == BPROT_RHDLC)
	{
		sc->sc_iinb = 0;
		sc->sc_ioutb = 0;
		sc->sc_linb = 0;
		sc->sc_loutb = 0;

		START_TIMER(sc->sc_callout, rbch_timeout, sc, I4BRBCHACCTINTVL*hz);
	}
#endif
	if(!(sc->sc_devstate & ST_CONNECTED))
	{
		NDBGL4(L4_RBCHDBG, "B channel %d at ISDN %d, wakeup",
			cd->channelid, cd->isdnif);
		sc->sc_devstate |= ST_CONNECTED;
		sc->sc_cd = cdp;
		wakeup((void *)sc);
		selnotify(&sc->selp, 0, 0);
	}
}
Example #4
int
bppintr(void *arg)
{
	struct bpp_softc *sc = arg;
	struct lsi64854_softc *lsi = &sc->sc_lsi64854;
	uint16_t irq;

	/* First handle any possible DMA interrupts */
	if (lsi64854_pp_intr((void *)lsi) == -1)
		sc->sc_error = 1;

	irq = bus_space_read_2(lsi->sc_bustag, lsi->sc_regs, L64854_REG_ICR);
	/* Ack all interrupts */
	bus_space_write_2(lsi->sc_bustag, lsi->sc_regs, L64854_REG_ICR,
	    irq | BPP_ALLIRQ);

	DPRINTF(("%s: %x\n", __func__, irq));
	/* Did our device interrupt? */
	if ((irq & BPP_ALLIRQ) == 0)
		return 0;

	if ((sc->sc_flags & BPP_LOCKED) != 0)
		wakeup(sc);
	else if ((sc->sc_flags & BPP_WANT) != 0) {
		sc->sc_flags &= ~BPP_WANT;
		wakeup(sc->sc_buf);
	} else {
		selnotify(&sc->sc_wsel, 0, 0);
		if (sc->sc_asyncproc != NULL)
			softint_schedule(sc->sc_sih);
	}
	return 1;
}
/*
 * The interrupt flavor acquires spl and lock once and releases at the end,
 * as it expects to write only one byte or message. The interface convention
 * is that if hw_if->output returns 0, it has initiated transmission and the
 * completion interrupt WILL be forthcoming; if it has not returned 0, NO
 * interrupt will be forthcoming, and if it returns EINPROGRESS it wants
 * another byte right away.
 */
static int
midi_intr_out(struct midi_softc *sc)
{
	struct midi_buffer *mb;
	int error, msglen;
	MIDI_BUF_DECLARE(idx);
	MIDI_BUF_DECLARE(buf);
	int armed = 0;

	KASSERT(mutex_owned(sc->lock));

	error = 0;
	mb = &sc->outbuf;

	MIDI_BUF_CONSUMER_INIT(mb,idx);
	MIDI_BUF_CONSUMER_INIT(mb,buf);
	
	while (idx_cur != idx_lim) {
		if (sc->hw_if_ext) {
			error = midi_msg_out(sc, &idx_cur, &idx_lim,
			    &buf_cur, &buf_lim);
			if (!error) /* no EINPROGRESS from extended hw_if */
				armed = 1;
			break;
		}
		/* or, lacking hw_if_ext ... */
		msglen = MB_IDX_LEN(*idx_cur);
		error = sc->hw_if->output(sc->hw_hdl, *buf_cur);
		if (error && error != EINPROGRESS)
			break;
		++ buf_cur;
		MIDI_BUF_WRAP(buf);
		-- msglen;
		if (msglen)
			*idx_cur = PACK_MB_IDX(MB_IDX_CAT(*idx_cur),msglen);
		else {
			++ idx_cur;
			MIDI_BUF_WRAP(idx);
		}
		if (!error) {
			armed = 1;
			break;
		}
	}
	MIDI_BUF_CONSUMER_WBACK(mb,idx);
	MIDI_BUF_CONSUMER_WBACK(mb,buf);
	if (!armed) {
		sc->pbus = 0;
		callout_schedule(&sc->xmt_asense_co, MIDI_XMT_ASENSE_PERIOD);
	}
	cv_broadcast(&sc->wchan);
	selnotify(&sc->wsel, 0, NOTE_SUBMIT);
	if (sc->async) {
		softint_schedule(sc->sih);
	}
	if (error) {
		DPRINTF(("midi_intr_output error %d\n", error));
	}
	return error;
}
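/*
 * A minimal sketch (not from the tree) of an hw_if->output routine that
 * honors the convention described above midi_intr_out().  The xyz_*
 * softc type and register helpers are hypothetical.
 */
static int
xyz_output(void *hdl, int byte)
{
	struct xyz_softc *sc = hdl;

	if (!xyz_tx_ready(sc))		/* hypothetical: transmitter full */
		return EBUSY;		/* nonzero, not EINPROGRESS: no interrupt follows */
	xyz_tx_write(sc, byte);		/* hypothetical: queue the byte */
	if (xyz_tx_ready(sc))		/* still room: want the next byte right away */
		return EINPROGRESS;	/* no interrupt follows either */
	xyz_tx_intr_enable(sc);		/* FIFO filled: completion interrupt WILL come */
	return 0;
}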
/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket buffer has the SB_ASYNC flag set.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb, int code)
{
	int band;

	KASSERT(solocked(so));
	KASSERT(sb->sb_so == so);

	if (code == POLL_IN)
		band = POLLIN|POLLRDNORM;
	else
		band = POLLOUT|POLLWRNORM;
#if 0 /* VADIM */
	sb->sb_flags &= ~SB_NOTIFY;

	selnotify(&sb->sb_sel, band, NOTE_SUBMIT);
	cv_broadcast(&sb->sb_cv);

	if (sb->sb_flags & SB_ASYNC)
		fownsignal(so->so_pgid, SIGIO, code, band, so);
#endif
	if (so->so_upcall2)
		(*so->so_upcall2)(so, so->so_upcallarg2, band, M_DONTWAIT);
	if (sb->sb_flags & SB_UPCALL)
		(*so->so_upcall)(so, so->so_upcallarg, band, M_DONTWAIT);
}
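/*
 * A hedged usage sketch, not from the tree: how a protocol input path
 * might call sowakeup() after queueing received data.  example_soinput()
 * is an assumption; sowakeup() itself must be called with the socket
 * locked, as the KASSERT above enforces.
 */
static void
example_soinput(struct socket *so, struct mbuf *m)
{
	solock(so);
	sbappendstream(&so->so_rcv, m);		/* queue the data */
	sowakeup(so, &so->so_rcv, POLL_IN);	/* readers, poll/select, SIGIO */
	sounlock(so);
}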
Example #7
void
xenevt_notify(void)
{
	struct xenevt_d *d;

	for (;;) {
		mutex_enter(&devevent_lock);
		d = STAILQ_FIRST(&devevent_pending);
		if (d == NULL) {
			mutex_exit(&devevent_lock);
			break;
		}
		STAILQ_REMOVE_HEAD(&devevent_pending, pendingq);
		d->pending = false;
		mutex_enter(&d->lock);
		if (d->flags & XENEVT_F_FREE) {
			xenevt_free(d);
			mutex_exit(&devevent_lock);
		} else {
			mutex_exit(&devevent_lock);
			selnotify(&d->sel, 0, 1);
			cv_broadcast(&d->cv);
			mutex_exit(&d->lock);
		}
	}
}
Example #8
/*
 * Place the event in the event queue and wakeup any waiting processes.
 */
static void 
aed_enqevent(adb_event_t *event)
{
	int s;

	s = splvm();

#ifdef DIAGNOSTIC
	if (aed_sc->sc_evq_tail < 0 || aed_sc->sc_evq_tail >= AED_MAX_EVENTS)
		panic("adb: event queue tail is out of bounds");

	if (aed_sc->sc_evq_len < 0 || aed_sc->sc_evq_len > AED_MAX_EVENTS)
		panic("adb: event queue len is out of bounds");
#endif

	if (aed_sc->sc_evq_len == AED_MAX_EVENTS) {
		splx(s);
		return;		/* Oh, well... */
	}
	aed_sc->sc_evq[(aed_sc->sc_evq_len + aed_sc->sc_evq_tail) %
	    AED_MAX_EVENTS] = *event;
	aed_sc->sc_evq_len++;

	selnotify(&aed_sc->sc_selinfo, 0, 0);
	if (aed_sc->sc_ioproc)
		psignal(aed_sc->sc_ioproc, SIGIO);

	splx(s);
}
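/*
 * A hedged sketch of the matching dequeue, not the driver's actual
 * reader: sc_evq_tail indexes the oldest event and sc_evq_len counts
 * queued ones, so aed_enqevent() above writes slot
 * (tail + len) % AED_MAX_EVENTS.
 */
static int
example_deqevent(adb_event_t *event)
{
	int s = splvm();

	if (aed_sc->sc_evq_len == 0) {
		splx(s);
		return 0;		/* queue empty */
	}
	*event = aed_sc->sc_evq[aed_sc->sc_evq_tail];
	aed_sc->sc_evq_tail = (aed_sc->sc_evq_tail + 1) % AED_MAX_EVENTS;
	aed_sc->sc_evq_len--;
	splx(s);
	return 1;
}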
Example #9
static void
emdtv_ir_worker(struct work *wk, void *opaque)
{
	struct emdtv_softc *sc = opaque;
	struct cir_softc *csc;
	uint8_t evt[3];
	int pos;

	if (sc->sc_cirdev == NULL || sc->sc_dying == true ||
	    sc->sc_ir_open == false)
		return;

	emdtv_read_multi_1(sc, UR_GET_STATUS, EM28XX_REG_IR, evt, sizeof(evt));

	csc = device_private(sc->sc_cirdev);

	mutex_enter(&sc->sc_ir_mutex);
	pos = (sc->sc_ir_ptr + sc->sc_ir_cnt) % EMDTV_CIR_BUFLEN;
	memcpy(&sc->sc_ir_queue[pos], evt, sizeof(evt));
	if (sc->sc_ir_cnt < EMDTV_CIR_BUFLEN - 1) {
		++sc->sc_ir_cnt;
		++csc->sc_rdframes;
	}
	selnotify(&csc->sc_rdsel, 0, 1);
	mutex_exit(&sc->sc_ir_mutex);
}
Example #10
static void
ch_event(struct ch_softc *sc, u_int event)
{

	sc->sc_events |= event;
	selnotify(&sc->sc_selq, 0, 0);
}
Example #11
/*
 * This is the function where we SEND packets.
 *
 * There is no 'receive' equivalent.  A typical driver will get
 * interrupts from the hardware, and from there will inject new packets
 * into the network stack.
 *
 * Once handled, a packet must be freed.  A real driver might not be able
 * to fit all the pending packets into the hardware, and is allowed to
 * return before having sent all the packets.  It should then use the
 * if_flags flag IFF_OACTIVE to notify the upper layer.
 *
 * There are also other flags one should check, such as IFF_PAUSE.
 *
 * It is our duty to make packets available to BPF listeners.
 *
 * You should be aware that this function is called by the Ethernet layer
 * at splnet().
 *
 * When the device is opened, we have to pass the packet(s) to the
 * userland.  For that we stay in OACTIVE mode while the userland gets
 * the packets, and we send a signal to the processes waiting to read.
 *
 * wakeup(sc) is the counterpart to the tsleep call in
 * tap_dev_read, while selnotify() is used for kevent(2) and
 * poll(2) (which includes select(2)) listeners.
 */
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for(;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				return;

			ifp->if_opackets++;
			bpf_mtap(ifp, m0);

			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags |= IFF_OACTIVE;
		wakeup(sc);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			softint_schedule(sc->sc_sih);
	}
}
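/*
 * A hedged sketch of the reader side the comment above refers to --
 * not the actual tap_dev_read.  It assumes sc_ec is the softc's
 * ethercom and omits the splnet() serialization the real driver does;
 * tap_start()'s wakeup(sc) is what ends the tsleep below.
 */
static int
example_dev_read(struct tap_softc *sc, struct uio *uio)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m, *n;
	int error = 0;

	while (IFQ_IS_EMPTY(&ifp->if_snd)) {
		error = tsleep(sc, PSOCK | PCATCH, "taprd", 0);
		if (error)
			return error;
	}

	IFQ_DEQUEUE(&ifp->if_snd, m);
	ifp->if_flags &= ~IFF_OACTIVE;	/* userland got the packet */

	for (n = m; n != NULL && error == 0; n = n->m_next)
		error = uiomove(mtod(n, void *), n->m_len, uio);
	m_freem(m);
	return error;
}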
/*---------------------------------------------------------------------------*
 *	this routine is called from the HSCX interrupt handler
 *	each time a packet is received or transmitted
 *---------------------------------------------------------------------------*/
static void
rbch_activity(void *softc, int rxtx)
{
	struct rbch_softc *sc = softc;

	if (sc->sc_cd)
		sc->sc_cd->last_active_time = SECOND;
	selnotify(&sc->selp, 0, 0);
}
/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	int	s;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	s = splnet();
	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		free(tp, M_DEVBUF);
		goto out_nolock;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					       tp->tun_flags & TUN_DSTADDR
							? RTF_HOST
							: 0);
				}
#endif
			}
		}
	}

 out_nolock:
	splx(s);
	return (0);
}
Example #14
/*
 * _stop() is called when an interface goes down.  It is our
 * responsibility to validate that state by clearing the
 * IFF_RUNNING flag.
 *
 * We have to wake up all the sleeping processes to have the pending
 * read requests cancelled.
 */
static void
tap_stop(struct ifnet *ifp, int disable)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	ifp->if_flags &= ~IFF_RUNNING;
	wakeup(sc);
	selnotify(&sc->sc_rsel, 0, 1);
	if (sc->sc_flags & TAP_ASYNCIO)
		softint_schedule(sc->sc_sih);
}
static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	int s, zombie = 0;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	s = splnet();
	simple_lock(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close */
		zombie = 1;
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
	}
	simple_unlock(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((void *)tp);
	}
	selnotify(&tp->tun_rsel, 0, 0);

	mutex_exit(&tp->tun_lock);
	splx(s);

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		free(tp, M_DEVBUF);
	}

	return (0);
}
/*---------------------------------------------------------------------------*
 *	this routine is called from the HSCX interrupt handler
 *	when the last frame has been sent out and there is no
 *	further frame (mbuf) in the tx queue.
 *---------------------------------------------------------------------------*/
static void
rbch_tx_queue_empty(void *softc)
{
	struct rbch_softc *sc = softc;

	if(sc->sc_devstate & ST_WRWAITEMPTY)
	{
		NDBGL4(L4_RBCHDBG, "(minor=%d): wakeup", sc->sc_unit);
		sc->sc_devstate &= ~ST_WRWAITEMPTY;
		wakeup((void *) &sc->sc_ilt->tx_queue);
	}
	else
	{
		NDBGL4(L4_RBCHDBG, "(minor=%d) NO wakeup", sc->sc_unit);
	}
	selnotify(&sc->selp, 0, 0);
}
Example #17
/* Returns 0 if more data required, 1 if a complete frame was extracted */
static int
deframe_rd_ur(struct udsir_softc *sc)
{

	if (sc->sc_rd_index == 0) {
		KASSERT(sc->sc_rd_count == sc->sc_rd_maxpsz);
		/* valid count */
		sc->sc_rd_count = sc->sc_rd_buf[sc->sc_rd_index++] + 1;
		KASSERT(sc->sc_rd_count < sc->sc_rd_maxpsz);
	}

	while (sc->sc_rd_index < sc->sc_rd_count) {
		uint8_t const *buf;
		size_t buflen;
		enum frameresult fresult;

		buf = &sc->sc_rd_buf[sc->sc_rd_index];
		buflen = sc->sc_rd_count - sc->sc_rd_index;

		fresult = deframe_process(&sc->sc_framestate, &buf, &buflen);

		sc->sc_rd_index = sc->sc_rd_count - buflen;

		DPRINTFN(1,("%s: result=%d\n", __func__, (int)fresult));

		switch (fresult) {
		case FR_IDLE:
		case FR_INPROGRESS:
		case FR_FRAMEBADFCS:
		case FR_FRAMEMALFORMED:
		case FR_BUFFEROVERRUN:
			break;
		case FR_FRAMEOK:
			sc->sc_ur_framelen = sc->sc_framestate.bufindex;
			wakeup(&sc->sc_ur_framelen); /* XXX should use flag */
			selnotify(&sc->sc_rd_sel, 0, 0);
			return 1;
		}
	}

	/* Reset indices into USB-side buffer */
	sc->sc_rd_index = sc->sc_rd_count = 0;

	return 0;
}
Example #18
void
ev_wakeup(struct evvar *ev)
{

	mutex_enter(ev->ev_lock);
	selnotify(&ev->ev_sel, 0, 0);
	if (ev->ev_wanted) {
		ev->ev_wanted = false;
		cv_signal(&ev->ev_cv);
	}
	mutex_exit(ev->ev_lock);

	if (ev->ev_async) {
		mutex_enter(proc_lock);
		psignal(ev->ev_io, SIGIO);
		mutex_exit(proc_lock);
	}
}
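/*
 * A hedged sketch of the wait side ev_wakeup() pairs with, not the
 * actual event-queue reader; it assumes ev_get/ev_put are the ring's
 * read and write indices.
 */
static void
example_ev_wait(struct evvar *ev)
{
	mutex_enter(ev->ev_lock);
	while (ev->ev_get == ev->ev_put) {	/* nothing queued yet */
		ev->ev_wanted = true;
		cv_wait(&ev->ev_cv, ev->ev_lock);
	}
	mutex_exit(ev->ev_lock);
}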
Example #19
/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}
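/*
 * A hedged usage sketch, not from the driver: record the event for
 * userland and fall back to in-kernel handling when nobody is
 * listening.  apm_default_action() is a hypothetical helper.
 */
static void
example_handle_event(struct apm_softc *sc, u_int type)
{
	if (apm_record_event(sc, type))
		apm_default_action(sc, type);	/* kernel must handle it */
}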
Example #20
/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
tctrl_apm_record_event(struct tctrl_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & TCTRL_APM_CTLOPEN) &&
	    (sc->sc_event_count < APM_NEVENTS)) {
		evp = &sc->sc_event_list[sc->sc_event_ptr];
		sc->sc_event_count++;
		sc->sc_event_ptr++;
		sc->sc_event_ptr %= APM_NEVENTS;
		evp->type = event_type;
		evp->index = ++tctrl_apm_evindex;
		selnotify(&sc->sc_rsel, 0, 0);
		return(sc->sc_flags & TCTRL_APM_CTLOPEN) ? 0 : 1;
	}
	return(1);
}
Example #21
/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
	 * XXX Is there any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name);

#if DIAGNOSTIC
	cprng_strong_rngtest(cprng);
#endif
}
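/*
 * A hedged sketch of the wait these notifications release, not the
 * actual read path: sleep until the generator reports full entropy.
 */
static int
example_wait_ready(struct cprng_strong *cprng)
{
	int error = 0;

	mutex_enter(&cprng->cs_lock);
	while (!cprng->cs_ready) {
		error = cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock);
		if (error)
			break;
	}
	mutex_exit(&cprng->cs_lock);
	return error;
}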
Example #22
/* XXX: userspace can leak kernel resources */
void
puffs_parkdone_poll(struct puffs_mount *pmp, struct puffs_req *preq, void *arg)
{
	struct puffs_vnmsg_poll *poll_msg = (void *)preq;
	struct puffs_node *pn = arg;
	int revents, error;

	error = checkerr(pmp, preq->preq_rv, __func__);
	if (error)
		revents = POLLERR;
	else
		revents = poll_msg->pvnr_events;

	mutex_enter(&pn->pn_mtx);
	pn->pn_revents |= revents;
	mutex_exit(&pn->pn_mtx);

	selnotify(&pn->pn_sel, revents, 0);

	puffs_releasenode(pn);
}
/*---------------------------------------------------------------------------*
 *	this routine is called from the L4 handler at disconnect time
 *---------------------------------------------------------------------------*/
static void
rbch_disconnect(void *softc, void *cdp)
{
	call_desc_t *cd = cdp;
	struct rbch_softc *sc = softc;

	int s;

	if(cd != sc->sc_cd)
	{
		NDBGL4(L4_RBCHDBG, "B channel %d at ISDN %d not active",
		    cd->channelid, cd->isdnif);
		return;
	}

	s = splnet();

	NDBGL4(L4_RBCHDBG, "B channel %d at ISDN %d disconnect",
	    cd->channelid, cd->isdnif);

	sc->sc_devstate &= ~ST_CONNECTED;

#if I4BRBCHACCT
	if (sc->sc_cd)
		i4b_l4_accounting(sc->sc_cd->cdid, ACCT_FINAL,
		    sc->sc_ioutb, sc->sc_iinb, 0, 0, sc->sc_ioutb, sc->sc_iinb);

	STOP_TIMER(sc->sc_callout, rbch_timeout, sc);
#endif

	sc->sc_cd = NULL;
	if (sc->sc_devstate & ST_RDWAITDATA)
		wakeup(&sc->sc_ilt->rx_queue);
	if (sc->sc_devstate & ST_WRWAITEMPTY)
		wakeup(&sc->sc_ilt->tx_queue);

	splx(s);

	selnotify(&sc->selp, 0, 0);
}
void
uirda_rd_cb(usbd_xfer_handle xfer, usbd_private_handle priv,
	    usbd_status status)
{
	struct uirda_softc *sc = priv;
	u_int32_t size;

	DPRINTFN(1,("%s: sc=%p\n", __func__, sc));

	if (status == USBD_CANCELLED) /* this is normal */
		return;
	if (status) {
		size = sc->sc_hdszi;
		sc->sc_rd_err = 1;
	} else {
		usbd_get_xfer_status(xfer, NULL, NULL, &size, NULL);
	}
	DPRINTFN(1,("%s: sc=%p size=%u, err=%d\n", __func__, sc, size,
		    sc->sc_rd_err));
	sc->sc_rd_count = size;
	wakeup(&sc->sc_rd_count); /* XXX should use flag */
	selnotify(&sc->sc_rd_sel, 0, 0);
}
/*---------------------------------------------------------------------------*
 *	this routine is called from the HSCX interrupt handler
 *	when a new frame (mbuf) has been received and is to be put on
 *	the rx queue.
 *---------------------------------------------------------------------------*/
static void
rbch_rx_data_rdy(void *softc)
{
	struct rbch_softc *sc = softc;

	if(sc->sc_bprot == BPROT_RHDLC)
	{
		register struct mbuf *m;

		if((m = *sc->sc_ilt->rx_mbuf) == NULL)
			return;

		m->m_pkthdr.len = m->m_len;

		if(IF_QFULL(&sc->sc_hdlcq))
		{
			NDBGL4(L4_RBCHDBG, "(minor=%d) hdlc rx queue full!", sc->sc_unit);
			m_freem(m);
		}
		else
		{
			IF_ENQUEUE(&sc->sc_hdlcq, m);
		}
	}

	if(sc->sc_devstate & ST_RDWAITDATA)
	{
		NDBGL4(L4_RBCHDBG, "(minor=%d) wakeup", sc->sc_unit);
		sc->sc_devstate &= ~ST_RDWAITDATA;
		wakeup((void *) &sc->sc_ilt->rx_queue);
	}
	else
	{
		NDBGL4(L4_RBCHDBG, "(minor=%d) NO wakeup", sc->sc_unit);
	}
	selnotify(&sc->selp, 0, 0);
}
Example #26
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
	int x;

	if(!openflag)
	{
		i4b_Dfreembuf(m);
		return;
	}

	x = splnet();

	if(IF_QFULL(&i4b_rdqueue))
	{
		struct mbuf *m1;
		IF_DEQUEUE(&i4b_rdqueue, m1);
		i4b_Dfreembuf(m1);
		NDBGL4(L4_ERR, "ERROR, queue full, removing entry!");
	}

	IF_PREPEND(&i4b_rdqueue, m);

	splx(x);

	if(readflag)
	{
		readflag = 0;
		wakeup((void *) &i4b_rdqueue);
	}

	if(selflag)
	{
		selflag = 0;
		selnotify(&select_rd_info, 0, 0);
	}
}
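/*
 * A hedged sketch of the matching read side, not the actual i4bread():
 * readflag marks a sleeper on the queue address that
 * i4bputqueue_hipri() will wake.
 */
static int
example_read_wait(void)
{
	int x, error = 0;

	x = splnet();
	while (i4b_rdqueue.ifq_len == 0) {
		readflag = 1;
		error = tsleep((void *)&i4b_rdqueue, PSOCK | PCATCH, "i4brd", 0);
		if (error)
			break;
	}
	splx(x);
	return error;
}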
Example #27
void
satlinktimeout(void *arg)
{
	struct satlink_softc *sc = arg;
	bus_size_t resid;
	int newidx;

	if ((sc->sc_flags & SATF_ISOPEN) == 0)
		return;

	/*
	 * Get the current residual count from the DMA controller
	 * and compute the satlink's index into the ring buffer.
	 */
	resid = isa_dmacount(sc->sc_ic, sc->sc_drq);
	newidx = sc->sc_bufsize - resid;
	if (newidx == sc->sc_bufsize)
		newidx = 0;

	if (newidx == sc->sc_sptr)
		goto out;

	sc->sc_sptr = newidx;

	/* Wake up anyone blocked in read... */
	if (sc->sc_flags & SATF_DATA) {
		sc->sc_flags &= ~SATF_DATA;
		wakeup(sc);
	}

	/* Wake up anyone blocked in poll... */
	selnotify(&sc->sc_selq, 0, 0);

 out:
	callout_reset(&sc->sc_ch, SATLINK_TIMEOUT, satlinktimeout, sc);
}
Example #28
static void
udsir_rd_cb(struct usbd_xfer *xfer, void * priv, usbd_status status)
{
	struct udsir_softc *sc = priv;
	uint32_t size;

	DPRINTFN(60, ("%s: sc=%p\n", __func__, sc));

	/* Read is no longer in progress */
	sc->sc_rd_readinprogress = 0;

	if (status == USBD_CANCELLED || sc->sc_closing)	/* this is normal */
		return;
	if (status) {
		size = 0;
		sc->sc_rd_err = 1;

		if (sc->sc_direction == udir_input ||
		    sc->sc_direction == udir_idle) {
			/*
			 * Receive error, probably need to clear error
			 * condition.
			 */
			sc->sc_direction = udir_stalled;
		}
	} else
		usbd_get_xfer_status(xfer, NULL, NULL, &size, NULL);

	sc->sc_rd_index = 0;
	sc->sc_rd_count = size;

	DPRINTFN(((size > 0 || sc->sc_rd_err != 0) ? 20 : 60),
		 ("%s: sc=%p size=%u, err=%d\n",
		  __func__, sc, size, sc->sc_rd_err));

#ifdef UDSIR_DEBUG
	if (udsirdebug >= 20 && size > 0)
		udsir_dumpdata(sc->sc_rd_buf, size, __func__);
#endif

	if (deframe_rd_ur(sc) == 0) {
		if (!deframe_isclear(&sc->sc_framestate) && size == 0 &&
		    sc->sc_rd_expectdataticks == 0) {
			/*
			 * Expected data, but didn't get it
			 * within expected time...
			 */
			DPRINTFN(5,("%s: incoming packet timeout\n",
				    __func__));
			deframe_clear(&sc->sc_framestate);
		} else if (size > 0) {
			/*
			 * If we also received actual data, reset the
			 * data read timeout and wake up the possibly
			 * sleeping thread...
			 */
			sc->sc_rd_expectdataticks = 2;
			wakeup(&sc->sc_thread);
		}
	}

	/*
	 * Check if incoming data has stopped, or that we cannot
	 * safely read any more data.  In the case of the latter we
	 * must switch to idle so that a write will not block...
	 */
	if (sc->sc_direction == udir_input &&
	    ((size == 0 && sc->sc_rd_expectdataticks == 0) ||
	     UDSIR_BLOCK_RX_DATA(sc))) {
		DPRINTFN(8, ("%s: idling on packet timeout, "
			     "complete frame, or no data\n", __func__));
		sc->sc_direction = udir_idle;

		/* Wake up for possible output */
		wakeup(&sc->sc_wr_buf);
		selnotify(&sc->sc_wr_sel, 0, 0);
	}
}
Example #29
static void
midi_in(void *addr, int data)
{
	struct midi_softc *sc;
	struct midi_buffer *mb;
	int i, count;
	enum fst_ret got;
	MIDI_BUF_DECLARE(idx);
	MIDI_BUF_DECLARE(buf);

	sc = addr;
	mb = &sc->inbuf;

	KASSERT(mutex_owned(sc->lock));

	if (!sc->isopen)
		return;

	if ((sc->flags & FREAD) == 0)
		return;		/* discard data if not reading */
	
sxp_again:
	do {
		got = midi_fst(&sc->rcv, data, FST_CANON);
	} while (got == FST_HUH);
	
	switch (got) {
	case FST_MORE:
	case FST_ERR:
		return;
	case FST_CHN:
	case FST_COM:
	case FST_RT:
#if NSEQUENCER > 0
		if (sc->seqopen) {
			extern void midiseq_in(struct midi_dev *,u_char *,int);
			count = sc->rcv.end - sc->rcv.pos;
			midiseq_in(sc->seq_md, sc->rcv.pos, count);
			return;
		}
#endif
		/*
		 * Pass Active Sense to the sequencer if it's open, but not to
		 * a raw reader. (Really should do something intelligent with
		 * it then, though....)
		 */
		if (got == FST_RT && MIDI_ACK == sc->rcv.pos[0]) {
			if (!sc->rcv_expect_asense) {
				sc->rcv_expect_asense = 1;
				callout_schedule(&sc->rcv_asense_co,
				    MIDI_RCV_ASENSE_PERIOD);
			}
			sc->rcv_quiescent = 0;
			sc->rcv_eof = 0;
			return;
		}
		/* FALLTHROUGH */
	/*
	 * Ultimately SysEx msgs should be offered to the sequencer also; the
	 * sequencer API addresses them - but maybe our sequencer can't handle
	 * them yet, so offer only to raw reader. (Which means, ultimately,
	 * discard them if the sequencer's open, as it's not doing reads!)
	 * -> When SysEx support is added to the sequencer, be sure to handle
	 *    FST_SXP there too.
	 */
	case FST_SYX:
	case FST_SXP:
		count = sc->rcv.end - sc->rcv.pos;
		sc->rcv_quiescent = 0;
		sc->rcv_eof = 0;
		if (0 == count)
			break;
		MIDI_BUF_PRODUCER_INIT(mb,idx);
		MIDI_BUF_PRODUCER_INIT(mb,buf);
		if (count > buf_lim - buf_cur
		     || 1 > idx_lim - idx_cur) {
			sc->rcv.bytesDiscarded.ev_count += count;
			DPRINTF(("midi_in: buffer full, discard data=0x%02x\n", 
				 sc->rcv.pos[0]));
			return;
		}
		for (i = 0; i < count; i++) {
			*buf_cur++ = sc->rcv.pos[i];
			MIDI_BUF_WRAP(buf);
		}
		*idx_cur++ = PACK_MB_IDX(got,count);
		MIDI_BUF_WRAP(idx);
		MIDI_BUF_PRODUCER_WBACK(mb,buf);
		MIDI_BUF_PRODUCER_WBACK(mb,idx);
		cv_broadcast(&sc->rchan);
		selnotify(&sc->rsel, 0, NOTE_SUBMIT);
		if (sc->async != 0)
			softint_schedule(sc->sih);
		break;
	default: /* don't #ifdef this away, gcc will say FST_HUH not handled */
		printf("midi_in: midi_fst returned %d?!\n", got);
	}
	if (FST_SXP == got)
		goto sxp_again;
}
Example #30
/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selnotify(&ds->ds_selq, POLLIN | POLLRDNORM, 0);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}