Example #1
/*
 * Wake up processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	int ret;

	SOCKBUF_LOCK_ASSERT(sb);

	selwakeuppri(sb->sb_sel, PSOCK);
	if (!SEL_WAITING(sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sb->sb_sel->si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		sowakeup_aio(so, sb);
	SOCKBUF_UNLOCK(sb);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}
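
For context, the caller side of this locking contract can be sketched after FreeBSD's sorwakeup_locked() macro: the caller takes the buffer lock itself, tests whether any wakeup is needed, and either hands the still-held lock to sowakeup() or drops it directly. The helper name below is hypothetical.

/*
 * Minimal caller sketch, modeled on FreeBSD's sorwakeup_locked() macro.
 * sb_notify() tests the SB_WAIT/SB_SEL/SB_ASYNC/upcall conditions under
 * the lock; sowakeup() releases the lock on every path.
 */
static void
example_rcv_wakeup(struct socket *so)
{
	SOCKBUF_LOCK(&so->so_rcv);
	if (sb_notify(&so->so_rcv))
		sowakeup(so, &so->so_rcv);	/* lock released inside */
	else
		SOCKBUF_UNLOCK(&so->so_rcv);
}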
Example #2
void
ofp_sowakeup(struct socket *so, struct sockbuf *sb)
{
	(void)so;

	SOCKBUF_LOCK_ASSERT(sb);

	/*HJo selwakeuppri(&sb->sb_sel, PSOCK);*/
	ofp_wakeup(NULL);
#if 0
	if (!SEL_WAITING(&sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
#endif

	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;	/* clear before waking, as in the BSD original */
		ofp_wakeup(&sb->sb_cc);
	}
#if 0
	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_DONTWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("OFP_SO_SND upcall returned SU_ISCONNECTED"));
			ofp_soupcall_clear(so, OFP_SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		aio_swake(so, sb);
#endif

	SOCKBUF_UNLOCK(sb);
#if 0
	if (ret == SU_ISCONNECTED)
		ofp_soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
#endif
}
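
The waiter this pairs with can be sketched after FreeBSD's sbwait(): set SB_WAIT under the buffer lock and sleep on &sb->sb_cc, the same channel ofp_sowakeup() passes to ofp_wakeup(). This is a kernel-style sketch; OFP's actual sleep primitive may differ.

/*
 * Waiter-side sketch modeled on FreeBSD's sbwait(); OFP's real
 * primitive may differ.  Sleeps on &sb->sb_cc, the channel used by
 * ofp_sowakeup() above.
 */
static int
example_sbwait(struct sockbuf *sb)
{
	SOCKBUF_LOCK_ASSERT(sb);
	sb->sb_flags |= SB_WAIT;
	return (msleep(&sb->sb_cc, SOCKBUF_MTX(sb), PSOCK | PCATCH,
	    "sbwait", sb->sb_timeo));
}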
Example #3
/*
 * Interrupt handler.  Acknowledge the periodic interrupt (pulsing the
 * watchdog on the 145/2145 models), ack and flag any completed DMA
 * transfers (legacy PLX boards use DMA0 for TX and DMA1 for RX; newer
 * boards swap the channels), acknowledge the RX/TX status interrupt
 * bits, wake select/poll waiters, and defer the rest to a taskqueue.
 */
static void
dektec_intr (void *parameter)
{
	struct dektec_sc *sc = parameter;

	uint32_t status;

	if (dta1xx_gen_status_reg_get_per_int (sc->dta_base_bt, sc->dta_base_bh, sc->gen_base)) {
		dta1xx_gen_status_reg_clr_per_int (sc->dta_base_bt, sc->dta_base_bh, sc->gen_base);

		if (sc->model == BOARD_MODEL_145 || sc->model == BOARD_MODEL_2145)
			dta1xx_gen_pulse_watchdog (sc->dta_base_bt, sc->dta_base_bh, sc->gen_base);
	}

	/* FIXME use PCI905X_INTCSR_DMA0_INTACT / PCI905X_INTCSR_DMA1_INTACT */

	if (sc->legacy_plx) {
		/* DMA0 is used for writing */
		status = bus_space_read_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA0_COMMAND_STAT);

		if ((status & PCI905X_DMACSR_DONE) == PCI905X_DMACSR_DONE) {
			if ((sc->tx_buffer.flags & DMA_BUSY) == DMA_BUSY) {
				bus_space_write_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA0_COMMAND_STAT,
						   PCI905X_DMACSR_ENABLE |
						   PCI905X_DMACSR_CLEARINT);

				bus_space_read_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA0_COMMAND_STAT);

				mtx_lock_spin (&sc->tx_buffer.buffer_mtx);

				sc->tx_buffer.flags &= ~DMA_BUSY;
				sc->tx_buffer.flags |= DMA_COMPLETE;

				mtx_unlock_spin (&sc->tx_buffer.buffer_mtx);
			}
		}

		/* DMA1 is used for reading */
		status = bus_space_read_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA1_COMMAND_STAT);

		if ((status & PCI905X_DMACSR_DONE) == PCI905X_DMACSR_DONE) {
			if ((sc->rx_buffer.flags & DMA_BUSY) == DMA_BUSY) {
				bus_space_write_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA1_COMMAND_STAT,
						   PCI905X_DMACSR_ENABLE |
						   PCI905X_DMACSR_CLEARINT);

				bus_space_read_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA1_COMMAND_STAT);

				mtx_lock_spin (&sc->rx_buffer.buffer_mtx);

				sc->rx_buffer.flags &= ~DMA_BUSY;
				sc->rx_buffer.flags |= DMA_COMPLETE;

				mtx_unlock_spin (&sc->rx_buffer.buffer_mtx);
			}
		}
	} else {
		/* DMA1 is used for writing */
		status = bus_space_read_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base1 + REG_CMD_STAT);

		/* PCI905X_DMACSR_DONE */

		if ((status & DTA1XX_DMACSR_INTACT) != 0) {
			bus_space_write_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base1 + REG_CMD_STAT,
					   PCI905X_DMACSR_ENABLE |
					   PCI905X_DMACSR_CLEARINT);

			bus_space_read_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base1 + REG_CMD_STAT);

			mtx_lock_spin (&sc->tx_buffer.buffer_mtx);

			sc->tx_buffer.flags &= ~DMA_BUSY;
			sc->tx_buffer.flags |= DMA_COMPLETE;

			mtx_unlock_spin (&sc->tx_buffer.buffer_mtx);
		}

		/* DMA0 is used for reading */
		status = bus_space_read_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base0 + REG_CMD_STAT);

		if ((status & DTA1XX_DMACSR_INTACT) != 0) {
			bus_space_write_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base0 + REG_CMD_STAT,
					   PCI905X_DMACSR_ENABLE |
					   PCI905X_DMACSR_CLEARINT);

			bus_space_read_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base0 + REG_CMD_STAT);

			mtx_lock_spin (&sc->rx_buffer.buffer_mtx);

			sc->rx_buffer.flags &= ~DMA_BUSY;
			sc->rx_buffer.flags |= DMA_COMPLETE;

			mtx_unlock_spin (&sc->rx_buffer.buffer_mtx);
		}
	}

	dta1xx_rx_set_rx_status_reg (sc->dta_base_bt, sc->dta_base_bh, sc->rx_base,
				     DTA1XX_RXSTAT_PERINT  |
				     DTA1XX_RXSTAT_OVFINT  |
				     DTA1XX_RXSTAT_SYNCINT |
				     DTA1XX_RXSTAT_THRINT  |
				     DTA1XX_RXSTAT_RATEOVFINT);

	dta1xx_tx_set_tx_status_reg (sc->dta_base_bt, sc->dta_base_bh, sc->tx_base,
				     DTA1XX_TXSTAT_PERINT  |
				     DTA1XX_TXSTAT_UFLINT  |
				     DTA1XX_TXSTAT_SYNCINT |
				     DTA1XX_TXSTAT_THRINT  |
				     DTA1XX_TXSTAT_SHORTINT);

	if (SEL_WAITING (&sc->selinfo))
		selwakeup (&sc->selinfo);

	taskqueue_enqueue (taskqueue_swi, &sc->task);
}
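
For completeness, a hypothetical attach-time sketch of how a handler like this is typically wired up on FreeBSD; the resource fields and task function name are assumptions, not taken from the driver.

/*
 * Hypothetical attach-time wiring (field and function names assumed):
 * register dektec_intr as an MPSAFE ithread handler and initialize the
 * task it enqueues on taskqueue_swi.
 */
TASK_INIT (&sc->task, 0, dektec_task_fn, sc);
error = bus_setup_intr (dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
			NULL, dektec_intr, sc, &sc->intr_cookie);
if (error != 0)
	device_printf (dev, "could not set up interrupt handler\n");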