Example #1
static int
smc_intr(void *context)
{
	struct smc_softc	*sc;
	
	sc = (struct smc_softc *)context;
	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
	return (FILTER_HANDLED);
}
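For context, a hedged sketch of the attach-time plumbing a filter like this typically relies on: create a fast taskqueue, initialize the task the filter enqueues, and install the filter with bus_setup_intr(). The names smc_attach_intr(), sc->smc_irq, sc->smc_ih, and sc->smc_ifp are illustrative assumptions, not taken from the example.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

/*
 * Hypothetical attach-time setup for the filter above: create a fast
 * taskqueue, initialize the task that smc_intr() enqueues, and register
 * smc_intr() as the interrupt filter with no ithread handler.
 */
static int
smc_attach_intr(device_t dev, struct smc_softc *sc)
{
	int error;

	TASK_INIT(&sc->smc_intr, 0, smc_task_intr, sc->smc_ifp);
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	if (sc->smc_tq == NULL)
		return (ENOMEM);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));

	error = bus_setup_intr(dev, sc->smc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0) {
		taskqueue_free(sc->smc_tq);
		sc->smc_tq = NULL;
	}
	return (error);
}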
Example #2
int
dmar_qi_intr(void *arg)
{
	struct dmar_unit *unit;

	unit = arg;
	KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
	taskqueue_enqueue_fast(unit->qi_taskqueue, &unit->qi_task);
	return (FILTER_HANDLED);
}
Example #3
static int
smc_intr(void *context)
{
	struct smc_softc	*sc;
	
	sc = (struct smc_softc *)context;
	/*
	 * Block interrupts in order to let smc_task_intr kick in.
	 */
	smc_write_1(sc, MSK, 0);
	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
	return (FILTER_HANDLED);
}
Example #4
static void
smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct smc_softc	*sc;

	sc = ifp->if_softc;

	SMC_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		SMC_UNLOCK(sc);
		return;
	}
	SMC_UNLOCK(sc);

	if (cmd == POLL_AND_CHECK_STATUS)
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
}
Example #5
static void
am335x_dmtimer_tc_poll_pps(struct timecounter *tc)
{
	struct am335x_dmtimer_softc *sc;

	sc = tc->tc_priv;

	/*
	 * Note that we don't have the TCAR interrupt enabled, but the hardware
	 * still provides the status bits in the "RAW" status register even when
	 * they're masked from generating an irq.  However, when clearing the
	 * TCAR status to re-arm the capture for the next second, we have to
	 * write to the IRQ status register, not the RAW register.  Quirky.
	 */
	if (am335x_dmtimer_tc_read_4(sc, DMT_IRQSTATUS_RAW) & DMT_IRQ_TCAR) {
		pps_capture(&sc->pps);
		sc->pps.capcount = am335x_dmtimer_tc_read_4(sc, DMT_TCAR1);
		am335x_dmtimer_tc_write_4(sc, DMT_IRQSTATUS, DMT_IRQ_TCAR);
		taskqueue_enqueue_fast(taskqueue_fast, &sc->pps_task);
	}
}
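The enqueued pps_task would then report the latched capture from taskqueue context. A hedged sketch follows; the handler name and the PPS_CAPTUREASSERT event are assumptions, not details taken from the example.

#include <sys/param.h>
#include <sys/timepps.h>

/*
 * Hypothetical taskqueue handler paired with the poll routine above:
 * feed the edge that am335x_dmtimer_tc_poll_pps() captured into the
 * PPS machinery, outside the timecounter polling path.
 */
static void
am335x_dmtimer_pps_task(void *arg, int pending __unused)
{
	struct am335x_dmtimer_softc *sc = arg;

	pps_event(&sc->pps, PPS_CAPTUREASSERT);
}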
Example #6
static void
smc_task_intr(void *context, int pending)
{
	struct smc_softc	*sc;
	struct ifnet		*ifp;
	u_int			status, packet, counter, tcr;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;

	SMC_LOCK(sc);
	
	smc_select_bank(sc, 2);

	/*
	 * Get the current mask, and then block all interrupts while we're
	 * working.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, 0);

	/*
	 * Find out what interrupts are flagged.
	 */
	status = smc_read_1(sc, IST) & sc->smc_mask;

	/*
	 * Transmit error
	 */
	if (status & TX_INT) {
		/*
		 * Kill off the packet if there is one and re-enable transmit.
		 */
		packet = smc_read_1(sc, FIFO_TX);
		if ((packet & FIFO_EMPTY) == 0) {
			smc_write_1(sc, PNR, packet);
			smc_write_2(sc, PTR, 0 | PTR_READ | PTR_AUTO_INCR);
			tcr = smc_read_2(sc, DATA0);
			if ((tcr & EPHSR_TX_SUC) == 0)
				device_printf(sc->smc_dev,
				    "bad packet\n");
			smc_mmu_wait(sc);
			smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT);

			smc_select_bank(sc, 0);
			tcr = smc_read_2(sc, TCR);
			tcr |= TCR_TXENA | TCR_PAD_EN;
			smc_write_2(sc, TCR, tcr);
			smc_select_bank(sc, 2);
			taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
		}

		/*
		 * Ack the interrupt.
		 */
		smc_write_1(sc, ACK, TX_INT);
	}

	/*
	 * Receive
	 */
	if (status & RCV_INT) {
		smc_write_1(sc, ACK, RCV_INT);
		sc->smc_mask &= ~RCV_INT;
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx);
	}

	/*
	 * Allocation
	 */
	if (status & ALLOC_INT) {
		smc_write_1(sc, ACK, ALLOC_INT);
		sc->smc_mask &= ~ALLOC_INT;
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Receive overrun
	 */
	if (status & RX_OVRN_INT) {
		smc_write_1(sc, ACK, RX_OVRN_INT);
		ifp->if_ierrors++;
	}

	/*
	 * Transmit empty
	 */
	if (status & TX_EMPTY_INT) {
		smc_write_1(sc, ACK, TX_EMPTY_INT);
		sc->smc_mask &= ~TX_EMPTY_INT;
		callout_stop(&sc->smc_watchdog);

		/*
		 * Update collision stats.
		 */
		smc_select_bank(sc, 0);
		counter = smc_read_2(sc, ECR);
		smc_select_bank(sc, 2);
		ifp->if_collisions +=
		    (counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT;
		ifp->if_collisions +=
		    (counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT;

		/*
		 * See if there are any packets to transmit.
		 */
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Restore the interrupt mask now that we're done, then drop the lock.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	SMC_UNLOCK(sc);
}
Example #7
static void
smc_start_locked(struct ifnet *ifp)
{
	struct smc_softc	*sc;
	struct mbuf		*m;
	u_int			len, npages, spin_count;

	sc = ifp->if_softc;
	SMC_ASSERT_LOCKED(sc);

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * Grab the next packet.  If it's too big, drop it.
	 */
	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	len = m_length(m, NULL);
	len += (len & 1);
	if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(ifp, "large packet discarded\n");
		++ifp->if_oerrors;
		m_freem(m);
		return; /* XXX readcheck? */
	}

	/*
	 * Flag that we're busy.
	 */
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	sc->smc_pending = m;

	/*
	 * Work out how many 256 byte "pages" we need.  We have to include the
	 * control data for the packet in this calculation.
	 */
	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	if (npages == 0)
		npages = 1;

	/*
	 * Request memory.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);

	/*
	 * Spin briefly to see if the allocation succeeds.
	 */
	spin_count = TX_ALLOC_WAIT_TIME;
	do {
		if (smc_read_1(sc, IST) & ALLOC_INT) {
			smc_write_1(sc, ACK, ALLOC_INT);
			break;
		}
	} while (--spin_count);

	/*
	 * If the allocation is taking too long, unmask the alloc interrupt
	 * and wait.
	 */
	if (spin_count == 0) {
		sc->smc_mask |= ALLOC_INT;
		if ((ifp->if_capenable & IFCAP_POLLING) == 0)
			smc_write_1(sc, MSK, sc->smc_mask);
		return;
	}

	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
}
Example #8
/**
 * Handle data on netgraph hooks.
 * Frame processing is deferred to a taskqueue because this might
 * be called with non-sleepable locks held and code paths inside
 * the virtual switch might sleep.
 */
static int ng_vboxnetflt_rcvdata(hook_p hook, item_p item)
{
    const node_p node = NG_HOOK_NODE(hook);
    PVBOXNETFLTINS pThis = NG_NODE_PRIVATE(node);
    struct ifnet *ifp = pThis->u.s.ifp;
    struct mbuf *m;
    struct m_tag *mtag;
    bool fActive;

    VBOXCURVNET_SET(ifp->if_vnet);
    fActive = vboxNetFltTryRetainBusyActive(pThis);

    NGI_GET_M(item, m);
    NG_FREE_ITEM(item);

    /* Locate tag to see if processing should be skipped for this frame */
    mtag = m_tag_locate(m, MTAG_VBOX, PACKET_TAG_VBOX, NULL);
    if (mtag != NULL)
    {
        m_tag_unlink(m, mtag);
        m_tag_free(mtag);
    }

    /*
     * Handle incoming hook. This is connected to the
     * input path of the interface, thus handling incoming frames.
     */
    if (pThis->u.s.input == hook)
    {
        if (mtag != NULL || !fActive)
        {
            ether_demux(ifp, m);
            if (fActive)
                vboxNetFltRelease(pThis, true /*fBusy*/);
            VBOXCURVNET_RESTORE();
            return (0);
        }
        mtx_lock_spin(&pThis->u.s.inq.ifq_mtx);
        _IF_ENQUEUE(&pThis->u.s.inq, m);
        mtx_unlock_spin(&pThis->u.s.inq.ifq_mtx);
        taskqueue_enqueue_fast(taskqueue_fast, &pThis->u.s.tskin);
    }
    /*
     * Handle mbufs on the outgoing hook, frames going to the interface
     */
    else if (pThis->u.s.output == hook)
    {
        if (mtag != NULL || !fActive)
        {
            int rc = ether_output_frame(ifp, m);
            if (fActive)
                vboxNetFltRelease(pThis, true /*fBusy*/);
            VBOXCURVNET_RESTORE();
            return rc;
        }
        mtx_lock_spin(&pThis->u.s.outq.ifq_mtx);
        _IF_ENQUEUE(&pThis->u.s.outq, m);
        mtx_unlock_spin(&pThis->u.s.outq.ifq_mtx);
        taskqueue_enqueue_fast(taskqueue_fast, &pThis->u.s.tskout);
    }
    else
    {
        m_freem(m);
    }

    if (fActive)
        vboxNetFltRelease(pThis, true /*fBusy*/);
    VBOXCURVNET_RESTORE();
    return (0);
}
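The tskin handler referenced above would then drain the queue from taskqueue context, where sleepable code is safe. A rough sketch of that drain loop follows; the handler name is an assumption, and the per-frame processing is only a placeholder (the real handler hands each frame to the virtual switch rather than freeing it).

/*
 * Hypothetical handler for pThis->u.s.tskin: pull the mbufs that
 * ng_vboxnetflt_rcvdata() queued and process them one by one.  Only the
 * queue-draining pattern is shown here.
 */
static void vboxNetFltFreeBSDinputTask(void *arg, int pending)
{
    PVBOXNETFLTINS pThis = arg;
    struct mbuf *m;

    for (;;)
    {
        mtx_lock_spin(&pThis->u.s.inq.ifq_mtx);
        _IF_DEQUEUE(&pThis->u.s.inq, m);
        mtx_unlock_spin(&pThis->u.s.inq.ifq_mtx);
        if (m == NULL)
            break;

        /* Placeholder for the sleepable per-frame processing. */
        m_freem(m);
    }
}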