Example #1
static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
{
	u32 status;

	status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
	if (status == 0)
		return IRQ_NONE;

	/*
	 * Any of the eight receive units signaled RX?
	 */
	if (status & 0x00ff) {
		struct net_device *dev = nds[0];
		struct ixpdev_priv *ip = netdev_priv(dev);

		ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
		if (likely(napi_schedule_prep(&ip->napi))) {
			__netif_rx_schedule(&ip->napi);
		} else {
			printk(KERN_CRIT "ixp2000: irq while polling!!\n");
		}
	}

	/*
	 * Any of the eight transmit units signaled TXdone?
	 */
	if (status & 0xff00) {
		ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
		ixpdev_tx_complete();
	}

	return IRQ_HANDLED;
}
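
The scheduling half above pairs with a poll callback that drains the ring and re-enables interrupts once it runs out of work. Below is a minimal sketch of that counterpart, written with the later napi_* names that replaced the netif_rx_schedule() family; the example_* helpers and the priv layout are assumptions for illustration, not ixpdev code.

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done;

	/* Drain up to `budget` received packets (hypothetical helper). */
	work_done = example_clean_rx(priv, budget);

	if (work_done < budget) {
		/* Ring drained: leave polling mode, then unmask the
		 * device's RX interrupt (hypothetical helper). */
		if (napi_complete_done(napi, work_done))
			example_enable_rx_irq(priv);
	}

	return work_done;
}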
Example #2
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
		       status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (netif_rx_schedule_prep(dev, &priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__netif_rx_schedule(dev, &priv->napi);
		}
	}

	/* (listing truncated here; minimal completion) */
	return IRQ_HANDLED;
}
Example #3
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
		enic_notify_check(enic);

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (netif_rx_schedule_prep(&enic->napi))
			__netif_rx_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
Example #4
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__netif_rx_schedule(vi->dev, napi);
			goto again;
		}
	}

	return received;
}
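
The enable_cb()/goto again sequence above closes a race: a packet can arrive between netif_rx_complete() and re-arming the virtqueue callback, and that packet would raise no further interrupt. The same idiom, sketched with the device specifics behind placeholder helpers (example_enable_notify() is assumed to return false when work is already pending, like enable_cb() above):

static int example_rearm_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int received = 0;

again:
	received += example_rx(priv, budget - received);	/* placeholder */

	if (received < budget) {
		napi_complete(napi);
		/* Re-arm notifications; if work slipped in meanwhile,
		 * take the NAPI slot back and keep polling. */
		if (!example_enable_notify(priv) && napi_schedule_prep(napi)) {
			example_disable_notify(priv);
			goto again;
		}
	}

	return received;
}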
Example #5
static void tsi108_rx_int(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);

	/* A race could cause dev to already be scheduled, so it's not an
	 * error if that happens (and interrupts shouldn't be re-masked,
	 * because that can cause harmful races, if poll has already
	 * unmasked them but not cleared LINK_STATE_SCHED).
	 *
	 * This can happen if this code races with tsi108_poll(), which masks
	 * the interrupts after tsi108_irq_one() read the mask, but before
	 * netif_rx_schedule is called.  It could also happen due to calls
	 * from tsi108_check_rxring().
	 */

	if (netif_rx_schedule_prep(dev)) {
		/* Mask, rather than ack, the receive interrupts.  The ack
		 * will happen in tsi108_poll().
		 */

		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK) |
				     TSI108_INT_RXQUEUE0 |
				     TSI108_INT_RXTHRESH |
				     TSI108_INT_RXOVERRUN |
				     TSI108_INT_RXERROR |
				     TSI108_INT_RXWAIT);
		mb();
		__netif_rx_schedule(dev);
	} else {
		if (!netif_running(dev)) {
			/* This can happen if an interrupt occurs while the
			 * interface is being brought down, as the START
			 * bit is cleared before the stop function is called.
			 *
			 * In this case, the interrupts must be masked, or
			 * they will continue indefinitely.
			 *
			 * There's a race here if the interface is brought down
			 * and then up in rapid succession, as the device could
			 * be made running after the above check and before
			 * the masking below.  This will only happen if the IRQ
			 * thread has a lower priority than the task bringing
			 * up the interface.  Fixing this race would likely
			 * require changes in generic code.
			 */

			TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
					     TSI108_ETH_READ_REG(TSI108_EC_INTMASK) |
					     TSI108_INT_RXQUEUE0 |
					     TSI108_INT_RXTHRESH |
					     TSI108_INT_RXOVERRUN |
					     TSI108_INT_RXERROR |
					     TSI108_INT_RXWAIT);
			mb();
		}
	}
}
Example #6
static irqreturn_t
mambonet_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	if (netif_rx_schedule_prep(dev)) {
		__netif_rx_schedule(dev);
	}
	return IRQ_HANDLED;
}
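
Example #6 shows the pattern at its smallest. On later kernels the same prep/schedule pair is spelled napi_schedule_prep()/__napi_schedule() against a struct napi_struct, and napi_schedule() combines the two; a sketch assuming a driver-private napi field:

static irqreturn_t example_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct example_priv *priv = netdev_priv(dev);

	/* napi_schedule() = napi_schedule_prep() + __napi_schedule() */
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}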
Example #7
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__netif_rx_schedule(vi->dev, &vi->napi);
	}
}
Example #8
static irqreturn_t
cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		        dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}
Example #9
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {

		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = netif_rx_schedule_prep(dev);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode    */
				/* to submit a spurious interrupt while in poll  */
				if (napi_ok)
					__netif_rx_schedule(dev);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
Example #10
/*
 * our irq handler, just ack it and schedule the right tasklet to
 * handle this
 */
static irqreturn_t enet_isr(int irq, void *dev_id)
{
	struct net_device *dev;
	struct tangox_enet_priv *priv;
	unsigned long val = 0;

	dev = (struct net_device *)dev_id;
	priv = netdev_priv(dev);

	/* tx interrupt */
	if ((val = enet_readl(ENET_TXC_SR(priv->enet_mac_base))) != 0) {
		enet_writel(ENET_TXC_SR(priv->enet_mac_base), 0xff);
		if (likely(val & TSR_TI)) {
			tasklet_schedule(&priv->tx_reclaim_tasklet);
		}
		if (unlikely(val & TSR_DE))
			printk(KERN_ERR "TX DMA error\n");
		if (unlikely(val & TSR_TO))
			printk(KERN_ERR "TX FIFO overflow\n");
	}
	/* rx interrupt */
	if ((val = enet_readl(ENET_RXC_SR(priv->enet_mac_base))) != 0) {
		enet_writel(ENET_RXC_SR(priv->enet_mac_base), 0xff);
		if (likely(val & RSR_RI)) {
			if (netif_rx_schedule_prep(dev)) {
				/* todo: disable rx interrupt */
				/* avoid reentering */
				enet_writel(ENET_RXC_SR(priv->enet_mac_base), 0xff);
				__netif_rx_schedule(dev);
			}
		}

		if (unlikely(val & RSR_DI))
			DBG("RX EOC\n");
		if (unlikely(val & RSR_DE))
			DBG("RX DMA error\n");
		if (unlikely(val & RSR_RO))
			DBG("RX FIFO overflow\n");
	}

	/* wake on lan */
	if ((val = enet_readb(ENET_WAKEUP(priv->enet_mac_base))) == 1) {
		/* clear sleeping mode */
		enet_writeb(ENET_SLEEP_MODE(priv->enet_mac_base), 0);
		/* clear wakeup mode */
		enet_writeb(ENET_WAKEUP(priv->enet_mac_base), 0);
	}

	return IRQ_HANDLED;
}
Example #11
File: ibmveth.c  Project: maraz/linux-2.6
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;
}
Example #12
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we called
	 * napi_enable(), we won't get another interrupt, so process any
	 * outstanding packets now.  virtnet_poll wants to re-enable the
	 * queue, so we disable it here.  We synchronize against
	 * interrupts via NAPI_STATE_SCHED. */
	if (netif_rx_schedule_prep(dev, &vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__netif_rx_schedule(dev, &vi->napi);
	}
	return 0;
}
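
The open-time kick above is a generic trick for any device whose interrupt may already have fired before napi_enable(); a sketch with placeholder example_* names:

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);

	/* An interrupt delivered before napi_enable() was lost to us:
	 * grab the NAPI slot ourselves and let the poll routine catch up. */
	if (napi_schedule_prep(&priv->napi)) {
		example_disable_irq(priv);	/* placeholder */
		__napi_schedule(&priv->napi);
	}

	return 0;
}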
Example #13
File: nicdriver.c  Project: coder03/ldd
static irqreturn_t nic_interrupt(int irq, void *dev_num, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_num;
	struct nic_private *np = netdev_priv(dev);
	u16 status;
	int txstatus;
	u16 handled = 0;

	printk(KERN_DEBUG "In interrupt...\n");

	status = readw(np->iobase + ISR);	/* 16-bit ISR; matches the writew() below */

	/* shared irq? */
	if (status == 0)
		goto out;

	handled = 1;
	
	/* Receive packets are processed by poll routine.
	   If not running start it now. */
	if (status & RxAckBits) {
		if (netif_rx_schedule_prep(dev))
			__netif_rx_schedule(dev);
	}

	/* handle transmission interrupts */
	if (status & (TxOK | TxErr)) {
		/* get txstatus from TSD */
		txstatus = readl(np->iobase + TxStatus0);
		if (!(txstatus & TxStatOK))
			goto out;	/* It still hasn't been Txed */

		np->txcount++;
		if (status & TxErr)
			writew(TxErr, np->iobase + ISR);
	}

out:
	return IRQ_RETVAL(handled);
}
Example #14
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	unsigned long flags;
	u32 istat, imask;
	int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* ??? What the f**k is the purpose of the interrupt mask
	 * ??? register if we have to mask it out by hand anyways?
	 */
	istat &= imask;
	if (istat) {
		handled = 1;
		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
	return IRQ_RETVAL(handled);
}
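
The NOTE in example #14 relies on a standard MMIO idiom: the write that acks the interrupt may be posted (buffered) on the bus, and reading any register of the same device forces it out. A generic sketch with a placeholder register offset:

#define EXAMPLE_ISTAT	0x20	/* placeholder register offset */

static void example_ack_irqs(void __iomem *regs, u32 bits)
{
	writel(bits, regs + EXAMPLE_ISTAT);	/* may be posted */
	readl(regs + EXAMPLE_ISTAT);		/* readback flushes the write */
}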
Example #15
File: korina.c  Project: anchowee/linino
/* Ethernet Rx DMA interrupt */
static irqreturn_t
rc32434_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct rc32434_local *lp;
	volatile u32 dmas, dmasm;
	irqreturn_t retval;

	ASSERT(dev != NULL);

	lp = (struct rc32434_local *)dev->priv;

	spin_lock(&lp->lock);
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMAS_d_m | DMAS_h_m | DMAS_e_m)) {
		/* Mask D H E bit in Rx DMA */
		dmasm = __raw_readl(&lp->rx_dma_regs->dmasm);
		__raw_writel(dmasm | (DMASM_d_m | DMASM_h_m | DMASM_e_m),
			     &lp->rx_dma_regs->dmasm);
#ifdef CONFIG_IDT_USE_NAPI
		if (netif_rx_schedule_prep(dev))
			__netif_rx_schedule(dev);
#else
		tasklet_hi_schedule(lp->rx_tasklet);
#endif

		if (dmas & DMAS_e_m)
			ERR(": DMA error\n");

		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}

	spin_unlock(&lp->lock);
	return retval;
}