Example #1
/**
 * vb_net_capture_vbb - Send a vbb to the network stack.
 * @vb: Interface card that received the command.
 * @vbb: Voicebus buffer to pass up.
 * @tx: 1 if this is a vbb that the driver is sending to the card.
 *
 */
void vb_net_capture_vbb(struct voicebus *vb, const void *vbb, const int tx,
			const u32 des0, const u16 tag)
{
	struct sk_buff *skb;
	struct net_device *netdev = vb->netdev;
	const int MAX_CAPTURED_PACKETS = 5000;

	if (!netdev)
		return;

	/* If the interface isn't up, we don't need to capture the packet. */
	if (!(netdev->flags & IFF_UP))
		return;

	if (skb_queue_len(&vb->captured_packets) > MAX_CAPTURED_PACKETS) {
		WARN_ON_ONCE(1);
		return;
	}

	skb = vbb_to_skb(netdev, vbb, tx, des0, tag);
	if (!skb)
		return;

	skb_queue_tail(&vb->captured_packets, skb);
#	if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
	netif_rx_schedule(netdev);
#	elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
	netif_rx_schedule(netdev, &vb->napi);
#	elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	netif_rx_schedule(&vb->napi);
#	else
	napi_schedule(&vb->napi);
#	endif
	return;
}
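Example #1 is the only caller above that copes with every historical signature of netif_rx_schedule() and the later napi_schedule() rename in a single place. If several call sites need the same ladder, it can be folded into a small compatibility macro. The sketch below is only an illustration derived from the version cutoffs shown in Example #1; vb_schedule_napi() is a made-up name, not part of any driver listed here.

/* Hypothetical compatibility helper (sketch) mirroring the version cutoffs
 * used in Example #1; expands to whichever scheduling call the running
 * kernel provides. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
#define vb_schedule_napi(netdev, napi)	netif_rx_schedule(netdev)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
#define vb_schedule_napi(netdev, napi)	netif_rx_schedule((netdev), (napi))
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
#define vb_schedule_napi(netdev, napi)	netif_rx_schedule(napi)
#else
#define vb_schedule_napi(netdev, napi)	napi_schedule(napi)
#endif

With such a wrapper, the tail of vb_net_capture_vbb() would reduce to a single vb_schedule_napi(netdev, &vb->napi) call.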
Example #2
/* Ethernet Rx DMA interrupt */
static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
		dmasm = readl(&lp->rx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_DONE |
				DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmasm);

		netif_rx_schedule(&lp->napi);

		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}
Example #3
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	netif_rx_schedule(dev, &ag->napi);
}
Example #4
/* Ethernet Rx DMA interrupt */
static irqreturn_t
rc32434_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct rc32434_local *lp;
	volatile u32 dmas, dmasm;
	irqreturn_t retval;

	ASSERT(dev != NULL);

	lp = (struct rc32434_local *)dev->priv;

	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMAS_d_m | DMAS_h_m | DMAS_e_m)) {
		/* Mask D H E bit in Rx DMA */
		dmasm = __raw_readl(&lp->rx_dma_regs->dmasm);
		__raw_writel(dmasm | (DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm);
		netif_rx_schedule(dev, &lp->napi);

		if (dmas & DMAS_e_m)
			ERR(": DMA error\n");
		
		retval = IRQ_HANDLED;
	}
	else
		retval = IRQ_NONE;
	
	return retval;
}
Example #5
static void xen_network_done_notify(void)
{
	static struct net_device *eth0_dev = NULL;
	if (unlikely(eth0_dev == NULL))
		eth0_dev = __dev_get_by_name("eth0");
	netif_rx_schedule(eth0_dev);
}
Example #6
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		netif_rx_schedule(dev, &ag->napi);
	}

	return IRQ_HANDLED;
}
Example #7
static bool skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	netif_rx_schedule(vi->dev, &vi->napi);
	/* Suppress further interrupts. */
	return false;
}
Example #8
/**
 * gelic_net_interrupt - event handler for gelic_net
 */
static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
{
	unsigned long flags;
	struct net_device *netdev = ptr;
	struct gelic_net_card *card = netdev_priv(netdev);
	u64 status;

	status = card->irq_status;

	if (!status)
		return IRQ_NONE;

	if (card->rx_dma_restart_required) {
		card->rx_dma_restart_required = 0;
		gelic_net_enable_rxdmac(card);
	}

	if (status & GELIC_NET_RXINT) {
		gelic_net_rx_irq_off(card);
		netif_rx_schedule(netdev);
	}

	if (status & GELIC_NET_TXINT) {
		spin_lock_irqsave(&card->tx_dma_lock, flags);
		card->tx_dma_progress = 0;
		gelic_net_release_tx_chain(card, 0);
		/* kick outstanding tx descriptor if any */
		gelic_net_kick_txdma(card, card->tx_chain.tail);
		spin_unlock_irqrestore(&card->tx_dma_lock, flags);
	}
	return IRQ_HANDLED;
}
Example #9
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	netif_rx_schedule(&enic->napi);

	return IRQ_HANDLED;
}
Example #10
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		netif_rx_schedule(cq->poll_dev);
	else
		mlx4_en_arm_cq(priv, cq);
}
Example #11
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_rx_schedule(&port->napi);
}
Example #12
/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	netif_rx_schedule(dev, &priv->napi);

	return IRQ_HANDLED;
}
Example #13
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}
Example #14
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	status &= ag71xx_rr(ag, AG71XX_REG_INT_ENABLE);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

#if 0
	if (unlikely(status & AG71XX_INT_TX_UR)) {
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_UR);
		DBG("%s: TX underrun\n", dev->name);
	}
#endif

#ifndef AG71XX_NAPI_TX
	if (likely(status & AG71XX_INT_TX_PS))
		ag71xx_tx_packets(ag);
#endif

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		netif_rx_schedule(dev, &ag->napi);
	}

	return IRQ_HANDLED;
}
Example #15
static void vnic_start_interrupts(netfront_accel_vnic *vnic)
{
	unsigned long flags;
	
	/* Prime our interrupt */
	spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
	if (!netfront_accel_vi_enable_interrupts(vnic)) {
		/* Cripes, that was quick, better pass it up */
		netfront_accel_disable_net_interrupts(vnic);
		vnic->irq_enabled = 0;
		NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++);
		netif_rx_schedule(vnic->net_dev);
	} else {
		/*
		 * Nothing yet, make sure we get interrupts through
		 * back end 
		 */
		vnic->irq_enabled = 1;
		netfront_accel_enable_net_interrupts(vnic);
	}
	spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
}
Example #16
/* Process an interrupt received from the NIC via backend */
irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context, 
						     struct pt_regs *unused)
{
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
	struct net_device *net_dev = vnic->net_dev;
	unsigned long flags;

	VPRINTK("net irq %d from device %s\n", irq, vnic->dev->nodename);
	
	NETFRONT_ACCEL_STATS_OP(vnic->stats.irq_count++);

	BUG_ON(net_dev==NULL);

	spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
	if (vnic->irq_enabled) {
		netfront_accel_disable_net_interrupts(vnic);
		vnic->irq_enabled = 0;
		spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);

#if NETFRONT_ACCEL_STATS
		vnic->stats.poll_schedule_count++;
		if (vnic->stats.event_count_since_irq >
		    vnic->stats.events_per_irq_max)
			vnic->stats.events_per_irq_max = 
				vnic->stats.event_count_since_irq;
		vnic->stats.event_count_since_irq = 0;
#endif
		netif_rx_schedule(net_dev);
	}
	else {
		spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
		NETFRONT_ACCEL_STATS_OP(vnic->stats.useless_irq_count++);
		DPRINTK("%s: irq when disabled\n", __FUNCTION__);
	}
	
	return IRQ_HANDLED;
}
Example #17
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	netif_rx_schedule(&enic->napi);

	return IRQ_HANDLED;
}
Example #18
static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	netif_rx_schedule(dev, &np->napi);
}
Example #19
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev);

			if (!(csr5 & ~(AbnormalIntr | NormalIntr | RxPollInt | TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else 
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */
		
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was an major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					dev->name, tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
Example #20
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	netif_rx_schedule(dev);
}
Example #21
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);

	netif_rx_schedule(dev, &tp->napi);
}
Example #22
static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	netif_rx_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}
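Nearly every handler above follows the same contract: mask the device's RX interrupt source, hand its NAPI context to the stack via netif_rx_schedule()/napi_schedule(), and rely on the driver's poll callback to re-enable the interrupt once the ring has been drained. For completeness, here is a minimal sketch of that poll-side counterpart for the modern napi_schedule() era only; struct my_priv, my_process_rx() and my_rx_irq_enable() are hypothetical placeholders rather than functions taken from any driver above.

/* Sketch of the NAPI poll callback that completes the schedule/poll
 * contract.  All my_* names are hypothetical; only the NAPI calls and
 * container_of() are real kernel API. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done;

	/* Drain at most 'budget' packets from the RX ring. */
	work_done = my_process_rx(priv, budget);

	if (work_done < budget) {
		/* Ring is empty: leave polling mode and unmask the RX
		 * interrupt so the next packet schedules NAPI again. */
		napi_complete(napi);
		my_rx_irq_enable(priv);
	}

	return work_done;
}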