Example #1
File: sn_netdev.c Project: NetSys/bess
/* This function is called in IRQ context on a remote core.
 * (on the local core, it is in user context)
 * Interrupts are disabled in both cases, anyway.
 *
 * For host mode, this function is invoked by sndrv_ioctl_kick_rx().
 * For guest mode, it should be called in the MSIX handler. */
void sn_trigger_softirq(void *info)
{
	struct sn_device *dev = info;
	int cpu = raw_smp_processor_id();

	if (unlikely(dev->cpu_to_rxqs[cpu][0] == -1)) {
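		/* No RX queue is mapped to this core; fall back to queue 0. */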
		struct sn_queue *rx_queue = dev->rx_queues[0];

		rx_queue->rx.stats.interrupts++;
		napi_schedule(&rx_queue->rx.napi);
	} else {
		/* One core can be mapped to multiple RX queues. Wake them all. */
		int i = 0;
		int rxq;

		while ((rxq = dev->cpu_to_rxqs[cpu][i]) != -1) {
			struct sn_queue *rx_queue = dev->rx_queues[rxq];

			rx_queue->rx.stats.interrupts++;
			napi_schedule(&rx_queue->rx.napi);

			i++;
		}
	}
}
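napi_schedule() only queues the poll; the packets are actually drained later in the driver's NAPI poll callback, which leaves polling mode and re-enables the interrupt once it runs out of work. A minimal sketch of that consumer side, assuming the rx.napi layout from the snippet above; sn_process_rx() and sn_enable_interrupt() are hypothetical stand-ins for the driver's real RX processing and IRQ unmask helpers:

static int sn_poll(struct napi_struct *napi, int budget)
{
	/* Recover the owning queue from the embedded napi_struct. */
	struct sn_queue *rx_queue = container_of(napi, struct sn_queue, rx.napi);
	int work_done;

	/* Process at most 'budget' received packets and report how many
	 * were handled. */
	work_done = sn_process_rx(rx_queue, budget);

	if (work_done < budget) {
		/* Ring drained: exit polling mode and unmask the IRQ so the
		 * next packet triggers sn_trigger_softirq() again. */
		napi_complete_done(napi, work_done);
		sn_enable_interrupt(rx_queue);
	}

	return work_done;
}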
Example #2
static inline void _stmmac_schedule(struct stmmac_priv *priv)
{
	if (likely(stmmac_has_work(priv))) {
		stmmac_disable_irq(priv);
		napi_schedule(&priv->napi);
	}
}
Example #3
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct greth_private *greth;
	u32 status;
	irqreturn_t retval = IRQ_NONE;

	greth = netdev_priv(dev);

	spin_lock(&greth->devlock);

	/* Get the interrupt events that caused us to be here. */
	status = GRETH_REGLOAD(greth->regs->status);

	/* Handle rx and tx interrupts through poll */
	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {

		/* Clear interrupt status */
		GRETH_REGORIN(greth->regs->status,
			      status & (GRETH_INT_RX | GRETH_INT_TX));

		retval = IRQ_HANDLED;

		/* Disable interrupts and schedule poll() */
		greth_disable_irqs(greth);
		napi_schedule(&greth->napi);
	}

	mmiowb();
	spin_unlock(&greth->devlock);

	return retval;
}
Example #4
File: greth.c Project: 513855417/linux
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct greth_private *greth;
	u32 status, ctrl;
	irqreturn_t retval = IRQ_NONE;

	greth = netdev_priv(dev);

	spin_lock(&greth->devlock);

	/* Get the interrupt events that caused us to be here. */
	status = GRETH_REGLOAD(greth->regs->status);

	/* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
	 * set regardless of whether IRQ is enabled or not. Especially
	 * important when shared IRQ.
	 */
	ctrl = GRETH_REGLOAD(greth->regs->control);

	/* Handle rx and tx interrupts through poll */
	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
		retval = IRQ_HANDLED;

		/* Disable interrupts and schedule poll() */
		greth_disable_irqs(greth);
		napi_schedule(&greth->napi);
	}

	mmiowb();
	spin_unlock(&greth->devlock);

	return retval;
}
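Comparing Example #3 with Example #4 shows why the control register read was added: the GRETH status bits can be set even while the corresponding interrupt source is masked, so on a shared IRQ line the handler must not claim interrupts it did not cause. The same guard in generic form; every register, bit, and struct name here is hypothetical:

static irqreturn_t my_shared_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);
	u32 status, ctrl;

	status = readl(priv->regs + MY_REG_STATUS);
	ctrl   = readl(priv->regs + MY_REG_CTRL);

	/* Claim the IRQ only if an event is flagged AND its source is
	 * currently enabled; otherwise it belongs to another device
	 * sharing the line (or is stale). */
	if (!((status & MY_INT_RX) && (ctrl & MY_RX_IRQ_EN)))
		return IRQ_NONE;

	/* Mask the source and defer the actual RX work to NAPI. */
	writel(ctrl & ~MY_RX_IRQ_EN, priv->regs + MY_REG_CTRL);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}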
Example #5
/* Ethernet Rx DMA interrupt */
static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
		dmasm = readl(&lp->rx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_DONE |
				DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmasm);

		napi_schedule(&lp->napi);

		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}
Example #6
static void port_net_md_state_notice(struct ccci_port *port, MD_STATE state)
{
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    struct net_device *dev = nent->ndev;

    switch(state) {
    case RX_IRQ:
        mod_timer(&nent->polling_timer, jiffies+HZ);
        napi_schedule(&nent->napi);
        wake_lock_timeout(&port->rx_wakelock, HZ);
        break;
    case TX_IRQ:
        if(netif_running(dev) && netif_queue_stopped(dev) && atomic_read(&port->usage_cnt)>0)
            netif_wake_queue(dev);
        port->flags &= ~PORT_F_RX_FULLED;
        break;
    case TX_FULL:
        netif_stop_queue(dev);
        port->flags |= PORT_F_RX_FULLED; /* noted for convenience in traffic logging */
        break;
    case READY:
        netif_carrier_on(dev);
        break;
    case EXCEPTION:
    case RESET:
        netif_carrier_off(dev);
#ifndef FEATURE_SEQ_CHECK_EN
        nent->tx_seq_num = 0;
        nent->rx_seq_num = 0;
#endif
        break;
    default:
        break;
    };
}
Example #7
/**
 * vb_net_capture_vbb - Send a vbb to the network stack.
 * @vb: Interface card that received the command.
 * @vbb: Voicebus buffer to pass up.
 * @tx: 1 if this is a vbb that the driver is sending to the card.
 * @des0: DMA descriptor word associated with this vbb.
 * @tag: Tag associated with this vbb.
 */
void vb_net_capture_vbb(struct voicebus *vb, const void *vbb, const int tx,
			const u32 des0, const u16 tag)
{
	struct sk_buff *skb;
	struct net_device *netdev = vb->netdev;
	const int MAX_CAPTURED_PACKETS = 5000;

	if (!netdev)
		return;

	/* If the interface isn't up, we don't need to capture the packet. */
	if (!(netdev->flags & IFF_UP))
		return;

	if (skb_queue_len(&vb->captured_packets) > MAX_CAPTURED_PACKETS) {
		WARN_ON_ONCE(1);
		return;
	}

	skb = vbb_to_skb(netdev, vbb, tx, des0, tag);
	if (!skb)
		return;

	skb_queue_tail(&vb->captured_packets, skb);
#	if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
	netif_rx_schedule(netdev);
#	elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
	netif_rx_schedule(netdev, &vb->napi);
#	elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	netif_rx_schedule(&vb->napi);
#	else
	napi_schedule(&vb->napi);
#	endif
	return;
}
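The preprocessor ladder at the end of Example #7 encodes the history of this API: netif_rx_schedule() took a bare net_device before 2.6.24, gained a napi_struct argument when NAPI was reworked in 2.6.24, dropped the net_device argument in 2.6.29, and was renamed napi_schedule() in 2.6.30. The ladder lets the same driver source build against all four generations of the interface.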
Example #8
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}
Example #9
static void sprdwl_handler(int event, void *data)
{
	struct sprdwl_priv *priv = (struct sprdwl_priv *)data;

	switch (event) {
	case SBLOCK_NOTIFY_GET:
#ifdef CONFIG_SPRDWL_FW_ZEROCOPY
		sprdwl_tx_flow_control_handler(priv);
#endif
		priv->tx_free++;
		dev_dbg(&priv->ndev->dev, "SBLOCK_NOTIFY_GET is received\n");
		break;
	case SBLOCK_NOTIFY_RECV:
		dev_dbg(&priv->ndev->dev, "SBLOCK_NOTIFY_RECV is received\n");
		napi_schedule(&priv->napi);
		break;
	case SBLOCK_NOTIFY_STATUS:
		dev_dbg(&priv->ndev->dev, "SBLOCK_NOTIFY_STATUS is received\n");
		break;
	case SBLOCK_NOTIFY_OPEN:
		dev_dbg(&priv->ndev->dev, "SBLOCK_NOTIFY_OPEN is received\n");
		break;
	case SBLOCK_NOTIFY_CLOSE:
		dev_dbg(&priv->ndev->dev, "SBLOCK_NOTIFY_CLOSE is received\n");
		break;
	default:
		dev_err(&priv->ndev->dev, "invalid data event (%d)\n", event);
	}
}
Example #10
File: en_txrx.c Project: doniexun/linux
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	bool busy = false;
	int work_done;
	int i;

	clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);

	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
	busy |= work_done == budget;
	busy |= mlx5e_post_rx_wqes(&c->rq);

	if (busy)
		return budget;

	napi_complete_done(napi, work_done);

	/* avoid losing completion event during/after polling cqs */
	if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
		napi_schedule(napi);
		return work_done;
	}

	for (i = 0; i < c->num_tc; i++)
		mlx5e_cq_arm(&c->sq[i].cq);
	mlx5e_cq_arm(&c->rq.cq);

	return work_done;
}
Example #11
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}
Example #12
static int ccmni_open(struct net_device *dev)
{
	ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
	ccmni_ctl_block_t *ccmni_ctl = ccmni_ctl_blk[ccmni->md_id];
	ccmni_instance_t *ccmni_tmp = NULL;

	if (unlikely(ccmni_ctl == NULL)) {
		CCMNI_ERR_MSG(ccmni->md_id, "%s_Open: MD%d ctlb is NULL\n", dev->name, ccmni->md_id);
		return -1;
	}

	if (ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
		netif_tx_start_all_queues(dev);
	else
		netif_start_queue(dev);

	if (unlikely(ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_NAPI)) {
		napi_enable(&ccmni->napi);
		napi_schedule(&ccmni->napi);
	}

	atomic_inc(&ccmni->usage);
	ccmni_tmp = ccmni_ctl->ccmni_inst[ccmni->index];
	if (ccmni != ccmni_tmp)
		atomic_inc(&ccmni_tmp->usage);

	CCMNI_INF_MSG(ccmni->md_id, "%s_Open: cnt=(%d,%d), md_ab=0x%X\n",
		dev->name, atomic_read(&ccmni->usage),
		atomic_read(&ccmni_tmp->usage), ccmni_ctl->ccci_ops->md_ability);
	return 0;
}
Example #13
File: sn_netdev.c Project: apanda/bess
/* Low latency socket callback. Called with bh disabled */
static int sn_poll_ll(struct napi_struct *napi)
{
	struct sn_queue *rx_queue = container_of(napi, struct sn_queue, napi);

	int idle_cnt = 0;
	int ret;

	if (!spin_trylock(&rx_queue->lock))
		return LL_FLUSH_BUSY;

	rx_queue->rx_stats.ll_polls++;

	sn_disable_interrupt(rx_queue);

	/* Meh... Since there is no notification for busy loop completion,
	 * there is no clean way to avoid race condition w.r.t. interrupts.
	 * Instead, do a roughly 5-us polling in this function. */

	do {
		ret = sn_poll_action(rx_queue, SN_BUSY_POLL_BUDGET);
		if (ret == 0)
			cpu_relax();
	} while (ret == 0 && idle_cnt++ < 1000);
	
	sn_enable_interrupt(rx_queue);

	if (rx_queue->dev->ops->pending_rx(rx_queue)) {
		sn_disable_interrupt(rx_queue);
		napi_schedule(napi);
	}

	spin_unlock(&rx_queue->lock);

	return ret;
}
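Example #13 is not an ordinary NAPI poll: it implements the old per-driver low-latency-socket (busy-poll) hook. It returns LL_FLUSH_BUSY when the queue lock is contended, and otherwise spins for roughly 5 µs because, as its comment notes, there is no notification when the busy loop completes; the final pending_rx() check followed by napi_schedule() narrows the race window against a real interrupt. This per-driver hook (ndo_busy_poll and LL_FLUSH_BUSY) was later removed from the kernel in favor of busy polling managed by the networking core.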
Example #14
void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	napi_schedule(cq->napi);
	cq->event_ctr++;
	cq->channel->stats->events++;
}
Example #15
void
roq_eth_rx_ib_compl(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct roq_eth_priv *vdev = netdev_priv(dev);

	napi_schedule(&vdev->napi);
}
Example #16
File: en_txrx.c Project: doniexun/linux
void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
	barrier();
	napi_schedule(cq->napi);
}
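Examples #10 and #16 are the two halves of one lost-event avoidance scheme. The completion handler sets MLX5E_CHANNEL_NAPI_SCHED before calling napi_schedule(), with barrier() ensuring the flag is visible first; the poll function clears the flag on entry and re-checks it after napi_complete_done(). If the flag is set again at that point, a completion arrived in the window between finishing the poll and re-arming the CQs, so the poll reschedules itself instead of arming and possibly missing the event.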
Example #17
static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
{
	struct cfv_info *cfv = vdev->priv;

	++cfv->stats.rx_kicks;
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_schedule(&cfv->napi);
}
Example #18
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}
Example #19
File: interface.c Project: mhei/linux
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
    struct xenvif_queue *queue = dev_id;

    if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
        napi_schedule(&queue->napi);

    return IRQ_HANDLED;
}
Example #20
static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}
Example #21
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		napi_schedule(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}
Example #22
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
}
Example #23
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}
Example #24
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
	struct cpsw_priv *priv = dev_id;

	if (likely(netif_running(priv->ndev))) {
		cpsw_intr_disable(priv);
		cpsw_disable_irq(priv);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
Example #25
File: core.c Project: Anjali05/linux
irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
{
	struct mt7603_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_MAC_IRQ3) {
		u32 hwintr = mt76_rr(dev, MT_HW_INT_STATUS(3));

		mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
		if (hwintr & MT_HW_INT3_PRE_TBTT0)
			tasklet_schedule(&dev->pre_tbtt_tasklet);

		if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
	}

	if (intr & MT_INT_TX_DONE_ALL) {
		mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_RX_DONE(0)) {
		mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	return IRQ_HANDLED;
}
Example #26
static void port_net_md_state_notice(struct ccci_port *port, MD_STATE state)
{
	switch(state) {
	case RX_IRQ:
#ifdef CCCI_USE_NAPI
		napi_schedule(&((struct netdev_entity *)port->private_data)->napi);
		wake_lock_timeout(&port->rx_wakelock, HZ);
#endif
		break;
	default:
		break;
	};
}
Example #27
/******************************************************************************
 * interrupt handler
 *****************************************************************************/
static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftmac100 *priv = netdev_priv(netdev);

	if (likely(netif_running(netdev))) {
		/* Disable interrupts for polling */
		ftmac100_disable_all_int(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
Example #28
File: ahb.c Project: Anjali05/linux
static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;

	if (!ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
Example #29
/* Enable the CAIF interface and allocate the memory-pool */
static int cfv_netdev_open(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);

	if (cfv_create_genpool(cfv))
		return -ENOMEM;

	netif_carrier_on(netdev);
	napi_enable(&cfv->napi);

	/* Schedule NAPI to read any pending packets */
	napi_schedule(&cfv->napi);
	return 0;
}
Example #30
/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq:		irq number.
 * @dev_instance:	device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * ARC EMAC has only 1 interrupt line, and depending on bits raised in
 * STATUS register we may tell what is a reason for interrupt to fire.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
			priv->rx_missed_errors += 0x100;
			napi_schedule(&priv->napi);
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}
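Example #30 uses the two-step napi_schedule_prep()/__napi_schedule() form instead of plain napi_schedule(). The point of the split is that the device interrupt sources are masked only when this CPU actually wins the right to schedule the poll. A condensed sketch of the idiom; my_mask_rx_tx_irqs() and the surrounding names are hypothetical:

static irqreturn_t my_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct my_emac_priv *priv = netdev_priv(ndev);

	/* napi_schedule_prep() atomically tests whether NAPI is already
	 * scheduled. Only the winner masks the device interrupts, so a
	 * concurrent or already-running poll is never raced with a
	 * redundant mask/schedule pair. */
	if (likely(napi_schedule_prep(&priv->napi))) {
		my_mask_rx_tx_irqs(priv);	/* hypothetical device-specific helper */
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}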